

Java FileContext.getFileContext Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileContext.getFileContext. If you are wondering how FileContext.getFileContext is used in practice, or what it is good for, the curated code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.fs.FileContext.


Below are 15 code examples of the FileContext.getFileContext method, sorted by popularity by default.
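Before the examples, a quick orientation: FileContext.getFileContext is a static factory with several overloads, selecting the target file system from a Configuration, from an explicit URI, or from an AbstractFileSystem instance. The minimal sketch below is not drawn from any of the projects cited here; the HDFS address and directory path are placeholder values chosen for illustration.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Bind to the default file system named by fs.defaultFS
    FileContext fcDefault = FileContext.getFileContext(conf);

    // Bind to an explicit file system URI
    // (hdfs://namenode:8020 is a placeholder address)
    FileContext fcHdfs =
        FileContext.getFileContext(URI.create("hdfs://namenode:8020"), conf);

    // Convenience factory for the local file system
    FileContext fcLocal = FileContext.getLocalFSFileContext();

    // A basic operation through the local FileContext (illustrative path)
    fcLocal.mkdir(new Path("/tmp/filecontext-demo"),
        FileContext.DEFAULT_PERM, true);
  }
}

The URI overload, used by several of the examples below, binds the resulting FileContext to that file system regardless of what fs.defaultFS says.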

Example 1: setUp

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  fcTarget = FileContext.getLocalFSFileContext();
  chrootedTo = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
  // In case previous test was killed before cleanup
  fcTarget.delete(chrootedTo, true);
  
  fcTarget.mkdir(chrootedTo, FileContext.DEFAULT_PERM, true);

  Configuration conf = new Configuration();

  // ChRoot to the root of the testDirectory
  fc = FileContext.getFileContext(
      new ChRootedFs(fcTarget.getDefaultFileSystem(), chrootedTo), conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 17, Source: TestChRootedFs.java

Example 2: close

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Override
public void close() throws IOException {
  // Output the result to a file named "Results" in the output dir
  FileContext fc;
  try {
    fc = FileContext.getFileContext(jobConf);
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: " + 
        ioe.getLocalizedMessage());
    return;
  }
  FSDataOutputStream o = fc.create(FileOutputFormat.getTaskOutputPath(jobConf, "Results"),
      EnumSet.of(CreateFlag.CREATE));
     
  PrintStream out = new PrintStream(o);
  printResults(out);
  out.close();
  o.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: LoadGeneratorMR.java

Example 3: setup

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestEncryptionZones.java

Example 4: setUp

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();
  
  // Make user and data dirs - we create links to them in the mount table
  fcTarget.mkdir(new Path(targetTestRoot,"user"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"data"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"dir2"),
      FileContext.DEFAULT_PERM, true);
  fcTarget.mkdir(new Path(targetTestRoot,"dir3"),
      FileContext.DEFAULT_PERM, true);
  FileContextTestHelper.createFile(fcTarget, new Path(targetTestRoot,"aFile"));
  
  
  // Now we use the mount fs to set links to user and dir
  // in the test root
  
  // Set up the defaultMT in the config with our mount point links
  conf = new Configuration();
  ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
  ConfigUtil.addLink(conf, "/user",
      new Path(targetTestRoot,"user").toUri());
  ConfigUtil.addLink(conf, "/user2",
      new Path(targetTestRoot,"user").toUri());
  ConfigUtil.addLink(conf, "/data",
      new Path(targetTestRoot,"data").toUri());
  ConfigUtil.addLink(conf, "/internalDir/linkToDir2",
      new Path(targetTestRoot,"dir2").toUri());
  ConfigUtil.addLink(conf, "/internalDir/internalDir2/linkToDir3",
      new Path(targetTestRoot,"dir3").toUri());
  ConfigUtil.addLink(conf, "/danglingLink",
      new Path(targetTestRoot,"missingTarget").toUri());
  ConfigUtil.addLink(conf, "/linkToAFile",
      new Path(targetTestRoot,"aFile").toUri());
  
  fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  // Also try viewfs://default/ - note the authority is the name of the mount table
}
 
Developer ID: naver, Project: hadoop, Lines: 41, Source: ViewFsBaseTest.java

Example 5: testURIEmptyPath

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Test
public void testURIEmptyPath() throws Exception {
  Configuration conf = new Configuration();
  ConfigUtil.addLink(conf, "/user", new URI("file://foo"));

  FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 8, Source: TestViewFsURIs.java

Example 6: setUp

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Override
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  fcTarget = FileContext.getLocalFSFileContext();
  super.setUp(); // this sets up conf (and fcView which we replace)
  
  // Now create a viewfs using a mount table called "default"
  // hence viewfs://default/
  schemeWithAuthority = 
    new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null);
  fcView = FileContext.getFileContext(schemeWithAuthority, conf);  
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 14, Source: TestViewFsWithAuthorityLocalFs.java

Example 7: AvroFileReader

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
public AvroFileReader(FileSystem fs, Path filePath, Map<String, Object> config) throws IOException {
    super(fs, filePath, new GenericRecordToStruct(), config);

    AvroFSInput input = new AvroFSInput(FileContext.getFileContext(filePath.toUri()), filePath);
    this.reader = new DataFileReader<>(input, new SpecificDatumReader<>(this.schema));
    this.offset = new AvroOffset(0);
}
 
Developer ID: mmolimar, Project: kafka-connect-fs, Lines: 8, Source: AvroFileReader.java

Example 8: LogReader

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
public LogReader(Configuration conf, Path remoteAppLogFile)
    throws IOException {
  FileContext fileContext =
      FileContext.getFileContext(remoteAppLogFile.toUri(), conf);
  this.fsDataIStream = fileContext.open(remoteAppLogFile);
  reader =
      new TFile.Reader(this.fsDataIStream, fileContext.getFileStatus(
          remoteAppLogFile).getLen(), conf);
  this.scanner = reader.createScanner();
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: AggregatedLogFormat.java

Example 9: clusterSetupAtBegining

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  fc = FileContext.getFileContext(cluster.getURI(0), CONF);
  Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: TestViewFsHdfs.java

Example 10: getPreviousJobHistoryPath

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
public static Path getPreviousJobHistoryPath(
    Configuration conf, ApplicationAttemptId applicationAttemptId)
    throws IOException {
  String jobId =
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId())
        .toString();
  String jobhistoryDir =
      JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
  Path histDirPath = FileContext.getFileContext(conf).makeQualified(
          new Path(jobhistoryDir));
  FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
  return fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
      histDirPath, jobId, (applicationAttemptId.getAttemptId() - 1)));
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: JobHistoryUtils.java

Example 11: loadConfFile

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Override
public Configuration loadConfFile() throws IOException {
  Path confPath = getConfFile();
  FileContext fc = FileContext.getFileContext(confPath.toUri(), conf);
  Configuration jobConf = new Configuration(false);
  jobConf.addResource(fc.open(confPath), confPath.toString());
  return jobConf;
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: JobImpl.java

Example 12: clusterSetupAtBeginning

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
  fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TestViewFsWithXAttrs.java

Example 13: initialize

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
  super.initialize(uri, conf);
  fc = FileContext.getFileContext(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 6, Source: TestFileContextXAttr.java

Example 14: serviceInit

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
@Override
public void serviceInit(Configuration conf) throws Exception {
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(),
        "apps_staging_dir/").getAbsolutePath());
  }

  // By default, VMEM monitoring disabled, PMEM monitoring enabled.
  if (!conf.getBoolean(
      MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
      MRConfig.DEFAULT_MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
  }

  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,  "000");

  try {
    Path stagingPath = FileContext.getFileContext(conf).makeQualified(
        new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
    /*
     * Re-configure the staging path on Windows if the file system is localFs.
     * We need to use an absolute path that contains the drive letter. The unit
     * test could run on a different drive than the AM. We can run into the
     * issue that job files are localized to the drive the test runs on,
     * while the AM starts on a different drive and fails to find the job
     * metafiles. Using an absolute path avoids this ambiguity.
     */
    if (Path.WINDOWS) {
      if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
            new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR))
                .getAbsolutePath());
      }
    }
    FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
    if (fc.util().exists(stagingPath)) {
      LOG.info(stagingPath + " exists! deleting...");
      fc.delete(stagingPath, true);
    }
    LOG.info("mkdir: " + stagingPath);
    // mkdir the staging directory so that the right permissions are set while
    // running as a proxy user
    fc.mkdir(stagingPath, null, true);
    // mkdir the done directory as well
    String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
    Path doneDirPath = fc.makeQualified(new Path(doneDir));
    fc.mkdir(doneDirPath, null, true);
  } catch (IOException e) {
    throw new YarnRuntimeException("Could not create staging directory. ", e);
  }
  conf.set(MRConfig.MASTER_ADDRESS, "test"); // the default is "local", under
                                             // which shuffle doesn't happen
  // configure the shuffle service in NM
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,
      ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class,
      Service.class);

  // Non-standard shuffle port
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

  conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR,
      DefaultContainerExecutor.class, ContainerExecutor.class);

  // TestMRJobs is for testing non-uberized operation only; see TestUberAM
  // for corresponding uberized tests.
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);

  super.serviceInit(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 73, Source: MiniMRYarnCluster.java

Example 15: WindowsSecureContainerExecutor

import org.apache.hadoop.fs.FileContext; // import the package/class the method depends on
public WindowsSecureContainerExecutor() 
    throws IOException, URISyntaxException {
  super(FileContext.getFileContext(new ElevatedFileSystem(), 
      new Configuration()));
}
 
Developer ID: naver, Project: hadoop, Lines: 6, Source: WindowsSecureContainerExecutor.java


Note: The org.apache.hadoop.fs.FileContext.getFileContext examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.