

Java FileContext.getLocalFSFileContext Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileContext.getLocalFSFileContext. If you are wondering what FileContext.getLocalFSFileContext does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of its containing class, org.apache.hadoop.fs.FileContext.


The following presents 15 code examples of the FileContext.getLocalFSFileContext method, sorted by popularity by default.
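Before the examples drawn from real projects, here is a minimal, self-contained usage sketch of the method. The class name LocalFsContextDemo and the /tmp path are illustrative placeholders, not taken from any of the projects below; the no-argument overload is shown, while several examples below use the FileContext.getLocalFSFileContext(Configuration) overload instead.

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class LocalFsContextDemo {
  public static void main(String[] args) throws Exception {
    // Obtain a FileContext bound to the local file system.
    FileContext lfs = FileContext.getLocalFSFileContext();

    // Create a directory, qualify the path against the local FS, then clean up.
    Path dir = new Path("/tmp/filecontext-demo");   // illustrative path
    lfs.mkdir(dir, FileContext.DEFAULT_PERM, true);
    System.out.println("Created: " + lfs.makeQualified(dir));
    lfs.delete(dir, true);
  }
}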

Example 1: setUp

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  fcTarget = FileContext.getLocalFSFileContext();
  chrootedTo = fileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
  // In case previous test was killed before cleanup
  fcTarget.delete(chrootedTo, true);
  
  fcTarget.mkdir(chrootedTo, FileContext.DEFAULT_PERM, true);

  Configuration conf = new Configuration();

  // ChRoot to the root of the testDirectory
  fc = FileContext.getFileContext(
      new ChRootedFs(fcTarget.getDefaultFileSystem(), chrootedTo), conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 17, Source: TestChRootedFs.java

Example 2: createTmpFile

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
byte[] createTmpFile(Path dst, Random r, int len)
    throws IOException {
  // use unmodified local context
  FileContext lfs = FileContext.getLocalFSFileContext();
  dst = lfs.makeQualified(dst);
  lfs.mkdir(dst.getParent(), null, true);
  byte[] bytes = new byte[len];
  FSDataOutputStream out = null;
  try {
    out = lfs.create(dst, EnumSet.of(CREATE, OVERWRITE));
    r.nextBytes(bytes);
    out.write(bytes);
  } finally {
    if (out != null) out.close();
  }
  return bytes;
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestDefaultContainerExecutor.java

Example 3: setupForViewFsLocalFs

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
static public FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
  /**
   * create the test root on local_fs - the  mount table will point here
   */
  FileContext fsTarget = FileContext.getLocalFSFileContext();
  Path targetOfTests = helper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  
  fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
  Configuration conf = new Configuration();
  
  // Set up viewfs link for test dir as described above
  String testDir = helper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
  
  
  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);
    
  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
  
  FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fc.getWorkingDirectory());
  //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
  //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
  return fc;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 34, Source: ViewFsTestSetup.java

Example 4: setup

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@BeforeClass
public static void setup() throws AccessControlException,
    FileNotFoundException, IllegalArgumentException, IOException {
  //Do not error out if metrics are inited multiple times
  DefaultMetricsSystem.setMiniClusterMode(true);
  File dir = new File(stagingDir);
  stagingDir = dir.getAbsolutePath();
  localFS = FileContext.getLocalFSFileContext();
  localFS.delete(new Path(testDir.getAbsolutePath()), true);
  testDir.mkdir();
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TestMRAppMaster.java

Example 5: createLogFile

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
/**
 * Create simple log files in the workspace.
 *
 * @return the workspace path containing the created log files
 * @throws IOException if the files cannot be created
 */

private Path createLogFile() throws IOException {

  FileContext files = FileContext.getLocalFSFileContext();

  Path ws = new Path(workSpace.getAbsoluteFile().getAbsolutePath());

  files.delete(ws, true);
  Path workSpacePath = new Path(workSpace.getAbsolutePath(), "log");
  files.mkdir(workSpacePath, null, true);

  LOG.info("create logfile.log");
  Path logfile1 = new Path(workSpacePath, "logfile.log");

  FSDataOutputStream os = files.create(logfile1,
      EnumSet.of(CreateFlag.CREATE));
  os.writeBytes("4 3" + EL + "1 3" + EL + "4 44" + EL);
  os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
  os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);

  os.flush();
  os.close();
  LOG.info("create logfile1.log");

  Path logfile2 = new Path(workSpacePath, "logfile1.log");

  os = files.create(logfile2, EnumSet.of(CreateFlag.CREATE));
  os.writeBytes("4 3" + EL + "1 3" + EL + "3 44" + EL);
  os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
  os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);

  os.flush();
  os.close();

  return workSpacePath;
}
 
Developer ID: naver, Project: hadoop, Lines: 43, Source: TestLogalyzer.java

Example 6: setup

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@Before
public void setup() {
  assumeTrue(Shell.LINUX);
  File f = new File("./src/test/resources/mock-container-executor");
  if(!FileUtil.canExecute(f)) {
    FileUtil.setExecutable(f, true);
  }
  String executorPath = f.getAbsolutePath();
  Configuration conf = new Configuration();
  yarnImage = "yarnImage";
  long time = System.currentTimeMillis();
  conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath);
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir" + time);
  conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/userlogs" + time);
  conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
  conf.set(YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME, DOCKER_LAUNCH_COMMAND);
  dockerContainerExecutor = new DockerContainerExecutor();
  dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  dockerContainerExecutor.setConf(conf);
  lfs = null;
  try {
    lfs = FileContext.getLocalFSFileContext();
    workDir = new Path("/tmp/temp-"+ System.currentTimeMillis());
    lfs.mkdir(workDir, FsPermission.getDirDefault(), true);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }

}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestDockerContainerExecutorWithMocks.java

Example 7: setupForTests

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@Before
public void setupForTests() throws IOException {
  conf = new Configuration();
  localFs = FileContext.getLocalFSFileContext(conf);
  testDir.mkdirs();
  testFile.createNewFile();
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: TestDirectoryCollection.java

Example 8: deleteTmpFiles

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@AfterClass
public static void deleteTmpFiles() throws IOException {
  FileContext lfs = FileContext.getLocalFSFileContext();
  try {
    lfs.delete(BASE_TMP_PATH, true);
  } catch (FileNotFoundException e) {
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: TestDefaultContainerExecutor.java

Example 9: setUp

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@Override
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  fcTarget = FileContext.getLocalFSFileContext();
  super.setUp(); // this sets up conf (and fcView which we replace)
  
  // Now create a viewfs using a mount table called "default"
  // hence viewfs://default/
  schemeWithAuthority = 
    new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null);
  fcView = FileContext.getFileContext(schemeWithAuthority, conf);  
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: TestViewFsWithAuthorityLocalFs.java

Example 10: HPCLogAggregateHandler

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
public HPCLogAggregateHandler(String applicationId, String user) {
  super(HPCLogAggregateHandler.class.getName());
  this.applicationId = applicationId;
  this.user = user;
  try {
    this.lfs = FileContext.getLocalFSFileContext();
  } catch (UnsupportedFileSystemException e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: intel-hpdd, Project: scheduling-connector-for-hadoop, Lines: 11, Source: HPCLogAggregateHandler.java

Example 11: serviceInit

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
/**
 * Method which initializes the timertask and its interval time.
 * 
 */
@Override
protected void serviceInit(Configuration config) throws Exception {
  // Clone the configuration as we may do modifications to dirs-list
  Configuration conf = new Configuration(config);
  diskHealthCheckInterval = conf.getLong(
      YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS,
      YarnConfiguration.DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS);
  monitoringTimerTask = new MonitoringTimerTask(conf);
  isDiskHealthCheckerEnabled = conf.getBoolean(
      YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, true);
  minNeededHealthyDisksFactor = conf.getFloat(
      YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION,
      YarnConfiguration.DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION);
  lastDisksCheckTime = System.currentTimeMillis();
  super.serviceInit(conf);

  FileContext localFs;
  try {
    localFs = FileContext.getLocalFSFileContext(config);
  } catch (IOException e) {
    throw new YarnRuntimeException("Unable to get the local filesystem", e);
  }
  FsPermission perm = new FsPermission((short)0755);
  boolean createSucceeded = localDirs.createNonExistentDirs(localFs, perm);
  createSucceeded &= logDirs.createNonExistentDirs(localFs, perm);
  if (!createSucceeded) {
    updateDirsAfterTest();
  }

  // Check the disk health immediately to weed out bad directories
  // before other init code attempts to use them.
  checkDirs();
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: LocalDirsHandlerService.java

Example 12: DockerContainerExecutor

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
public DockerContainerExecutor() {
  try {
    this.lfs = FileContext.getLocalFSFileContext();
    this.dockerImagePattern = Pattern.compile(DOCKER_IMAGE_PATTERN);
  } catch (UnsupportedFileSystemException e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: DockerContainerExecutor.java

Example 13: DefaultContainerExecutor

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
public DefaultContainerExecutor() {
  try {
    this.lfs = FileContext.getLocalFSFileContext();
  } catch (UnsupportedFileSystemException e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: DefaultContainerExecutor.java

Example 14: testLocalizerTokenIsGettingRemoved

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@Test
@SuppressWarnings("unchecked")
public void testLocalizerTokenIsGettingRemoved() throws Exception {
  FileContext fs = FileContext.getLocalFSFileContext();
  spylfs = spy(fs.getDefaultFileSystem());
  ContainerLocalizer localizer = setupContainerLocalizerForTest();
  doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class),
      any(CompletionService.class), any(UserGroupInformation.class));
  localizer.runLocalization(nmAddr);
  verify(spylfs, times(1)).delete(tokenPath, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TestContainerLocalizer.java

Example 15: deleteTestDir

import org.apache.hadoop.fs.FileContext; // import the package/class required by this method
@AfterClass
public static void deleteTestDir() throws IOException {
  FileContext fs = FileContext.getLocalFSFileContext();
  fs.delete(new Path("target", TestFSDownload.class.getSimpleName()), true);
}
 
Developer ID: naver, Project: hadoop, Lines: 6, Source: TestFSDownload.java


Note: The org.apache.hadoop.fs.FileContext.getLocalFSFileContext examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.