

Java RawLocalFileSystem Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.RawLocalFileSystem. If you have been wondering what RawLocalFileSystem is for, how to use it, or what working code looks like, the curated class examples below may help.


The RawLocalFileSystem class belongs to the org.apache.hadoop.fs package. The sections below present 15 code examples of the class, ordered by popularity.
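Before diving into the examples, here is a minimal standalone sketch of RawLocalFileSystem in action (the /tmp path is a made-up placeholder). One detail worth noting, which several examples below rely on: an instance created with new is not usable until initialize() has been called on it.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalDemo {
  public static void main(String[] args) throws Exception {
    RawLocalFileSystem fs = new RawLocalFileSystem();
    // A directly constructed FileSystem must be initialized before use.
    fs.initialize(URI.create("file:///"), new Configuration());
    Path dir = new Path("/tmp/raw-local-demo"); // made-up location
    fs.mkdirs(dir);
    System.out.println("created: " + fs.exists(dir));
    fs.close();
  }
}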

Example 1: startMiniCluster

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
@BeforeClass
public static void startMiniCluster() throws Exception {
  File targetDir = new File(System.getProperty("user.dir"), "target");
  File macDir = new File(targetDir, DelimitedIngestMiniClusterTest.class.getSimpleName() + "_cluster");
  if (macDir.exists()) {
    FileUtils.deleteDirectory(macDir);
  }
  MiniAccumuloConfigImpl config = new MiniAccumuloConfigImpl(macDir, ROOT_PASSWORD);
  config.setNumTservers(1);
  config.setInstanceName(INSTANCE_NAME);
  config.setSiteConfig(Collections.singletonMap("fs.file.impl", RawLocalFileSystem.class.getName()));
  config.useMiniDFS(true);
  MAC = new MiniAccumuloClusterImpl(config);
  MAC.start();
  FS = FileSystem.get(MAC.getMiniDfs().getConfiguration(0));

  ARGS = new DelimitedIngestArguments();
  ARGS.setUsername("root");
  ARGS.setPassword(ROOT_PASSWORD);
  ARGS.setInstanceName(INSTANCE_NAME);
  ARGS.setZooKeepers(MAC.getZooKeepers());
  ARGS.setConfiguration(MAC.getMiniDfs().getConfiguration(0));
}
 
Developer: joshelser, Project: accumulo-delimited-ingest, Lines: 24, Source: DelimitedIngestMiniClusterTest.java
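A side note on the setSiteConfig line above: mapping fs.file.impl to RawLocalFileSystem makes Hadoop hand out the raw, non-checksumming implementation for the file:// scheme. The same effect can be had with a plain Configuration; a minimal standalone sketch, not part of the test above:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalViaConfig {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Map the file:// scheme to the raw implementation.
    conf.set("fs.file.impl", RawLocalFileSystem.class.getName());
    conf.setBoolean("fs.file.impl.disable.cache", true); // bypass the cached default instance
    FileSystem fs = FileSystem.get(URI.create("file:///"), conf);
    System.out.println(fs.getClass().getName()); // expected: ...RawLocalFileSystem
  }
}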

Example 2: testInitExistingWorkingDirectoryInSafeMode

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
@Test
public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
  LOG.info("Starting testInitExistingWorkingDirectoryInSafeMode");
  tearDown();

  // Setup file system to inject startup conditions
  FileSystem fs = spy(new RawLocalFileSystem());
  doReturn(true).when(fs).isDirectory(any(Path.class));

  try {
    initAndStartStore(fs);
  } catch (Exception e) {
    Assert.fail("Exception should not be thrown: " + e);
  }

  // Make sure that directory creation was not attempted
  verify(fs, times(1)).isDirectory(any(Path.class));
  verify(fs, times(0)).mkdirs(any(Path.class));
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestFileSystemApplicationHistoryStore.java

Example 3: testInitNonExistingWorkingDirectoryInSafeMode

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
@Test
public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
  LOG.info("Starting testInitNonExistingWorkingDirectoryInSafeMode");
  tearDown();

  // Setup file system to inject startup conditions
  FileSystem fs = spy(new RawLocalFileSystem());
  doReturn(false).when(fs).isDirectory(any(Path.class));
  doThrow(new IOException()).when(fs).mkdirs(any(Path.class));

  try {
    initAndStartStore(fs);
    Assert.fail("Exception should have been thrown");
  } catch (Exception e) {
    // Expected failure
  }

  // Make sure that directory creation was attempted
  verify(fs, times(1)).isDirectory(any(Path.class));
  verify(fs, times(1)).mkdirs(any(Path.class));
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestFileSystemApplicationHistoryStore.java

Example 4: sameVolRename

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
/**
 * Rename srcPath to dstPath on the same volume. This is the same
 * as RawLocalFileSystem's rename method, except that it will not
 * fall back to a copy, and it will create the target directory
 * if it doesn't exist.
 */
private void sameVolRename(Path srcPath,
    Path dstPath) throws IOException {
  RawLocalFileSystem rfs = (RawLocalFileSystem)this.rfs;
  File src = rfs.pathToFile(srcPath);
  File dst = rfs.pathToFile(dstPath);
  if (!dst.getParentFile().exists()) {
    if (!dst.getParentFile().mkdirs()) {
      throw new IOException("Unable to rename " + src + " to "
          + dst + ": couldn't create parent directory"); 
    }
  }
  
  if (!src.renameTo(dst)) {
    throw new IOException("Unable to rename " + src + " to " + dst);
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: MapTask.java
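One piece of the method above that is reusable on its own is pathToFile(), RawLocalFileSystem's public bridge from Hadoop Path objects to java.io.File. A minimal sketch with a made-up path:

import java.io.File;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class PathToFileDemo {
  public static void main(String[] args) throws Exception {
    RawLocalFileSystem rfs = new RawLocalFileSystem();
    rfs.initialize(URI.create("file:///"), new Configuration());
    // Convert a Hadoop Path to a plain java.io.File.
    File f = rfs.pathToFile(new Path("/tmp/example.txt")); // hypothetical path
    System.out.println(f.getAbsolutePath());
  }
}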

Example 5: generateConfig

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
private final String generateConfig(int rsPort) {
  StringBuilder sb = new StringBuilder();
  Map<String, Object> confMap = new TreeMap<String, Object>();
  confMap.put(HConstants.CLUSTER_DISTRIBUTED, true);
  if (rsPort > 0) {
    confMap.put(HConstants.REGIONSERVER_PORT, rsPort);
    confMap.put(HConstants.REGIONSERVER_INFO_PORT_AUTO, true);
  }

  confMap.put(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
  confMap.put(HConstants.MASTER_PORT, masterPort);
  confMap.put(HConstants.HREGION_MAX_FILESIZE, MAX_FILE_SIZE_OVERRIDE);
  confMap.put("fs.file.impl", RawLocalFileSystem.class.getName());

  sb.append("<configuration>\n");
  for (Map.Entry<String, Object> entry : confMap.entrySet()) {
    sb.append("  <property>\n");
    sb.append("    <name>" + entry.getKey() + "</name>\n");
    sb.append("    <value>" + entry.getValue() + "</value>\n");
    sb.append("  </property>\n");
  }
  sb.append("</configuration>\n");
  return sb.toString();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 25, Source: ProcessBasedLocalHBaseCluster.java

Example 6: copyToLocalFileSystem

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
/**
 * Download a file from DFS to the local file system.
 *
 * @param fs the source (DFS) file system
 * @param destinationPath the local directory to download into
 * @param destinationFile the name of the local file to create
 * @param dfsFile the path of the file on DFS
 * @param conf the configuration used for the copy
 * @return the downloaded local file
 * @throws IOException
 */
public static File copyToLocalFileSystem(FileSystem fs, String destinationPath, String destinationFile, String dfsFile, Configuration conf)
    throws IOException
{
  File destinationDir = new File(destinationPath);
  if (!destinationDir.exists() && !destinationDir.mkdirs()) {
    throw new RuntimeException("Unable to create local directory");
  }
  try (RawLocalFileSystem localFileSystem = new RawLocalFileSystem()) {
    // allow app user to access local dir
    FsPermission permissions = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    localFileSystem.setPermission(new Path(destinationDir.getAbsolutePath()), permissions);

    Path dfsFilePath = new Path(dfsFile);
    File localFile = new File(destinationDir, destinationFile);
    FileUtil.copy(fs, dfsFilePath, localFile, false, conf);
    // set permissions on actual file to be read-only for user
    permissions = new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE);
    localFileSystem.setPermission(new Path(localFile.getAbsolutePath()), permissions);
    return localFile;
  }
}
 
Developer: apache, Project: apex-core, Lines: 32, Source: FSUtil.java
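A hypothetical call site for the helper above (the namenode URI and all paths are made up; FSUtil is assumed to be the enclosing class, per the source reference):

import java.io.File;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class CopyToLocalDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem dfs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
    // Copy /apps/demo/config.json out of DFS into /tmp/app-cache/config.json.
    File local = FSUtil.copyToLocalFileSystem(
        dfs, "/tmp/app-cache", "config.json", "/apps/demo/config.json", conf);
    System.out.println("copied to " + local.getAbsolutePath());
  }
}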

Example 7: ExternalSortExec

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
private ExternalSortExec(final TaskAttemptContext context, final SortNode plan)
    throws PhysicalPlanningException {
  super(context, plan.getInSchema(), plan.getOutSchema(), null, plan.getSortKeys());

  this.plan = plan;
  this.defaultFanout = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT);
  if (defaultFanout < 2) {
    throw new PhysicalPlanningException(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT.varname + " cannot be lower than 2");
  }
  // TODO - sort buffer and core num should be changed to use the allocated container resource.
  this.sortBufferBytesNum = context.getQueryContext().getInt(SessionVars.EXTSORT_BUFFER_SIZE) * StorageUnit.MB;
  this.allocatedCoreNum = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_THREAD_NUM);
  this.localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
  this.localFS = new RawLocalFileSystem();
  this.intermediateMeta = CatalogUtil.newTableMeta(BuiltinStorages.DRAW, context.getConf());
  this.inputStats = new TableStats();
  this.sortAlgorithm = getSortAlgorithm(context.getQueryContext(), sortSpecs);
  LOG.info(sortAlgorithm.name() + " sort is selected");
}
 
Developer: apache, Project: tajo, Lines: 20, Source: ExternalSortExec.java

Example 8: init

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
public void init() throws IOException {

    keySchema = PlannerUtil.sortSpecsToSchema(sortSpecs);
    keyProjector = new KeyProjector(inSchema, keySchema.toArray());

    BSTIndex bst = new BSTIndex(context.getConf());
    this.comp = new BaseTupleComparator(keySchema, sortSpecs);
    Path storeTablePath = new Path(context.getWorkDir(), "output");
    LOG.info("Output data directory: " + storeTablePath);

    FileSystem fs = new RawLocalFileSystem();
    fs.mkdirs(storeTablePath);
    this.appender = (FileAppender) ((FileTablespace) TablespaceManager.getDefault())
        .getAppender(meta, outSchema, new Path(storeTablePath, "output"));
    this.appender.enableStats(keySchema.getAllColumns());
    this.appender.init();
    this.indexWriter = bst.getIndexWriter(new Path(storeTablePath, "index"),
        BSTIndex.TWO_LEVEL_INDEX, keySchema, comp, true);
    this.indexWriter.init();

    super.init();
  }
 
Developer: apache, Project: tajo, Lines: 23, Source: RangeShuffleFileWriteExec.java

Example 9: ExternalSortExec

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
private ExternalSortExec(final TaskAttemptContext context, final AbstractStorageManager sm, final SortNode plan)
    throws PhysicalPlanningException {
  super(context, plan.getInSchema(), plan.getOutSchema(), null, plan.getSortKeys());

  this.plan = plan;
  this.meta = CatalogUtil.newTableMeta(StoreType.ROWFILE);

  this.defaultFanout = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT);
  if (defaultFanout < 2) {
    throw new PhysicalPlanningException(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT.varname + " cannot be lower than 2");
  }
  // TODO - sort buffer and core num should be changed to use the allocated container resource.
  this.sortBufferBytesNum = context.getConf().getLongVar(ConfVars.EXECUTOR_EXTERNAL_SORT_BUFFER_SIZE) * 1048576L;
  this.allocatedCoreNum = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_THREAD_NUM);
  this.executorService = Executors.newFixedThreadPool(this.allocatedCoreNum);
  this.inMemoryTable = new ArrayList<Tuple>(100000);

  this.sortTmpDir = getExecutorTmpDir();
  localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
  localFS = new RawLocalFileSystem();
}
 
Developer: apache, Project: incubator-tajo, Lines: 22, Source: ExternalSortExec.java

Example 10: buildLogCopier

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
  FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
  rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());

  LogCopier.Builder builder = LogCopier.newBuilder()
          .useSrcFileSystem(this.fs)
          .useDestFileSystem(rawLocalFs)
          .readFrom(getHdfsLogDir(appWorkDir))
          .writeTo(sinkLogDir)
          .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE)) {
    builder.useMaxBytesPerLogFile(config.getBytes(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE));
  }
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER)) {
    builder.useScheduler(config.getString(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER));
  }
  return builder.build();
}
 
Developer: apache, Project: incubator-gobblin, Lines: 19, Source: GobblinYarnAppLauncher.java

Example 11: test1

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
public void test1() throws IOException, ParseException {

    String schema = "s1:string, s2:string";
    String storage = "[s1, s2]COMPRESS BY gz SECURE BY uid:user1 gid:users perm:744 SERIALIZE BY pig";
    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
    fs = new LocalFileSystem(rawLFS);
    Path path1 = new Path(path.toString() + "1");
    Runtime.getRuntime().exec("rm -rf " + path1.toString());

    fs = path.getFileSystem(conf);
    BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
        conf);
    writer.finish();
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(bos);
    System.out.println("start dumpinfo ===========");
    BasicTable.dumpInfo(path1.toString(), ps, conf);

    Assert.assertEquals(true, bos.toString().contains("Serializer: pig"));
    Assert.assertEquals(true, bos.toString().contains("Compressor: gz"));
    Assert.assertEquals(true, bos.toString().contains(
        "Schema : s1:string,s2:string"));
  }
 
Developer: sigmoidanalytics, Project: spork-streaming, Lines: 24, Source: TestStorageGrammar.java
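Examples 11 through 15 all share the same opening move: wrapping a RawLocalFileSystem in a LocalFileSystem, which layers client-side CRC checksumming on top of the raw local filesystem. In isolation the pattern looks like this (a minimal sketch; initialize() is called on the wrapper here, since whether it propagates to the wrapped instance has varied across Hadoop versions):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class ChecksummedLocalDemo {
  public static void main(String[] args) throws Exception {
    LocalFileSystem checksummed = new LocalFileSystem(new RawLocalFileSystem());
    checksummed.initialize(URI.create("file:///"), new Configuration());
    // Files created through 'checksummed' get .crc sidecar files;
    // the same writes through the bare RawLocalFileSystem would not.
    checksummed.create(new Path("/tmp/checksummed-demo.txt")).close(); // made-up path
  }
}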

Example 12: testWriteRecord5

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
@Test
public void testWriteRecord5() throws IOException, ParseException {
  String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4))";
  String STR_STORAGE = "[r1.f1]; [r2.r3]; [r1.f2, r2.r3.f3]";
  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");

  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}
 
Developer: sigmoidanalytics, Project: spork-streaming, Lines: 25, Source: TestNegative.java

Example 13: testWriteRecord6

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
@Test
public void testWriteRecord6() throws IOException, ParseException {
  String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4))";
  String STR_STORAGE = "[r1.f1]; [r1.f2, r2.r3.f3]; [r2.r3]";
  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");

  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}
 
Developer: sigmoidanalytics, Project: spork-streaming, Lines: 25, Source: TestNegative.java

Example 14: testWriteMap1

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
@Test
public void testWriteMap1() throws IOException, ParseException {
  String STR_SCHEMA = " m2:map(map(map(string)))";
  String STR_STORAGE = "[m2#{k}#{j}]";

  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");

  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}
 
Developer: sigmoidanalytics, Project: spork-streaming, Lines: 26, Source: TestNegative.java

Example 15: testWriteMap2

import org.apache.hadoop.fs.RawLocalFileSystem; // import the required package/class
@Test
public void testWriteMap2() throws IOException, ParseException {
  String STR_SCHEMA = " m2:map(map(map(string)))";
  String STR_STORAGE = "[m2.{k}]";

  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");

  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}
 
Developer: sigmoidanalytics, Project: spork-streaming, Lines: 26, Source: TestNegative.java


Note: the org.apache.hadoop.fs.RawLocalFileSystem class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's License. Do not reproduce without permission.