

Java RawLocalFileSystem.setConf Method Code Examples

This article collects typical usage of the Java method org.apache.hadoop.fs.RawLocalFileSystem.setConf, with code examples drawn from open-source projects. If you are unsure what RawLocalFileSystem.setConf does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.fs.RawLocalFileSystem.


The sections below present four code examples of RawLocalFileSystem.setConf, ordered by popularity.
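Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: construct a RawLocalFileSystem, hand it a Configuration via setConf, and wrap it in a checksummed LocalFileSystem. The class name and printed output are illustrative, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class SetConfDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    RawLocalFileSystem raw = new RawLocalFileSystem();
    raw.setConf(conf);                             // wire the configuration into the raw local FS
    LocalFileSystem fs = new LocalFileSystem(raw); // checksummed wrapper over the raw FS
    System.out.println("working dir: " + fs.getWorkingDirectory());
  }
}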

Example 1: setUpOnce

import org.apache.hadoop.fs.RawLocalFileSystem; // import the class that declares this method
@BeforeClass
public static void setUpOnce() throws IOException {
  // set default file system to local file system
  conf.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");

  // must set a conf here to the underlying FS, or it barks
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  rawLFS.setConf(conf);
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), outputFile);
  System.out.println("output file: " + path);
}
 
Developer: sigmoidanalytics, Project: spork-streaming, Lines: 13, Source: TestColumnGroupOpen.java
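In application code, as opposed to tests that need direct control over the raw layer like the one above, the same configured local file system is usually obtained through the static factory FileSystem.getLocal(Configuration), which applies the configuration internally. A short sketch of that alternative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;

public class GetLocalDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // FileSystem.getLocal() applies the configuration internally,
    // so no explicit setConf() call on the raw layer is needed.
    LocalFileSystem fs = FileSystem.getLocal(conf);
    System.out.println(fs.getUri());
  }
}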

Example 2: testConcurrentCommitTaskWithSubDir

import org.apache.hadoop.fs.RawLocalFileSystem; // import the class that declares this method
private void testConcurrentCommitTaskWithSubDir(int version)
    throws Exception {
  final Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  final Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
      version);

  conf.setClass("fs.file.impl", RLFS.class, FileSystem.class);
  FileSystem.closeAll();

  final JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  final FileOutputCommitter amCommitter =
      new FileOutputCommitter(outDir, jContext);
  amCommitter.setupJob(jContext);

  final TaskAttemptContext[] taCtx = new TaskAttemptContextImpl[2];
  taCtx[0] = new TaskAttemptContextImpl(conf, taskID);
  taCtx[1] = new TaskAttemptContextImpl(conf, taskID1);

  final TextOutputFormat[] tof = new TextOutputFormat[2];
  for (int i = 0; i < tof.length; i++) {
    tof[i] = new TextOutputFormat() {
      @Override
      public Path getDefaultWorkFile(TaskAttemptContext context,
          String extension) throws IOException {
        final FileOutputCommitter foc = (FileOutputCommitter)
            getOutputCommitter(context);
        return new Path(new Path(foc.getWorkPath(), SUB_DIR),
            getUniqueFile(context, getOutputName(context), extension));
      }
    };
  }

  final ExecutorService executor = Executors.newFixedThreadPool(2);
  try {
    for (int i = 0; i < taCtx.length; i++) {
      final int taskIdx = i;
      executor.submit(new Callable<Void>() {
        @Override
        public Void call() throws IOException, InterruptedException {
          final OutputCommitter outputCommitter =
              tof[taskIdx].getOutputCommitter(taCtx[taskIdx]);
          outputCommitter.setupTask(taCtx[taskIdx]);
          final RecordWriter rw =
              tof[taskIdx].getRecordWriter(taCtx[taskIdx]);
          writeOutput(rw, taCtx[taskIdx]);
          outputCommitter.commitTask(taCtx[taskIdx]);
          return null;
        }
      });
    }
  } finally {
    executor.shutdown();
    while (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
      LOG.info("Awaiting thread termination!");
    }
  }

  amCommitter.commitJob(jContext);
  final RawLocalFileSystem lfs = new RawLocalFileSystem();
  lfs.setConf(conf);
  assertFalse("Must not end up with sub_dir/sub_dir",
      lfs.exists(new Path(OUT_SUB_DIR, SUB_DIR)));

  // validate output
  validateContent(OUT_SUB_DIR);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: naver, Project: hadoop, Lines: 71, Source: TestFileOutputCommitter.java
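Example 2 registers a test-local RawLocalFileSystem subclass (RLFS, whose definition is not shown in the snippet) as the implementation for the file:// scheme, then calls FileSystem.closeAll() so cached instances are discarded and the new class takes effect. A hypothetical stand-in, purely to illustrate that registration pattern:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class CustomLocalFsDemo {
  // Hypothetical stand-in for the test's RLFS subclass.
  public static class CustomLocalFs extends RawLocalFileSystem {
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", CustomLocalFs.class, FileSystem.class);
    FileSystem.closeAll(); // drop cached instances so the new impl is picked up
    FileSystem fs = new Path("file:///").getFileSystem(conf);
    System.out.println(fs.getClass().getName()); // prints ...$CustomLocalFs
  }
}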

Example 3: setup

import org.apache.hadoop.fs.RawLocalFileSystem; // import the class that declares this method
@Before
public void setup() throws Exception {
  
  Logger.getLogger(AbstractQueryLogic.class).setLevel(Level.DEBUG);
  Logger.getLogger(QueryLogic.class).setLevel(Level.DEBUG);
  Logger.getLogger(RangeCalculator.class).setLevel(Level.DEBUG);
  
  conf.set(AggregatingRecordReader.START_TOKEN, "<page>");
  conf.set(AggregatingRecordReader.END_TOKEN, "</page>");
  conf.set(WikipediaConfiguration.TABLE_NAME, TABLE_NAME);
  conf.set(WikipediaConfiguration.NUM_PARTITIONS, "1");
  conf.set(WikipediaConfiguration.NUM_GROUPS, "1");
  
  MockInstance i = new MockInstance();
  c = i.getConnector("root", new PasswordToken(""));
  WikipediaIngester.createTables(c.tableOperations(), TABLE_NAME, false);
  for (String table : TABLE_NAMES) {
    writerMap.put(new Text(table), c.createBatchWriter(table, 1000L, 1000L, 1));
  }
  
  TaskAttemptID id = new TaskAttemptID();
  TaskAttemptContext context = new TaskAttemptContext(conf, id);
  
  RawLocalFileSystem fs = new RawLocalFileSystem();
  fs.setConf(conf);
  
  URL url = ClassLoader.getSystemResource("enwiki-20110901-001.xml");
  Assert.assertNotNull(url);
  File data = new File(url.toURI());
  Path tmpFile = new Path(data.getAbsolutePath());
  
  // Setup the Mapper
  WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(tmpFile, 0, fs.pathToFile(tmpFile).length(), null), 0);
  AggregatingRecordReader rr = new AggregatingRecordReader();
  Path ocPath = new Path(tmpFile, "oc");
  OutputCommitter oc = new FileOutputCommitter(ocPath, context);
  fs.deleteOnExit(ocPath);
  StandaloneStatusReporter sr = new StandaloneStatusReporter();
  rr.initialize(split, context);
  MockAccumuloRecordWriter rw = new MockAccumuloRecordWriter();
  WikipediaMapper mapper = new WikipediaMapper();
  
  // Load data into Mock Accumulo
  Mapper<LongWritable,Text,Text,Mutation>.Context con = mapper.new Context(conf, id, rr, rw, oc, sr, split);
  mapper.run(con);
  
  // Flush and close record writers.
  rw.close(context);
  
  table = new QueryLogic();
  table.setMetadataTableName(METADATA_TABLE_NAME);
  table.setTableName(TABLE_NAME);
  table.setIndexTableName(INDEX_TABLE_NAME);
  table.setReverseIndexTableName(RINDEX_TABLE_NAME);
  table.setUseReadAheadIterator(false);
  table.setUnevaluatedFields(Collections.singletonList("TEXT"));
}
 
Developer: apache, Project: accumulo-wikisearch, Lines: 58, Source: TestQueryLogic.java
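Example 3 also uses RawLocalFileSystem.pathToFile(Path), which maps a Hadoop Path to a plain java.io.File, resolving relative paths against the raw FS working directory. The example configures the raw FS before use, and this small sketch follows the same convention; the relative path is illustrative only.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class PathToFileDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    RawLocalFileSystem raw = new RawLocalFileSystem();
    raw.setConf(conf); // follow the examples: configure before use

    // "data/input.xml" is an illustrative relative path; it is resolved
    // against the raw FS working directory (the process user.dir).
    File local = raw.pathToFile(new Path("data/input.xml"));
    System.out.println(local.getAbsolutePath() + " length=" + local.length());
  }
}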

Example 4: setUpOnce

import org.apache.hadoop.fs.RawLocalFileSystem; // import the class that declares this method
@BeforeClass
public static void setUpOnce() throws IOException, ParseException {
  // set default file system to local file system
  conf.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");

  // must set a conf here to the underlying FS, or it barks
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  rawLFS.setConf(conf);
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), outputFile);
  System.out.println("output file: " + path);
  
  if (fs.exists(path)) {
      ColumnGroup.drop(path, conf);
  }

  schema = new Schema("a:string,b:string,c:string,d:string,e:string,f:string,g:string");

  ColumnGroup.Writer writer = new ColumnGroup.Writer(path, schema, false, path.getName(),
      "pig", "gz", null, null, (short) -1, true, conf);
  TableInserter ins = writer.getInserter("part0", true);

  // row 1
  Tuple row = TypesUtils.createTuple(writer.getSchema());
  row.set(0, "a1");
  row.set(1, "b1");
  row.set(2, "c1");
  row.set(3, "d1");
  row.set(4, "e1");
  row.set(5, "f1");
  row.set(6, "g1");
  ins.insert(new BytesWritable("k1".getBytes()), row);

  // row 2
  TypesUtils.resetTuple(row);
  row.set(0, "a2");
  row.set(1, "b2");
  row.set(2, "c2");
  row.set(3, "d2");
  row.set(4, "e2");
  row.set(5, "f2");
  row.set(6, "g2");
  ins.insert(new BytesWritable("k2".getBytes()), row);
  ins.close();

  writer.close();
}
 
Developer: sigmoidanalytics, Project: spork-streaming, Lines: 48, Source: TestColumnGroupProjections.java
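Examples 1 and 4 bootstrap the local file system identically, so the shared steps are worth extracting. A hedged helper capturing that pattern; the class and method names are ours, not from either project:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.RawLocalFileSystem;

public final class LocalFsFactory {
  private LocalFsFactory() {}

  /** Builds a checksummed LocalFileSystem over a configured raw local FS. */
  public static LocalFileSystem newLocalFs(Configuration conf) {
    conf.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    RawLocalFileSystem raw = new RawLocalFileSystem();
    raw.setConf(conf); // the tests note the underlying FS "barks" without this
    return new LocalFileSystem(raw);
  }
}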


Note: the org.apache.hadoop.fs.RawLocalFileSystem.setConf examples in this article were compiled from open-source projects hosted on GitHub and similar platforms. Copyright in each snippet remains with its original authors; consult the corresponding project's license before redistributing or reusing the code.