

Java RawLocalFileSystem.setConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.RawLocalFileSystem.setConf. If you are wondering how exactly to use RawLocalFileSystem.setConf, or what it looks like in practice, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.RawLocalFileSystem.


The following presents 4 code examples of the RawLocalFileSystem.setConf method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
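Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: a freshly constructed RawLocalFileSystem must be handed a Configuration via setConf before it is used, and it is often then wrapped in a checksummed LocalFileSystem. The class name RawLocalFileSystemSetConfDemo is illustrative only and does not come from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalFileSystemSetConfDemo { // hypothetical demo class
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // The raw (non-checksummed) local FS needs a Configuration before use.
    RawLocalFileSystem rawLFS = new RawLocalFileSystem();
    rawLFS.setConf(conf);

    // Optionally wrap it in a checksummed LocalFileSystem,
    // as several of the examples below do.
    LocalFileSystem fs = new LocalFileSystem(rawLFS);

    Path cwd = fs.getWorkingDirectory();
    System.out.println("working directory: " + cwd);
  }
}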

Example 1: setUpOnce
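A JUnit @BeforeClass hook from the spork-streaming project: it points the default file system at the local FS, configures a RawLocalFileSystem via setConf, and wraps it in a LocalFileSystem before the tests run.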

import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this method depends on
@BeforeClass
public static void setUpOnce() throws IOException {
  // set default file system to local file system
  conf.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");

  // must set a conf here to the underlying FS, or it barks
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  rawLFS.setConf(conf);
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), outputFile);
  System.out.println("output file: " + path);
}
 
Author: sigmoidanalytics, Project: spork-streaming, Lines: 13, Source: TestColumnGroupOpen.java

Example 2: testConcurrentCommitTaskWithSubDir
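A test helper from the Hadoop project: it runs two task attempts concurrently against a FileOutputCommitter, then uses a RawLocalFileSystem configured via setConf to verify that no nested sub_dir/sub_dir path was left behind after the job commits.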

import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this method depends on
private void testConcurrentCommitTaskWithSubDir(int version)
    throws Exception {
  final Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  final Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
      version);

  conf.setClass("fs.file.impl", RLFS.class, FileSystem.class);
  FileSystem.closeAll();

  final JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  final FileOutputCommitter amCommitter =
      new FileOutputCommitter(outDir, jContext);
  amCommitter.setupJob(jContext);

  final TaskAttemptContext[] taCtx = new TaskAttemptContextImpl[2];
  taCtx[0] = new TaskAttemptContextImpl(conf, taskID);
  taCtx[1] = new TaskAttemptContextImpl(conf, taskID1);

  final TextOutputFormat[] tof = new TextOutputFormat[2];
  for (int i = 0; i < tof.length; i++) {
    tof[i] = new TextOutputFormat() {
      @Override
      public Path getDefaultWorkFile(TaskAttemptContext context,
          String extension) throws IOException {
        final FileOutputCommitter foc = (FileOutputCommitter)
            getOutputCommitter(context);
        return new Path(new Path(foc.getWorkPath(), SUB_DIR),
            getUniqueFile(context, getOutputName(context), extension));
      }
    };
  }

  final ExecutorService executor = Executors.newFixedThreadPool(2);
  try {
    for (int i = 0; i < taCtx.length; i++) {
      final int taskIdx = i;
      executor.submit(new Callable<Void>() {
        @Override
        public Void call() throws IOException, InterruptedException {
          final OutputCommitter outputCommitter =
              tof[taskIdx].getOutputCommitter(taCtx[taskIdx]);
          outputCommitter.setupTask(taCtx[taskIdx]);
          final RecordWriter rw =
              tof[taskIdx].getRecordWriter(taCtx[taskIdx]);
          writeOutput(rw, taCtx[taskIdx]);
          outputCommitter.commitTask(taCtx[taskIdx]);
          return null;
        }
      });
    }
  } finally {
    executor.shutdown();
    while (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
      LOG.info("Awaiting thread termination!");
    }
  }

  amCommitter.commitJob(jContext);
  final RawLocalFileSystem lfs = new RawLocalFileSystem();
  lfs.setConf(conf);
  assertFalse("Must not end up with sub_dir/sub_dir",
      lfs.exists(new Path(OUT_SUB_DIR, SUB_DIR)));

  // validate output
  validateContent(OUT_SUB_DIR);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Author: naver, Project: hadoop, Lines: 71, Source: TestFileOutputCommitter.java

Example 3: setup
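A JUnit @Before setup from the accumulo-wikisearch project: it configures a RawLocalFileSystem with setConf so that a Wikipedia XML fixture can be read as an input split and fed through a mapper into a mock Accumulo instance.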

import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this method depends on
@Before
public void setup() throws Exception {
  
  Logger.getLogger(AbstractQueryLogic.class).setLevel(Level.DEBUG);
  Logger.getLogger(QueryLogic.class).setLevel(Level.DEBUG);
  Logger.getLogger(RangeCalculator.class).setLevel(Level.DEBUG);
  
  conf.set(AggregatingRecordReader.START_TOKEN, "<page>");
  conf.set(AggregatingRecordReader.END_TOKEN, "</page>");
  conf.set(WikipediaConfiguration.TABLE_NAME, TABLE_NAME);
  conf.set(WikipediaConfiguration.NUM_PARTITIONS, "1");
  conf.set(WikipediaConfiguration.NUM_GROUPS, "1");
  
  MockInstance i = new MockInstance();
  c = i.getConnector("root", new PasswordToken(""));
  WikipediaIngester.createTables(c.tableOperations(), TABLE_NAME, false);
  for (String table : TABLE_NAMES) {
    writerMap.put(new Text(table), c.createBatchWriter(table, 1000L, 1000L, 1));
  }
  
  TaskAttemptID id = new TaskAttemptID();
  TaskAttemptContext context = new TaskAttemptContext(conf, id);
  
  RawLocalFileSystem fs = new RawLocalFileSystem();
  fs.setConf(conf);
  
  URL url = ClassLoader.getSystemResource("enwiki-20110901-001.xml");
  Assert.assertNotNull(url);
  File data = new File(url.toURI());
  Path tmpFile = new Path(data.getAbsolutePath());
  
  // Setup the Mapper
  WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(tmpFile, 0, fs.pathToFile(tmpFile).length(), null), 0);
  AggregatingRecordReader rr = new AggregatingRecordReader();
  Path ocPath = new Path(tmpFile, "oc");
  OutputCommitter oc = new FileOutputCommitter(ocPath, context);
  fs.deleteOnExit(ocPath);
  StandaloneStatusReporter sr = new StandaloneStatusReporter();
  rr.initialize(split, context);
  MockAccumuloRecordWriter rw = new MockAccumuloRecordWriter();
  WikipediaMapper mapper = new WikipediaMapper();
  
  // Load data into Mock Accumulo
  Mapper<LongWritable,Text,Text,Mutation>.Context con = mapper.new Context(conf, id, rr, rw, oc, sr, split);
  mapper.run(con);
  
  // Flush and close record writers.
  rw.close(context);
  
  table = new QueryLogic();
  table.setMetadataTableName(METADATA_TABLE_NAME);
  table.setTableName(TABLE_NAME);
  table.setIndexTableName(INDEX_TABLE_NAME);
  table.setReverseIndexTableName(RINDEX_TABLE_NAME);
  table.setUseReadAheadIterator(false);
  table.setUnevaluatedFields(Collections.singletonList("TEXT"));
}
 
Author: apache, Project: accumulo-wikisearch, Lines: 58, Source: TestQueryLogic.java

Example 4: setUpOnce
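Another @BeforeClass hook from spork-streaming, similar to Example 1: after configuring the RawLocalFileSystem and wrapping it, it drops any existing output and writes two rows through a ColumnGroup.Writer.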

import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this method depends on
@BeforeClass
public static void setUpOnce() throws IOException, ParseException {
  // set default file system to local file system
  conf.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");

  // must set a conf here to the underlying FS, or it barks
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  rawLFS.setConf(conf);
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), outputFile);
  System.out.println("output file: " + path);
  
  if (fs.exists(path)) {
      ColumnGroup.drop(path, conf);
  }

  schema = new Schema("a:string,b:string,c:string,d:string,e:string,f:string,g:string");

  ColumnGroup.Writer writer = new ColumnGroup.Writer(path, schema, false, path.getName(),
      "pig", "gz", null, null, (short) -1, true, conf);
  TableInserter ins = writer.getInserter("part0", true);

  // row 1
  Tuple row = TypesUtils.createTuple(writer.getSchema());
  row.set(0, "a1");
  row.set(1, "b1");
  row.set(2, "c1");
  row.set(3, "d1");
  row.set(4, "e1");
  row.set(5, "f1");
  row.set(6, "g1");
  ins.insert(new BytesWritable("k1".getBytes()), row);

  // row 2
  TypesUtils.resetTuple(row);
  row.set(0, "a2");
  row.set(1, "b2");
  row.set(2, "c2");
  row.set(3, "d2");
  row.set(4, "e2");
  row.set(5, "f2");
  row.set(6, "g2");
  ins.insert(new BytesWritable("k2".getBytes()), row);
  ins.close();

  writer.close();
}
 
Author: sigmoidanalytics, Project: spork-streaming, Lines: 48, Source: TestColumnGroupProjections.java


Note: The org.apache.hadoop.fs.RawLocalFileSystem.setConf examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please consult the corresponding project's License before redistributing or using the code. Do not reproduce without permission.