当前位置: 首页>>代码示例>>Java>>正文


Java MapDriver.newMapDriver方法代码示例

本文整理汇总了Java中org.apache.hadoop.mrunit.mapreduce.MapDriver.newMapDriver方法的典型用法代码示例。如果您正苦于以下问题:Java MapDriver.newMapDriver方法的具体用法?Java MapDriver.newMapDriver怎么用?Java MapDriver.newMapDriver使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.mrunit.mapreduce.MapDriver的用法示例。


在下文中一共展示了MapDriver.newMapDriver方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@BeforeClass
public void setUp() throws IOException
{
  // Wire the phase-two bootstrap mapper into an MRUnit driver and point it
  // at the test configuration resource on the classpath.
  BootstrapPhaseTwoMapper mapper = new BootstrapPhaseTwoMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  Configuration config = mapDriver.getConfiguration();
  // Resolve the resource URL once; the original resolved it twice.
  String confResource = ClassLoader.getSystemResource(CONF_FILE).toString();
  config.set(StarTreeBootstrapPhaseTwoConstants.STAR_TREE_BOOTSTRAP_PHASE2_CONFIG_PATH.toString(), confResource);

  // Decode the star-tree config; close the input stream explicitly so the
  // filesystem handle is not leaked (the original never closed it).
  Path configPath = new Path(confResource);
  FileSystem fs = FileSystem.get(config);
  StarTreeConfig starTreeConfig;
  try (java.io.InputStream in = fs.open(configPath)) {
    starTreeConfig = StarTreeConfig.decode(in);
  }
  starTreeBootstrapConfig = StarTreeBootstrapPhaseTwoConfig.fromStarTreeConfig(starTreeConfig);
  thirdEyeRoot = System.getProperty("java.io.tmpdir");
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_OUTPUT_PATH.toString(), thirdEyeRoot + File.separator + "startree_generation");

  // The reducer runs under its own driver with its own Configuration, so the
  // shared keys must be set again on the reduce-side configuration.
  BootstrapPhaseTwoReducer reducer = new BootstrapPhaseTwoReducer();
  reduceDriver = ReduceDriver.newReduceDriver(reducer);
  config = reduceDriver.getConfiguration();
  config.set(StarTreeBootstrapPhaseTwoConstants.STAR_TREE_BOOTSTRAP_PHASE2_CONFIG_PATH.toString(), confResource);
  config.set(StarTreeBootstrapPhaseTwoConstants.STAR_TREE_BOOTSTRAP_PHASE2_OUTPUT_PATH.toString(), thirdEyeRoot + File.separator + "startree_bootstrap_phase2");
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_OUTPUT_PATH.toString(), thirdEyeRoot + File.separator + "startree_generation");
}
 
开发者ID:Hanmourang,项目名称:Pinot,代码行数:23,代码来源:TestStarTreeBootstrapPhase2.java

示例2: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() {
	// Build the DataCleaner configuration and the analysis job under test,
	// then serialize both so they can be handed to the mapper through the
	// Hadoop configuration.
	AnalyzerBeansConfiguration beansConfiguration = buildAnalyzerBeansConfiguration();
	AnalysisJob job = buildAnalysisJob(beansConfiguration);
	String serializedDatastores = ConfigurationSerializer
			.serializeAnalyzerBeansConfigurationDataStores(beansConfiguration);
	String serializedJobXml = ConfigurationSerializer
			.serializeAnalysisJobToXml(beansConfiguration, job);

	mapDriver = MapDriver.newMapDriver(new HBaseTableMapper());
	// Register the HBase and Hadoop serializations the mapper's input/output
	// types rely on.
	mapDriver.getConfiguration().set("io.serializations",
			"org.apache.hadoop.hbase.mapreduce.ResultSerialization,"
			+ "org.apache.hadoop.hbase.mapreduce.KeyValueSerialization,"
			+ "org.apache.hadoop.hbase.mapreduce.MutationSerialization,"
			+ "org.apache.hadoop.io.serializer.JavaSerialization,"
			+ "org.apache.hadoop.io.serializer.WritableSerialization");
	mapDriver.getConfiguration().set(
			HBaseTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY,
			serializedDatastores);
	mapDriver.getConfiguration().set(HBaseTool.ANALYSIS_JOB_XML_KEY,
			serializedJobXml);
}
 
开发者ID:tomaszguzialek,项目名称:hadoop-datacleaner,代码行数:26,代码来源:HBaseTableMapperTest.java

示例3: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
/**
 * Sets up the FlintHadoop map-side test fixture: wraps {@code FlintHadoop.FlintMap}
 * in an MRUnit driver, directs its output at a temporary directory, and
 * pre-computes the expected Flint check results for the two fixture PDFs.
 *
 * @throws InstantiationException if the mapper cannot be instantiated
 * @throws IllegalAccessException if the mapper constructor is not accessible
 */
@Before
public void setUp() throws InstantiationException, IllegalAccessException {
    // Only the map side is exercised here; the dead commented-out reducer and
    // map-reduce driver setup has been removed.
    FlintHadoop.FlintMap mapper = new FlintHadoop.FlintMap();
    mapDriver = MapDriver.newMapDriver(mapper);

    // The mapper reads its output location from the legacy mapred.output.dir key.
    mapDriver.getConfiguration().set("mapred.output.dir", tmpDir.getAbsolutePath());

    // Expected results: run Flint directly on each fixture PDF and keep the
    // first check result of each.
    testPdf1CheckResult = new Flint().check(new File(testPdf1Path)).get(0);
    testPdf2CheckResult = new Flint().check(new File(testPdf2Path)).get(0);
}
 
开发者ID:openpreserve,项目名称:flint,代码行数:21,代码来源:FlintHadoopTest.java

示例4: init

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@BeforeClass
public static void init() throws IOException {
  // One-time fixture: an MRUnit driver around LindenMapper whose
  // configuration carries a single index shard.
  mDriver = MapDriver.newMapDriver(new LindenMapper());
  final int numShards = 1;
  Shard.setIndexShards(mDriver.getConfiguration(),
      LindenJob.createShards(indexPath, numShards));
}
 
开发者ID:XiaoMi,项目名称:linden,代码行数:9,代码来源:LindenMapredTest.java

示例5: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() {
  // Rebuild all three MRUnit drivers before every test so no driver state
  // leaks between test cases.
  CopyMapper copyMapper = new Migration.CopyMapper();
  IdentityReducer identityReducer = new Migration.IdentityReducer();
  mapDriver = MapDriver.newMapDriver(copyMapper);
  reduceDriver = ReduceDriver.newReduceDriver(identityReducer);
  mapReduceDriver = MapReduceDriver.newMapReduceDriver(copyMapper, identityReducer);
}
 
开发者ID:XiaoMi,项目名称:galaxy-fds-migration-tool,代码行数:9,代码来源:MigrationTest.java

示例6: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp()
{
    // Fresh drivers for the SMS CDR job: map-only, reduce-only, and the
    // full map-reduce pipeline.
    SMSCDRMapper smsMapper = new SMSCDRMapper();
    SMSCDRReducer smsReducer = new SMSCDRReducer();
    mapDriver = MapDriver.newMapDriver(smsMapper);
    reduceDriver = ReduceDriver.newReduceDriver(smsReducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(smsMapper, smsReducer);
}
 
开发者ID:dkpro,项目名称:dkpro-c4corpus,代码行数:10,代码来源:MRUnitTest.java

示例7: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
    // Map driver around the bill-to-turnover-tmp mapper; register the Avro
    // schemas MRUnit needs to (de)serialize the key types.
    mapDriver = MapDriver.newMapDriver(new Bill2PostcodeCategoryTurnoverTmpMapper());

    AvroTestUtil schemas = new AvroTestUtil(mapDriver.getConfiguration());
    schemas.setInputKeySchema(SerializableBill.SCHEMA$);
    schemas.setMapOutputKeySchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
}
 
开发者ID:ch4mpy,项目名称:hadoop2,代码行数:10,代码来源:Bill2PostcodeCategoryTurnoverTmpMapperTest.java

示例8: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
	// Instantiate the N3 mapper/reducer pair once and wrap them in the three
	// MRUnit drivers (map, reduce, end-to-end).
	Mapper<Text, Text, Text, Text> n3Mapper = new N3Mapper();
	Reducer<Text, Text, Text, Text> n3Reducer = new N3Reducer();
	mapDriver = MapDriver.newMapDriver(n3Mapper);
	reduceDriver = ReduceDriver.newReduceDriver(n3Reducer);
	mapReduceDriver = MapReduceDriver.newMapReduceDriver(n3Mapper, n3Reducer);
}
 
开发者ID:therelaxist,项目名称:spring-usc,代码行数:10,代码来源:TestN3MapReduce.java

示例9: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@BeforeSuite
public void setUp() throws IOException
{
  // MRUnit driver around the star-tree generation mapper, configured from the
  // test resource on the classpath.
  StarTreeGenerationMapper mapper = new StarTreeGenerationMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  Configuration config = mapDriver.getConfiguration();
  // Resolve the resource URL once; the original resolved it twice.
  String confResource = ClassLoader.getSystemResource(CONF_FILE).toString();
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_CONFIG_PATH.toString(), confResource);

  // Decode the star-tree config; close the input stream explicitly so the
  // filesystem handle is not leaked (the original never closed it).
  Path configPath = new Path(confResource);
  FileSystem fs = FileSystem.get(config);
  StarTreeConfig starTreeConfig;
  try (java.io.InputStream in = fs.open(configPath)) {
    starTreeConfig = StarTreeConfig.decode(in);
  }
  starTreeGenerationConfig = StarTreeGenerationConfig.fromStarTreeConfig(starTreeConfig);
  thirdEyeRoot = System.getProperty("java.io.tmpdir");
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_OUTPUT_PATH.toString(), thirdEyeRoot + File.separator + "startree_generation");
}
 
开发者ID:Hanmourang,项目名称:Pinot,代码行数:15,代码来源:TestStarTreeGeneration.java

示例10: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
	// Pair the identity N3 mapper with the N3-to-JSON reducer and build one
	// driver per stage plus the combined pipeline driver.
	Mapper<Writable, Text, Text, Text> identityMapper = new IdentityN3Mapper();
	Reducer<Text, Text, Text, Text> jsonReducer = new N3ToJSONReducer();
	mapDriver = MapDriver.newMapDriver(identityMapper);
	reduceDriver = ReduceDriver.newReduceDriver(jsonReducer);
	mapReduceDriver = MapReduceDriver.newMapReduceDriver(identityMapper, jsonReducer);
}
 
开发者ID:therelaxist,项目名称:spring-usc,代码行数:10,代码来源:TestIdentityN3ToJSONReducer.java

示例11: before

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void before() {
    // Driver around the CSV field counter: count field index 2, filtering
    // against the postcode referential distributed via the cache file.
    mapDriver = MapDriver.newMapDriver(new CsvFieldCountMapper());
    Configuration configuration = mapDriver.getConfiguration();
    configuration.setInt(CsvFieldCountMapper.CSV_FIELD_IDX, 2);
    configuration.set(CsvFieldCountMapper.FILTER_CACHE_FILE_NAME, "fr_urban_postcodes.txt");
    mapDriver.addCacheFile(new File("target/test-classes/referential/fr_urban_postcodes.txt").toURI());
}
 
开发者ID:ch4mpy,项目名称:hadoop2,代码行数:10,代码来源:CsvFieldCountMapperTest.java

示例12: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
	// Identity JSON mapper feeding the JSON reducer; fresh map, reduce, and
	// end-to-end drivers for every test.
	Mapper<Writable, Text, Text, Text> identityMapper = new IdentityJSONMapper();
	Reducer<Text, Text, Text, Text> jsonReducer = new JSONReducer();
	mapDriver = MapDriver.newMapDriver(identityMapper);
	reduceDriver = ReduceDriver.newReduceDriver(jsonReducer);
	mapReduceDriver = MapReduceDriver.newMapReduceDriver(identityMapper, jsonReducer);
}
 
开发者ID:therelaxist,项目名称:spring-usc,代码行数:10,代码来源:TestJSONIdentityMapReduce.java

示例13: before

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void before() throws IOException {
    // Map driver for the bill-by-product-id mapper; declare the Avro schemas
    // for the input key and the map output value (same schema on both sides).
    mapDriver = MapDriver.newMapDriver(new BillByProductIdAvroMapper());

    AvroTestUtil schemas = new AvroTestUtil(mapDriver.getConfiguration());
    schemas.setInputKeySchema(SerializableBill.SCHEMA$);
    schemas.setMapOutputValueSchema(SerializableBill.SCHEMA$);
}
 
开发者ID:ch4mpy,项目名称:hadoop2,代码行数:9,代码来源:BillByProductIdAvroMapperTest.java

示例14: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
    // Driver for re-keying turnover-tmp records by product id; the same Avro
    // schema serves as input key and map output value.
    mapDriver = MapDriver.newMapDriver(new PostcodeCategoryTurnoverTmpByProductIdMapper());
    AvroTestUtil schemas = new AvroTestUtil(mapDriver.getConfiguration());
    schemas.setInputKeySchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
    schemas.setMapOutputValueSchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
}
 
开发者ID:ch4mpy,项目名称:hadoop2,代码行数:10,代码来源:PostcodeCategoryTurnoverTmpByProductIdMapperTest.java

示例15: setUp

import org.apache.hadoop.mrunit.mapreduce.MapDriver; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
    // Driver for grouping turnover-tmp records by postcode/category; input
    // uses the tmp schema, map output the final turnover schema.
    PostcodeCategoryTurnoverTmpByPostcodeCategoryMapper mapper =
            new PostcodeCategoryTurnoverTmpByPostcodeCategoryMapper();
    mapDriver = MapDriver.newMapDriver(mapper);

    AvroTestUtil schemas = new AvroTestUtil(mapDriver.getConfiguration());
    schemas.setInputKeySchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
    schemas.setMapOutputValueSchema(PostcodeCategoryTurnover.SCHEMA$);
}
 
开发者ID:ch4mpy,项目名称:hadoop2,代码行数:9,代码来源:PostcodeCategoryTurnoverTmpByPostcodeCategoryMapperTest.java


注:本文中的org.apache.hadoop.mrunit.mapreduce.MapDriver.newMapDriver方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。