This article collects typical usage examples of the Java method org.apache.hadoop.mrunit.mapreduce.MapDriver.newMapDriver. If you are wondering what MapDriver.newMapDriver does, how to call it, or want to see it in context, the curated examples below may help. You can also read further about the enclosing class, org.apache.hadoop.mrunit.mapreduce.MapDriver.
The 15 code examples of MapDriver.newMapDriver below are sorted by popularity by default.
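Before the examples, here is a minimal, self-contained sketch of the full MRUnit round trip that the setUp methods below feed into: newMapDriver wraps a mapper, withInput/withOutput declare the expected record flow, and runTest executes the mapper and verifies the outputs in order. The WordCountMapper class here is a hypothetical placeholder, not taken from any of the examples below.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.junit.Before;
import org.junit.Test;

public class WordCountMapperTest {
  // Hypothetical mapper: emits (token, 1) for every whitespace-separated token.
  static class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      for (String token : value.toString().split("\\s+")) {
        context.write(new Text(token), ONE);
      }
    }
  }

  private MapDriver<LongWritable, Text, Text, IntWritable> mapDriver;

  @Before
  public void setUp() {
    // newMapDriver wraps the mapper and gives the test its own Configuration
    mapDriver = MapDriver.newMapDriver(new WordCountMapper());
  }

  @Test
  public void emitsOneCountPerToken() throws IOException {
    mapDriver.withInput(new LongWritable(0), new Text("hadoop mrunit hadoop"))
        .withOutput(new Text("hadoop"), new IntWritable(1))
        .withOutput(new Text("mrunit"), new IntWritable(1))
        .withOutput(new Text("hadoop"), new IntWritable(1))
        .runTest();
  }
}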
Example 1: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@BeforeClass
public void setUp() throws IOException
{
  BootstrapPhaseTwoMapper mapper = new BootstrapPhaseTwoMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  Configuration config = mapDriver.getConfiguration();
  config.set(StarTreeBootstrapPhaseTwoConstants.STAR_TREE_BOOTSTRAP_PHASE2_CONFIG_PATH.toString(),
      ClassLoader.getSystemResource(CONF_FILE).toString());
  Path configPath = new Path(ClassLoader.getSystemResource(CONF_FILE).toString());
  FileSystem fs = FileSystem.get(config);
  StarTreeConfig starTreeConfig = StarTreeConfig.decode(fs.open(configPath));
  starTreeBootstrapConfig = StarTreeBootstrapPhaseTwoConfig.fromStarTreeConfig(starTreeConfig);
  thirdEyeRoot = System.getProperty("java.io.tmpdir");
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_OUTPUT_PATH.toString(),
      thirdEyeRoot + File.separator + "startree_generation");
  BootstrapPhaseTwoReducer reducer = new BootstrapPhaseTwoReducer();
  reduceDriver = ReduceDriver.newReduceDriver(reducer);
  config = reduceDriver.getConfiguration();
  config.set(StarTreeBootstrapPhaseTwoConstants.STAR_TREE_BOOTSTRAP_PHASE2_CONFIG_PATH.toString(),
      ClassLoader.getSystemResource(CONF_FILE).toString());
  config.set(StarTreeBootstrapPhaseTwoConstants.STAR_TREE_BOOTSTRAP_PHASE2_OUTPUT_PATH.toString(),
      thirdEyeRoot + File.separator + "startree_bootstrap_phase2");
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_OUTPUT_PATH.toString(),
      thirdEyeRoot + File.separator + "startree_generation");
}
Example 2: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() {
  AnalyzerBeansConfiguration analyzerBeansConfiguration = buildAnalyzerBeansConfiguration();
  AnalysisJob analysisJob = buildAnalysisJob(analyzerBeansConfiguration);
  String analyzerBeansConfigurationDatastores = ConfigurationSerializer
      .serializeAnalyzerBeansConfigurationDataStores(analyzerBeansConfiguration);
  String analysisJobXml = ConfigurationSerializer
      .serializeAnalysisJobToXml(analyzerBeansConfiguration, analysisJob);
  HBaseTableMapper hBaseTableMapper = new HBaseTableMapper();
  mapDriver = MapDriver.newMapDriver(hBaseTableMapper);
  mapDriver.getConfiguration().set("io.serializations",
      "org.apache.hadoop.hbase.mapreduce.ResultSerialization,"
          + "org.apache.hadoop.hbase.mapreduce.KeyValueSerialization,"
          + "org.apache.hadoop.hbase.mapreduce.MutationSerialization,"
          + "org.apache.hadoop.io.serializer.JavaSerialization,"
          + "org.apache.hadoop.io.serializer.WritableSerialization");
  mapDriver.getConfiguration().set(HBaseTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY,
      analyzerBeansConfigurationDatastores);
  mapDriver.getConfiguration().set(HBaseTool.ANALYSIS_JOB_XML_KEY, analysisJobXml);
}
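A note on the io.serializations override above: MRUnit copies every input and output record through Hadoop's configured serialization factory, and HBase Result, KeyValue, and Mutation objects are not Writable, so their dedicated serialization classes have to be registered alongside the stock Java and Writable serializers for the driver to round-trip the records.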
Example 3: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
/**
 * Set up the FlintHadoop tests
 * @throws InstantiationException
 * @throws IllegalAccessException
 */
@Before
public void setUp() throws InstantiationException, IllegalAccessException {
  FlintHadoop.FlintMap mapper = new FlintHadoop.FlintMap();
  // FlintHadoop.FlintReduce reducer = new FlintHadoop.FlintReduce();
  mapDriver = MapDriver.newMapDriver(mapper);
  // reduceDriver = ReduceDriver.newReduceDriver(reducer);
  // mapRedDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
  mapDriver.getConfiguration().set("mapred.output.dir", tmpDir.getAbsolutePath());
  // reduceDriver.getConfiguration().set("mapred.output.dir", tmpDir.getAbsolutePath());
  // mapRedDriver.getConfiguration().set("mapred.output.dir", tmpDir.getAbsolutePath());
  testPdf1CheckResult = new Flint().check(new File(testPdf1Path)).get(0);
  testPdf2CheckResult = new Flint().check(new File(testPdf2Path)).get(0);
}
Example 4: init
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@BeforeClass
public static void init() throws IOException {
  LindenMapper mapper = new LindenMapper();
  mDriver = MapDriver.newMapDriver(mapper);
  int numShards = 1;
  Shard[] shards = LindenJob.createShards(indexPath, numShards);
  Shard.setIndexShards(mDriver.getConfiguration(), shards);
}
Example 5: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() {
  Migration.CopyMapper mapper = new Migration.CopyMapper();
  Migration.IdentityReducer reducer = new Migration.IdentityReducer();
  mapDriver = MapDriver.newMapDriver(mapper);
  reduceDriver = ReduceDriver.newReduceDriver(reducer);
  mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 6: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp()
{
  SMSCDRMapper mapper = new SMSCDRMapper();
  SMSCDRReducer reducer = new SMSCDRReducer();
  mapDriver = MapDriver.newMapDriver(mapper);
  reduceDriver = ReduceDriver.newReduceDriver(reducer);
  mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 7: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  Bill2PostcodeCategoryTurnoverTmpMapper mapper = new Bill2PostcodeCategoryTurnoverTmpMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  AvroTestUtil avroTestUtil = new AvroTestUtil(mapDriver.getConfiguration());
  avroTestUtil.setInputKeySchema(SerializableBill.SCHEMA$);
  avroTestUtil.setMapOutputKeySchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
}
Example 8: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  Mapper<Text, Text, Text, Text> mapper = new N3Mapper();
  Reducer<Text, Text, Text, Text> reducer = new N3Reducer();
  mapDriver = MapDriver.newMapDriver(mapper);
  reduceDriver = ReduceDriver.newReduceDriver(reducer);
  mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 9: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@BeforeSuite
public void setUp() throws IOException
{
  StarTreeGenerationMapper mapper = new StarTreeGenerationMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  Configuration config = mapDriver.getConfiguration();
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_CONFIG_PATH.toString(),
      ClassLoader.getSystemResource(CONF_FILE).toString());
  Path configPath = new Path(ClassLoader.getSystemResource(CONF_FILE).toString());
  FileSystem fs = FileSystem.get(config);
  StarTreeConfig starTreeConfig = StarTreeConfig.decode(fs.open(configPath));
  starTreeGenerationConfig = StarTreeGenerationConfig.fromStarTreeConfig(starTreeConfig);
  thirdEyeRoot = System.getProperty("java.io.tmpdir");
  config.set(StarTreeGenerationConstants.STAR_TREE_GEN_OUTPUT_PATH.toString(),
      thirdEyeRoot + File.separator + "startree_generation");
}
Example 10: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  Mapper<Writable, Text, Text, Text> mapper = new IdentityN3Mapper();
  Reducer<Text, Text, Text, Text> reducer = new N3ToJSONReducer();
  mapDriver = MapDriver.newMapDriver(mapper);
  reduceDriver = ReduceDriver.newReduceDriver(reducer);
  mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 11: before
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void before() {
  CsvFieldCountMapper mapper = new CsvFieldCountMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  Configuration conf = mapDriver.getConfiguration();
  conf.setInt(CsvFieldCountMapper.CSV_FIELD_IDX, 2);
  conf.set(CsvFieldCountMapper.FILTER_CACHE_FILE_NAME, "fr_urban_postcodes.txt");
  mapDriver.addCacheFile(new File("target/test-classes/referential/fr_urban_postcodes.txt").toURI());
}
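The addCacheFile call above is MRUnit's stand-in for the distributed cache: it makes the local test resource visible to the mapper under the same cache-file API a real job would use, so code that resolves FILTER_CACHE_FILE_NAME during mapper setup can run without a cluster.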
Example 12: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  Mapper<Writable, Text, Text, Text> mapper = new IdentityJSONMapper();
  Reducer<Text, Text, Text, Text> reducer = new JSONReducer();
  mapDriver = MapDriver.newMapDriver(mapper);
  reduceDriver = ReduceDriver.newReduceDriver(reducer);
  mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 13: before
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void before() throws IOException {
  BillByProductIdAvroMapper mapper = new BillByProductIdAvroMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  AvroTestUtil avroTestUtil = new AvroTestUtil(mapDriver.getConfiguration());
  avroTestUtil.setInputKeySchema(SerializableBill.SCHEMA$);
  avroTestUtil.setMapOutputValueSchema(SerializableBill.SCHEMA$);
}
Example 14: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  PostcodeCategoryTurnoverTmpByProductIdMapper mapper = new PostcodeCategoryTurnoverTmpByProductIdMapper();
  mapDriver = MapDriver.newMapDriver(mapper);
  AvroTestUtil avroTestUtil = new AvroTestUtil(mapDriver.getConfiguration());
  avroTestUtil.setInputKeySchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
  avroTestUtil.setMapOutputValueSchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
}
Example 15: setUp
import org.apache.hadoop.mrunit.mapreduce.MapDriver; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  mapDriver = MapDriver.newMapDriver(new PostcodeCategoryTurnoverTmpByPostcodeCategoryMapper());
  AvroTestUtil avroTestUtil = new AvroTestUtil(mapDriver.getConfiguration());
  avroTestUtil.setInputKeySchema(PostcodeCategoryTurnoverTmp.SCHEMA$);
  avroTestUtil.setMapOutputValueSchema(PostcodeCategoryTurnover.SCHEMA$);
}