This article collects typical usage examples of the Java method org.apache.hadoop.mrunit.mapreduce.MapReduceDriver.newMapReduceDriver. If you are wondering what MapReduceDriver.newMapReduceDriver does, how to call it, or where to find examples of it, the curated method samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mrunit.mapreduce.MapReduceDriver.
The sections below present 15 code examples of MapReduceDriver.newMapReduceDriver, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java examples.
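Before the project-specific examples, here is a minimal, self-contained sketch of the typical newMapReduceDriver workflow. It assumes only MRUnit 1.x and JUnit 4 on the classpath; the identity behavior comes from the stock Hadoop Mapper and Reducer base classes, and the class and test names are illustrative:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver;
import org.junit.Before;
import org.junit.Test;

public class IdentityPipelineTest {

    private MapReduceDriver<LongWritable, Text, LongWritable, Text, LongWritable, Text> mapReduceDriver;

    @Before
    public void setUp() {
        // The base Mapper and Reducer classes pass records through unchanged.
        Mapper<LongWritable, Text, LongWritable, Text> mapper =
                new Mapper<LongWritable, Text, LongWritable, Text>();
        Reducer<LongWritable, Text, LongWritable, Text> reducer =
                new Reducer<LongWritable, Text, LongWritable, Text>();
        mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
    }

    @Test
    public void passesRecordsThrough() throws Exception {
        // runTest() executes map, shuffle/sort, and reduce, then checks the expected output.
        mapReduceDriver
                .withInput(new LongWritable(1), new Text("hello"))
                .withOutput(new LongWritable(1), new Text("hello"))
                .runTest();
    }
}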
Example 1: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() {
    AnalyzerBeansConfiguration analyzerBeansConfiguration = buildAnalyzerBeansConfigurationLocalFS(CSV_FILE_PATH);
    analysisJob = buildAnalysisJob(analyzerBeansConfiguration, CSV_FILE_PATH);
    String analyzerBeansConfigurationDatastores = ConfigurationSerializer
            .serializeAnalyzerBeansConfigurationDataStores(analyzerBeansConfiguration);
    String analysisJobXml = ConfigurationSerializer.serializeAnalysisJobToXml(analyzerBeansConfiguration,
            analysisJob);
    FlatFileMapper flatFileMapper = new FlatFileMapper();
    FlatFileReducer flatFileReducer = new FlatFileReducer();
    mapDriver = MapDriver.newMapDriver(flatFileMapper);
    mapDriver.getConfiguration().set(FlatFileTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY,
            analyzerBeansConfigurationDatastores);
    mapDriver.getConfiguration().set(FlatFileTool.ANALYSIS_JOB_XML_KEY, analysisJobXml);
    reduceDriver = ReduceDriver.newReduceDriver(flatFileReducer);
    reduceDriver.getConfiguration().set(FlatFileTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY,
            analyzerBeansConfigurationDatastores);
    reduceDriver.getConfiguration().set(FlatFileTool.ANALYSIS_JOB_XML_KEY, analysisJobXml);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(flatFileMapper, flatFileReducer);
}
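Note that this setUp configures mapDriver and reduceDriver but not the combined driver, which holds its own Configuration object. If the full pipeline is run, the same keys would presumably need to be set there too; a sketch of that follow-up (an assumption, not part of the original test):

mapReduceDriver.getConfiguration().set(FlatFileTool.ANALYZER_BEANS_CONFIGURATION_DATASTORES_KEY,
        analyzerBeansConfigurationDatastores);
mapReduceDriver.getConfiguration().set(FlatFileTool.ANALYSIS_JOB_XML_KEY, analysisJobXml);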
Example 2: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() {
    // CopyMapper and IdentityReducer are nested classes of Migration, so the
    // declared types are qualified with the enclosing class as well.
    Migration.CopyMapper mapper = new Migration.CopyMapper();
    Migration.IdentityReducer reducer = new Migration.IdentityReducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 3: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() {
    SMSCDRMapper mapper = new SMSCDRMapper();
    SMSCDRReducer reducer = new SMSCDRReducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 4: setup
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setup() {
    TestMapper mapper = new TestMapper();
    TestReducer reducer = new TestReducer();
    driver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
    Configuration conf = driver.getConfiguration();
    // configureMapOutputKey expects a Job, but MRUnit only exposes a Configuration,
    // so a mock Job is wired to hand back the driver's Configuration.
    Job job = mock(Job.class);
    when(job.getConfiguration()).thenReturn(conf);
    CompositeSortKeySerialization.configureMapOutputKey(job, Text.class, IntWritable.class);
    // MRUnit does not pick the grouping comparator up from the configuration the way
    // a real MapReduce job would, so it is set on the driver directly:
    driver.setKeyGroupingComparator(new CompositeSortKey.GroupingComparator<Text, IntWritable>());
}
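For context: the grouping comparator only controls which map output keys are grouped into a single reduce() call; the full key still determines sort order, which is what makes secondary sort work. The sketch below illustrates the mechanism with a toy comparator over plain Text keys (an illustration only, assuming org.apache.hadoop.io.WritableComparator and WritableComparable; it is not the CompositeSortKey API used above):

// Groups Text keys by the prefix before ':', so "a:1" and "a:2" reach one
// reduce() call while still being sorted by the full key.
public static class PrefixGroupingComparator extends WritableComparator {
    public PrefixGroupingComparator() {
        super(Text.class, true); // true: instantiate key objects for compare()
    }

    @Override
    @SuppressWarnings("rawtypes")
    public int compare(WritableComparable a, WritableComparable b) {
        String left = a.toString().split(":", 2)[0];
        String right = b.toString().split(":", 2)[0];
        return left.compareTo(right);
    }
}

// Installed on the driver the same way as above:
// driver.setKeyGroupingComparator(new PrefixGroupingComparator());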
Example 5: before
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void before() {
    PostcodeMapper mapper = new PostcodeMapper();
    PostcodeReducer combiner = new PostcodeReducer();
    PostcodeReducer reducer = new PostcodeReducer();
    // The three-argument overload takes the combiner as the third parameter.
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer, combiner);
}
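MRUnit runs the combiner on the map output before the shuffle, just as a real job would. A self-contained sketch of that behavior using stock Hadoop classes (org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; the types and values below are illustrative assumptions, not the PostcodeMapper/PostcodeReducer above):

@Test
public void combinerPreAggregatesMapOutput() throws Exception {
    // The base Mapper class is an identity mapper: records pass through unchanged.
    Mapper<Text, LongWritable, Text, LongWritable> mapper =
            new Mapper<Text, LongWritable, Text, LongWritable>();
    LongSumReducer<Text> combiner = new LongSumReducer<Text>();
    LongSumReducer<Text> reducer = new LongSumReducer<Text>();
    MapReduceDriver<Text, LongWritable, Text, LongWritable, Text, LongWritable> driver =
            MapReduceDriver.newMapReduceDriver(mapper, reducer, combiner);
    // Two (key, 1) pairs: the combiner pre-sums them, the reducer sums the partials.
    driver.withInput(new Text("NW1"), new LongWritable(1))
          .withInput(new Text("NW1"), new LongWritable(1))
          .withOutput(new Text("NW1"), new LongWritable(2))
          .runTest();
}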
Example 6: before
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void before() throws URISyntaxException {
    CsvFieldCountMapper mapper = new CsvFieldCountMapper();
    LongSumReducer<Text> combiner = new LongSumReducer<Text>();
    LongSumReducer<Text> reducer = new LongSumReducer<Text>();
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer, combiner);
    Configuration conf = mapReduceDriver.getConfiguration();
    conf.setInt(CsvFieldCountMapper.CSV_FIELD_IDX, 2);
    conf.set(CsvFieldCountMapper.FILTER_CACHE_FILE_NAME, "fr_urban_postcodes.txt");
    // Register the referential file with MRUnit's simulated distributed cache.
    mapReduceDriver.addCacheFile(new File("target/test-classes/referential/fr_urban_postcodes.txt").toURI());
}
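On the mapper side, a file registered through addCacheFile() is usually resolved in setup(). The fragment below sketches how a mapper like CsvFieldCountMapper might locate the cached filter file; the real implementation is not shown in this example, so treat the body as an assumption:

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    // getCacheFiles() returns the URIs registered via addCacheFile().
    String wanted = context.getConfiguration().get(FILTER_CACHE_FILE_NAME);
    for (URI uri : context.getCacheFiles()) {
        if (uri.getPath().endsWith(wanted)) {
            // load the postcode filter set from the locally available copy...
        }
    }
}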
Example 7: before
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void before() throws URISyntaxException {
    CsvFieldCountMapper mapper = new CsvFieldCountMapper();
    LongSumReducer<Text> combiner = new LongSumReducer<Text>();
    LongSumReducer<Text> reducer = new LongSumReducer<Text>();
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer, combiner);
    Configuration conf = mapReduceDriver.getConfiguration();
    conf.setInt(CsvFieldCountMapper.CSV_FIELD_IDX, 2);
}
Example 8: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
    createTestMetadata();
    // Hack for the distributed cache: copy the test metadata to a fixed local path.
    FileUtils.deleteDirectory(new File("./meta"));
    FileUtils.copyDirectory(new File(getTestConfig().getMetadataUrl().toString()), new File("./meta"));
    NDCuboidMapper mapper = new NDCuboidMapper();
    CuboidReducer reducer = new CuboidReducer();
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
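Because the metadata is copied to a fixed ./meta directory, a matching cleanup keeps test runs from interfering with each other. A minimal sketch of the counterpart (assumed; the original tearDown is not shown here, and cleanupTestMetadata() is assumed to be the usual partner of createTestMetadata() in Kylin's test base classes):

@After
public void tearDown() throws Exception {
    FileUtils.deleteDirectory(new File("./meta"));
    cleanupTestMetadata(); // assumed counterpart to createTestMetadata()
}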
Example 9: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
    Mapper<Writable, Text, Text, Text> mapper = new IdentityJSONMapper();
    Reducer<Text, Text, Text, Text> reducer = new JSONReducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 10: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
    Mapper<Writable, Text, Text, Text> mapper = new JSONTypeFilterMapper();
    Reducer<Text, Text, Text, Text> reducer = new JSONReducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 11: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
    Mapper<Text, Text, Text, Text> mapper = new JSONMapper();
    Reducer<Text, Text, Text, Text> reducer = new JSONReducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    org.apache.hadoop.conf.Configuration conf = mapDriver.getConfiguration();
    conf.set("model.uri", TestJSONMapReduce.class.getClassLoader()
            .getResource("people-model.ttl").toURI().toString());
    conf.set("rdf.generation.root",
            "http://isi.edu/integration/karma/dev#TriplesMap_c6f9c495-90e4-4c83-aa62-0ab1841a1871");
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 12: setUpBeforeClass
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // setUpBeforeClass is static, so mapReduceDriver and filePath must be static fields.
    Mapper<Writable, Text, Text, Text> mapper = new IdentityJSONMapper();
    Reducer<Text, Text, Text, Text> reducer = new JSONReducer();
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
    filePath = System.getProperty("json.filepath");
}
Example 13: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
    Mapper<Text, Text, Text, Text> mapper = new N3Mapper();
    Reducer<Text, Text, Text, Text> reducer = new N3Reducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 14: setUp
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
    Mapper<Writable, Text, Text, Text> mapper = new IdentityN3Mapper();
    Reducer<Text, Text, Text, Text> reducer = new N3ToJSONReducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
}
Example 15: init
import org.apache.hadoop.mrunit.mapreduce.MapReduceDriver; // import the package/class this method depends on
@Before
public void init() {
    ToItemCooccurrenceMapper mapper = new ToItemCooccurrenceMapper();
    ToItemCooccurrenceReducer reducer = new ToItemCooccurrenceReducer();
    mapDriver = MapDriver.newMapDriver(mapper);
    reduceDriver = ReduceDriver.newReduceDriver(reducer);
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer);
    // Equivalent direct construction with explicit type parameters:
    // mapReduceDriver = new MapReduceDriver<LongWritable, Text, VarLongWritable, Text, VarLongWritable, VectorWritable>(mapper, reducer);
}
Author: faustineinsun | Project: MahoutHadoopUseCase | Lines: 10 | Source file: UserVectorToCooccurrenceMapReduceTest.java