This article collects typical usage examples of the Java class org.apache.hadoop.io.MapWritable. If you are wondering what MapWritable is for, how to use it, or what real code that uses it looks like, the curated class examples below may help.
The MapWritable class belongs to the org.apache.hadoop.io package. A total of 15 code examples of the MapWritable class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
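Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core MapWritable API they all build on: a Writable map whose keys and values are themselves Writable instances.
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class MapWritableBasics {
  public static void main(String[] args) {
    // MapWritable behaves like a Map<Writable, Writable> that can be
    // serialized and deserialized with Hadoop's Writable protocol.
    MapWritable map = new MapWritable();
    map.put(new Text("count"), new IntWritable(42));
    map.put(new Text("label"), new Text("example"));

    // Values come back typed as Writable and must be cast by the caller.
    IntWritable count = (IntWritable) map.get(new Text("count"));
    System.out.println(count.get()); // prints 42
  }
}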
Example 1: convertToMap
import org.apache.hadoop.io.MapWritable; // import the required package/class
private static Map<String, Map<String, String>> convertToMap(MapWritable inputMap) {
  Map<String, Map<String, String>> mapResult = Maps.newHashMap();
  for (Writable attributeText : inputMap.keySet()) {
    MapWritable partialInsideMap = (MapWritable) inputMap.get(attributeText);
    Map<String, String> partialOutputMap = Maps.newHashMap();
    for (Writable rule : partialInsideMap.keySet()) {
      Text regola = (Text) rule;
      Text valore = (Text) partialInsideMap.get(rule);
      partialOutputMap.put(regola.toString(), valore.toString());
    }
    mapResult.put(((Text) attributeText).toString(), partialOutputMap);
  }
  return mapResult;
}
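The method above assumes a two-level structure: an outer MapWritable keyed by attribute name whose values are inner MapWritable instances mapping rule names to rule values. A short sketch of building input in that shape, with made-up attribute and rule names:
MapWritable inner = new MapWritable();
inner.put(new Text("rule1"), new Text("value1"));

MapWritable outer = new MapWritable();
outer.put(new Text("attributeA"), inner);

// Inside the declaring class, convertToMap(outer) would then return
// {"attributeA" -> {"rule1" -> "value1"}} as plain java.util Maps.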
Example 2: reduce
import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
protected void reduce(StatsUserDimension key, Iterable<TimeOutputValue> values, Context context)
    throws IOException, InterruptedException {
  this.unique.clear();
  // count the number of distinct uuids
  for (TimeOutputValue value : values) {
    this.unique.add(value.getId()); // uid, the user ID
  }
  MapWritable map = new MapWritable(); // roughly the Writable counterpart of a Java HashMap
  map.put(new IntWritable(-1), new IntWritable(this.unique.size()));
  outputValue.setValue(map);
  // set the kpi name
  String kpiName = key.getStatsCommon().getKpi().getKpiName();
  if (KpiType.NEW_INSTALL_USER.name.equals(kpiName)) {
    // new install users for the stats_user table
    outputValue.setKpi(KpiType.NEW_INSTALL_USER);
  } else if (KpiType.BROWSER_NEW_INSTALL_USER.name.equals(kpiName)) {
    // new install users for the stats_device_browser table
    outputValue.setKpi(KpiType.BROWSER_NEW_INSTALL_USER);
  }
  context.write(key, outputValue);
}
Example 3: configureGenericRecordExportInputFormat
import org.apache.hadoop.io.MapWritable; // import the required package/class
private void configureGenericRecordExportInputFormat(Job job, String tableName)
    throws IOException {
  ConnManager connManager = context.getConnManager();
  Map<String, Integer> columnTypeInts;
  if (options.getCall() == null) {
    columnTypeInts = connManager.getColumnTypes(
        tableName,
        options.getSqlQuery());
  } else {
    columnTypeInts = connManager.getColumnTypesForProcedure(
        options.getCall());
  }
  String[] specifiedColumns = options.getColumns();
  MapWritable columnTypes = new MapWritable();
  for (Map.Entry<String, Integer> e : columnTypeInts.entrySet()) {
    String column = e.getKey();
    column = (specifiedColumns == null) ? column : options.getColumnNameCaseInsensitive(column);
    if (column != null) {
      Text columnName = new Text(column);
      Text columnType = new Text(connManager.toJavaType(tableName, column, e.getValue()));
      columnTypes.put(columnName, columnType);
    }
  }
  DefaultStringifier.store(job.getConfiguration(), columnTypes,
      AvroExportMapper.AVRO_COLUMN_TYPES_MAP);
}
Example 4: setup
import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  Configuration conf = context.getConfiguration();
  // Instantiate a copy of the user's class to hold and parse the record.
  String recordClassName = conf.get(
      ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY);
  if (null == recordClassName) {
    throw new IOException("Export table class name ("
        + ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY
        + ") is not set!");
  }
  try {
    Class cls = Class.forName(recordClassName, true,
        Thread.currentThread().getContextClassLoader());
    recordImpl = (SqoopRecord) ReflectionUtils.newInstance(cls, conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException(cnfe);
  }
  if (null == recordImpl) {
    throw new IOException("Could not instantiate object of type "
        + recordClassName);
  }
  columnTypes = DefaultStringifier.load(conf, AVRO_COLUMN_TYPES_MAP,
      MapWritable.class);
}
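Examples 3 and 4 are two halves of one pattern: a MapWritable of column types is serialized into the job Configuration with DefaultStringifier.store() on the client side and read back with DefaultStringifier.load() inside the task. A minimal sketch of that round trip, using an invented configuration key instead of Sqoop's AVRO_COLUMN_TYPES_MAP:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class StringifierRoundTrip {
  public static void main(String[] args) throws IOException {
    // A stock Configuration registers WritableSerialization, which is what
    // DefaultStringifier uses to (de)serialize a MapWritable.
    Configuration conf = new Configuration();

    MapWritable columnTypes = new MapWritable();
    columnTypes.put(new Text("ID"), new Text("Integer"));

    // Store the map under an arbitrary (hypothetical) key...
    DefaultStringifier.store(conf, columnTypes, "example.column.types.map");

    // ...and load it back, as a mapper's setup() would do.
    MapWritable restored =
        DefaultStringifier.load(conf, "example.column.types.map", MapWritable.class);
    System.out.println(restored.get(new Text("ID"))); // prints Integer
  }
}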
Example 5: readFields
import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public void readFields(DataInput in) throws IOException {
  score = in.readFloat();
  lastCheck = new Date(in.readLong());
  homepageUrl = Text.readString(in);
  dnsFailures = in.readInt();
  connectionFailures = in.readInt();
  unfetched = in.readInt();
  fetched = in.readInt();
  notModified = in.readInt();
  redirTemp = in.readInt();
  redirPerm = in.readInt();
  gone = in.readInt();
  metaData = new org.apache.hadoop.io.MapWritable();
  metaData.readFields(in);
}
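Writable deserialization only works when the fields are written in exactly the order they are read, so the write() counterpart to the readFields() above would look roughly like the sketch below (field names are taken from the example; the rest is an assumption about the enclosing class):
@Override
public void write(DataOutput out) throws IOException {
  out.writeFloat(score);
  out.writeLong(lastCheck.getTime());
  Text.writeString(out, homepageUrl);
  out.writeInt(dnsFailures);
  out.writeInt(connectionFailures);
  out.writeInt(unfetched);
  out.writeInt(fetched);
  out.writeInt(notModified);
  out.writeInt(redirTemp);
  out.writeInt(redirPerm);
  out.writeInt(gone);
  // MapWritable writes its own entry count followed by each key/value pair.
  metaData.write(out);
}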
Example 6: getSelectorByQueryType
import org.apache.hadoop.io.MapWritable; // import the required package/class
/**
 * Pulls the correct selector from the MapWritable data element given the queryType
 * <p>
 * Pulls first element of array if element is an array type
 */
public static String getSelectorByQueryType(MapWritable dataMap, QuerySchema qSchema, DataSchema dSchema)
{
  String selector;
  String fieldName = qSchema.getSelectorName();
  if (dSchema.isArrayElement(fieldName))
  {
    if (dataMap.get(dSchema.getTextName(fieldName)) instanceof WritableArrayWritable)
    {
      String[] selectorArray = ((WritableArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = selectorArray[0];
    }
    else
    {
      String[] elementArray = ((ArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = elementArray[0];
    }
  }
  else
  {
    selector = dataMap.get(dSchema.getTextName(fieldName)).toString();
  }
  return selector;
}
Example 7: call
import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public Boolean call(MapWritable dataElement) throws Exception
{
  accum.incNumRecordsReceived(1);
  // Perform the filter
  boolean passFilter = ((DataFilter) filter).filterDataElement(dataElement, dSchema);
  if (passFilter)
  {
    accum.incNumRecordsAfterFilter(1);
  }
  else
  {
    // false, then we filter out the record
    accum.incNumRecordsFiltered(1);
  }
  return passFilter;
}
Example 8: performQuery
import org.apache.hadoop.io.MapWritable; // import the required package/class
/**
 * Method to read in data from an allowed input source/format and perform the query
 */
public void performQuery() throws IOException, PIRException
{
  logger.info("Performing query: ");

  JavaRDD<MapWritable> inputRDD;
  switch (dataInputFormat)
  {
    case InputFormatConst.BASE_FORMAT:
      inputRDD = readData();
      break;
    case InputFormatConst.ES:
      inputRDD = readDataES();
      break;
    default:
      throw new PIRException("Unknown data input format " + dataInputFormat);
  }

  performQuery(inputRDD);
}
Example 9: performQuery
import org.apache.hadoop.io.MapWritable; // import the required package/class
/**
 * Method to read in data from an allowed input source/format and perform the query
 */
public void performQuery() throws IOException, PIRException
{
  logger.info("Performing query: ");

  JavaDStream<MapWritable> inputRDD = null;
  if (dataInputFormat.equals(InputFormatConst.BASE_FORMAT))
  {
    inputRDD = readData();
  }
  else if (dataInputFormat.equals(InputFormatConst.ES))
  {
    inputRDD = readDataES();
  }
  else
  {
    throw new PIRException("Unknown data input format " + dataInputFormat);
  }

  performQuery(inputRDD);
}
Example 10: initialize
import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException
{
  key = new Text();
  value = new MapWritable();
  jsonParser = new JSONParser();

  lineReader = new LineRecordReader();
  lineReader.initialize(inputSplit, context);

  queryString = context.getConfiguration().get("query", "?q=*");

  // Load the data schemas
  FileSystem fs = FileSystem.get(context.getConfiguration());
  try
  {
    SystemConfiguration.setProperty("data.schemas", context.getConfiguration().get("data.schemas"));
    DataSchemaLoader.initialize(true, fs);
  } catch (Exception e)
  {
    e.printStackTrace();
  }

  String dataSchemaName = context.getConfiguration().get("dataSchemaName");
  dataSchema = DataSchemaRegistry.get(dataSchemaName);
}
Example 11: createConf
import org.apache.hadoop.io.MapWritable; // import the required package/class
private Configuration createConf() throws IOException {
  Configuration conf = HdpBootstrap.hadoopConfig();
  HadoopCfgUtils.setGenericOptions(conf);
  Job job = new Job(conf);
  job.setInputFormatClass(EsInputFormat.class);
  job.setOutputFormatClass(PrintStreamOutputFormat.class);
  job.setOutputKeyClass(Text.class);

  boolean type = random.nextBoolean();
  Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
  job.setOutputValueClass(mapType);

  conf.set(ConfigurationOptions.ES_QUERY, query);
  conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
  conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));
  QueryTestParams.provisionQueries(conf);

  job.setNumReduceTasks(0);
  //PrintStreamOutputFormat.stream(conf, Stream.OUT);

  Configuration cfg = job.getConfiguration();
  HdpBootstrap.addProperties(cfg, TestSettings.TESTING_PROPS, false);
  return cfg;
}
Example 12: createReadJobConf
import org.apache.hadoop.io.MapWritable; // import the required package/class
private JobConf createReadJobConf() throws IOException {
  JobConf conf = HdpBootstrap.hadoopConfig();
  conf.setInputFormat(EsInputFormat.class);
  conf.setOutputFormat(PrintStreamOutputFormat.class);
  conf.setOutputKeyClass(Text.class);
  boolean type = random.nextBoolean();
  Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
  conf.setOutputValueClass(MapWritable.class);
  HadoopCfgUtils.setGenericOptions(conf);
  conf.setNumReduceTasks(0);

  conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(random.nextBoolean()));
  conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
  conf.set(ConfigurationOptions.ES_OUTPUT_JSON, "true");
  FileInputFormat.setInputPaths(conf, new Path(TestUtils.gibberishDat(conf)));
  return conf;
}
Example 13: createJobConf
import org.apache.hadoop.io.MapWritable; // import the required package/class
private JobConf createJobConf() throws IOException {
  JobConf conf = HdpBootstrap.hadoopConfig();
  conf.setInputFormat(EsInputFormat.class);
  conf.setOutputFormat(PrintStreamOutputFormat.class);
  conf.setOutputKeyClass(Text.class);
  boolean type = random.nextBoolean();
  Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
  conf.setOutputValueClass(mapType);
  HadoopCfgUtils.setGenericOptions(conf);
  conf.set(ConfigurationOptions.ES_QUERY, query);
  conf.setNumReduceTasks(0);

  conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
  conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
  conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));

  QueryTestParams.provisionQueries(conf);
  FileInputFormat.setInputPaths(conf, new Path(TestUtils.sampleArtistsDat()));

  HdpBootstrap.addProperties(conf, TestSettings.TESTING_PROPS, false);
  return conf;
}
Example 14: next
import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public boolean next(LongWritable keyHolder, MapWritable valueHolder)
    throws IOException {
  if (StringUtils.isBlank(facetMapping)) {
    SolrDocument doc = cursor.nextDocument();
    if (doc == null) {
      return false;
    }
    keyHolder.set(pos++);
    Object[] values = new Object[solrColumns.length];
    for (int i = 0; i < solrColumns.length; i++) {
      values[i] = doc.getFieldValue(solrColumns[i]);
    }
    setValueHolder(valueHolder, values);
  } else {
    FacetEntry facetEntry = cursor.nextFacetEntry();
    if (facetEntry == null) {
      return false;
    }
    keyHolder.set(pos++);
    setValueHolder(valueHolder, new Object[] { facetEntry.getValue(),
        facetEntry.getCount() });
  }
  return true;
}
Example 15: write
import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public void write(Writable w) throws IOException {
  MapWritable map = (MapWritable) w;
  SolrInputDocument doc = new SolrInputDocument();
  for (final Map.Entry<Writable, Writable> entry : map.entrySet()) {
    String key = entry.getKey().toString();
    if (entry.getValue() instanceof TimestampWritable) {
      Timestamp t = ((TimestampWritable) entry.getValue()).getTimestamp();
      doc.setField(key, dateFormat.format(new Date(t.getTime())));
    } else if (entry.getValue() instanceof ShortWritable) {
      doc.setField(key, ((ShortWritable) entry.getValue()).get());
    } else {
      doc.setField(key, entry.getValue().toString());
    }
  }
  log.debug("doc:" + doc.toString());
  table.save(doc);
}