

Java MapWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.MapWritable. If you have been wondering what the MapWritable class is for, how to use it, or what it looks like in real code, the curated class code examples below may help.


The MapWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the MapWritable class are shown below, sorted by popularity by default.
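Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core MapWritable API: it is a Writable map whose keys and values must themselves be Writable, and it serializes through write(DataOutput) and readFields(DataInput).

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class MapWritableBasics {
  public static void main(String[] args) throws IOException {
    // MapWritable is a Writable Map<Writable, Writable>: keys and values
    // must themselves be Writable types such as Text or IntWritable.
    MapWritable map = new MapWritable();
    map.put(new Text("count"), new IntWritable(42));
    map.put(new Text("label"), new Text("example"));

    // Serialize with write(DataOutput) ...
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    map.write(new DataOutputStream(bytes));

    // ... and restore with readFields(DataInput).
    MapWritable copy = new MapWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    for (Map.Entry<Writable, Writable> e : copy.entrySet()) {
      System.out.println(e.getKey() + " = " + e.getValue());
    }
  }
}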

Example 1: convertToMap

import org.apache.hadoop.io.MapWritable; // import the required package/class
private static Map<String, Map<String,String>> convertToMap(MapWritable inputMap) {
	Map<String, Map<String,String>> mapResult = Maps.newHashMap();
	
	for (Writable attributeText : inputMap.keySet()) {
		MapWritable partialInsideMap = (MapWritable) inputMap.get(attributeText);
		Map<String,String> partialOutputMap = Maps.newHashMap();
		
		for (Writable rule : partialInsideMap.keySet()) {
			Text regola = (Text) rule;
			Text valore = (Text) partialInsideMap.get(rule);
			
			partialOutputMap.put(regola.toString(), valore.toString());
		}
		
		mapResult.put(((Text)attributeText).toString(), partialOutputMap);
	}
	
	return mapResult;
}
 
Developer ID: disheng, Project: alfred-mpi, Lines: 20, Source: MapWritableConverter.java

Example 2: reduce

import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
protected void reduce(StatsUserDimension key, Iterable<TimeOutputValue> values, Context context)
		throws IOException, InterruptedException {
	this.unique.clear();

	// count the number of unique uuids
	for (TimeOutputValue value : values) {
		this.unique.add(value.getId()); // uid, the user ID
	}
	MapWritable map = new MapWritable(); // equivalent to a HashMap in plain Java
	map.put(new IntWritable(-1), new IntWritable(this.unique.size()));
	outputValue.setValue(map);

	// set the KPI name
	String kpiName = key.getStatsCommon().getKpi().getKpiName();
	if (KpiType.NEW_INSTALL_USER.name.equals(kpiName)) {
		// count new install users for the stats_user table
		outputValue.setKpi(KpiType.NEW_INSTALL_USER);
	} else if (KpiType.BROWSER_NEW_INSTALL_USER.name.equals(kpiName)) {
		// count new install users for the stats_device_browser table
		outputValue.setKpi(KpiType.BROWSER_NEW_INSTALL_USER);
	}
	context.write(key, outputValue);
}
 
Developer ID: liuhaozzu, Project: big_data, Lines: 25, Source: NewInstallUserReducer.java

Example 3: configureGenericRecordExportInputFormat

import org.apache.hadoop.io.MapWritable; // import the required package/class
private void configureGenericRecordExportInputFormat(Job job, String tableName)
    throws IOException {
  ConnManager connManager = context.getConnManager();
  Map<String, Integer> columnTypeInts;
  if (options.getCall() == null) {
    columnTypeInts = connManager.getColumnTypes(
        tableName,
        options.getSqlQuery());
  } else {
    columnTypeInts = connManager.getColumnTypesForProcedure(
        options.getCall());
  }
  String[] specifiedColumns = options.getColumns();
  MapWritable columnTypes = new MapWritable();
  for (Map.Entry<String, Integer> e : columnTypeInts.entrySet()) {
    String column = e.getKey();
    column = (specifiedColumns == null) ? column : options.getColumnNameCaseInsensitive(column);
    if (column != null) {
      Text columnName = new Text(column);
      Text columnType = new Text(connManager.toJavaType(tableName, column, e.getValue()));
      columnTypes.put(columnName, columnType);
    }
  }
  DefaultStringifier.store(job.getConfiguration(), columnTypes,
      AvroExportMapper.AVRO_COLUMN_TYPES_MAP);
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 27, Source: JdbcExportJob.java

Example 4: setup

import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);

  Configuration conf = context.getConfiguration();

  // Instantiate a copy of the user's class to hold and parse the record.
  String recordClassName = conf.get(
      ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY);
  if (null == recordClassName) {
    throw new IOException("Export table class name ("
        + ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY
        + ") is not set!");
  }

  try {
    Class cls = Class.forName(recordClassName, true,
        Thread.currentThread().getContextClassLoader());
    recordImpl = (SqoopRecord) ReflectionUtils.newInstance(cls, conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException(cnfe);
  }

  if (null == recordImpl) {
    throw new IOException("Could not instantiate object of type "
        + recordClassName);
  }

  columnTypes = DefaultStringifier.load(conf, AVRO_COLUMN_TYPES_MAP,
      MapWritable.class);
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 32, Source: ParquetExportMapper.java
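Examples 3 and 4 are two halves of the same pattern: job-setup code packs a MapWritable of column types into the job Configuration with DefaultStringifier.store, and the mapper's setup() recovers it with DefaultStringifier.load. Below is a minimal standalone sketch of that round trip; the configuration key demo.column.types.map is made up for illustration and is not part of either project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class StringifierRoundTrip {
  // Hypothetical configuration key, used only in this sketch.
  private static final String COLUMN_TYPES_KEY = "demo.column.types.map";

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Producer side (compare Example 3): store the map in the Configuration.
    MapWritable columnTypes = new MapWritable();
    columnTypes.put(new Text("id"), new Text("Integer"));
    columnTypes.put(new Text("name"), new Text("String"));
    DefaultStringifier.store(conf, columnTypes, COLUMN_TYPES_KEY);

    // Consumer side (compare Example 4): load it back, e.g. in a mapper's setup().
    MapWritable restored = DefaultStringifier.load(conf, COLUMN_TYPES_KEY, MapWritable.class);
    System.out.println(restored.get(new Text("id"))); // prints "Integer"
  }
}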

Example 5: readFields

import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public void readFields(DataInput in) throws IOException {
  score = in.readFloat();
  lastCheck = new Date(in.readLong());
  homepageUrl = Text.readString(in);

  dnsFailures = in.readInt();
  connectionFailures = in.readInt();

  unfetched = in.readInt();
  fetched = in.readInt();
  notModified = in.readInt();
  redirTemp = in.readInt();
  redirPerm = in.readInt();
  gone = in.readInt();

  metaData = new org.apache.hadoop.io.MapWritable();
  metaData.readFields(in);
}
 
Developer ID: jorcox, Project: GeoCrawler, Lines: 20, Source: HostDatum.java

Example 6: getSelectorByQueryType

import org.apache.hadoop.io.MapWritable; // import the required package/class
/**
 * Pulls the correct selector from the MapWritable data element given the queryType
 * <p>
 * Pulls first element of array if element is an array type
 */
public static String getSelectorByQueryType(MapWritable dataMap, QuerySchema qSchema, DataSchema dSchema)
{
  String selector;

  String fieldName = qSchema.getSelectorName();
  if (dSchema.isArrayElement(fieldName))
  {
    if (dataMap.get(dSchema.getTextName(fieldName)) instanceof WritableArrayWritable)
    {
      String[] selectorArray = ((WritableArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = selectorArray[0];
    }
    else
    {
      String[] elementArray = ((ArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = elementArray[0];
    }
  }
  else
  {
    selector = dataMap.get(dSchema.getTextName(fieldName)).toString();
  }

  return selector;
}
 
Developer ID: apache, Project: incubator-pirk, Lines: 31, Source: QueryUtils.java

Example 7: call

import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public Boolean call(MapWritable dataElement) throws Exception
{
  accum.incNumRecordsReceived(1);

  // Perform the filter
  boolean passFilter = ((DataFilter) filter).filterDataElement(dataElement, dSchema);

  if (passFilter)
  {
    accum.incNumRecordsAfterFilter(1);
  }
  else
  // false, then we filter out the record
  {
    accum.incNumRecordsFiltered(1);
  }

  return passFilter;
}
 
Developer ID: apache, Project: incubator-pirk, Lines: 21, Source: FilterData.java

Example 8: performQuery

import org.apache.hadoop.io.MapWritable; // import the required package/class
/**
 * Method to read in data from an allowed input source/format and perform the query
 */
public void performQuery() throws IOException, PIRException
{
  logger.info("Performing query: ");

  JavaRDD<MapWritable> inputRDD;
  switch (dataInputFormat)
  {
    case InputFormatConst.BASE_FORMAT:
      inputRDD = readData();
      break;
    case InputFormatConst.ES:
      inputRDD = readDataES();
      break;
    default:
      throw new PIRException("Unknown data input format " + dataInputFormat);
  }

  performQuery(inputRDD);
}
 
Developer ID: apache, Project: incubator-pirk, Lines: 23, Source: ComputeResponse.java

Example 9: performQuery

import org.apache.hadoop.io.MapWritable; // import the required package/class
/**
 * Method to read in data from an allowed input source/format and perform the query
 */
public void performQuery() throws IOException, PIRException
{
  logger.info("Performing query: ");

  JavaDStream<MapWritable> inputRDD = null;
  if (dataInputFormat.equals(InputFormatConst.BASE_FORMAT))
  {
    inputRDD = readData();
  }
  else if (dataInputFormat.equals(InputFormatConst.ES))
  {
    inputRDD = readDataES();
  }
  else
  {
    throw new PIRException("Unknown data input format " + dataInputFormat);
  }

  performQuery(inputRDD);
}
 
Developer ID: apache, Project: incubator-pirk, Lines: 24, Source: ComputeStreamingResponse.java

Example 10: initialize

import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException
{
  key = new Text();
  value = new MapWritable();
  jsonParser = new JSONParser();

  lineReader = new LineRecordReader();
  lineReader.initialize(inputSplit, context);

  queryString = context.getConfiguration().get("query", "?q=*");

  // Load the data schemas
  FileSystem fs = FileSystem.get(context.getConfiguration());
  try
  {
    SystemConfiguration.setProperty("data.schemas", context.getConfiguration().get("data.schemas"));
    DataSchemaLoader.initialize(true, fs);
  } catch (Exception e)
  {
    e.printStackTrace();
  }
  String dataSchemaName = context.getConfiguration().get("dataSchemaName");
  dataSchema = DataSchemaRegistry.get(dataSchemaName);
}
 
Developer ID: apache, Project: incubator-pirk, Lines: 26, Source: JSONRecordReader.java

Example 11: createConf

import org.apache.hadoop.io.MapWritable; // import the required package/class
private Configuration createConf() throws IOException {
    Configuration conf = HdpBootstrap.hadoopConfig();
    HadoopCfgUtils.setGenericOptions(conf);
    Job job = new Job(conf);
    job.setInputFormatClass(EsInputFormat.class);
    job.setOutputFormatClass(PrintStreamOutputFormat.class);
    job.setOutputKeyClass(Text.class);

    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);

    job.setOutputValueClass(mapType);
    conf.set(ConfigurationOptions.ES_QUERY, query);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));

    QueryTestParams.provisionQueries(conf);
    job.setNumReduceTasks(0);
    //PrintStreamOutputFormat.stream(conf, Stream.OUT);

    Configuration cfg = job.getConfiguration();
    HdpBootstrap.addProperties(cfg, TestSettings.TESTING_PROPS, false);
    return cfg;
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 26, Source: AbstractMRNewApiSearchTest.java

Example 12: createReadJobConf

import org.apache.hadoop.io.MapWritable; // import the required package/class
private JobConf createReadJobConf() throws IOException {
    JobConf conf = HdpBootstrap.hadoopConfig();

    conf.setInputFormat(EsInputFormat.class);
    conf.setOutputFormat(PrintStreamOutputFormat.class);
    conf.setOutputKeyClass(Text.class);
    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
    conf.setOutputValueClass(mapType); // use the randomly chosen map type, as in the sibling tests
    HadoopCfgUtils.setGenericOptions(conf);
    conf.setNumReduceTasks(0);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(random.nextBoolean()));
    conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, "true");

    FileInputFormat.setInputPaths(conf, new Path(TestUtils.gibberishDat(conf)));
    return conf;
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 20, Source: AbstractExtraMRTests.java

Example 13: createJobConf

import org.apache.hadoop.io.MapWritable; // import the required package/class
private JobConf createJobConf() throws IOException {
    JobConf conf = HdpBootstrap.hadoopConfig();

    conf.setInputFormat(EsInputFormat.class);
    conf.setOutputFormat(PrintStreamOutputFormat.class);
    conf.setOutputKeyClass(Text.class);
    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
    conf.setOutputValueClass(mapType);
    HadoopCfgUtils.setGenericOptions(conf);
    conf.set(ConfigurationOptions.ES_QUERY, query);
    conf.setNumReduceTasks(0);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
    conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));

    QueryTestParams.provisionQueries(conf);
    FileInputFormat.setInputPaths(conf, new Path(TestUtils.sampleArtistsDat()));

    HdpBootstrap.addProperties(conf, TestSettings.TESTING_PROPS, false);
    return conf;
}
 
Developer ID: xushjie1987, Project: es-hadoop-v2.2.0, Lines: 24, Source: AbstractMROldApiSearchTest.java

Example 14: next

import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public boolean next(LongWritable keyHolder, MapWritable valueHolder)
		throws IOException {
	if (StringUtils.isBlank(facetMapping)) {
		SolrDocument doc = cursor.nextDocument();
		if (doc == null) {
			return false;
		}
		keyHolder.set(pos++);
		Object[] values = new Object[solrColumns.length];
		for (int i = 0; i < solrColumns.length; i++) {
			values[i] = doc.getFieldValue(solrColumns[i]);
		}
		setValueHolder(valueHolder, values);
	} else {
		FacetEntry facetEntry = cursor.nextFacetEntry();
		if (facetEntry == null) {
			return false;
		}
		keyHolder.set(pos++);
		setValueHolder(valueHolder, new Object[] { facetEntry.getValue(),
				facetEntry.getCount() });
	}
	return true;
}
 
Developer ID: vroyer, Project: hive-solr-search, Lines: 26, Source: SolrReader.java

Example 15: write

import org.apache.hadoop.io.MapWritable; // import the required package/class
@Override
public void write(Writable w) throws IOException {
	MapWritable map = (MapWritable) w;
	SolrInputDocument doc = new SolrInputDocument();
	for (final Map.Entry<Writable, Writable> entry : map.entrySet()) {
		String key = entry.getKey().toString();
		
		if (entry.getValue() instanceof TimestampWritable) {
			Timestamp t = ((TimestampWritable)entry.getValue()).getTimestamp();
			doc.setField(key, dateFormat.format( new Date(t.getTime()) ));
		} else if (entry.getValue() instanceof ShortWritable) {
		    doc.setField(key, ((ShortWritable)entry.getValue()).get());
		} else {
			doc.setField(key, entry.getValue().toString());
		}
		
	}
	log.debug("doc:"+doc.toString());
	table.save(doc);
}
 
Developer ID: vroyer, Project: hive-solr-search, Lines: 21, Source: SolrWriter.java


Note: The org.apache.hadoop.io.MapWritable class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not republish without permission.