

Java MapWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.MapWritable. If you are wondering how the MapWritable class is used in practice and are looking for concrete examples, the curated code snippets below should help.


The MapWritable class belongs to the org.apache.hadoop.io package. A total of 15 code examples of the MapWritable class are shown below, sorted by popularity by default.
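
Before the collected examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of the basic MapWritable API: MapWritable is a Hadoop Writable that implements Map&lt;Writable, Writable&gt;, so keys and values must themselves be Writable types, and values returned by get() must be cast back to their concrete classes. The class name MapWritableBasics is illustrative only; running it requires hadoop-common on the classpath.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class MapWritableBasics {
	public static void main(String[] args) {
		// A MapWritable is a Hadoop-serializable Map<Writable, Writable>.
		MapWritable map = new MapWritable();
		map.put(new Text("name"), new Text("alice"));
		map.put(new Text("visits"), new IntWritable(42));

		// get() returns Writable, so values are cast back to their concrete types.
		Text name = (Text) map.get(new Text("name"));
		int visits = ((IntWritable) map.get(new Text("visits"))).get();
		System.out.println(name + " has " + visits + " visits");
	}
}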

Example 1: convertToMap

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
private static Map<String, Map<String,String>> convertToMap(MapWritable inputMap) {
	Map<String, Map<String,String>> mapResult = Maps.newHashMap();
	
	for (Writable attributeText : inputMap.keySet()) {
		MapWritable partialInsideMap = (MapWritable) inputMap.get(attributeText);
		Map<String,String> partialOutputMap = Maps.newHashMap();
		
		for (Writable rule : partialInsideMap.keySet()) {
			Text regola = (Text) rule;
			Text valore = (Text) partialInsideMap.get(rule);
			
			partialOutputMap.put(regola.toString(), valore.toString());
		}
		
		mapResult.put(((Text)attributeText).toString(), partialOutputMap);
	}
	
	return mapResult;
}
 
Developer ID: disheng; Project: alfred-mpi; Lines of code: 20; Source file: MapWritableConverter.java

Example 2: reduce

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
@Override
protected void reduce(StatsUserDimension key, Iterable<TimeOutputValue> values, Context context)
		throws IOException, InterruptedException {
	this.unique.clear();

	// count the number of distinct uuids
	for (TimeOutputValue value : values) {
		this.unique.add(value.getId());// uid, the user ID
	}
	MapWritable map = new MapWritable();// roughly equivalent to a java HashMap
	map.put(new IntWritable(-1), new IntWritable(this.unique.size()));
	outputValue.setValue(map);

	// set the KPI name
	String kpiName = key.getStatsCommon().getKpi().getKpiName();
	if (KpiType.NEW_INSTALL_USER.name.equals(kpiName)) {
		// count new-install users for the stats_user table
		outputValue.setKpi(KpiType.NEW_INSTALL_USER);
	} else if (KpiType.BROWSER_NEW_INSTALL_USER.name.equals(kpiName)) {
		// count new-install users for the stats_device_browser table
		outputValue.setKpi(KpiType.BROWSER_NEW_INSTALL_USER);
	}
	}
	context.write(key, outputValue);
}
 
Developer ID: liuhaozzu; Project: big_data; Lines of code: 25; Source file: NewInstallUserReducer.java

Example 3: configureGenericRecordExportInputFormat

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
private void configureGenericRecordExportInputFormat(Job job, String tableName)
    throws IOException {
  ConnManager connManager = context.getConnManager();
  Map<String, Integer> columnTypeInts;
  if (options.getCall() == null) {
    columnTypeInts = connManager.getColumnTypes(
        tableName,
        options.getSqlQuery());
  } else {
    columnTypeInts = connManager.getColumnTypesForProcedure(
        options.getCall());
  }
  String[] specifiedColumns = options.getColumns();
  MapWritable columnTypes = new MapWritable();
  for (Map.Entry<String, Integer> e : columnTypeInts.entrySet()) {
    String column = e.getKey();
    column = (specifiedColumns == null) ? column : options.getColumnNameCaseInsensitive(column);
    if (column != null) {
      Text columnName = new Text(column);
      Text columnType = new Text(connManager.toJavaType(tableName, column, e.getValue()));
      columnTypes.put(columnName, columnType);
    }
  }
  DefaultStringifier.store(job.getConfiguration(), columnTypes,
      AvroExportMapper.AVRO_COLUMN_TYPES_MAP);
}
 
Developer ID: aliyun; Project: aliyun-maxcompute-data-collectors; Lines of code: 27; Source file: JdbcExportJob.java

Example 4: setup

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);

  Configuration conf = context.getConfiguration();

  // Instantiate a copy of the user's class to hold and parse the record.
  String recordClassName = conf.get(
      ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY);
  if (null == recordClassName) {
    throw new IOException("Export table class name ("
        + ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY
        + ") is not set!");
  }

  try {
    Class cls = Class.forName(recordClassName, true,
        Thread.currentThread().getContextClassLoader());
    recordImpl = (SqoopRecord) ReflectionUtils.newInstance(cls, conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException(cnfe);
  }

  if (null == recordImpl) {
    throw new IOException("Could not instantiate object of type "
        + recordClassName);
  }

  columnTypes = DefaultStringifier.load(conf, AVRO_COLUMN_TYPES_MAP,
      MapWritable.class);
}
 
Developer ID: aliyun; Project: aliyun-maxcompute-data-collectors; Lines of code: 32; Source file: ParquetExportMapper.java

Example 5: readFields

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
@Override
public void readFields(DataInput in) throws IOException {
  score = in.readFloat();
  lastCheck = new Date(in.readLong());
  homepageUrl = Text.readString(in);

  dnsFailures = in.readInt();
  connectionFailures = in.readInt();

  unfetched= in.readInt();
  fetched= in.readInt();
  notModified= in.readInt();
  redirTemp= in.readInt();
  redirPerm = in.readInt();
  gone = in.readInt();

  metaData = new org.apache.hadoop.io.MapWritable();
  metaData.readFields(in);
}
 
Developer ID: jorcox; Project: GeoCrawler; Lines of code: 20; Source file: HostDatum.java

Example 6: getSelectorByQueryType

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
/**
 * Pulls the correct selector from the MapWritable data element given the queryType
 * <p>
 * Pulls first element of array if element is an array type
 */
public static String getSelectorByQueryType(MapWritable dataMap, QuerySchema qSchema, DataSchema dSchema)
{
  String selector;

  String fieldName = qSchema.getSelectorName();
  if (dSchema.isArrayElement(fieldName))
  {
    if (dataMap.get(dSchema.getTextName(fieldName)) instanceof WritableArrayWritable)
    {
      String[] selectorArray = ((WritableArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = selectorArray[0];
    }
    else
    {
      String[] elementArray = ((ArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
      selector = elementArray[0];
    }
  }
  else
  {
    selector = dataMap.get(dSchema.getTextName(fieldName)).toString();
  }

  return selector;
}
 
Developer ID: apache; Project: incubator-pirk; Lines of code: 31; Source file: QueryUtils.java

Example 7: call

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
@Override
public Boolean call(MapWritable dataElement) throws Exception
{
  accum.incNumRecordsReceived(1);

  // Perform the filter
  boolean passFilter = ((DataFilter) filter).filterDataElement(dataElement, dSchema);

  if (passFilter)
  {
    accum.incNumRecordsAfterFilter(1);
  }
  else
  // false, then we filter out the record
  {
    accum.incNumRecordsFiltered(1);
  }

  return passFilter;
}
 
Developer ID: apache; Project: incubator-pirk; Lines of code: 21; Source file: FilterData.java

Example 8: performQuery

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
/**
 * Method to read in data from an allowed input source/format and perform the query
 */
public void performQuery() throws IOException, PIRException
{
  logger.info("Performing query: ");

  JavaRDD<MapWritable> inputRDD;
  switch (dataInputFormat)
  {
    case InputFormatConst.BASE_FORMAT:
      inputRDD = readData();
      break;
    case InputFormatConst.ES:
      inputRDD = readDataES();
      break;
    default:
      throw new PIRException("Unknown data input format " + dataInputFormat);
  }

  performQuery(inputRDD);
}
 
Developer ID: apache; Project: incubator-pirk; Lines of code: 23; Source file: ComputeResponse.java

Example 9: performQuery

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
/**
 * Method to read in data from an allowed input source/format and perform the query
 */
public void performQuery() throws IOException, PIRException
{
  logger.info("Performing query: ");

  JavaDStream<MapWritable> inputRDD = null;
  if (dataInputFormat.equals(InputFormatConst.BASE_FORMAT))
  {
    inputRDD = readData();
  }
  else if (dataInputFormat.equals(InputFormatConst.ES))
  {
    inputRDD = readDataES();
  }
  else
  {
    throw new PIRException("Unknown data input format " + dataInputFormat);
  }

  performQuery(inputRDD);
}
 
Developer ID: apache; Project: incubator-pirk; Lines of code: 24; Source file: ComputeStreamingResponse.java

Example 10: initialize

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException
{
  key = new Text();
  value = new MapWritable();
  jsonParser = new JSONParser();

  lineReader = new LineRecordReader();
  lineReader.initialize(inputSplit, context);

  queryString = context.getConfiguration().get("query", "?q=*");

  // Load the data schemas
  FileSystem fs = FileSystem.get(context.getConfiguration());
  try
  {
    SystemConfiguration.setProperty("data.schemas", context.getConfiguration().get("data.schemas"));
    DataSchemaLoader.initialize(true, fs);
  } catch (Exception e)
  {
    e.printStackTrace();
  }
  String dataSchemaName = context.getConfiguration().get("dataSchemaName");
  dataSchema = DataSchemaRegistry.get(dataSchemaName);
}
 
Developer ID: apache; Project: incubator-pirk; Lines of code: 26; Source file: JSONRecordReader.java

Example 11: createConf

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
private Configuration createConf() throws IOException {
    Configuration conf = HdpBootstrap.hadoopConfig();
    HadoopCfgUtils.setGenericOptions(conf);
    Job job = new Job(conf);
    job.setInputFormatClass(EsInputFormat.class);
    job.setOutputFormatClass(PrintStreamOutputFormat.class);
    job.setOutputKeyClass(Text.class);

    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);

    job.setOutputValueClass(mapType);
    conf.set(ConfigurationOptions.ES_QUERY, query);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));

    QueryTestParams.provisionQueries(conf);
    job.setNumReduceTasks(0);
    //PrintStreamOutputFormat.stream(conf, Stream.OUT);

    Configuration cfg = job.getConfiguration();
    HdpBootstrap.addProperties(cfg, TestSettings.TESTING_PROPS, false);
    return cfg;
}
 
Developer ID: xushjie1987; Project: es-hadoop-v2.2.0; Lines of code: 26; Source file: AbstractMRNewApiSearchTest.java

Example 12: createReadJobConf

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
private JobConf createReadJobConf() throws IOException {
    JobConf conf = HdpBootstrap.hadoopConfig();

    conf.setInputFormat(EsInputFormat.class);
    conf.setOutputFormat(PrintStreamOutputFormat.class);
    conf.setOutputKeyClass(Text.class);
    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
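    // note: mapType is computed above but not used; the output value class below is hard-coded to MapWritable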
    conf.setOutputValueClass(MapWritable.class);
    HadoopCfgUtils.setGenericOptions(conf);
    conf.setNumReduceTasks(0);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(random.nextBoolean()));
    conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, "true");

    FileInputFormat.setInputPaths(conf, new Path(TestUtils.gibberishDat(conf)));
    return conf;
}
 
Developer ID: xushjie1987; Project: es-hadoop-v2.2.0; Lines of code: 20; Source file: AbstractExtraMRTests.java

Example 13: createJobConf

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
private JobConf createJobConf() throws IOException {
    JobConf conf = HdpBootstrap.hadoopConfig();

    conf.setInputFormat(EsInputFormat.class);
    conf.setOutputFormat(PrintStreamOutputFormat.class);
    conf.setOutputKeyClass(Text.class);
    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
    conf.setOutputValueClass(mapType);
    HadoopCfgUtils.setGenericOptions(conf);
    conf.set(ConfigurationOptions.ES_QUERY, query);
    conf.setNumReduceTasks(0);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
    conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));

    QueryTestParams.provisionQueries(conf);
    FileInputFormat.setInputPaths(conf, new Path(TestUtils.sampleArtistsDat()));

    HdpBootstrap.addProperties(conf, TestSettings.TESTING_PROPS, false);
    return conf;
}
 
Developer ID: xushjie1987; Project: es-hadoop-v2.2.0; Lines of code: 24; Source file: AbstractMROldApiSearchTest.java

Example 14: next

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
@Override
public boolean next(LongWritable keyHolder, MapWritable valueHolder)
		throws IOException {
	if (StringUtils.isBlank(facetMapping)) {
		SolrDocument doc = cursor.nextDocument();
		if (doc == null) {
			return false;
		}
		keyHolder.set(pos++);
		Object[] values = new Object[solrColumns.length];
		for (int i = 0; i < solrColumns.length; i++) {
			values[i] = doc.getFieldValue(solrColumns[i]);
		}
		setValueHolder(valueHolder, values);
	} else {
		FacetEntry facetEntry = cursor.nextFacetEntry();
		if (facetEntry == null) {
			return false;
		}
		keyHolder.set(pos++);
		setValueHolder(valueHolder, new Object[] { facetEntry.getValue(),
				facetEntry.getCount() });
	}
	return true;
}
 
Developer ID: vroyer; Project: hive-solr-search; Lines of code: 26; Source file: SolrReader.java

Example 15: write

import org.apache.hadoop.io.MapWritable; // import the dependent package/class
@Override
public void write(Writable w) throws IOException {
	MapWritable map = (MapWritable) w;
	SolrInputDocument doc = new SolrInputDocument();
	for (final Map.Entry<Writable, Writable> entry : map.entrySet()) {
		String key = entry.getKey().toString();
		
		if (entry.getValue() instanceof TimestampWritable) {
			Timestamp t = ((TimestampWritable)entry.getValue()).getTimestamp();
			doc.setField(key, dateFormat.format( new Date(t.getTime()) ));
		} else if (entry.getValue() instanceof ShortWritable) {
		    doc.setField(key, ((ShortWritable)entry.getValue()).get());
		} else {
			doc.setField(key, entry.getValue().toString());
		}
		
	}
	log.debug("doc:"+doc.toString());
	table.save(doc);
}
 
Developer ID: vroyer; Project: hive-solr-search; Lines of code: 21; Source file: SolrWriter.java


Note: The org.apache.hadoop.io.MapWritable class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their developers; copyright in the source code remains with the original authors. Refer to each project's License before distributing or using the code, and do not reproduce this compilation without permission.