

Java ObjectWritable.get Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.ObjectWritable.get. If you are wondering what ObjectWritable.get does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.ObjectWritable.


Below are 10 code examples of the ObjectWritable.get method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
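Before the project examples, here is a minimal, self-contained sketch of what ObjectWritable.get returns: the instance that was wrapped (or deserialized) by the ObjectWritable. The round trip below (write, then readFields, then get) is an illustrative sketch using only the public Hadoop API; the Configuration and the Text payload are arbitrary choices for demonstration, not taken from any of the projects cited below.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Text;

public class ObjectWritableGetDemo {
  public static void main(String[] args) throws IOException {
    // Wrap an arbitrary Writable (a Text here) in an ObjectWritable.
    ObjectWritable out = new ObjectWritable(new Text("hello"));

    // Serialize it to bytes.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    out.write(new DataOutputStream(bytes));

    // Deserialize into a fresh ObjectWritable; setting a Configuration lets
    // the wrapped class be resolved and instantiated reflectively.
    ObjectWritable in = new ObjectWritable();
    in.setConf(new Configuration(false));
    in.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    // get() hands back the wrapped instance, which the caller must cast.
    Text restored = (Text) in.get();
    System.out.println(restored); // prints "hello"
  }
}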

Example 1: invoke

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  final boolean logDebug = LOG.isDebugEnabled();
  long startTime = 0;
  if (logDebug) {
    startTime = System.currentTimeMillis();
  }

  ObjectWritable value = (ObjectWritable)
      client.call(new Invocation(method, args), remoteId);
  if (logDebug) {
    long callTime = System.currentTimeMillis() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
 
Developer: spafka, Project: spark_deep, Lines: 19, Source: RPC.java

Example 2: invoke

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
public Object invoke(Object proxy, Method method, Object[] args)
  throws Throwable {
  final boolean logDebug = LOG.isDebugEnabled();
  long startTime = 0;
  if (logDebug) {
    startTime = System.currentTimeMillis();
  }

  ObjectWritable value = (ObjectWritable)
    client.call(new Invocation(method, args), remoteId);
  if (logDebug) {
    long callTime = System.currentTimeMillis() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 17, Source: RPC.java
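In both invoke examples above, the Invoker is a java.lang.reflect.InvocationHandler: the RPC client returns the remote result wrapped in an ObjectWritable, and value.get() unwraps it into the plain Object that the dynamic proxy hands back to the caller. The sketch below shows, in hedged form, how such a handler is typically attached to a protocol interface via Proxy.newProxyInstance; the EchoProtocol interface and the handler body are hypothetical stand-ins, not the actual RPC.Invoker wiring.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

// Hypothetical protocol interface used only for this sketch.
interface EchoProtocol {
  String echo(String message);
}

public class ProxySketch {
  public static void main(String[] args) {
    // Stand-in handler: a real RPC invoker would send an Invocation over the
    // wire and return ((ObjectWritable) client.call(...)).get() instead.
    InvocationHandler handler = new InvocationHandler() {
      @Override
      public Object invoke(Object proxy, Method method, Object[] margs) {
        return "echo:" + margs[0];
      }
    };

    EchoProtocol proxy = (EchoProtocol) Proxy.newProxyInstance(
        EchoProtocol.class.getClassLoader(),
        new Class<?>[] { EchoProtocol.class },
        handler);

    System.out.println(proxy.echo("hello")); // dispatches to invoke() above
  }
}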

Example 3: reduce

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
/**
 * Inverts outlinks to inlinks while attaching node information to the
 * outlink.
 */
public void reduce(Text key, Iterator<ObjectWritable> values,
    OutputCollector<Text, LinkNode> output, Reporter reporter)
    throws IOException {

  String fromUrl = key.toString();
  List<LinkDatum> outlinks = new ArrayList<LinkDatum>();
  Node node = null;
  
  // loop through all values aggregating outlinks, saving node
  while (values.hasNext()) {
    ObjectWritable write = values.next();
    Object obj = write.get();
    if (obj instanceof Node) {
      node = (Node) obj;
    } else if (obj instanceof LinkDatum) {
      outlinks.add(WritableUtils.clone((LinkDatum) obj, conf));
    }
  }

  // only collect if there are outlinks
  int numOutlinks = node.getNumOutlinks();
  if (numOutlinks > 0) {
    for (int i = 0; i < outlinks.size(); i++) {
      LinkDatum outlink = outlinks.get(i);
      String toUrl = outlink.getUrl();

      // collect the outlink as an inlink with the node
      output.collect(new Text(toUrl), new LinkNode(fromUrl, node));
    }
  }
}
 
Developer: jorcox, Project: GeoCrawler, Lines: 36, Source: LinkDumper.java
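Examples 3, 4, 6 and 8–10 all consume an Iterator<ObjectWritable> and use get() plus instanceof checks to tell Node records apart from LinkDatum records. That only works because the map side wrapped each heterogeneous value in an ObjectWritable under the same Text key. The following is a hedged sketch of that producing side, using the old org.apache.hadoop.mapred API as the examples do; Text and IntWritable stand in for Nutch's Node and LinkDatum, whose constructors are not shown in this article.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Hedged sketch: wrap values of different Writable types in ObjectWritable so
// a single reducer can receive both kinds under one key.
public class WrappingMapper extends MapReduceBase
    implements Mapper<Text, Text, Text, ObjectWritable> {

  @Override
  public void map(Text key, Text value,
      OutputCollector<Text, ObjectWritable> output, Reporter reporter)
      throws IOException {
    // Emit the raw value...
    output.collect(key, new ObjectWritable(value));
    // ...and a value of a different type under the same key; the reducer
    // later calls get() and uses instanceof to tell them apart.
    output.collect(key, new ObjectWritable(new IntWritable(value.getLength())));
  }
}

In a real job the driver would also declare the wrapper type, for example with job.setMapOutputValueClass(ObjectWritable.class), so the framework knows how to serialize the mixed values between map and reduce.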

Example 4: reduce

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
/**
 * Takes any node that has inlinks and sets up a route for all of its
 * outlinks. These routes will then be followed to a maximum depth inside of
 * the Looper job.
 */
public void reduce(Text key, Iterator<ObjectWritable> values,
  OutputCollector<Text, Route> output, Reporter reporter)
  throws IOException {

  String url = key.toString();
  Node node = null;
  List<LinkDatum> outlinkList = new ArrayList<LinkDatum>();

  // collect all outlinks and assign node
  while (values.hasNext()) {
    ObjectWritable objWrite = values.next();
    Object obj = objWrite.get();
    if (obj instanceof LinkDatum) {
      outlinkList.add((LinkDatum)obj);
    }
    else if (obj instanceof Node) {
      node = (Node)obj;
    }
  }

  // has to have inlinks otherwise cycle not possible
  if (node != null) {

    int numInlinks = node.getNumInlinks();
    if (numInlinks > 0) {

      // initialize and collect a route for every outlink
      for (LinkDatum datum : outlinkList) {
        String outlinkUrl = datum.getUrl();
        Route route = new Route();
        route.setFound(false);
        route.setLookingFor(url);
        route.setOutlinkUrl(outlinkUrl);
        output.collect(new Text(outlinkUrl), route);
      }
    }
  }
}
 
Developer: yahoo, Project: anthelion, Lines: 44, Source: Loops.java

Example 5: fromWritable

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
public Object fromWritable(
		final ByteArrayId adapterID,
		final ObjectWritable writable ) {
	final Object innerObj = writable.get();
	return (innerObj instanceof Writable) ? getHadoopWritableSerializerForAdapter(
			adapterID).fromWritable(
			(Writable) innerObj) : innerObj;
}
 
Developer: locationtech, Project: geowave, Lines: 9, Source: HadoopWritableSerializationTool.java
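Example 5 unwraps: get() is called first, and only if the inner object is a Writable is the adapter-specific deserializer applied. For orientation, here is a hedged sketch of the opposite direction; the toWritable helper below is hypothetical (it is not part of geowave's HadoopWritableSerializationTool) and only illustrates that ObjectWritable records the concrete class alongside the instance, which is what lets a fromWritable-style method recover it with get() and instanceof.

import org.apache.hadoop.io.ObjectWritable;

// Hypothetical helper, not geowave API: wrap a value so it can travel through
// MapReduce and later be unwrapped with ObjectWritable.get().
public final class ToWritableSketch {
  private ToWritableSketch() {}

  public static ObjectWritable toWritable(final Object value) {
    // ObjectWritable accepts Writables as well as primitives, Strings, arrays
    // and other types it knows how to serialize.
    return new ObjectWritable(value);
  }
}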

Example 6: reduce

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
/**
 * Inverts outlinks to inlinks, attaches current score for the outlink from
 * the NodeDb of the WebGraph.
 */
public void reduce(Text key, Iterator<ObjectWritable> values,
    OutputCollector<Text, LinkDatum> output, Reporter reporter)
    throws IOException {

  String fromUrl = key.toString();
  List<LinkDatum> outlinks = new ArrayList<LinkDatum>();
  Node node = null;

  // aggregate outlinks, assign other values
  while (values.hasNext()) {
    ObjectWritable write = values.next();
    Object obj = write.get();
    if (obj instanceof Node) {
      node = (Node) obj;
    } else if (obj instanceof LinkDatum) {
      outlinks.add(WritableUtils.clone((LinkDatum) obj, conf));
    }
  }

  // get the number of outlinks and the current inlink and outlink scores
  // from the node of the url
  int numOutlinks = node.getNumOutlinks();
  float inlinkScore = node.getInlinkScore();
  float outlinkScore = node.getOutlinkScore();
  LOG.debug(fromUrl + ": num outlinks " + numOutlinks);

  // can't invert if no outlinks
  if (numOutlinks > 0) {
    for (int i = 0; i < outlinks.size(); i++) {
      LinkDatum outlink = outlinks.get(i);
      String toUrl = outlink.getUrl();

      outlink.setUrl(fromUrl);
      outlink.setScore(outlinkScore);

      // collect the inverted outlink
      output.collect(new Text(toUrl), outlink);
      LOG.debug(toUrl + ": inverting inlink from " + fromUrl
          + " origscore: " + inlinkScore + " numOutlinks: " + numOutlinks
          + " inlinkscore: " + outlinkScore);
    }
  }
}
 
Developer: jorcox, Project: GeoCrawler, Lines: 48, Source: LinkRank.java

Example 7: readObject

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
  ObjectWritable ow = new ObjectWritable();
  ow.setConf(new Configuration(false));
  ow.readFields(in);
  this.inputSplit = (InputSplit) ow.get();
}
 
Developer: apache, Project: beam, Lines: 7, Source: HadoopInputFormatIO.java
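Example 7 is the readObject half of Java serialization support: the ObjectWritable is rebuilt from the stream and get() recovers the wrapped InputSplit. For completeness, here is a hedged sketch of what the matching writeObject half could look like; it is an assumption for illustration, not code copied from Beam's HadoopInputFormatIO, and it relies on the same inputSplit field used in the readObject above.

// Hedged counterpart sketch (assumed, not taken from Beam): let ObjectWritable
// record the split's class and contents into the Java serialization stream.
// ObjectOutputStream implements DataOutput, so it can be passed to write().
private void writeObject(ObjectOutputStream out) throws IOException {
  new ObjectWritable(inputSplit).write(out);
}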

Example 8: reduce

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
/**
 * Creates new CrawlDatum objects with the updated score from the NodeDb or
 * with a cleared score.
 */
public void reduce(Text key, Iterator<ObjectWritable> values,
  OutputCollector<Text, CrawlDatum> output, Reporter reporter)
  throws IOException {

  String url = key.toString();
  Node node = null;
  CrawlDatum datum = null;

  // set the node and the crawl datum, should be one of each unless no node
  // for url in the crawldb
  while (values.hasNext()) {
    ObjectWritable next = values.next();
    Object value = next.get();
    if (value instanceof Node) {
      node = (Node)value;
    }
    else if (value instanceof CrawlDatum) {
      datum = (CrawlDatum)value;
    }
  }

  // datum should never be null, could happen if somehow the url was 
  // normalized or changed after being pulled from the crawldb
  if (datum != null) {

    if (node != null) {
      
      // set the inlink score in the nodedb
      float inlinkScore = node.getInlinkScore();
      datum.setScore(inlinkScore);
      LOG.debug(url + ": setting to score " + inlinkScore);
    }
    else {
      
      // clear out the score in the crawldb
      datum.setScore(clearScore);
      LOG.debug(url + ": setting to clear score of " + clearScore);
    }

    output.collect(key, datum);
  }
  else {
    LOG.debug(url + ": no datum");
  }
}
 
Developer: yahoo, Project: anthelion, Lines: 50, Source: ScoreUpdater.java

Example 9: reduce

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
/**
 * Inverts outlinks to inlinks, attaches current score for the outlink from
 * the NodeDb of the WebGraph and removes any outlink that is contained
 * within the loopset.
 */
public void reduce(Text key, Iterator<ObjectWritable> values,
  OutputCollector<Text, LinkDatum> output, Reporter reporter)
  throws IOException {

  String fromUrl = key.toString();
  List<LinkDatum> outlinks = new ArrayList<LinkDatum>();
  Node node = null;
  LoopSet loops = null;

  // aggregate outlinks, assign other values
  while (values.hasNext()) {
    ObjectWritable write = values.next();
    Object obj = write.get();
    if (obj instanceof Node) {
      node = (Node)obj;
    }
    else if (obj instanceof LinkDatum) {
      outlinks.add((LinkDatum)WritableUtils.clone((LinkDatum)obj, conf));
    }
    else if (obj instanceof LoopSet) {
      loops = (LoopSet)obj;
    }
  }

  // Check for the possibility of a LoopSet object without Node and LinkDatum objects. This can happen
  // with webgraphs that receive deletes (e.g. link.delete.gone and/or URL filters or normalizers) but
  // without an updated Loops database.
  // See: https://issues.apache.org/jira/browse/NUTCH-1299
  if (node == null && loops != null) {
    // Nothing to do
    LOG.warn("LoopSet without Node object received for " + key.toString() + " . You should either not use Loops as input of the LinkRank program or rerun the Loops program over the WebGraph.");
    return;
  }

  // get the number of outlinks and the current inlink and outlink scores
  // from the node of the url
  int numOutlinks = node.getNumOutlinks();
  float inlinkScore = node.getInlinkScore();
  float outlinkScore = node.getOutlinkScore();
  LOG.debug(fromUrl + ": num outlinks " + numOutlinks);

  // can't invert if no outlinks
  if (numOutlinks > 0) {

    Set<String> loopSet = (loops != null) ? loops.getLoopSet() : null;
    for (int i = 0; i < outlinks.size(); i++) {
      LinkDatum outlink = outlinks.get(i);
      String toUrl = outlink.getUrl();

      // remove any url that is contained in the loopset
      if (loopSet != null && loopSet.contains(toUrl)) {
        LOG.debug(fromUrl + ": Skipping inverting inlink from loop "
          + toUrl);
        continue;
      }
      outlink.setUrl(fromUrl);
      outlink.setScore(outlinkScore);

      // collect the inverted outlink
      output.collect(new Text(toUrl), outlink);
      LOG.debug(toUrl + ": inverting inlink from " + fromUrl
        + " origscore: " + inlinkScore + " numOutlinks: " + numOutlinks
        + " inlinkscore: " + outlinkScore);
    }
  }
}
 
Developer: yahoo, Project: anthelion, Lines: 72, Source: LinkRank.java

Example 10: reduce

import org.apache.hadoop.io.ObjectWritable; // import the package/class this method depends on
/**
 * Inverts outlinks to inlinks while attaching node information to the 
 * outlink.
 */
public void reduce(Text key, Iterator<ObjectWritable> values,
  OutputCollector<Text, LinkNode> output, Reporter reporter)
  throws IOException {

  String fromUrl = key.toString();
  List<LinkDatum> outlinks = new ArrayList<LinkDatum>();
  Node node = null;
  LoopSet loops = null;

  // loop through all values aggregating outlinks, saving node and loopset
  while (values.hasNext()) {
    ObjectWritable write = values.next();
    Object obj = write.get();
    if (obj instanceof Node) {
      node = (Node)obj;
    }
    else if (obj instanceof LinkDatum) {
      outlinks.add((LinkDatum)WritableUtils.clone((LinkDatum)obj, conf));
    }
    else if (obj instanceof LoopSet) {
      loops = (LoopSet)obj;
    }
  }

  // only collect if there are outlinks
  int numOutlinks = node.getNumOutlinks();
  if (numOutlinks > 0) {

    Set<String> loopSet = (loops != null) ? loops.getLoopSet() : null;
    for (int i = 0; i < outlinks.size(); i++) {
      LinkDatum outlink = outlinks.get(i);
      String toUrl = outlink.getUrl();
      
      // remove any url that is in the loopset, same as LinkRank
      if (loopSet != null && loopSet.contains(toUrl)) {
        continue;
      }
      
      // collect the outlink as an inlink with the node 
      output.collect(new Text(toUrl), new LinkNode(fromUrl, node));
    }
  }
}
 
Developer: yahoo, Project: anthelion, Lines: 48, Source: LinkDumper.java


Note: The org.apache.hadoop.io.ObjectWritable.get method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Do not republish without permission.