

Java FloatWritable.get Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.FloatWritable.get. If you have been wondering what exactly FloatWritable.get does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.io.FloatWritable.


The following shows 10 code examples of the FloatWritable.get method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
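Before diving into the examples, here is a minimal, self-contained sketch of the method's basic contract. The wrapper class FloatWritableBasics is illustrative only; FloatWritable, its constructor, set(), and get() are part of Hadoop's public API.

import org.apache.hadoop.io.FloatWritable;

public class FloatWritableBasics {
    public static void main(String[] args) {
        // get() returns the primitive float currently held by the writable
        FloatWritable fw = new FloatWritable(3.14f);
        System.out.println(fw.get()); // 3.14
        fw.set(-1.5f);
        System.out.println(fw.get()); // -1.5
    }
}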

Example 1: reduce

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
/**
 * Flips and collects the url and numeric sort value.
 */
public void reduce(FloatWritable key, Iterator<Text> values,
    OutputCollector<Text, FloatWritable> output, Reporter reporter)
    throws IOException {

  // negate the negated sort key to recover the original value; zero is
  // special-cased so the reducer never emits -0.0f
  float val = key.get();
  FloatWritable number = new FloatWritable(val == 0 ? 0 : -val);
  long numCollected = 0;

  // collect all values, this time with the url as key
  while (values.hasNext() && (numCollected < topn)) {
    Text url = WritableUtils.clone(values.next(), conf);
    output.collect(url, number);
    numCollected++;
  }
}
 
Developer ID: jorcox, Project: GeoCrawler, Lines: 21, Source: NodeDumper.java

Example 2: reduce

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public void reduce(Text key, Iterable<FloatWritable> values, Context context)
		throws IOException, InterruptedException {

	// track the running maximum; starting from NEGATIVE_INFINITY also
	// handles the case where every value is negative
	float max = Float.NEGATIVE_INFINITY;
	for (FloatWritable val : values) {
		if (val.get() > max) {
			max = val.get();
		}
	}
	context.write(key, new FloatWritable(max));
}
 
Developer ID: aadishgoel2013, Project: Hadoop-Codes, Lines: 13, Source: MaximumAverageReducer.java
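Reducers like the one above plug into a standard MapReduce job. The sketch below shows hedged driver wiring under the assumption of a companion mapper that emits <Text, FloatWritable> pairs; MaximumAverageDriver and MaximumAverageMapper are hypothetical names, not classes from the original repository.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MaximumAverageDriver { // hypothetical driver class
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "maximum average");
        job.setJarByClass(MaximumAverageDriver.class);
        job.setMapperClass(MaximumAverageMapper.class); // hypothetical mapper emitting <Text, FloatWritable>
        job.setReducerClass(MaximumAverageReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FloatWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}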

Example 3: reduce

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public void reduce(Text key, Iterable<FloatWritable> values, Context context
		) throws IOException, InterruptedException {
	float starSum = 0; // initialize the sum for each business's ratings
	int totalRatings = 0;
	for (FloatWritable val : values) {
		starSum += val.get();
		totalRatings++;
	}

	float avgRating = starSum / totalRatings; // average rating for this business
	reducersideMap.put(key.toString(), Float.valueOf(avgRating));
}
 
Developer ID: BhargaviRavula, Project: Bigdata, Lines: 13, Source: Question3.java
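Since this reducer only fills reducersideMap, a companion cleanup() presumably emits the accumulated averages once all keys have been processed. The following is a hedged sketch of that pattern; the original project's cleanup() may differ (for instance, it may sort the map or emit only a top-N).

@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
    // emit every accumulated <business, average rating> pair in one pass
    for (Map.Entry<String, Float> e : reducersideMap.entrySet()) {
        context.write(new Text(e.getKey()), new FloatWritable(e.getValue()));
    }
}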

Example 4: iterate

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public boolean iterate(FloatWritable mean, FloatWritable covar) {
    if (mean == null || covar == null) {
        return true;
    }
    if (partial == null) {
        this.partial = new PartialResult();
    }
    float covar_f = covar.get();
    partial.sum_mean_div_covar += (mean.get() / covar_f); // accumulate mean/variance terms
    partial.sum_inv_covar += (1.f / covar_f); // accumulate inverse-variance terms
    return true;
}
 
Developer ID: apache, Project: incubator-hivemall, Lines: 13, Source: ArgminKLDistanceUDAF.java
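The two sums accumulated above are exactly what a precision-weighted mean needs: the combined estimate that minimizes the KL distance is (sum of mean/covar) / (sum of 1/covar). Below is a hedged sketch of the corresponding terminate(); it is inferred from the iterate() above, not copied from the project.

public FloatWritable terminate() {
    if (partial == null) {
        return null;
    }
    // precision-weighted mean: (sum of mean/covar) / (sum of 1/covar)
    float mean = (float) (partial.sum_mean_div_covar / partial.sum_inv_covar);
    return new FloatWritable(mean);
}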

Example 5: evaluate

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
@Nonnull
public DoubleWritable evaluate(@Nullable List<FloatWritable> Pu,
        @Nullable List<FloatWritable> Qi, @Nullable DoubleWritable mu) throws HiveException {
    final double muValue = (mu == null) ? 0.d : mu.get();
    if (Pu == null || Qi == null) {
        return new DoubleWritable(muValue);
    }

    final int PuSize = Pu.size();
    final int QiSize = Qi.size();
    // workaround for TD: treat an empty factor vector like a missing one
    if (PuSize == 0 || QiSize == 0) {
        return new DoubleWritable(muValue);
    }

    if (QiSize != PuSize) {
        throw new HiveException("|Pu| " + PuSize + " was not equal to |Qi| " + QiSize);
    }

    double ret = muValue;
    for (int k = 0; k < PuSize; k++) {
        FloatWritable Pu_k = Pu.get(k);
        if (Pu_k == null) {
            continue;
        }
        FloatWritable Qi_k = Qi.get(k);
        if (Qi_k == null) {
            continue;
        }
        ret += Pu_k.get() * Qi_k.get();
    }
    return new DoubleWritable(ret);
}
 
Developer ID: apache, Project: incubator-hivemall, Lines: 36, Source: MFPredictionUDF.java
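In matrix-factorization terms, the result is the predicted rating mu + Pu · Qi. An illustrative call follows (not from the project; it assumes the UDF has a no-argument constructor, as Hive UDFs typically do, and a real caller must handle the declared HiveException):

List<FloatWritable> Pu = Arrays.asList(new FloatWritable(0.5f), new FloatWritable(1.0f));
List<FloatWritable> Qi = Arrays.asList(new FloatWritable(2.0f), new FloatWritable(0.5f));
DoubleWritable r = new MFPredictionUDF().evaluate(Pu, Qi, new DoubleWritable(3.0d));
// r.get() == 3.0 + 0.5*2.0 + 1.0*0.5 == 4.5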

Example 6: evaluate

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
@Nullable
public List<String> evaluate(@Nullable final List<FloatWritable> features,
        @Nullable String biasName) {
    if (features == null) {
        return null;
    }
    final int size = features.size();
    if (size == 0) {
        return Collections.emptyList();
    }

    final StringBuilder buf = new StringBuilder(64);
    final ArrayList<String> list = new ArrayList<String>(size);
    for (int i = 0; i < size; i++) {
        final FloatWritable o = features.get(i);
        if (o != null) {
            final String s;
            final float v = o.get();
            if (biasName != null) {
                s = buf.append(biasName).append(':').append(v).toString();
            } else {
                s = buf.append(i).append(':').append(v).toString();
            }
            list.add(s);
            StringUtils.clear(buf);
        }
    }
    return list;
}
 
Developer ID: apache, Project: incubator-hivemall, Lines: 30, Source: ToSparseFeaturesUDF.java
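An illustrative input/output pair (not from the project): with a null biasName, a dense vector is rendered as index:value strings, zero values included, since only null entries are skipped.

List<FloatWritable> dense = Arrays.asList(
    new FloatWritable(1.1f), new FloatWritable(0.0f), new FloatWritable(2.3f));
List<String> sparse = new ToSparseFeaturesUDF().evaluate(dense, null);
// sparse == ["0:1.1", "1:0.0", "2:2.3"]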

Example 7: evaluate

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public DoubleWritable evaluate(Map<IntWritable, FloatWritable> map, List<IntWritable> keys) {
    double sum = 0d;
    for (IntWritable k : keys) {
        FloatWritable v = map.get(k);
        if (v != null) {
            sum += (double) v.get();
        }
    }
    return val(sum);
}
 
Developer ID: apache, Project: incubator-hivemall, Lines: 11, Source: MapGetSumUDF.java
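An illustrative call (not from the project): keys absent from the map contribute nothing to the sum.

Map<IntWritable, FloatWritable> map = new HashMap<>();
map.put(new IntWritable(1), new FloatWritable(1.5f));
map.put(new IntWritable(2), new FloatWritable(2.5f));
List<IntWritable> keys = Arrays.asList(
    new IntWritable(1), new IntWritable(2), new IntWritable(3));
// evaluate(map, keys) returns 4.0: key 3 is missing, so only 1.5 + 2.5 count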

Example 8: reduce

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public void reduce(Text key, Iterable<FloatWritable> values, Context context)
		throws IOException, InterruptedException {
	
	// track the running maximum; starting from NEGATIVE_INFINITY also
	// handles temperatures that are all below zero
	float max = Float.NEGATIVE_INFINITY;
	for (FloatWritable value : values) {
		if (max < value.get()) {
			max = value.get();
		}
	}

	context.write(key, new FloatWritable(max));
}
 
Developer ID: aadishgoel2013, Project: Hadoop-Codes, Lines: 18, Source: MaxTempReducer.java

Example 9: next

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
/**
 * Dummy record reader used by Pipes: the key carries the job's progress, so
 * next() simply records the key's float value as the current progress.
 */
public synchronized boolean next(FloatWritable key, NullWritable value)
    throws IOException {
  progress = key.get();
  return true;
}
 
Developer ID: naver, Project: hadoop, Lines: 6, Source: PipesNonJavaInputFormat.java

Example 10: setFetchSchedule

import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
@Override
public CrawlDatum setFetchSchedule(Text url, CrawlDatum datum,
    long prevFetchTime, long prevModifiedTime, long fetchTime,
    long modifiedTime, int state) {
  super.setFetchSchedule(url, datum, prevFetchTime, prevModifiedTime,
      fetchTime, modifiedTime, state);

  float interval = datum.getFetchInterval();
  long refTime = fetchTime;

  // https://issues.apache.org/jira/browse/NUTCH-1430
  interval = (interval == 0) ? defaultInterval : interval;

  if (datum.getMetaData().containsKey(Nutch.WRITABLE_FIXED_INTERVAL_KEY)) {
    // if a fixed fetch interval was preset in the CrawlDatum metadata, use it
    FloatWritable customIntervalWritable = (FloatWritable) (datum
        .getMetaData().get(Nutch.WRITABLE_FIXED_INTERVAL_KEY));
    interval = customIntervalWritable.get();
  } else {
    if (modifiedTime <= 0)
      modifiedTime = fetchTime;
    switch (state) {
    case FetchSchedule.STATUS_MODIFIED:
      interval *= (1.0f - DEC_RATE);
      break;
    case FetchSchedule.STATUS_NOTMODIFIED:
      interval *= (1.0f + INC_RATE);
      break;
    case FetchSchedule.STATUS_UNKNOWN:
      break;
    }
    if (SYNC_DELTA) {
      // try to synchronize with the time of change
      long delta = (fetchTime - modifiedTime) / 1000L;
      if (delta > interval)
        interval = delta;
      refTime = fetchTime - Math.round(delta * SYNC_DELTA_RATE * 1000);
    }
    if (interval < MIN_INTERVAL) {
      interval = MIN_INTERVAL;
    } else if (interval > MAX_INTERVAL) {
      interval = MAX_INTERVAL;
    }
  }

  datum.setFetchInterval(interval);
  datum.setFetchTime(refTime + Math.round(interval * 1000.0));
  datum.setModifiedTime(modifiedTime);
  return datum;
}
 
Developer ID: jorcox, Project: GeoCrawler, Lines: 51, Source: AdaptiveFetchSchedule.java
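To make the adaptation concrete, here is a hedged arithmetic sketch assuming INC_RATE = 0.4 and DEC_RATE = 0.2 (both are configurable in Nutch, so treat the numbers as illustrative):

float interval = 86400f;    // start at 1 day
interval *= (1.0f + 0.4f);  // page unmodified: interval grows to 120960s (~1.4 days)
interval *= (1.0f - 0.2f);  // page modified:   interval shrinks to 96768s (~1.12 days)
// the result is then clamped to [MIN_INTERVAL, MAX_INTERVAL]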


Note: The org.apache.hadoop.io.FloatWritable.get examples in this article were curated by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or distribution should follow the corresponding project's license. Do not reproduce without permission.