This article collects typical usage examples of the FloatWritable.get method from org.apache.hadoop.io in Java. If you are unsure what FloatWritable.get does, how to call it, or where it is used in practice, the curated examples below should help; you can also explore the enclosing class, org.apache.hadoop.io.FloatWritable, further.
The following shows 10 code examples of FloatWritable.get, ordered by popularity.
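Before the job-level examples, a minimal, self-contained sketch of the FloatWritable accessor pair (get/set) in isolation, showing that a Writable is a mutable box around a primitive:

import org.apache.hadoop.io.FloatWritable;

public class FloatWritableDemo {
  public static void main(String[] args) {
    FloatWritable w = new FloatWritable(3.14f); // wrap a primitive float
    float v = w.get();                          // unwrap it again
    w.set(v * 2);                               // Writables are mutable and reusable
    System.out.println(w.get());                // prints 6.28
  }
}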
Example 1: reduce
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on

/**
 * Flips and collects the url and numeric sort value.
 */
public void reduce(FloatWritable key, Iterator<Text> values,
    OutputCollector<Text, FloatWritable> output, Reporter reporter)
    throws IOException {
  // negate the negated key to recover the original value; a key of 0 is
  // special-cased because -0.0f behaves oddly otherwise
  float val = key.get();
  FloatWritable number = new FloatWritable(val == 0 ? 0 : -val);
  long numCollected = 0;
  // collect all values, this time with the url as key
  while (values.hasNext() && (numCollected < topn)) {
    Text url = WritableUtils.clone(values.next(), conf);
    output.collect(url, number);
    numCollected++;
  }
}
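For context, this reducer only makes sense if the map side emitted each score negated, so that the framework's ascending float sort produces a descending top-N. A minimal sketch of such a mapper in the same old mapred API (the exact field and key names are assumptions, not taken from the original source):

import java.io.IOException;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public void map(Text url, FloatWritable score,
    OutputCollector<FloatWritable, Text> output, Reporter reporter)
    throws IOException {
  // negate each score so the ascending FloatWritable sort yields a descending top-N
  output.collect(new FloatWritable(-score.get()), url);
}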
Example 2: reduce
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public void reduce(Text key, Iterable<FloatWritable> values, Context context)
    throws IOException, InterruptedException {
  // assumes non-negative inputs; start from Float.NEGATIVE_INFINITY if
  // negative values are possible
  float max = 0;
  for (FloatWritable val : values) {
    if (val.get() > max) {
      max = val.get();
    }
  }
  context.write(key, new FloatWritable(max));
}
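A minimal driver sketch for wiring a reducer like this into a job; the class and path names here are hypothetical placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

Job job = Job.getInstance(new Configuration(), "max per key");
job.setJarByClass(MaxDriver.class);
job.setMapperClass(MaxMapper.class);    // assumed to emit <Text, FloatWritable>
job.setReducerClass(MaxReducer.class);  // the reduce method shown above
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FloatWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);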
Example 3: reduce
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public void reduce(Text key, Iterable<FloatWritable> values, Context context)
    throws IOException, InterruptedException {
  float starSum = 0; // running sum of this business's ratings
  int totalRatings = 0;
  for (FloatWritable val : values) {
    starSum += val.get();
    totalRatings++;
  }
  float avgRating = starSum / totalRatings; // average rating of the business
  reducersideMap.put(key.toString(), Float.valueOf(avgRating)); // avoids the deprecated Float(float) constructor
}
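Note that reducersideMap is a field of the enclosing reducer, not a local, so the averages survive across reduce calls. A plausible enclosing skeleton, where the field declaration and the cleanup logic are assumptions rather than part of the original source:

import java.util.HashMap;
import java.util.Map;

private final Map<String, Float> reducersideMap = new HashMap<>();

@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  // emit (or otherwise consume) the accumulated per-business averages once
  for (Map.Entry<String, Float> e : reducersideMap.entrySet()) {
    context.write(new Text(e.getKey()), new FloatWritable(e.getValue()));
  }
}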
Example 4: iterate
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public boolean iterate(FloatWritable mean, FloatWritable covar) {
  if (mean == null || covar == null) {
    return true; // skip null rows but keep aggregating
  }
  if (partial == null) {
    this.partial = new PartialResult();
  }
  float covar_f = covar.get();
  partial.sum_mean_div_covar += (mean.get() / covar_f);
  partial.sum_inv_covar += (1.f / covar_f);
  return true;
}
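The partial state accumulates the two sums of an inverse-variance-weighted mean. A sketch of what PartialResult and a matching terminate step could look like; these are inferred from the field names, not taken from the original class:

static final class PartialResult {
  double sum_mean_div_covar = 0.d; // running sum of mean_i / covar_i
  double sum_inv_covar = 0.d;      // running sum of 1 / covar_i
}

public FloatWritable terminate() {
  if (partial == null || partial.sum_inv_covar == 0.d) {
    return null;
  }
  // inverse-variance weighted mean: sum(mean/covar) / sum(1/covar)
  return new FloatWritable((float) (partial.sum_mean_div_covar / partial.sum_inv_covar));
}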
Example 5: evaluate
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
@Nonnull
public DoubleWritable evaluate(@Nullable List<FloatWritable> Pu,
    @Nullable List<FloatWritable> Qi, @Nullable DoubleWritable mu) throws HiveException {
  final double muValue = (mu == null) ? 0.d : mu.get();
  if (Pu == null || Qi == null) {
    return new DoubleWritable(muValue);
  }
  final int PuSize = Pu.size();
  final int QiSize = Qi.size();
  // workaround for TD
  if (PuSize == 0 || QiSize == 0) {
    return new DoubleWritable(muValue);
  }
  if (QiSize != PuSize) {
    throw new HiveException("|Pu| " + PuSize + " was not equal to |Qi| " + QiSize);
  }
  double ret = muValue;
  for (int k = 0; k < PuSize; k++) {
    FloatWritable Pu_k = Pu.get(k);
    if (Pu_k == null) {
      continue;
    }
    FloatWritable Qi_k = Qi.get(k);
    if (Qi_k == null) {
      continue;
    }
    ret += Pu_k.get() * Qi_k.get();
  }
  return new DoubleWritable(ret);
}
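A quick local check of the dot-product logic with hand-built factor vectors; the values are illustrative only, and the fragment assumes a caller that can propagate HiveException:

import java.util.Arrays;
import java.util.List;

List<FloatWritable> pu = Arrays.asList(new FloatWritable(0.5f), new FloatWritable(1.0f));
List<FloatWritable> qi = Arrays.asList(new FloatWritable(2.0f), new FloatWritable(3.0f));
DoubleWritable mu = new DoubleWritable(0.1);
// expected: 0.1 + 0.5*2.0 + 1.0*3.0 = 4.1
System.out.println(evaluate(pu, qi, mu).get());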
Example 6: evaluate
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
@Nullable
public List<String> evaluate(@Nullable final List<FloatWritable> features,
    @Nullable String biasName) {
  if (features == null) {
    return null;
  }
  final int size = features.size();
  if (size == 0) {
    return Collections.emptyList();
  }
  final StringBuilder buf = new StringBuilder(64);
  final ArrayList<String> list = new ArrayList<String>(size);
  for (int i = 0; i < size; i++) {
    final FloatWritable o = features.get(i);
    if (o != null) {
      final String s;
      final float v = o.get();
      if (biasName != null) {
        s = buf.append(biasName).append(':').append(v).toString();
      } else {
        s = buf.append(i).append(':').append(v).toString();
      }
      list.add(s);
      StringUtils.clear(buf); // reset the shared builder for the next feature
    }
  }
  return list;
}
Example 7: evaluate
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public DoubleWritable evaluate(Map<IntWritable, FloatWritable> map, List<IntWritable> keys) {
  double sum = 0d;
  for (IntWritable k : keys) {
    FloatWritable v = map.get(k); // Writable keys implement equals/hashCode, so map lookups work
    if (v != null) {
      sum += (double) v.get();
    }
  }
  return val(sum);
}
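val is not shown in the fragment; given the return type, it is presumably a small helper on the enclosing class that boxes the primitive sum, along the lines of:

private static DoubleWritable val(final double v) {
  return new DoubleWritable(v);
}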
Example 8: reduce
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public void reduce(Text key, Iterable<FloatWritable> values, Context context)
    throws IOException, InterruptedException {
  float max = 0; // same caveat as Example 2: assumes non-negative values
  for (FloatWritable value : values) {
    if (max < value.get()) {
      max = value.get();
    }
  }
  context.write(key, new FloatWritable(max));
}
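Because max is associative and commutative, and this reducer's input and output types match, the same class can double as a combiner to shrink shuffle traffic (class name hypothetical):

job.setCombinerClass(MaxReducer.class);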
Example 9: next
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
public synchronized boolean next(FloatWritable key, NullWritable value)
    throws IOException {
  progress = key.get(); // the float key itself encodes read progress
  return true;
}
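This fragment appears to come from a RecordReader whose float keys directly encode how far the read has advanced; under that assumption, the matching getProgress of the old mapred API is trivial:

public synchronized float getProgress() throws IOException {
  return progress; // last key seen, already in [0, 1]
}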
Example 10: setFetchSchedule
import org.apache.hadoop.io.FloatWritable; // import the package/class this method depends on
@Override
public CrawlDatum setFetchSchedule(Text url, CrawlDatum datum,
    long prevFetchTime, long prevModifiedTime, long fetchTime,
    long modifiedTime, int state) {
  super.setFetchSchedule(url, datum, prevFetchTime, prevModifiedTime,
      fetchTime, modifiedTime, state);
  float interval = datum.getFetchInterval();
  long refTime = fetchTime;
  // https://issues.apache.org/jira/browse/NUTCH-1430
  interval = (interval == 0) ? defaultInterval : interval;
  if (datum.getMetaData().containsKey(Nutch.WRITABLE_FIXED_INTERVAL_KEY)) {
    // Is the fetch interval preset in the CrawlDatum metadata? Then use the preset interval.
    FloatWritable customIntervalWritable = (FloatWritable) (datum
        .getMetaData().get(Nutch.WRITABLE_FIXED_INTERVAL_KEY));
    interval = customIntervalWritable.get();
  } else {
    if (modifiedTime <= 0)
      modifiedTime = fetchTime;
    switch (state) {
    case FetchSchedule.STATUS_MODIFIED:
      interval *= (1.0f - DEC_RATE);
      break;
    case FetchSchedule.STATUS_NOTMODIFIED:
      interval *= (1.0f + INC_RATE);
      break;
    case FetchSchedule.STATUS_UNKNOWN:
      break;
    }
    if (SYNC_DELTA) {
      // try to synchronize with the time of change
      long delta = (fetchTime - modifiedTime) / 1000L;
      if (delta > interval)
        interval = delta;
      refTime = fetchTime - Math.round(delta * SYNC_DELTA_RATE * 1000);
    }
    if (interval < MIN_INTERVAL) {
      interval = MIN_INTERVAL;
    } else if (interval > MAX_INTERVAL) {
      interval = MAX_INTERVAL;
    }
  }
  datum.setFetchInterval(interval);
  datum.setFetchTime(refTime + Math.round(interval * 1000.0));
  datum.setModifiedTime(modifiedTime);
  return datum;
}
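To make the adaptive step concrete, assuming, say, INC_RATE = 0.4 and DEC_RATE = 0.2 (configurable rates, not guaranteed defaults): a page on a 1-day interval (86400 s) that was not modified since the last fetch is next checked after 86400 * 1.4 = 120960 s, about 1.4 days; if it was modified, the interval shrinks to 86400 * 0.8 = 69120 s, about 0.8 days. The result is then clamped to [MIN_INTERVAL, MAX_INTERVAL], while a fixed interval stored under Nutch.WRITABLE_FIXED_INTERVAL_KEY bypasses the adaptation entirely.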