This article collects typical usage examples of the Java method org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.copyToStandardObject. If you have been wondering what ObjectInspectorUtils.copyToStandardObject does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of its enclosing class, org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.
The section below presents 15 code examples of ObjectInspectorUtils.copyToStandardObject, sorted by popularity by default.
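Before the examples, a minimal self-contained sketch of the method's two overloads may help (the class name and the choice of a string inspector are illustrative assumptions, not taken from the examples below). copyToStandardObject copies a value read through an ObjectInspector into a standalone "standard" object, and the ObjectInspectorCopyOption controls whether the copy uses plain Java types or Hadoop Writable types:
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class CopyToStandardObjectSketch {
    public static void main(String[] args) {
        ObjectInspector stringOI =
            PrimitiveObjectInspectorFactory.javaStringObjectInspector;

        // Two-argument overload: copies with the DEFAULT option.
        Object copy = ObjectInspectorUtils.copyToStandardObject("value", stringOI);

        // Three-argument overload: JAVA yields plain Java objects (String),
        // WRITABLE yields Hadoop writables (Text).
        Object javaCopy = ObjectInspectorUtils.copyToStandardObject(
            "value", stringOI, ObjectInspectorCopyOption.JAVA);
        Object writableCopy = ObjectInspectorUtils.copyToStandardObject(
            "value", stringOI, ObjectInspectorCopyOption.WRITABLE);

        System.out.println(javaCopy.getClass());     // class java.lang.String
        System.out.println(writableCopy.getClass()); // class org.apache.hadoop.io.Text
    }
}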
Example 1: parseFeatures
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Nullable
FeatureValue[] parseFeatures(@Nonnull final List<?> features) {
    final int size = features.size();
    if (size == 0) {
        return null;
    }
    final ObjectInspector featureInspector = featureListOI.getListElementObjectInspector();
    final FeatureValue[] featureVector = new FeatureValue[size];
    for (int i = 0; i < size; i++) {
        Object f = features.get(i);
        if (f == null) {
            continue;
        }
        final FeatureValue fv;
        if (parseFeature) {
            fv = FeatureValue.parse(f);
        } else {
            Object k = ObjectInspectorUtils.copyToStandardObject(f, featureInspector);
            fv = new FeatureValue(k, 1.f);
        }
        featureVector[i] = fv;
    }
    return featureVector;
}
Example 2: parseFeatures
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Nullable
protected final FeatureValue[] parseFeatures(@Nonnull final List<?> features) {
    final int size = features.size();
    if (size == 0) {
        return null;
    }
    final ObjectInspector featureInspector = featureListOI.getListElementObjectInspector();
    final FeatureValue[] featureVector = new FeatureValue[size];
    for (int i = 0; i < size; i++) {
        Object f = features.get(i);
        if (f == null) {
            continue;
        }
        final FeatureValue fv;
        if (parseFeature) {
            fv = FeatureValue.parse(f);
        } else {
            Object k = ObjectInspectorUtils.copyToStandardObject(f, featureInspector);
            fv = new FeatureValue(k, 1.f);
        }
        featureVector[i] = fv;
    }
    return featureVector;
}
Example 3: evaluate
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
    ret.clear();
    for (int i = 0; i < arguments.length; i++) {
        final Object arrayObject = arguments[i].get();
        if (arrayObject == null) {
            continue;
        }
        final ListObjectInspector arrayOI = (ListObjectInspector) argumentOIs[i];
        final int arraylength = arrayOI.getListLength(arrayObject);
        for (int j = 0; j < arraylength; j++) {
            Object rawObj = arrayOI.getListElement(arrayObject, j);
            ObjectInspector elemOI = arrayOI.getListElementObjectInspector();
            // copy each element out of the (possibly lazily deserialized) source array
            Object obj = ObjectInspectorUtils.copyToStandardObject(rawObj, elemOI);
            ret.add(obj);
        }
    }
    return ret;
}
Example 4: iterate
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void iterate(@SuppressWarnings("deprecation") AggregationBuffer agg,
        Object[] parameters) throws HiveException {
    assert (parameters.length == 3);
    if (parameters[0] == null) {
        return;
    }
    // copy key and value into standard objects so they can be safely retained in the buffer
    Object key = ObjectInspectorUtils.copyToStandardObject(parameters[0], inputKeyOI);
    Object value = ObjectInspectorUtils.copyToStandardObject(parameters[1], inputValueOI);
    int size = Math.abs(HiveUtils.getInt(parameters[2], sizeOI)); // size could be negative for tail-k
    MapAggregationBuffer myagg = (MapAggregationBuffer) agg;
    if (myagg.container == null) {
        initBuffer(myagg, size);
    }
    myagg.container.put(key, value);
}
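The copies in iterate() are not optional: Hive may reuse the objects it passes as parameters across rows (for example, lazily deserialized values backed by a shared buffer), so storing a bare reference in the aggregation buffer could let later rows overwrite earlier entries.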
Example 5: merge
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void merge(@SuppressWarnings("deprecation") AggregationBuffer agg, Object partial)
        throws HiveException {
    if (partial == null) {
        return;
    }
    MapAggregationBuffer myagg = (MapAggregationBuffer) agg;
    Object partialMapObj = internalMergeOI.getStructFieldData(partial, partialMapField);
    Map<?, ?> partialMap = partialMapOI.getMap(HiveUtils.castLazyBinaryObject(partialMapObj));
    if (partialMap == null) {
        return;
    }
    if (myagg.container == null) {
        Object sizeObj = internalMergeOI.getStructFieldData(partial, sizeField);
        int size = HiveUtils.getInt(sizeObj, sizeOI);
        initBuffer(myagg, size);
    }
    for (Map.Entry<?, ?> e : partialMap.entrySet()) {
        Object key = ObjectInspectorUtils.copyToStandardObject(e.getKey(), inputKeyOI);
        Object value = ObjectInspectorUtils.copyToStandardObject(e.getValue(), inputValueOI);
        myagg.container.put(key, value);
    }
}
Example 6: iterate
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void iterate(@SuppressWarnings("deprecation") AggregationBuffer agg,
        Object[] parameters) throws HiveException {
    if (parameters[0] == null) {
        return;
    }
    Object value = ObjectInspectorUtils.copyToStandardObject(parameters[0], valueOI);
    final Object key;
    if (sortByKey) {
        if (parameters[1] == null) {
            return;
        }
        key = ObjectInspectorUtils.copyToStandardObject(parameters[1], keyOI);
    } else {
        // set value to key
        key = ObjectInspectorUtils.copyToStandardObject(parameters[0], valueOI);
    }
    TupleWithKey tuple = new TupleWithKey(key, value);
    QueueAggregationBuffer myagg = (QueueAggregationBuffer) agg;
    myagg.iterate(tuple);
}
Example 7: parseFeatures
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Nonnull
protected final List<FeatureValue> parseFeatures(@Nonnull final List<?> features,
        @Nonnull final ObjectInspector featureInspector, final boolean parseFeature) {
    final int numFeatures = features.size();
    if (numFeatures == 0) {
        return Collections.emptyList();
    }
    final List<FeatureValue> list = new ArrayList<FeatureValue>(numFeatures);
    for (Object f : features) {
        if (f == null) {
            continue;
        }
        final FeatureValue fv;
        if (parseFeature) {
            fv = FeatureValue.parse(f);
        } else {
            Object o = ObjectInspectorUtils.copyToStandardObject(f, featureInspector,
                ObjectInspectorCopyOption.WRITABLE);
            Writable k = WritableUtils.toWritable(o);
            fv = new FeatureValue(k, 1.f);
        }
        list.add(fv);
    }
    return list;
}
Example 8: iterate
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void iterate(AggregationBuffer aggregate, Object[] parameters) throws HiveException {
    FunnelAggregateBuffer funnelAggregate = (FunnelAggregateBuffer) aggregate;
    // Add the funnel steps if not already stored
    if (funnelAggregate.funnelSteps.isEmpty()) {
        // Funnel steps start at index 2
        addFunnelSteps(funnelAggregate, Arrays.copyOfRange(parameters, 2, parameters.length));
    }
    // Get the action_column value and add it (if it matches a funnel)
    Object action = parameters[0];
    Object timestamp = parameters[1];
    if (action != null && timestamp != null) {
        // Get the action value
        Object actionValue = ObjectInspectorUtils.copyToStandardObject(action, actionObjectInspector);
        // Get the timestamp value
        Object timestampValue = ObjectInspectorUtils.copyToStandardObject(timestamp, timestampObjectInspector);
        // If the action is not null and it is one of the funnels we are looking for, keep it
        if (actionValue != null && timestampValue != null && funnelAggregate.funnelSet.contains(actionValue)) {
            funnelAggregate.actions.add(actionValue);
            funnelAggregate.timestamps.add(timestampValue);
        }
    }
}
Example 9: parseFeatures
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Nullable
public final FeatureValue[] parseFeatures(@Nonnull final List<?> features) {
    final int size = features.size();
    if (size == 0) {
        return null;
    }
    final ObjectInspector featureInspector = featureListOI.getListElementObjectInspector();
    final FeatureValue[] featureVector = new FeatureValue[size];
    for (int i = 0; i < size; i++) {
        Object f = features.get(i);
        if (f == null) {
            continue;
        }
        final FeatureValue fv;
        if (featureType == FeatureType.STRING) {
            String s = f.toString();
            fv = FeatureValue.parseFeatureAsString(s);
        } else {
            Object k = ObjectInspectorUtils.copyToStandardObject(f, featureInspector,
                ObjectInspectorCopyOption.JAVA); // should be Integer or Long
            fv = new FeatureValue(k, 1.f);
        }
        featureVector[i] = fv;
    }
    return featureVector;
}
Example 10: merge
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void merge(@SuppressWarnings("deprecation") AggregationBuffer agg, Object partial)
        throws HiveException {
    if (partial == null) {
        return;
    }
    final MaxAgg maxagg = (MaxAgg) agg;
    final List<Object> otherObjects;
    if (partial instanceof Object[]) {
        otherObjects = Arrays.asList((Object[]) partial);
    } else if (partial instanceof LazyBinaryStruct) {
        otherObjects = ((LazyBinaryStruct) partial).getFieldsAsList();
    } else if (inputStructOI != null) {
        otherObjects = inputStructOI.getStructFieldsDataAsList(partial);
    } else {
        throw new HiveException("Invalid type: " + partial.getClass().getName());
    }
    boolean isMax = false;
    if (maxagg.objects == null) {
        isMax = true;
    } else {
        int cmp = ObjectInspectorUtils.compare(maxagg.objects[0], outputOIs[0],
            otherObjects.get(0), inputOIs[0]);
        if (cmp < 0) {
            isMax = true;
        }
    }
    if (isMax) {
        int length = otherObjects.size();
        maxagg.objects = new Object[length];
        for (int i = 0; i < length; i++) {
            maxagg.objects[i] = ObjectInspectorUtils.copyToStandardObject(
                otherObjects.get(i), inputOIs[i]);
        }
    }
}
Example 11: copyToWritable
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Nonnull
public static Writable copyToWritable(@Nonnull final Object obj,
        @CheckForNull final PrimitiveObjectInspector oi) {
    Preconditions.checkNotNull(oi);
    Object ret = ObjectInspectorUtils.copyToStandardObject(obj, oi,
        ObjectInspectorCopyOption.WRITABLE);
    return (Writable) ret;
}
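A possible call site for this helper (the inspector and value are illustrative assumptions): with ObjectInspectorCopyOption.WRITABLE, a plain Java value is copied into its Hadoop Writable counterpart.
// Hypothetical usage: copying a Java Integer through a Java-int inspector
// yields an org.apache.hadoop.io.IntWritable holding the same value.
PrimitiveObjectInspector intOI = PrimitiveObjectInspectorFactory.javaIntObjectInspector;
Writable w = copyToWritable(Integer.valueOf(42), intOI); // IntWritable(42)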
Example 12: process
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void process(Object[] args) throws HiveException {
    List<?> features = (List<?>) featureListOI.getList(args[0]);
    FeatureValue[] featureVector = parseFeatures(features);
    if (featureVector == null) {
        return;
    }
    Object label = ObjectInspectorUtils.copyToStandardObject(args[1], labelInputOI);
    if (label == null) {
        throw new UDFArgumentException("label value must not be NULL");
    }
    count++;
    train(featureVector, label);
}
Example 13: process
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void process(Object[] args) throws HiveException {
    final int argStartIndex = hasOption ? 2 : 1;
    final Object[] row = new Object[args.length - argStartIndex];
    for (int i = argStartIndex; i < args.length; i++) {
        Object arg = args[i];
        ObjectInspector argOI = argOIs[i];
        // copy each argument into a standard object before buffering it in the amplifier
        row[i - argStartIndex] = ObjectInspectorUtils.copyToStandardObject(arg, argOI,
            ObjectInspectorCopyOption.DEFAULT);
    }
    amplifier.add(row);
}
Example 14: putIntoMap
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
protected void putIntoMap(Object key, Object value, MapAggregationBuffer myagg) {
    Object pKeyCopy = ObjectInspectorUtils.copyToStandardObject(key, this.inputKeyOI);
    Object pValueCopy = ObjectInspectorUtils.copyToStandardObject(value, this.inputValueOI);
    myagg.container.put(pKeyCopy, pValueCopy);
}
Example 15: process
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; // import the required package/class
@Override
public void process(Object[] args) throws HiveException {
    final Object arg1 = args[1];
    if (isSameGroup(arg1) == false) {
        Object group = ObjectInspectorUtils.copyToStandardObject(arg1, argOIs[1],
            ObjectInspectorCopyOption.DEFAULT); // arg1 and group may be null
        this._previousGroup = group;
        if (_queue != null) {
            drainQueue();
        }
        if (_constantK == false) {
            final int k = PrimitiveObjectInspectorUtils.getInt(args[0], kOI);
            if (k == 0) {
                return;
            }
            if (k != _prevK) {
                this._queue = getQueue(k);
                this._prevK = k;
            }
        }
    }
    final double key = PrimitiveObjectInspectorUtils.getDouble(args[2], cmpKeyOI);
    final Object[] row;
    TupleWithKey tuple = this._tuple;
    if (_tuple == null) {
        row = new Object[args.length - 1];
        tuple = new TupleWithKey(key, row);
        this._tuple = tuple;
    } else {
        row = tuple.getRow();
        tuple.setKey(key);
    }
    for (int i = 3; i < args.length; i++) {
        Object arg = args[i];
        ObjectInspector argOI = argOIs[i];
        row[i - 1] = ObjectInspectorUtils.copyToStandardObject(arg, argOI,
            ObjectInspectorCopyOption.DEFAULT);
    }
    if (_queue.offer(tuple)) {
        this._tuple = null;
    }
}