本文整理汇总了Java中org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator类的典型用法代码示例。如果您正苦于以下问题:Java GenericUDAFEvaluator类的具体用法?Java GenericUDAFEvaluator怎么用?Java GenericUDAFEvaluator使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
GenericUDAFEvaluator类属于org.apache.hadoop.hive.ql.udf.generic包,在下文中一共展示了GenericUDAFEvaluator类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Override
public GenericUDAFEvaluator getEvaluator(@Nonnull TypeInfo[] typeInfo) throws SemanticException {
    // Accepts two or three arguments; only the first two (the item lists) are
    // type-checked here.
    final int numArgs = typeInfo.length;
    if (numArgs != 2 && numArgs != 3) {
        throw new UDFArgumentTypeException(numArgs - 1, "_FUNC_ takes two or three arguments");
    }
    // First argument: array of primitives (the ranked items).
    final ListTypeInfo rankedListType = HiveUtils.asListTypeInfo(typeInfo[0]);
    if (!HiveUtils.isPrimitiveTypeInfo(rankedListType.getListElementTypeInfo())) {
        throw new UDFArgumentTypeException(0,
            "The first argument `array rankItems` is invalid form: " + typeInfo[0]);
    }
    // Second argument: array of primitives (the ground-truth items).
    final ListTypeInfo correctListType = HiveUtils.asListTypeInfo(typeInfo[1]);
    if (!HiveUtils.isPrimitiveTypeInfo(correctListType.getListElementTypeInfo())) {
        throw new UDFArgumentTypeException(1,
            "The second argument `array correctItems` is invalid form: " + typeInfo[1]);
    }
    return new HitRateUDAF.Evaluator();
}
示例2: getEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Override
public GenericUDAFEvaluator getEvaluator(@Nonnull TypeInfo[] typeInfo) throws SemanticException {
    // Two or three arguments are accepted; validate the two mandatory list args.
    final int numArgs = typeInfo.length;
    if (numArgs != 2 && numArgs != 3) {
        throw new UDFArgumentTypeException(numArgs - 1, "_FUNC_ takes two or three arguments");
    }
    // First argument must be an array whose elements are primitive.
    final ListTypeInfo rankedListType = HiveUtils.asListTypeInfo(typeInfo[0]);
    if (!HiveUtils.isPrimitiveTypeInfo(rankedListType.getListElementTypeInfo())) {
        throw new UDFArgumentTypeException(0,
            "The first argument `array rankItems` is invalid form: " + typeInfo[0]);
    }
    // Second argument must likewise be an array of primitives.
    final ListTypeInfo correctListType = HiveUtils.asListTypeInfo(typeInfo[1]);
    if (!HiveUtils.isPrimitiveTypeInfo(correctListType.getListElementTypeInfo())) {
        throw new UDFArgumentTypeException(1,
            "The second argument `array correctItems` is invalid form: " + typeInfo[1]);
    }
    return new Evaluator();
}
示例3: getEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Override
public GenericUDAFEvaluator getEvaluator(@Nonnull TypeInfo[] typeInfo) throws SemanticException {
    // Accepts two or three arguments and dispatches on the argument types.
    if (typeInfo.length != 2 && typeInfo.length != 3) {
        throw new UDFArgumentTypeException(typeInfo.length - 1,
            "_FUNC_ takes two or three arguments");
    }
    // A (number, integer) pair selects the pointwise classification evaluator.
    if (HiveUtils.isNumberTypeInfo(typeInfo[0]) && HiveUtils.isIntegerTypeInfo(typeInfo[1])) {
        return new ClassificationEvaluator();
    }
    // Otherwise both arguments must be arrays of primitives (ranking mode).
    final ListTypeInfo rankedListType = HiveUtils.asListTypeInfo(typeInfo[0]);
    if (!HiveUtils.isPrimitiveTypeInfo(rankedListType.getListElementTypeInfo())) {
        throw new UDFArgumentTypeException(0,
            "The first argument `array rankItems` is invalid form: " + typeInfo[0]);
    }
    final ListTypeInfo correctListType = HiveUtils.asListTypeInfo(typeInfo[1]);
    if (!HiveUtils.isPrimitiveTypeInfo(correctListType.getListElementTypeInfo())) {
        throw new UDFArgumentTypeException(1,
            "The second argument `array correctItems` is invalid form: " + typeInfo[1]);
    }
    return new RankingEvaluator();
}
示例4: getEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Override
public GenericUDAFEvaluator getEvaluator(@Nonnull TypeInfo[] typeInfo) throws SemanticException {
    // Two or three arguments; unlike the sibling resolvers, the first list's
    // elements may be either primitives or structs.
    if (typeInfo.length != 2 && typeInfo.length != 3) {
        throw new UDFArgumentTypeException(typeInfo.length - 1,
            "_FUNC_ takes two or three arguments");
    }
    final ListTypeInfo rankedListType = HiveUtils.asListTypeInfo(typeInfo[0]);
    final TypeInfo rankedElemType = rankedListType.getListElementTypeInfo();
    if (!HiveUtils.isPrimitiveTypeInfo(rankedElemType)
            && !HiveUtils.isStructTypeInfo(rankedElemType)) {
        throw new UDFArgumentTypeException(0,
            "The first argument `array rankItems` is invalid form: " + typeInfo[0]);
    }
    // Second argument: plain array of primitives.
    final ListTypeInfo correctListType = HiveUtils.asListTypeInfo(typeInfo[1]);
    if (!HiveUtils.isPrimitiveTypeInfo(correctListType.getListElementTypeInfo())) {
        throw new UDFArgumentTypeException(1,
            "The second argument `array correctItems` is invalid form: " + typeInfo[1]);
    }
    return new Evaluator();
}
示例5: getEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Override
public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info)
        throws SemanticException {
    // Validates (features: array<number>, labels: array<int>) before creating
    // the signal-to-noise-ratio evaluator.
    final ObjectInspector[] paramOIs = info.getParameterObjectInspectors();
    if (paramOIs.length != 2) {
        throw new UDFArgumentLengthException("Specify two arguments: " + paramOIs.length);
    }
    if (!HiveUtils.isNumberListOI(paramOIs[0])) {
        throw new UDFArgumentTypeException(0,
            "Only array<number> type argument is acceptable but " + paramOIs[0].getTypeName()
                + " was passed as `features`");
    }
    // Keep the list check first: the cast below is only safe once we know
    // paramOIs[1] is a list OI.
    if (!HiveUtils.isListOI(paramOIs[1])
            || !HiveUtils.isIntegerOI(
                ((ListObjectInspector) paramOIs[1]).getListElementObjectInspector())) {
        throw new UDFArgumentTypeException(1,
            "Only array<int> type argument is acceptable but " + paramOIs[1].getTypeName()
                + " was passed as `labels`");
    }
    return new SignalNoiseRatioUDAFEvaluator();
}
示例6: getEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Override
public GenericUDAFEvaluator getEvaluator(@Nonnull TypeInfo[] argTypes) throws SemanticException {
    // One-hot encoding aggregate: accepts one or more feature columns, each of
    // which must be a non-null primitive type.
    final int numFeatures = argTypes.length;
    if (numFeatures == 0) {
        throw new UDFArgumentException("_FUNC_ requires at least 1 argument");
    }
    for (int i = 0; i < numFeatures; i++) {
        if (argTypes[i] == null) {
            throw new UDFArgumentTypeException(i,
                "Null type is found. Only primitive type arguments are accepted.");
        }
        if (argTypes[i].getCategory() != ObjectInspector.Category.PRIMITIVE) {
            // BUGFIX: the message previously hard-coded "parameter 1" no matter
            // which argument failed; report the actual 1-based position instead.
            throw new UDFArgumentTypeException(i,
                "Only primitive type arguments are accepted but " + argTypes[i].getTypeName()
                    + " was passed as parameter " + (i + 1) + ".");
        }
    }
    return new GenericUDAFOnehotEncodingEvaluator();
}
示例7: getEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Override
public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info)
        throws SemanticException {
    // Both inputs must be numeric arrays: one row from each matrix operand.
    final ObjectInspector[] paramOIs = info.getParameterObjectInspectors();
    if (paramOIs.length != 2) {
        throw new UDFArgumentLengthException("Specify two arguments.");
    }
    // The two checks are symmetric, differing only in the reported name.
    final String[] paramNames = {"matrix0_row", "matrix1_row"};
    for (int i = 0; i < 2; i++) {
        if (!HiveUtils.isNumberListOI(paramOIs[i])) {
            throw new UDFArgumentTypeException(i,
                "Only array<number> type argument is acceptable but "
                    + paramOIs[i].getTypeName() + " was passed as `" + paramNames[i] + "`");
        }
    }
    return new TransposeAndDotUDAFEvaluator();
}
示例8: testMidAUC
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test
public void testMidAUC() throws Exception {
    // When true positives and false positives alternate down the score
    // ranking, the ranking carries no information and AUC is exactly 0.5.
    final double[] scores = new double[] {0.8, 0.7, 0.5, 0.3, 0.2};
    final int[][] alternatingLabels = new int[][] {
        {1, 0, 1, 0, 1},  // starts with a positive
        {0, 1, 0, 1, 0}   // starts with a negative
    };
    for (int[] labels : alternatingLabels) {
        evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
        evaluator.reset(agg);
        for (int row = 0; row < scores.length; row++) {
            evaluator.iterate(agg, new Object[] {scores[row], labels[row]});
        }
        Assert.assertEquals(0.5, agg.get(), 1e-5);
    }
}
示例9: test100
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test
public void test100() throws Exception {
// 100-sample AUC regression test. The score/label fixture below is copied
// verbatim -- do not reformat or retype; the expected value depends on the
// exact sequence. Scores are non-increasing, so iteration order is the rank order.
final double[] scores = new double[] {0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.8, 0.8,
0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.7, 0.7, 0.7, 0.7,
0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.6, 0.6, 0.6,
0.6, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.4, 0.4,
0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1};
// Binary ground-truth labels, one per score above.
final int[] labels = new int[] {1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0};
// Run one PARTIAL1 aggregation pass over all (score, label) pairs.
evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
evaluator.reset(agg);
for (int i = 0; i < scores.length; i++) {
evaluator.iterate(agg, new Object[] {scores[i], labels[i]});
}
// should equal to scikit-learn's result
// (i.e. sklearn.metrics.roc_auc_score on the same fixture)
Assert.assertEquals(0.567226890756, agg.get(), 1e-5);
}
示例10: testBinaryMultiSamplesAverageBinary
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test
public void testBinaryMultiSamplesAverageBinary() throws Exception {
    // Eight (actual, predicted) binary pairs; the expected F1 with beta=1 and
    // average="binary" is taken from Turi's worked example:
    // https://turi.com/learn/userguide/evaluation/classification.html#fscores-f1-fbeta-
    final int[] actual = {0, 1, 0, 0, 0, 1, 0, 0};
    final int[] predicted = {1, 0, 0, 1, 0, 1, 0, 1};
    final double beta = 1.;
    binarySetUp(actual[0], predicted[0], beta, "binary");
    evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
    evaluator.reset(agg);
    for (int row = 0; row < actual.length; row++) {
        evaluator.iterate(agg, new Object[] {actual[row], predicted[row]});
    }
    Assert.assertEquals(0.3333d, agg.get(), 1e-4);
}
示例11: testBinaryMultiSamplesAverageMacro
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test(expected = HiveException.class)
public void testBinaryMultiSamplesAverageMacro() throws Exception {
    // "macro" averaging is not valid for binary-mode input: reading the
    // aggregate result is expected to raise HiveException.
    final int[] actual = {0, 1, 0, 0, 0, 1, 0, 0};
    final int[] predicted = {1, 0, 0, 1, 0, 1, 0, 1};
    final double beta = 1.;
    binarySetUp(actual[0], predicted[0], beta, "macro");
    evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
    evaluator.reset(agg);
    for (int row = 0; row < actual.length; row++) {
        evaluator.iterate(agg, new Object[] {actual[row], predicted[row]});
    }
    agg.get();  // expected to throw
}
示例12: testBinaryMultiSamples
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test
public void testBinaryMultiSamples() throws Exception {
    // Micro-averaged F1 over eight binary (actual, predicted) pairs.
    final int[] actual = {0, 1, 0, 0, 0, 1, 0, 0};
    final int[] predicted = {1, 0, 0, 1, 0, 1, 0, 1};
    final double beta = 1.;
    binarySetUp(actual[0], predicted[0], beta, "micro");
    evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
    evaluator.reset(agg);
    for (int row = 0; row < actual.length; row++) {
        evaluator.iterate(agg, new Object[] {actual[row], predicted[row]});
    }
    Assert.assertEquals(0.5d, agg.get(), 1e-4);
}
示例13: testBinaryMultiSamplesBeta2
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test
public void testBinaryMultiSamplesBeta2() throws Exception {
    // Same fixture as the beta=1 binary test, but with beta=2 (recall weighted
    // twice as heavily as precision).
    final int[] actual = {0, 1, 0, 0, 0, 1, 0, 0};
    final int[] predicted = {1, 0, 0, 1, 0, 1, 0, 1};
    final double beta = 2.0;
    binarySetUp(actual[0], predicted[0], beta, "binary");
    evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
    evaluator.reset(agg);
    for (int row = 0; row < actual.length; row++) {
        evaluator.iterate(agg, new Object[] {actual[row], predicted[row]});
    }
    Assert.assertEquals(0.4166d, agg.get(), 1e-4);
}
示例14: testMultiLabelNegativeBeta
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test(expected = HiveException.class)
public void testMultiLabelNegativeBeta() throws Exception {
    // F-measure is undefined for beta < 0, so reading the aggregate result
    // must raise HiveException.
    final List<Integer> actual = Arrays.asList(1, 3, 2, 6);
    final List<Integer> predicted = Arrays.asList(1, 2, 4);
    final double beta = -1.0d;
    setUpWithArguments(beta, "micro");
    evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
    evaluator.reset(agg);
    evaluator.iterate(agg, new Object[] {actual, predicted});
    agg.get();  // expected to throw
}
示例15: testMultiLabelF1score
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; //导入依赖的package包/类
@Test
public void testMultiLabelF1score() throws Exception {
// Multi-label micro-averaged F1 over one (actual, predicted) label-set pair.
List<Integer> actual = Arrays.asList(1, 3, 2, 6);
List<Integer> predicted = Arrays.asList(1, 2, 4);
double beta = 1.0;
// NOTE(review): " micro" has a leading space -- looks like a typo. The test
// presumably only passes if the UDAF trims/normalizes the average name;
// confirm against the evaluator's argument parsing before changing it.
String average = " micro";
setUpWithArguments(beta, average);
evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, inputOIs);
evaluator.reset(agg);
evaluator.iterate(agg, new Object[] {actual, predicted});
// should equal to spark's micro f1 measure result
// https://spark.apache.org/docs/latest/mllib-evaluation-metrics.html#multilabel-classification
Assert.assertEquals(0.5714285714285714, agg.get(), 1e-5);
}