This article collects typical usage examples of the Java class org.apache.hadoop.hive.ql.exec.vector.LongColumnVector: what the class is for, and how it is used in practice. LongColumnVector belongs to the org.apache.hadoop.hive.ql.exec.vector package; 15 code examples are presented below, ordered by popularity.
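Before the examples, here is a minimal, self-contained sketch (my own illustration, not taken from the examples below) of how a LongColumnVector is typically populated inside a VectorizedRowBatch: values are written straight into the public long[] vector field, and nulls are flagged per row via isNull once noNulls has been cleared.

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class LongColumnVectorSketch {
  public static void main(String[] args) {
    VectorizedRowBatch batch = new VectorizedRowBatch(1);
    batch.cols[0] = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
    LongColumnVector col = (LongColumnVector) batch.cols[0];

    // Values go directly into the backing long[] array.
    for (int row = 0; row < 10; row++) {
      col.vector[row] = row * 2L;
    }
    // Flag row 3 as null; noNulls must be cleared once any null exists.
    col.noNulls = false;
    col.isNull[3] = true;
    batch.size = 10;
  }
}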
Example 1: process
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
@Override
public void process(WayContainer container) {
  DecimalColumnVector lat = (DecimalColumnVector) batch.cols[3];
  DecimalColumnVector lon = (DecimalColumnVector) batch.cols[4];
  ListColumnVector nds = (ListColumnVector) batch.cols[5];
  checkLimit();
  addCommonProperties(container);
  // Ways carry no coordinates of their own, so lat/lon are nulled out.
  lat.isNull[row] = true;
  lon.isNull[row] = true;
  lat.set(row, (HiveDecimal) null);
  lon.set(row, (HiveDecimal) null);
  Way way = container.getEntity();
  // Record this row's node-reference count and grow the shared child vector to fit.
  nds.lengths[row] = way.getWayNodes().size();
  nds.childCount += nds.lengths[row];
  nds.child.ensureSize(nds.childCount, nds.offsets[row] != 0);
  for (int j = 0; j < way.getWayNodes().size(); j++) {
    StructColumnVector ndsStruct = (StructColumnVector) nds.child;
    ((LongColumnVector) ndsStruct.fields[0]).vector[(int) nds.offsets[row] + j] =
        way.getWayNodes().get(j).getNodeId();
  }
}
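Example 1 relies on the standard ListColumnVector bookkeeping: offsets[row] marks where a row's elements begin in the shared child vector, lengths[row] says how many there are, and childCount tracks the running total. A minimal sketch of that pattern in isolation (my own illustration, independent of the OSM types above):

import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

public class ListVectorSketch {
  public static void main(String[] args) {
    ListColumnVector list = new ListColumnVector(1024, new LongColumnVector());
    long[] elements = {7L, 8L, 9L};
    int row = 0;
    list.offsets[row] = list.childCount;           // this row's elements start at the current end
    list.lengths[row] = elements.length;           // three child elements for this row
    list.childCount += elements.length;            // advance the shared write position
    list.child.ensureSize(list.childCount, true);  // grow the child vector, preserving data
    for (int j = 0; j < elements.length; j++) {
      ((LongColumnVector) list.child).vector[(int) list.offsets[row] + j] = elements[j];
    }
  }
}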
Example 2: StringDictionaryTreeReader
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
protected StringDictionaryTreeReader(int columnId, InStream present, InStream data,
    InStream length, InStream dictionary, OrcProto.ColumnEncoding encoding)
    throws IOException {
  super(columnId, present);
  scratchlcv = new LongColumnVector(); // scratch vector reused when decoding length streams
  if (data != null && encoding != null) {
    this.reader = createIntegerReader(encoding.getKind(), data, false, false);
  }
  if (dictionary != null && encoding != null) {
    readDictionaryStream(dictionary);
  }
  if (length != null && encoding != null) {
    readDictionaryLengthStream(length, encoding);
  }
}
Example 3: compareTimecolumn
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
static void compareTimecolumn(ColumnVector oneTSColumn, String columnType, boolean[] isNull, long currentBatchRow,
    Vec h2oFrame, Long startRowIndex) {
  long[] oneColumn = ((LongColumnVector) oneTSColumn).vector;
  long frameRowIndex = startRowIndex;
  for (int rowIndex = 0; rowIndex < currentBatchRow; rowIndex++) {
    if (isNull[rowIndex]) {
      assertEquals("Na is found: ", true, h2oFrame.isNA(frameRowIndex));
    } else {
      if (columnType.contains("timestamp")) {
        // scale the raw ORC value down to the frame's resolution before comparing
        assertEquals("Numerical elements should equal: ", oneColumn[rowIndex] / 1000000,
            h2oFrame.at8(frameRowIndex), ERRORMARGIN);
      } else {
        assertEquals("Numerical elements should equal: ", correctTimeStamp(oneColumn[rowIndex]),
            h2oFrame.at8(frameRowIndex), ERRORMARGIN);
      }
    }
    frameRowIndex++;
  }
}
Example 4: CompareLongcolumn
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
static void CompareLongcolumn(ColumnVector oneLongColumn, boolean[] isNull,
    long currentBatchRow, Vec h2oFrame, Long startRowIndex) {
  long[] oneColumn = ((LongColumnVector) oneLongColumn).vector;
  long frameRowIndex = startRowIndex;
  for (int rowIndex = 0; rowIndex < currentBatchRow; rowIndex++) {
    if (isNull[rowIndex]) {
      assertEquals("Na is found: ", true, h2oFrame.isNA(frameRowIndex));
    } else if (!h2oFrame.isNA(frameRowIndex)) {
      // (The original used `continue` when the frame value was NA, which skipped the
      // frameRowIndex increment below and misaligned every following comparison.)
      assertEquals("Numerical elements should equal: ", oneColumn[rowIndex], h2oFrame.at8(frameRowIndex));
    }
    frameRowIndex++;
  }
}
Example 5: set
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
@Override
public void set( final PrimitiveObject[] primitiveObjectArray , final LongColumnVector columnVector , final int index ) throws IOException {
  try {
    columnVector.vector[index] = primitiveObjectArray[index].getLong();
  } catch ( NumberFormatException | NullPointerException e ) {
    // Unparsable or missing values become SQL NULL in the vector.
    VectorizedBatchUtil.setNullColIsNullValue( columnVector , index );
  }
}
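This and the following setter examples funnel every failure through VectorizedBatchUtil.setNullColIsNullValue. To my reading of Hive's VectorizedBatchUtil, that helper amounts to the two assignments below (a sketch for clarity only; prefer the real utility in production code):

import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;

// Roughly what the Hive helper does, per my understanding of VectorizedBatchUtil:
static void markNull( final ColumnVector columnVector , final int index ) {
  columnVector.noNulls = false;      // the column now contains at least one null
  columnVector.isNull[index] = true; // flag this particular row as null
}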
Example 6: set
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
@Override
public void set( final PrimitiveObject[] primitiveObjectArray , final LongColumnVector columnVector , final int index ) throws IOException {
  try {
    // Widen the byte to the vector's long representation.
    long longNumber = (long)( primitiveObjectArray[index].getByte() );
    columnVector.vector[index] = longNumber;
  } catch ( NumberFormatException | NullPointerException e ) {
    VectorizedBatchUtil.setNullColIsNullValue( columnVector , index );
  }
}
Example 7: setColumnVector
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
@Override
public void setColumnVector( final ColumnVector vector , final IExpressionIndex indexList , final int start , final int length ) throws IOException {
  LongColumnVector columnVector = (LongColumnVector) vector;
  PrimitiveObject[] primitiveObjectArray = column.getPrimitiveObjectArray( indexList , start , length );
  for ( int i = 0 ; i < length ; i++ ) {
    if ( primitiveObjectArray[i] == null ) {
      VectorizedBatchUtil.setNullColIsNullValue( columnVector , i );
    } else {
      setter.set( primitiveObjectArray , columnVector , i );
    }
  }
}
Example 8: set
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
@Override
public void set( final PrimitiveObject[] primitiveObjectArray , final LongColumnVector columnVector , final int index ) throws IOException {
  try {
    long longNumber = (long)( primitiveObjectArray[index].getShort() );
    columnVector.vector[index] = longNumber;
  } catch ( NumberFormatException | NullPointerException e ) {
    VectorizedBatchUtil.setNullColIsNullValue( columnVector , index );
  }
}
Example 9: set
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
@Override
public void set( final PrimitiveObject[] primitiveObjectArray , final LongColumnVector columnVector , final int index ) throws IOException {
  try {
    long longNumber = (long)( primitiveObjectArray[index].getInt() );
    columnVector.vector[index] = longNumber;
  } catch ( NumberFormatException | NullPointerException e ) {
    VectorizedBatchUtil.setNullColIsNullValue( columnVector , index );
  }
}
Example 10: BinaryTreeReader
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
protected BinaryTreeReader(int columnId, InStream present, InStream data, InStream length,
    OrcProto.ColumnEncoding encoding) throws IOException {
  super(columnId, present);
  scratchlcv = new LongColumnVector(); // scratch vector reused when decoding length streams
  this.stream = data;
  if (length != null && encoding != null) {
    checkEncoding(encoding);
    this.lengths = createIntegerReader(encoding.getKind(), length, false, false);
  }
}
Example 11: StringDirectTreeReader
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
protected StringDirectTreeReader(int columnId, InStream present, InStream data,
    InStream length, OrcProto.ColumnEncoding.Kind encoding) throws IOException {
  super(columnId, present);
  this.scratchlcv = new LongColumnVector();
  this.stream = data;
  if (length != null && encoding != null) {
    this.lengths = createIntegerReader(encoding, length, false, false);
  }
}
Example 12: convert
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
public void convert(JsonElement value, ColumnVector vect, int row) {
  if (value == null || value.isJsonNull()) {
    vect.noNulls = false;
    vect.isNull[row] = true;
  } else {
    // Hive vectorizes booleans as longs: 1 for true, 0 for false.
    LongColumnVector vector = (LongColumnVector) vect;
    vector.vector[row] = value.getAsBoolean() ? 1 : 0;
  }
}
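A quick harness for the converter above, assuming the convert method is in scope and using Gson's JsonPrimitive and JsonNull (the values here are my own illustration, not from the source project):

import com.google.gson.JsonNull;
import com.google.gson.JsonPrimitive;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

LongColumnVector vect = new LongColumnVector(1024);
convert(new JsonPrimitive(true), vect, 0);  // vect.vector[0] == 1
convert(new JsonPrimitive(false), vect, 1); // vect.vector[1] == 0
convert(JsonNull.INSTANCE, vect, 2);        // vect.isNull[2] == true, vect.noNulls == false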
Example 13: addCommonProperties
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
private void addCommonProperties(EntityContainer container) {
  LongColumnVector id = (LongColumnVector) batch.cols[0];
  BytesColumnVector type = (BytesColumnVector) batch.cols[1];
  MapColumnVector tags = (MapColumnVector) batch.cols[2];
  ListColumnVector nds = (ListColumnVector) batch.cols[5];
  ListColumnVector members = (ListColumnVector) batch.cols[6];
  LongColumnVector changeset = (LongColumnVector) batch.cols[7];
  TimestampColumnVector timestamp = (TimestampColumnVector) batch.cols[8];
  LongColumnVector uid = (LongColumnVector) batch.cols[9];
  BytesColumnVector user = (BytesColumnVector) batch.cols[10];
  LongColumnVector version = (LongColumnVector) batch.cols[11];
  LongColumnVector visible = (LongColumnVector) batch.cols[12];
  Entity entity = container.getEntity();
  id.vector[row] = entity.getId();
  changeset.vector[row] = entity.getChangesetId();
  type.setVal(row, entity.getType().toString().toLowerCase().getBytes());
  tags.offsets[row] = tags.childCount;
  tags.lengths[row] = entity.getTags().size(); // number of key/value pairs
  tags.childCount += tags.lengths[row];
  tags.keys.ensureSize(tags.childCount, tags.offsets[row] != 0);
  tags.values.ensureSize(tags.childCount, tags.offsets[row] != 0);
  int i = 0;
  for (Tag tag : entity.getTags()) {
    ((BytesColumnVector) tags.keys).setVal((int) tags.offsets[row] + i, tag.getKey().getBytes());
    ((BytesColumnVector) tags.values).setVal((int) tags.offsets[row] + i, tag.getValue().getBytes());
    i++;
  }
  timestamp.time[row] = entity.getTimestamp().getTime();
  timestamp.nanos[row] = 0;
  uid.vector[row] = entity.getUser().getId();
  user.setVal(row, entity.getUser().getName().getBytes());
  version.vector[row] = entity.getVersion();
  // Visibility is stored as a boolean-valued long: 1 = visible, 0 = hidden.
  // (The original compared with == Boolean.FALSE; .equals avoids relying on boxing identity.)
  visible.vector[row] = 1;
  if (Boolean.FALSE.equals(entity.getMetaTags().get("visible"))) {
    visible.vector[row] = 0;
  }
  nds.offsets[row] = nds.childCount;
  nds.lengths[row] = 0;
  members.offsets[row] = members.childCount;
  members.lengths[row] = 0;
}
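The tag-writing loop above follows the same offsets/lengths/childCount pattern for MapColumnVector, with parallel keys and values child vectors. A stripped-down sketch of writing one key/value pair (my own illustration; note that BytesColumnVector needs its buffer initialized before setVal can copy into it):

import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;

public class MapVectorSketch {
  public static void main(String[] args) {
    MapColumnVector tags = new MapColumnVector(1024,
        new BytesColumnVector(), new BytesColumnVector());
    ((BytesColumnVector) tags.keys).initBuffer();
    ((BytesColumnVector) tags.values).initBuffer();
    int row = 0;
    tags.offsets[row] = tags.childCount; // this row's pairs start at the current end
    tags.lengths[row] = 1;               // one key/value pair for this row
    tags.childCount += tags.lengths[row];
    ((BytesColumnVector) tags.keys).setVal((int) tags.offsets[row], "highway".getBytes());
    ((BytesColumnVector) tags.values).setVal((int) tags.offsets[row], "residential".getBytes());
  }
}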
Example 14: setValue
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
static void setValue(JSONWriter writer, ColumnVector vector,
    TypeDescription schema, int row) throws JSONException {
  if (vector.isRepeating) {
    row = 0; // a repeating vector stores its single value at index 0
  }
  if (vector.noNulls || !vector.isNull[row]) {
    switch (schema.getCategory()) {
      case BOOLEAN:
        writer.value(((LongColumnVector) vector).vector[row] != 0);
        break;
      case BYTE:
      case SHORT:
      case INT:
      case LONG:
        writer.value(((LongColumnVector) vector).vector[row]);
        break;
      case FLOAT:
      case DOUBLE:
        writer.value(((DoubleColumnVector) vector).vector[row]);
        break;
      case STRING:
      case CHAR:
      case VARCHAR:
        writer.value(((BytesColumnVector) vector).toString(row));
        break;
      case DECIMAL:
        writer.value(((DecimalColumnVector) vector).vector[row].toString());
        break;
      case DATE:
        // dates are stored as days since the epoch
        writer.value(new DateWritable(
            (int) ((LongColumnVector) vector).vector[row]).toString());
        break;
      case TIMESTAMP:
        writer.value(((TimestampColumnVector) vector)
            .asScratchTimestamp(row).toString());
        break;
      case LIST:
        setList(writer, (ListColumnVector) vector, schema, row);
        break;
      case STRUCT:
        setStruct(writer, (StructColumnVector) vector, schema, row);
        break;
      case UNION:
        // printUnion(writer, (UnionColumnVector) vector, schema, row);
        break;
      case BINARY:
        // printBinary(writer, (BytesColumnVector) vector, row);
        break;
      case MAP:
        // printMap(writer, (MapColumnVector) vector, schema, row);
        break;
      default:
        throw new IllegalArgumentException("Unknown type " + schema.toString());
    }
  } else {
    writer.value(null);
  }
}
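To drive setValue over a whole row, one can walk the struct schema's children in parallel with batch.cols. A hedged sketch (printRow is my own name; getChildren and getFieldNames are from the ORC TypeDescription API, which this example appears to use):

import java.util.List;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.json.JSONException;
import org.json.JSONWriter;

static void printRow(JSONWriter writer, VectorizedRowBatch batch,
    TypeDescription schema, int row) throws JSONException {
  writer.object();
  List<TypeDescription> children = schema.getChildren();
  List<String> fieldNames = schema.getFieldNames();
  for (int c = 0; c < children.size(); c++) {
    writer.key(fieldNames.get(c));
    setValue(writer, batch.cols[c], children.get(c), row);
  }
  writer.endObject();
}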
Example 15: convertFromSourceToTargetDataType
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; // import the required package/class
protected static Object convertFromSourceToTargetDataType( ColumnVector columnVector, int currentBatchRow,
    int orcValueMetaInterface ) {
  if ( columnVector.isNull[currentBatchRow] ) {
    return null;
  }
  switch ( orcValueMetaInterface ) {
    case ValueMetaInterface.TYPE_INET:
      try {
        return InetAddress.getByName( new String( ( (BytesColumnVector) columnVector ).vector[ currentBatchRow ],
            ( (BytesColumnVector) columnVector ).start[ currentBatchRow ],
            ( (BytesColumnVector) columnVector ).length[ currentBatchRow ] ) );
      } catch ( UnknownHostException e ) {
        e.printStackTrace();
      }
      // falls through to TYPE_STRING: an unresolvable address is returned as its raw string
    case ValueMetaInterface.TYPE_STRING:
      return new String( ( (BytesColumnVector) columnVector ).vector[ currentBatchRow ],
          ( (BytesColumnVector) columnVector ).start[ currentBatchRow ],
          ( (BytesColumnVector) columnVector ).length[ currentBatchRow ] );
    case ValueMetaInterface.TYPE_INTEGER:
      return ( (LongColumnVector) columnVector ).vector[ currentBatchRow ];
    case ValueMetaInterface.TYPE_NUMBER:
      return ( (DoubleColumnVector) columnVector ).vector[ currentBatchRow ];
    case ValueMetaInterface.TYPE_BIGNUMBER:
      HiveDecimalWritable obj = ( (DecimalColumnVector) columnVector ).vector[ currentBatchRow ];
      return obj.getHiveDecimal().bigDecimalValue();
    case ValueMetaInterface.TYPE_TIMESTAMP:
      Timestamp timestamp = new Timestamp( ( (TimestampColumnVector) columnVector ).time[ currentBatchRow ] );
      timestamp.setNanos( ( (TimestampColumnVector) columnVector ).nanos[ currentBatchRow ] );
      return timestamp;
    case ValueMetaInterface.TYPE_DATE:
      // ORC stores dates as days since the epoch.
      LocalDate localDate = LocalDate.ofEpochDay( 0 ).plusDays( ( (LongColumnVector) columnVector ).vector[ currentBatchRow ] );
      return Date.from( localDate.atStartOfDay( ZoneId.systemDefault() ).toInstant() );
    case ValueMetaInterface.TYPE_BOOLEAN:
      return ( (LongColumnVector) columnVector ).vector[ currentBatchRow ] != 0;
    case ValueMetaInterface.TYPE_BINARY:
      byte[] origBytes = ( (BytesColumnVector) columnVector ).vector[ currentBatchRow ];
      int startPos = ( (BytesColumnVector) columnVector ).start[ currentBatchRow ];
      return Arrays.copyOfRange( origBytes, startPos,
          startPos + ( (BytesColumnVector) columnVector ).length[ currentBatchRow ] );
  }
  // if none of the cases match, return null
  return null;
}
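Finally, a hedged usage sketch for Example 15: reading batches with the core ORC reader API and converting the first column. The file name and the TYPE_INTEGER constant are placeholders of my choosing, the import path for ValueMetaInterface is assumed from Pentaho Kettle, and the converter method is assumed to be accessible from this class.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.pentaho.di.core.row.ValueMetaInterface;

public class OrcReadSketch {
  public static void main(String[] args) throws Exception {
    Reader reader = OrcFile.createReader(new Path("example.orc"),
        OrcFile.readerOptions(new Configuration()));
    try (RecordReader rows = reader.rows()) {
      VectorizedRowBatch batch = reader.getSchema().createRowBatch();
      while (rows.nextBatch(batch)) {
        for (int r = 0; r < batch.size; r++) {
          // TYPE_INTEGER is an arbitrary choice; use the constant matching the column's type.
          System.out.println(convertFromSourceToTargetDataType(batch.cols[0], r,
              ValueMetaInterface.TYPE_INTEGER));
        }
      }
    }
  }
}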