This article collects typical usage examples of the Java class org.apache.hadoop.hbase.index.client.DataType. If you are wondering what the DataType class does, how to use it, or where to find examples, the curated code samples below may help.
The DataType class belongs to the org.apache.hadoop.hbase.index.client package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
Example 1: resultToString
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
protected String resultToString(Result result) {
  StringBuilder sb = new StringBuilder();
  sb.append("{").append(keyToString(result.getRow())).append(":");
  for (Cell cell : result.listCells()) {
    byte[] f = CellUtil.cloneFamily(cell);
    byte[] q = CellUtil.cloneQualifier(cell);
    RangeDescription range = rangeMap.get(Bytes.add(f, q));
    sb.append("[").append(Bytes.toString(f)).append(":").append(Bytes.toString(q)).append("->");
    if (notPrintingSet.contains(q)) sb.append("skipped random value");
    else sb.append(DataType.byteToString(range.dataType, CellUtil.cloneValue(cell)));
    sb.append("]");
  }
  sb.append("}");
  return sb.toString();
}
Example 2: LCStatInfo2
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
public LCStatInfo2(byte[] family, byte[] qualifier, DataType type, int parts, String min,
    String max) throws IOException {
  this.family = family;
  this.qualifier = qualifier;
  this.type = type;
  this.isSet = false;
  switch (type) {
    case INT:
      parseInt(parts, min, max);
      break;
    case LONG:
      parseLong(parts, min, max);
      break;
    case DOUBLE:
      parseDouble(parts, min, max);
      break;
    default:
      throw new IOException("LCDBG, StatInfo ranges do not support type: " + type);
  }
}
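The parseInt/parseLong/parseDouble helpers invoked above are not part of this excerpt. As a rough illustration of the work the constructor delegates, here is a minimal, hypothetical sketch of a long-range splitter; the real LCStatInfo2 implementation may name and store its boundaries differently.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch only: split [min, max] into `parts` contiguous long buckets and
// keep the boundaries in HBase byte[] form, which is roughly what a
// parseLong(parts, min, max) helper has to produce for range statistics.
static List<byte[]> splitLongRange(int parts, String min, String max) throws IOException {
  long lo = Long.parseLong(min);
  long hi = Long.parseLong(max);
  if (parts <= 0 || lo >= hi) {
    throw new IOException("invalid range [" + min + ", " + max + "] for " + parts + " parts");
  }
  List<byte[]> boundaries = new ArrayList<byte[]>();
  long step = Math.max(1, (hi - lo) / parts);
  for (long b = lo; b <= hi; b += step) {
    boundaries.add(Bytes.toBytes(b)); // boundaries kept as byte[] for later comparisons
  }
  return boundaries;
}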
Example 3: parseStatString
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
/**
 * Create the map based on statDesc, one line per column:
 * 1. for a set, "family qualifier DataType set [v1] [v2] [...]"
 * 2. for an array, "family qualifier DataType min max parts"
 */
public static Map<TableName, LCStatInfo2> parseStatString(IndexTableRelation indexTableRelation,
    String statDesc) throws IOException {
  Map<TableName, LCStatInfo2> map = new HashMap<>();
  String[] lines = statDesc.split(LC_TABLE_DESC_RANGE_DELIMITER);
  for (String line : lines) {
    String[] parts = line.split("\t");
    byte[] family = Bytes.toBytes(parts[0]);
    byte[] qualifier = Bytes.toBytes(parts[1]);
    TableName tableName = indexTableRelation.getIndexTableName(family, qualifier);
    LCStatInfo2 statInfo;
    try {
      if ("set".equalsIgnoreCase(parts[3])) {
        statInfo = new LCStatInfo2(family, qualifier, DataType.valueOf(parts[2]), parts, 4);
      } else {
        statInfo = new LCStatInfo2(family, qualifier, DataType.valueOf(parts[2]),
            Integer.valueOf(parts[5]), parts[3], parts[4]);
      }
    } catch (IOException e) {
      throw new IOException("exception while parsing line: " + line, e);
    }
    map.put(tableName, statInfo);
  }
  return map;
}
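Based on the javadoc and the parsing loop above, a statDesc value is one tab-separated description per column, with lines joined by LC_TABLE_DESC_RANGE_DELIMITER (whose literal value is not shown in this excerpt). The snippet below is a hypothetical illustration of such an input; the family and qualifier names are made up.

// Hypothetical statDesc input matching the format described in the javadoc.
String arrayLine = "f\tprice\tDOUBLE\t0\t1000\t10";     // family qualifier type min max parts
String setLine = "f\tstatus\tSTRING\tset\tNEW\tPAID";   // family qualifier type "set" value...
String statDesc = arrayLine + LC_TABLE_DESC_RANGE_DELIMITER + setLine;
// Map<TableName, LCStatInfo2> stats = parseStatString(indexTableRelation, statDesc);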
Example 4: getStringOfValueAndType
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
public static String getStringOfValueAndType(final DataType type, final byte[] data) {
  if (data == null) return "null";
  if (type == DataType.SHORT || type == DataType.INT) {
    // note: SHORT values are decoded with Bytes.toInt here, i.e. they are expected
    // to have been encoded as 4-byte ints
    return String.valueOf(Bytes.toInt(data));
  }
  if (type == DataType.DOUBLE) {
    return String.valueOf(Bytes.toDouble(data));
  }
  if (type == DataType.LONG) {
    return String.valueOf(Bytes.toLong(data));
  }
  if (type == DataType.STRING) {
    return Bytes.toString(data);
  }
  return "mWinterGetStringOfValueAndType type not supported!";
}
Example 5: compareValues
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
public static int compareValues(byte[] b1, byte[] b2, DataType type) {
  switch (type) {
    case INT:
      // jdk 1.7: return Integer.compare(Bytes.toInt(b1), Bytes.toInt(b2));
      // jdk 1.6:
      return compareInt(Bytes.toInt(b1), Bytes.toInt(b2));
    case LONG:
      return compareLong(Bytes.toLong(b1), Bytes.toLong(b2));
    case DOUBLE:
      return Double.compare(Bytes.toDouble(b1), Bytes.toDouble(b2));
    case STRING:
      return Bytes.toString(b1).compareTo(Bytes.toString(b2));
    default:
      break;
  }
  new Exception("winter compareWithQualifier unsupported type: " + type).printStackTrace();
  return 0;
}
Example 6: parsingStringToBytesWithType
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
public static byte[] parsingStringToBytesWithType(DataType type, String s) {
  switch (type) {
    case INT:
      return Bytes.toBytes(Integer.valueOf(s));
    case DOUBLE:
      return Bytes.toBytes(Double.valueOf(s));
    case LONG:
      return Bytes.toBytes(Long.valueOf(s));
    case SHORT:
      return Bytes.toBytes(Short.valueOf(s));
    case BOOLEAN:
      return Bytes.toBytes(Boolean.valueOf(s));
    case STRING:
      return Bytes.toBytes(s);
  }
  return null;
}
Example 7: readFields
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
@Override public void readFields(DataInput in) throws IOException {
  family = WritableUtils.readCompressedByteArray(in);
  qualifier = WritableUtils.readCompressedByteArray(in);
  dataType = DataType.valueOf(WritableUtils.readString(in));
  isIndex = WritableUtils.readVInt(in) == 1;
  hashCode = calHashCode();
}
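The matching serialization method is not shown in this excerpt. To satisfy the Writable contract it has to write the same fields in the same order that readFields consumes them; a minimal sketch under that assumption (hashCode is recomputed on read, so it is not serialized):

// Sketch of the write() counterpart implied by readFields(); field order must match.
@Override public void write(DataOutput out) throws IOException {
  WritableUtils.writeCompressedByteArray(out, family);
  WritableUtils.writeCompressedByteArray(out, qualifier);
  WritableUtils.writeString(out, dataType.name());
  WritableUtils.writeVInt(out, isIndex ? 1 : 0);
}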
Example 8: getColumnInfoMap
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
/**
 * Get each column's data type for this table.
 *
 * @return a map from column name bytes to DataType, or null if the table has no DATA_FORMAT value
 * @throws IOException if the table descriptor cannot be read
 */
public Map<byte[], DataType> getColumnInfoMap(TableName tableName) throws IOException {
  String tempInfo = admin.getTableDescriptor(tableName).getValue("DATA_FORMAT");
  Map<byte[], DataType> columnTypeMap = null;
  if (tempInfo != null) {
    columnTypeMap = new TreeMap<byte[], DataType>(Bytes.BYTES_COMPARATOR);
    String[] temp = tempInfo.split(",");
    for (int i = 0; i < temp.length; i++) {
      int loc = temp[i].lastIndexOf(':');
      if (loc != -1) {
        columnTypeMap.put(Bytes.toBytes(temp[i].substring(0, loc)),
            DataType.valueOf(temp[i].substring(loc + 1)));
      } else {
        LOG.warn("Failed to read column type! " + temp[i]);
      }
    }
  }
  return columnTypeMap;
}
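Working backwards from the parsing loop, the DATA_FORMAT table descriptor value is a comma-separated list of entries, where everything before the last ':' becomes the map key and the suffix is passed to DataType.valueOf. A hypothetical value (the column names here are made up):

// Hypothetical DATA_FORMAT value; each entry is <column>:<DataType>, entries separated by ','.
String dataFormat = "f:price:DOUBLE,f:count:INT,f:name:STRING";

With this value, columnTypeMap would map the bytes of "f:price" to DataType.DOUBLE, "f:count" to DataType.INT, and "f:name" to DataType.STRING.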
Example 9: readFields
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
@Override public void readFields(DataInput in) throws IOException {
  family = Bytes.readByteArray(in);
  qualifier = Bytes.readByteArray(in);
  boolean startNull = Bytes.toBoolean(Bytes.readByteArray(in));
  if (startNull) {
    start = null;
    startOp = CompareFilter.CompareOp.NO_OP;
  } else {
    start = Bytes.readByteArray(in);
    startOp = CompareFilter.CompareOp.valueOf(Bytes.toString(Bytes.readByteArray(in)));
  }
  boolean stopNull = Bytes.toBoolean(Bytes.readByteArray(in));
  if (stopNull) {
    stop = null;
    stopOp = CompareFilter.CompareOp.NO_OP;
  } else {
    stop = Bytes.readByteArray(in);
    stopOp = CompareFilter.CompareOp.valueOf(Bytes.toString(Bytes.readByteArray(in)));
  }
  startTs = in.readLong();
  stopTs = in.readLong();
  dataType = DataType.valueOf(Bytes.toString(Bytes.readByteArray(in)));
}
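Again, the matching write() is not shown; reconstructing it from the read order above gives roughly the following sketch (field names as used in readFields, null bounds written as a boolean marker):

// Sketch of the write() counterpart implied by this readFields(); it mirrors the read order.
@Override public void write(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, family);
  Bytes.writeByteArray(out, qualifier);
  Bytes.writeByteArray(out, Bytes.toBytes(start == null));
  if (start != null) {
    Bytes.writeByteArray(out, start);
    Bytes.writeByteArray(out, Bytes.toBytes(startOp.name()));
  }
  Bytes.writeByteArray(out, Bytes.toBytes(stop == null));
  if (stop != null) {
    Bytes.writeByteArray(out, stop);
    Bytes.writeByteArray(out, Bytes.toBytes(stopOp.name()));
  }
  out.writeLong(startTs);
  out.writeLong(stopTs);
  Bytes.writeByteArray(out, Bytes.toBytes(dataType.name()));
}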
Example 10: getColumnInfoMap
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
/**
 * Get each column's data type for this table.
 *
 * @return a map from column name bytes to DataType, or null if the table has no DATA_FORMAT value
 * @throws IOException if the table descriptor cannot be read
 */
public Map<byte[], DataType> getColumnInfoMap(byte[] tableName) throws IOException {
  String tempInfo = admin.getTableDescriptor(tableName).getValue("DATA_FORMAT");
  Map<byte[], DataType> columnTypeMap = null;
  if (tempInfo != null) {
    columnTypeMap = new TreeMap<byte[], DataType>(Bytes.BYTES_COMPARATOR);
    String[] temp = tempInfo.split(",");
    for (int i = 0; i < temp.length; i++) {
      int loc = temp[i].lastIndexOf(':');
      if (loc != -1) {
        columnTypeMap.put(Bytes.toBytes(temp[i].substring(0, loc)),
            DataType.valueOf(temp[i].substring(loc + 1)));
      } else {
        LOG.warn("Failed to read column type! " + temp[i]);
      }
    }
  }
  return columnTypeMap;
}
Example 11: mWinterCalRangeKey
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
private byte[] mWinterCalRangeKey(byte[] qualifier, byte[] key) throws IOException {
  if (key == null) {
    return null;
  }
  // winter: the qualifier indicates the target column family to scan
  DataType type = LCCIndexConstant.getType(lccIndexQualifierType, qualifier);
  byte[] lccValue = null;
  if (type == DataType.DOUBLE) {
    lccValue = Bytes.toBytes(LCCIndexConstant.paddedStringDouble(Bytes.toDouble(key)));
  } else if (type == DataType.INT) {
    lccValue = Bytes.toBytes(LCCIndexConstant.paddedStringInt(Bytes.toInt(key)));
  } else if (type == DataType.LONG) {
    lccValue = Bytes.toBytes(LCCIndexConstant.paddedStringLong(Bytes.toLong(key)));
  } else if (type == DataType.STRING) {
    // lccValue = Bytes.toBytes(LCCIndexConstant.paddedStringString(Bytes.toString(key)));
    lccValue = key;
  } else {
    throw new IOException("winter range generating new endkey not implemented yet: "
        + Bytes.toString(qualifier));
  }
  return lccValue;
}
Example 12: mWinterValueCoincident
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
private boolean mWinterValueCoincident(CompareOp op, byte[] v1, byte[] v2, DataType type) {
  int ret = LCCIndexConstant.compareValues(v1, v2, type);
  switch (op) {
    case GREATER:
      return ret > 0;
    case GREATER_OR_EQUAL:
      return ret >= 0;
    case LESS:
      return ret < 0;
    case LESS_OR_EQUAL:
      return ret <= 0;
    case NOT_EQUAL:
      return ret != 0;
    case EQUAL:
      return ret == 0;
    case NO_OP:
    default:
      System.out.println("winter what does this op mean?");
      break;
  }
  return true;
}
Example 13: LCStatInfo
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
public LCStatInfo(String name, DataType type, int parts, String min, String max)
    throws IOException {
  this.name = name;
  this.type = type;
  this.isSet = false;
  switch (type) {
    case INT:
      parseInt(parts, min, max);
      break;
    case LONG:
      parseLong(parts, min, max);
      break;
    case DOUBLE:
      parseDouble(parts, min, max);
      break;
    default:
      new Exception("winter StatInfo ranges unsupported type: " + type).printStackTrace();
      throw new IOException("winter StatInfo ranges unsupported type: " + type);
  }
}
Example 14: parseStatString
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
public static List<LCStatInfo> parseStatString(String str, List<String> includedQualifiers)
    throws IOException {
  List<LCStatInfo> list = new ArrayList<LCStatInfo>();
  String[] lines = str.split(LCCIndexConstant.LCC_TABLE_DESC_RANGE_DELIMITER);
  for (String line : lines) {
    String[] parts = line.split("\t");
    if (includedQualifiers != null && !includedQualifiers.contains(parts[0])) {
      continue;
    }
    if ("set".equalsIgnoreCase(parts[2])) {
      list.add(new LCStatInfo(parts[0], DataType.valueOf(parts[1]), parts, 3));
    } else {
      list.add(new LCStatInfo(parts[0], DataType.valueOf(parts[1]), Integer.valueOf(parts[2]),
          parts[3], parts[4]));
    }
  }
  return list;
}
Example 15: LCCIndexMemStoreScanner
import org.apache.hadoop.hbase.index.client.DataType; // import the required package/class
public LCCIndexMemStoreScanner(KeyValueScanner scanner,
    TreeMap<byte[], DataType> lccIndexQualifier, byte[] target) throws IOException {
  super();
  dataList = new LinkedList<KeyValue>();
  this.lccIndexQualifier = new TreeMap<byte[], DataType>(Bytes.BYTES_COMPARATOR);
  if (target == null || !lccIndexQualifier.containsKey(target)) {
    throw new IOException("winter index column " + Bytes.toString(target) + " is of unknown type");
  }
  this.lccIndexQualifier.put(target, lccIndexQualifier.get(target));
  // this.lccIndexQualifier = lccIndexQualifier;
  long start = System.currentTimeMillis();
  init(scanner);
  System.out.println("winter LCCIndexMemStoreScanner cost "
      + (System.currentTimeMillis() - start) / 1000.0
      + " seconds to build lcc memstore scanner from memstore, the size of this scanner is: "
      + dataList.size());
}