本文整理汇总了Java中org.apache.hadoop.hbase.index.ColumnQualifier.ValueType.String方法的典型用法代码示例。如果您正苦于以下问题:Java ValueType.String方法的具体用法?Java ValueType.String怎么用?Java ValueType.String使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.index.ColumnQualifier.ValueType
的用法示例。
在下文中一共展示了ValueType.String方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getMaxLength
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; //导入方法依赖的package包/类
/**
 * Validates the configured maximum value length against the fixed byte width of the
 * given {@link ValueType}, returning the corrected width (and logging a warning) when
 * the two disagree.
 *
 * @param type the value type of the indexed column; fixed-width types force their size
 * @param maxValueLength the user-configured maximum value length in bytes
 * @return the effective max length: the type's fixed width for numeric/char types,
 *         a minimum of 2 for String, otherwise {@code maxValueLength} unchanged
 */
private int getMaxLength(ValueType type, int maxValueLength) {
  if ((type == ValueType.Int || type == ValueType.Float) && maxValueLength != 4) {
    Log.warn("With integer or float datatypes, the maxValueLength has to be 4 bytes");
    return 4;
  }
  if ((type == ValueType.Double || type == ValueType.Long) && maxValueLength != 8) {
    Log.warn("With Double and Long datatypes, the maxValueLength has to be 8 bytes");
    return 8;
  }
  if ((type == ValueType.Short || type == ValueType.Char) && maxValueLength != 2) {
    Log.warn("With Short and Char datatypes, the maxValueLength has to be 2 bytes");
    return 2;
  }
  if (type == ValueType.Byte && maxValueLength != 1) {
    // Fixed message grammar: a single byte ("1 byte", not "1 bytes").
    Log.warn("With Byte datatype, the maxValueLength has to be 1 byte");
    return 1;
  }
  if (type == ValueType.String && maxValueLength == 0) {
    // Fixed typo in the original message ("minimun" -> "minimum").
    Log.warn("With String datatype, the minimum value length is 2");
    maxValueLength = 2;
  }
  return maxValueLength;
}
示例2: testColumnQualifierSerialization
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; //导入方法依赖的package包/类
@Test(timeout = 180000)
public void testColumnQualifierSerialization() throws Exception {
  // Round-trip a ColumnQualifier through its Writable serialization and verify that
  // the deserialized instance is equal to the original.
  ColumnQualifier cq =
      new ColumnQualifier("cf", "cq", ValueType.String, 10, new SpatialPartition(0, 5));

  // try-with-resources replaces the original manual null-checked close chain in
  // finally, guaranteeing the streams are closed even on failure.
  byte[] serialized;
  try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(bos)) {
    cq.write(dos);
    dos.flush();
    serialized = bos.toByteArray();
  }

  ColumnQualifier deserialized = new ColumnQualifier();
  try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(serialized))) {
    deserialized.readFields(dis);
  }
  assertTrue("ColumnQualifier state mismatch.", deserialized.equals(cq));
}
示例3: checkForType
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; //导入方法依赖的package包/类
/**
 * Returns the supplied value type, defaulting to {@code ValueType.String} when
 * no type was specified.
 *
 * @param type the configured value type, possibly {@code null}
 * @return {@code type} if non-null, otherwise {@code ValueType.String}
 */
private ValueType checkForType(ValueType type) {
  return (type == null) ? ValueType.String : type;
}
示例4: testColumnQualifierSerialization
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; //导入方法依赖的package包/类
@Test
public void testColumnQualifierSerialization() throws Exception {
  // Round-trip a ColumnQualifier carrying a SeparatorPartition through its Writable
  // serialization and verify both the partition type and overall equality survive.
  ColumnQualifier cq =
      new ColumnQualifier("cf", "cq", ValueType.String, 10, new SeparatorPartition("--", 10));

  // try-with-resources replaces the original manual null-checked close chain in
  // finally, guaranteeing the streams are closed even on failure.
  byte[] serialized;
  try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(bos)) {
    cq.write(dos);
    dos.flush();
    serialized = bos.toByteArray();
  }

  ColumnQualifier deserialized = new ColumnQualifier();
  try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(serialized))) {
    deserialized.readFields(dis);
  }

  // Original had an empty if-branch with the failure in else; inverted for clarity.
  // Also fixed the broken failure message ("no read" -> "not read").
  ValuePartition vp = deserialized.getValuePartition();
  if (!(vp instanceof SeparatorPartition)) {
    fail("Value partition details not read properly.");
  }
  assertTrue("ColumnQualifier state mismatch.", deserialized.equals(cq));
}