This article collects typical usage examples of the Java class org.apache.hadoop.hbase.index.ColumnQualifier.ValueType. If you are unsure what ValueType is for or how to use it, the curated examples below may help.
The ValueType class belongs to the org.apache.hadoop.hbase.index.ColumnQualifier package. Fifteen code examples are shown below, ordered by popularity.
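Before the individual examples, here is a minimal sketch of the pattern they all share: ValueType tags an indexed column with a data type (plus a maximum value length) when the column is added to an IndexSpecification. The index name, family, and qualifier below (demo_idx, cf, cq) are illustrative placeholders, not taken from any one example.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType;
import org.apache.hadoop.hbase.index.IndexSpecification;

public class ValueTypeSketch {
  public static void main(String[] args) {
    IndexSpecification spec = new IndexSpecification("demo_idx");
    // Index the "cq" qualifier of family "cf" as a String of at most 10 bytes.
    spec.addIndexColumn(new HColumnDescriptor("cf"), "cq", ValueType.String, 10);
  }
}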
Example 1: getMaxLength
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
private int getMaxLength(ValueType type, int maxValueLength) {
  if ((type == ValueType.Int || type == ValueType.Float) && maxValueLength != 4) {
    Log.warn("With integer or float datatypes, the maxValueLength has to be 4 bytes");
    return 4;
  }
  if ((type == ValueType.Double || type == ValueType.Long) && maxValueLength != 8) {
    Log.warn("With Double and Long datatypes, the maxValueLength has to be 8 bytes");
    return 8;
  }
  if ((type == ValueType.Short || type == ValueType.Char) && maxValueLength != 2) {
    Log.warn("With Short and Char datatypes, the maxValueLength has to be 2 bytes");
    return 2;
  }
  if (type == ValueType.Byte && maxValueLength != 1) {
    Log.warn("With the Byte datatype, the maxValueLength has to be 1 byte");
    return 1;
  }
  if (type == ValueType.String && maxValueLength == 0) {
    Log.warn("With the String datatype, the minimum value length is 2");
    maxValueLength = 2;
  }
  return maxValueLength;
}
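Assuming this helper is applied when a column is registered (an assumption suggested by the warnings, not confirmed by the snippet itself), a caller that declares a length inconsistent with the type would be silently corrected rather than rejected:

// Hypothetical caller: Int values are 4 bytes, so the declared length 10
// would be normalized to 4 by the getMaxLength logic above.
IndexSpecification spec = new IndexSpecification("int_idx");
spec.addIndexColumn(new HColumnDescriptor("cf"), "cq", ValueType.Int, 10);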
Example 2: testAddIndexWithDuplicaIndexNames
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test
public void testAddIndexWithDuplicaIndexNames() throws Exception {
  TableIndices indices = new TableIndices();
  IndexSpecification iSpec = null;
  try {
    iSpec = new IndexSpecification("index_name");
    iSpec.addIndexColumn(new HColumnDescriptor("cf"), "cq", ValueType.String, 10);
    indices.addIndex(iSpec);
    iSpec = new IndexSpecification("index_name");
    iSpec.addIndexColumn(new HColumnDescriptor("cf"), "cq", ValueType.String, 10);
    indices.addIndex(iSpec);
    fail("Duplicate index names should not be present for the same table.");
  } catch (IllegalArgumentException e) {
    // expected: the duplicate index name is rejected
  }
}
Example 3: testAddIndexWithIndexNameLengthGreaterThanMaxLength
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test
public void testAddIndexWithIndexNameLengthGreaterThanMaxLength() throws Exception {
  TableIndices indices = new TableIndices();
  IndexSpecification iSpec = null;
  try {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 7; i++) {
      sb.append("index_name");
    }
    iSpec = new IndexSpecification(sb.toString());
    iSpec.addIndexColumn(new HColumnDescriptor("cf"), "cq", ValueType.String, 10);
    indices.addIndex(iSpec);
    fail("Index name length should not be more than the maximum length "
        + Constants.DEF_MAX_INDEX_NAME_LENGTH + '.');
  } catch (IllegalArgumentException e) {
    // expected: the over-long index name is rejected
  }
}
Example 4: testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtBothTypeAndLength
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtBothTypeAndLength()
    throws IOException, KeeperException, InterruptedException {
  String userTableName = "testNotConsisIndex4";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  IndexSpecification iSpec1 = new IndexSpecification("Index1");
  iSpec1.addIndexColumn(hcd, "q1", ValueType.String, 10);
  iSpec1.addIndexColumn(hcd, "q2", ValueType.String, 10);
  ihtd.addFamily(hcd);
  IndexSpecification iSpec2 = new IndexSpecification("Index2");
  // Same columns q1/q2, but with a different type and length than Index1.
  iSpec2.addIndexColumn(hcd, "q1", ValueType.Int, 10);
  iSpec2.addIndexColumn(hcd, "q2", ValueType.String, 7);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec1);
  indices.addIndex(iSpec2);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  boolean returnVal = false;
  try {
    admin.createTable(ihtd);
    fail("IOException should be thrown");
  } catch (IOException e) {
    returnVal = true;
  }
  Assert.assertTrue(returnVal);
}
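By implication (hedged, since only the failing case is shown), a second index that reuses q1/q2 with the same ValueType and length should pass the pre-create check. A sketch of the consistent counterpart:

// Hypothetical consistent pair: both indexes declare q1 and q2 with the
// same ValueType and length, so admin.createTable(ihtd) should succeed.
IndexSpecification iSpec1 = new IndexSpecification("Index1");
iSpec1.addIndexColumn(hcd, "q1", ValueType.String, 10);
iSpec1.addIndexColumn(hcd, "q2", ValueType.String, 10);
IndexSpecification iSpec2 = new IndexSpecification("Index2");
iSpec2.addIndexColumn(hcd, "q1", ValueType.String, 10);
iSpec2.addIndexColumn(hcd, "q2", ValueType.String, 10);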
Example 5: createTableIfMissing
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
private void createTableIfMissing() throws IOException {
  try {
    HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
    for (byte[] family : FAMILIES) {
      ihtd.addFamily(new HColumnDescriptor(family));
    }
    IndexSpecification iSpec = new IndexSpecification("ScanIndex");
    iSpec.addIndexColumn(new HColumnDescriptor(Bytes.toString(FAMILY_A)),
        Bytes.toString(QUALIFIER_NAME) + "1", ValueType.String, 10);
    TableIndices indices = new TableIndices();
    indices.addIndex(iSpec);
    // Attach the index specification to the table descriptor (as in Example 4);
    // otherwise the TableIndices built above would never be used.
    ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
    HBaseAdmin admin = new IndexAdmin(util.getConfiguration());
    admin.createTable(ihtd);
    admin.close();
  } catch (TableExistsException tee) {
    // the table already exists; nothing to do
  }
}
Example 6: testPutWithOneUnitLengthSeparatorWithoutValue
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testPutWithOneUnitLengthSeparatorWithoutValue() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testPutWithOneUnitLengthSeparatorWithoutValue"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  // Index the 4th "_"-separated token of the column value.
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", new SeparatorPartition("_", 4),
      ValueType.String, 10);
  byte[] value1 = "2ndFloor_solitaire_huawei__karnataka".getBytes();
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  byte[] indexRowKey = indexPut.getRow();
  byte[] actualResult = new byte[10];
  // The indexed value sits at offset 22 of the index row key in these tests.
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  // The 4th token is empty ("__" yields an empty token), so the 10-byte
  // value slot stays zero-padded.
  byte[] expectedResult = new byte[10];
  Assert.assertTrue(Bytes.equals(actualResult, expectedResult));
}
Example 7: testIndexPutWithOffsetAndLength
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithOffsetAndLength() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutWithOffsetAndLength"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  // Index 2 bytes of the column value starting at offset 20.
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", new SpatialPartition(20, 2),
      ValueType.String, 18);
  byte[] value1 = "AB---CD---EF---GH---IJ---KL---MN---OP---".getBytes();
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  byte[] indexRowKey = indexPut.getRow();
  byte[] actualResult = new byte[2];
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  // Offset 20, length 2 of value1 is "IJ".
  byte[] expectedResult = new byte[2];
  System.arraycopy("IJ".getBytes(), 0, expectedResult, 0, "IJ".getBytes().length);
  Assert.assertTrue(Bytes.equals(actualResult, expectedResult));
}
Example 8: testIndexPutwithPositiveIntDataTypes
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutwithPositiveIntDataTypes() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutwithPositiveIntDataTypes"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  // HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Int, 4);
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql2", ValueType.Float, 4);
  byte[] value1 = Bytes.toBytes(1000);
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut1 = IndexUtils.prepareIndexPut(p, spec, region);
  int a = 1000;
  // Ints are stored with the sign bit flipped so that the byte order of the
  // index row key matches numeric order.
  byte[] expectedResult = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult = new byte[4];
  byte[] indexRowKey = indexPut1.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
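The a ^ (1 << 31) in the expected result is the standard order-preserving encoding for signed integers: flipping the sign bit makes unsigned lexicographic byte comparison agree with signed numeric order. A small standalone sketch (not part of the original test suite) of why this matters:

import org.apache.hadoop.hbase.util.Bytes;

public class SignBitFlipDemo {
  public static void main(String[] args) {
    int negative = -2562351;
    int positive = 1000;
    // Raw big-endian bytes: the negative int starts with 0xFF, so it would
    // sort AFTER the positive one in unsigned byte order.
    System.out.println(Bytes.compareTo(Bytes.toBytes(negative), Bytes.toBytes(positive)) > 0);
    // With the sign bit flipped, byte order matches numeric order.
    byte[] encNegative = Bytes.toBytes(negative ^ (1 << 31));
    byte[] encPositive = Bytes.toBytes(positive ^ (1 << 31));
    System.out.println(Bytes.compareTo(encNegative, encPositive) < 0); // true
  }
}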
Example 9: testIndexPutWithNegativeIntDataTypes
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithNegativeIntDataTypes() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutWithNegativeIntDataTypes"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  // HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Int, 4);
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql2", ValueType.Float, 4);
  byte[] value1 = Bytes.toBytes(-2562351);
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  int a = -2562351;
  // The same sign-bit flip as in Example 8 keeps negative values ordered
  // before positive ones in the index row key.
  byte[] expectedResult = Bytes.toBytes(a ^ (1 << 31));
  byte[] actualResult = new byte[4];
  byte[] indexRowKey = indexPut.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
Example 10: testIndexPutWithLongDataTypes
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithLongDataTypes() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutWithLongDataTypes"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  // HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  // The declared length 4 is inconsistent for Long and is normalized to 8
  // (see getMaxLength in Example 1), hence the 8-byte expected result.
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Long, 4);
  byte[] value1 = Bytes.toBytes(-2562351L);
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  long a = -2562351L;
  // Longs are stored with the sign bit flipped, as with ints.
  byte[] expectedResult = Bytes.toBytes(a ^ (1L << 63));
  byte[] actualResult = new byte[8];
  byte[] indexRowKey = indexPut.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
Example 11: testIndexPutWithShortDataTypes
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithShortDataTypes() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutWithShortDataTypes"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  // HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  // The declared length 4 is normalized to 2 for Short (see Example 1).
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Short, 4);
  short s = 1000;
  byte[] value1 = Bytes.toBytes(s);
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  byte[] expectedResult = Bytes.toBytes(s);
  // Flip the sign bit of the most significant byte, as with ints and longs.
  expectedResult[0] ^= 1 << 7;
  byte[] actualResult = new byte[2];
  byte[] indexRowKey = indexPut.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
Example 12: testIndexPutWithByteDataTypes
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithByteDataTypes() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutWithByteDataTypes"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  // HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  // Bytes.toBytes has no byte overload, so b is widened to short below;
  // the column is therefore declared as Short and the stored value is 2 bytes.
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Short, 4);
  byte b = 100;
  byte[] value1 = Bytes.toBytes(b);
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  byte[] expectedResult = Bytes.toBytes(b);
  expectedResult[0] ^= 1 << 7;
  byte[] actualResult = new byte[2];
  byte[] indexRowKey = indexPut.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
Example 13: testIndexPutWithCharDataTypes
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithCharDataTypes() throws IOException {
  Path basedir = new Path(DIR + "TestIndexPut");
  Configuration conf = TEST_UTIL.getConfiguration();
  HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf("testIndexPutWithCharDataTypes"));
  HRegionInfo info =
      new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
  // HLog hlog = UTIL.getMiniHBaseCluster().getRegionServer(0).getWAL();
  HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
  IndexSpecification spec = new IndexSpecification("index");
  spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Char, 4);
  char c = 'A';
  // Encode the char as 2 big-endian bytes.
  byte[] value1 = new byte[2];
  value1[1] = (byte) c;
  c >>= 8;
  value1[0] = (byte) c;
  Put p = new Put("row".getBytes());
  p.add("col".getBytes(), "ql1".getBytes(), value1);
  Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
  byte[] actualResult = new byte[2];
  byte[] indexRowKey = indexPut.getRow();
  System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
  // chars are unsigned, so no sign-bit flip is applied to the stored value.
  Assert.assertTrue(Bytes.equals(value1, actualResult));
}
Example 14: testSingleIndexExpressionWithOneEqualsExpression
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test
public void testSingleIndexExpressionWithOneEqualsExpression() throws Exception {
  String indexName = "idx1";
  SingleIndexExpression singleIndexExpression = new SingleIndexExpression(indexName);
  byte[] value = "1".getBytes();
  Column column = new Column(FAMILY1, QUALIFIER1);
  EqualsExpression equalsExpression = new EqualsExpression(column, value);
  singleIndexExpression.addEqualsExpression(equalsExpression);
  Scan scan = new Scan();
  scan.setAttribute(Constants.INDEX_EXPRESSION, IndexUtils.toBytes(singleIndexExpression));
  Filter filter = new SingleColumnValueFilter(FAMILY1, QUALIFIER1, CompareOp.EQUAL, value);
  scan.setFilter(filter);
  ScanFilterEvaluator evaluator = new ScanFilterEvaluator();
  List<IndexSpecification> indices = new ArrayList<IndexSpecification>();
  IndexSpecification index = new IndexSpecification(indexName);
  HColumnDescriptor colDesc = new HColumnDescriptor(FAMILY1);
  index.addIndexColumn(colDesc, COL1, ValueType.String, 10);
  indices.add(index);
  HRegion region =
      initHRegion(tableName.getBytes(), null, null,
        "testSingleIndexExpressionWithOneEqualsExpression", TEST_UTIL.getConfiguration(), FAMILY1);
  IndexRegionScanner scanner = evaluator.evaluate(scan, indices, new byte[0], region, tableName);
  // TODO add assertions
}
Example 15: testNoIndexExpression
import org.apache.hadoop.hbase.index.ColumnQualifier.ValueType; // import the required package/class
@Test
public void testNoIndexExpression() throws Exception {
  IndexExpression exp = new NoIndexExpression();
  Scan scan = new Scan();
  scan.setAttribute(Constants.INDEX_EXPRESSION, IndexUtils.toBytes(exp));
  byte[] value1 = Bytes.toBytes("asdf");
  scan.setFilter(new SingleColumnValueFilter(FAMILY1, QUALIFIER1, CompareOp.EQUAL, value1));
  List<IndexSpecification> indices = new ArrayList<IndexSpecification>();
  IndexSpecification is1 = new IndexSpecification("idx1");
  HColumnDescriptor colDesc = new HColumnDescriptor(FAMILY1);
  is1.addIndexColumn(colDesc, COL1, ValueType.String, 15);
  indices.add(is1);
  ScanFilterEvaluator evaluator = new ScanFilterEvaluator();
  HRegion region =
      initHRegion(tableName.getBytes(), null, null, "testNoIndexExpression",
        TEST_UTIL.getConfiguration(), FAMILY1);
  IndexRegionScanner scanner = evaluator.evaluate(scan, indices, new byte[0], region, tableName);
  // NoIndexExpression forces a plain scan, so no index scanner is built.
  assertNull(scanner);
}