This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.Writables.getBytes. If you have been wondering exactly what Writables.getBytes does, how to call it, or what real uses look like, the curated examples below should help. You can also read more about its enclosing class, org.apache.hadoop.hbase.util.Writables.
The following shows 15 code examples of Writables.getBytes, sorted by popularity by default.
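Before diving in, here is a minimal round-trip sketch of what the method does: Writables.getBytes serializes any Hadoop Writable into a byte[], and Writables.getWritable performs the inverse by filling a blank instance through its readFields method and returning that same instance. The sketch assumes a legacy HBase classpath (roughly the 0.90-0.94 era, where KeyValue still implements Writable; later releases replaced this Writable-based serialization with protobuf):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;

public class WritablesRoundTrip {
  public static void main(String[] args) throws Exception {
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
        Bytes.toBytes("qf"), Bytes.toBytes("val"));
    // Writable -> byte[]
    byte[] serialized = Writables.getBytes(kv);
    // byte[] -> Writable: populates the supplied blank KeyValue and returns it
    KeyValue copy = (KeyValue) Writables.getWritable(serialized, new KeyValue());
    System.out.println(Bytes.equals(kv.getBuffer(), copy.getBuffer())); // true
  }
}

This fill-a-blank-instance idiom is why every example below passes a fresh object such as new KeyValue() or new HTableDescriptor() to getWritable.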
Example 1: addKeyToZK
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
public void addKeyToZK(AuthenticationKey key) {
  String keyZNode = getKeyNode(key.getKeyId());
  try {
    byte[] keyData = Writables.getBytes(key);
    // TODO: is there any point in retrying beyond what ZK client does?
    ZKUtil.createSetData(watcher, keyZNode, keyData);
  } catch (KeeperException ke) {
    LOG.fatal("Unable to synchronize master key " + key.getKeyId() +
        " to znode " + keyZNode, ke);
    watcher.abort("Unable to synchronize secret key " +
        key.getKeyId() + " in zookeeper", ke);
  } catch (IOException ioe) {
    // this can only happen from an error serializing the key
    watcher.abort("Failed serializing key " + key.getKeyId(), ioe);
  }
}
Example 2: updateKeyInZK
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
public void updateKeyInZK(AuthenticationKey key) {
  String keyZNode = getKeyNode(key.getKeyId());
  try {
    byte[] keyData = Writables.getBytes(key);
    try {
      ZKUtil.updateExistingNodeData(watcher, keyZNode, keyData, -1);
    } catch (KeeperException.NoNodeException ne) {
      // node was somehow removed, try adding it back
      ZKUtil.createSetData(watcher, keyZNode, keyData);
    }
  } catch (KeeperException ke) {
    LOG.fatal("Unable to update master key " + key.getKeyId() +
        " in znode " + keyZNode);
    watcher.abort("Unable to synchronize secret key " +
        key.getKeyId() + " in zookeeper", ke);
  } catch (IOException ioe) {
    // this can only happen from an error serializing the key
    watcher.abort("Failed serializing key " + key.getKeyId(), ioe);
  }
}
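A note on the design choice above: updateKeyInZK first tries updateExistingNodeData with an expected version of -1 (accept any version), and only if a NoNodeException shows the znode has vanished does it fall back to createSetData. This keeps the common update path to a single ZooKeeper round trip while still self-healing when the node is missing.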
Example 3: testKeyValue2
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test public void testKeyValue2() throws Exception {
  final String name = "testKeyValue2";
  byte[] row = name.getBytes();
  byte[] fam = "fam".getBytes();
  byte[] qf = "qf".getBytes();
  long ts = System.currentTimeMillis();
  byte[] val = "val".getBytes();
  KeyValue kv = new KeyValue(row, fam, qf, ts, val);
  byte[] mb = Writables.getBytes(kv);
  KeyValue deserializedKv =
      (KeyValue) Writables.getWritable(mb, new KeyValue());
  assertTrue(Bytes.equals(kv.getBuffer(), deserializedKv.getBuffer()));
  assertEquals(kv.getOffset(), deserializedKv.getOffset());
  assertEquals(kv.getLength(), deserializedKv.getLength());
}
Example 4: testPut2
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test public void testPut2() throws Exception {
  byte[] row = "testAbort,,1243116656250".getBytes();
  byte[] fam = "historian".getBytes();
  byte[] qf1 = "creation".getBytes();
  long ts = 9223372036854775807L; // Long.MAX_VALUE
  byte[] val = "dont-care".getBytes();
  Put put = new Put(row);
  put.add(fam, qf1, ts, val);
  byte[] sb = Writables.getBytes(put);
  Put desPut = (Put) Writables.getWritable(sb, new Put());
  assertTrue(Bytes.equals(put.getRow(), desPut.getRow()));
  List<KeyValue> list = null;
  List<KeyValue> desList = null;
  for (Map.Entry<byte[], List<KeyValue>> entry : put.getFamilyMap().entrySet()) {
    assertTrue(desPut.getFamilyMap().containsKey(entry.getKey()));
    list = entry.getValue();
    desList = desPut.getFamilyMap().get(entry.getKey());
    for (int i = 0; i < list.size(); i++) {
      assertTrue(list.get(i).equals(desList.get(i)));
    }
  }
}
Example 5: testDelete
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test public void testDelete() throws Exception {
  byte[] row = "row".getBytes();
  byte[] fam = "fam".getBytes();
  byte[] qf1 = "qf1".getBytes();
  long ts = System.currentTimeMillis();
  Delete delete = new Delete(row);
  delete.deleteColumn(fam, qf1, ts);
  byte[] sb = Writables.getBytes(delete);
  Delete desDelete = (Delete) Writables.getWritable(sb, new Delete());
  assertTrue(Bytes.equals(delete.getRow(), desDelete.getRow()));
  List<KeyValue> list = null;
  List<KeyValue> desList = null;
  for (Map.Entry<byte[], List<KeyValue>> entry :
      delete.getFamilyMap().entrySet()) {
    assertTrue(desDelete.getFamilyMap().containsKey(entry.getKey()));
    list = entry.getValue();
    desList = desDelete.getFamilyMap().get(entry.getKey());
    for (int i = 0; i < list.size(); i++) {
      assertTrue(list.get(i).equals(desList.get(i)));
    }
  }
}
Example 6: testTableDescriptor
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test public void testTableDescriptor() throws Exception {
  final String name = "testTableDescriptor";
  HTableDescriptor htd = createTableDescriptor(name);
  byte[] mb = Writables.getBytes(htd);
  HTableDescriptor deserializedHtd =
      (HTableDescriptor) Writables.getWritable(mb, new HTableDescriptor());
  assertEquals(htd.getTableName(), deserializedHtd.getTableName());
}
Example 7: tickleNodeSplit
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
private static int tickleNodeSplit(ZooKeeperWatcher zkw,
    HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName,
    final int znodeVersion)
    throws KeeperException, IOException {
  byte[] payload = Writables.getBytes(a, b);
  return ZKAssign.transitionNode(zkw, parent, serverName,
      EventType.RS_ZK_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT,
      znodeVersion, payload);
}
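Example 7 uses a multi-argument form of getBytes. Assuming this era of the API (treat the exact overload as an assumption rather than a guarantee), Writables.getBytes(Writable...) simply concatenates each argument's serialized form, so the two daughter regions packed into the split payload can be recovered with Writables.getHRegionInfos, much as Example 15 below demonstrates:

// Hedged sketch of decoding the payload built above; `payload` is the
// byte[] produced by Writables.getBytes(a, b) in tickleNodeSplit.
List<HRegionInfo> daughters = Writables.getHRegionInfos(payload, 0, payload.length);
HRegionInfo daughterA = daughters.get(0); // corresponds to `a`
HRegionInfo daughterB = daughters.get(1); // corresponds to `b`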
Example 8: testReadFields
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test
public void testReadFields() throws IOException {
  HServerAddress hsa1 = new HServerAddress("localhost", 1234);
  HServerAddress hsa2 = new HServerAddress("localhost", 1235);
  byte[] bytes = Writables.getBytes(hsa1);
  HServerAddress deserialized =
      (HServerAddress) Writables.getWritable(bytes, new HServerAddress());
  assertEquals(hsa1, deserialized);
  bytes = Writables.getBytes(hsa2);
  deserialized =
      (HServerAddress) Writables.getWritable(bytes, new HServerAddress());
  assertNotSame(hsa1, deserialized);
}
Example 9: testMigrateHRegionInfoFromVersion0toVersion1
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test
public void testMigrateHRegionInfoFromVersion0toVersion1()
    throws IOException {
  HTableDescriptor htd =
      getHTableDescriptor("testMigrateHRegionInfoFromVersion0toVersion1");
  HRegionInfo090x ninety =
      new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  byte[] bytes = Writables.getBytes(ninety);
  // Now deserialize into an HRegionInfo
  HRegionInfo hri = Writables.getHRegionInfo(bytes);
  Assert.assertEquals(hri.getTableNameAsString(),
      ninety.getTableDesc().getNameAsString());
  Assert.assertEquals(HRegionInfo.VERSION, hri.getVersion());
}
Example 10: testReadFields
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test
public void testReadFields() throws IOException {
  HServerAddress hsa1 = new HServerAddress("localhost", 1234);
  HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
  HServerAddress hsa2 = new HServerAddress("localhost", 1235);
  HServerInfo hsi2 = new HServerInfo(hsa2, 1L, 5678);
  byte[] bytes = Writables.getBytes(hsi1);
  HServerInfo deserialized =
      (HServerInfo) Writables.getWritable(bytes, new HServerInfo());
  assertEquals(hsi1, deserialized);
  bytes = Writables.getBytes(hsi2);
  deserialized = (HServerInfo) Writables.getWritable(bytes, new HServerInfo());
  // the original compared hsa1 (an HServerAddress) here, which looks like a
  // typo; the parallel with Example 8 suggests the two HServerInfo instances
  assertNotSame(hsi1, deserialized);
}
Example 11: testHServerLoadVersioning
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test
public void testHServerLoadVersioning() throws IOException {
  Set<String> cps = new HashSet<String>(0);
  Map<byte[], RegionLoad> regions =
      new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
  regions.put(HConstants.META_TABLE_NAME,
      new HServerLoad092.RegionLoad(HConstants.META_TABLE_NAME,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, cps));
  HServerLoad092 hsl092 = new HServerLoad092(0, 0, 0, 0, regions, cps);
  byte[] hsl092bytes = Writables.getBytes(hsl092);
  HServerLoad hsl = (HServerLoad) Writables.getWritable(hsl092bytes, new HServerLoad());
  // TO BE CONTINUED
}
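The trailing // TO BE CONTINUED comment is reproduced as found in the source: the test exercises cross-version compatibility by deserializing a 0.92-format HServerLoad092 into the newer HServerLoad class, but its assertions were evidently left unfinished.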
Example 12: testKeyValue
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test public void testKeyValue() throws Exception {
  final String name = "testKeyValue";
  byte[] row = Bytes.toBytes(name);
  byte[] family = Bytes.toBytes(name);
  byte[] qualifier = Bytes.toBytes(name);
  KeyValue original = new KeyValue(row, family, qualifier);
  byte[] bytes = Writables.getBytes(original);
  KeyValue newone = (KeyValue) Writables.getWritable(bytes, new KeyValue());
  assertTrue(KeyValue.COMPARATOR.compare(original, newone) == 0);
}
Example 13: testTableDescriptor
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test public void testTableDescriptor() throws Exception {
  final String name = "testTableDescriptor";
  HTableDescriptor htd = createTableDescriptor(name);
  byte[] mb = Writables.getBytes(htd);
  HTableDescriptor deserializedHtd =
      (HTableDescriptor) Writables.getWritable(mb, new HTableDescriptor());
  assertEquals(htd.getNameAsString(), deserializedHtd.getNameAsString());
}
Example 14: testRegionInfo
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
/**
 * Test RegionInfo serialization
 * @throws Exception
 */
@Test public void testRegionInfo() throws Exception {
  HRegionInfo hri = createRandomRegion("testRegionInfo");
  byte[] hrib = Writables.getBytes(hri);
  HRegionInfo deserializedHri =
      (HRegionInfo) Writables.getWritable(hrib, new HRegionInfo());
  assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
  //assertEquals(hri.getTableDesc().getFamilies().size(),
  //  deserializedHri.getTableDesc().getFamilies().size());
}
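The commented-out assertions are reproduced as found; they appear to date from when HRegionInfo still embedded an HTableDescriptor (the pre-migration layout exercised via HRegionInfo090x in Example 9), an accessor the newer class no longer exposes.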
Example 15: testRegionInfos
import org.apache.hadoop.hbase.util.Writables; // import of the package/class this method depends on
@Test public void testRegionInfos() throws Exception {
  HRegionInfo hri = createRandomRegion("testRegionInfos");
  byte[] hrib = Writables.getBytes(hri);
  // pack three copies of the serialized region back to back
  byte[] triple = new byte[3 * hrib.length];
  System.arraycopy(hrib, 0, triple, 0, hrib.length);
  System.arraycopy(hrib, 0, triple, hrib.length, hrib.length);
  System.arraycopy(hrib, 0, triple, hrib.length * 2, hrib.length);
  List<HRegionInfo> regions = Writables.getHRegionInfos(triple, 0, triple.length);
  assertTrue(regions.size() == 3);
  assertTrue(regions.get(0).equals(regions.get(1)));
  assertTrue(regions.get(0).equals(regions.get(2)));
}