This article collects typical usage examples of the Java class org.apache.hadoop.hbase.exceptions.DeserializationException. If you are wondering what the DeserializationException class does, how to use it, or what real-world usages look like, the curated code examples below may help.
The DeserializationException class belongs to the org.apache.hadoop.hbase.exceptions package. Fifteen code examples of the class are shown below, sorted by popularity by default.
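All fifteen examples share one two-layer idiom: a low-level parse failure (typically protobuf's InvalidProtocolBufferException, or an IOException out of ProtobufUtil.mergeFrom) is rethrown wrapped in DeserializationException, and callers whose contracts only allow IOException wrap the DeserializationException once more. Here is a minimal, self-contained sketch of that idiom; PayloadParser and Payload are hypothetical names for illustration, not HBase classes:

import java.io.IOException;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public class PayloadParser {

  // Hypothetical stand-in for a generated protobuf message type.
  static class Payload {
    static Payload parseFrom(byte[] b) throws InvalidProtocolBufferException {
      if (b == null || b.length == 0) {
        throw new InvalidProtocolBufferException("empty payload");
      }
      return new Payload();
    }
  }

  // Layer 1: translate the protobuf failure into DeserializationException.
  static Payload parse(byte[] bytes) throws DeserializationException {
    try {
      return Payload.parseFrom(bytes);
    } catch (InvalidProtocolBufferException e) {
      throw new DeserializationException(e);
    }
  }

  // Layer 2: a caller whose contract only allows IOException wraps again.
  static Payload load(byte[] bytes) throws IOException {
    try {
      return parse(bytes);
    } catch (DeserializationException e) {
      throw new IOException(e);
    }
  }
}

Keeping DeserializationException as the mid-layer type lets parsing helpers stay independent of the I/O contracts of their callers; you will see both layers repeatedly in the examples that follow.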
Example 1: refreshLabelsCache
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
public void refreshLabelsCache(byte[] data) throws IOException {
  List<VisibilityLabel> visibilityLabels = null;
  try {
    visibilityLabels = VisibilityUtils.readLabelsFromZKData(data);
  } catch (DeserializationException dse) {
    throw new IOException(dse);
  }
  this.lock.writeLock().lock();
  try {
    labels.clear();
    ordinalVsLabels.clear();
    for (VisibilityLabel visLabel : visibilityLabels) {
      String label = Bytes.toString(visLabel.getLabel().toByteArray());
      labels.put(label, visLabel.getOrdinal());
      ordinalVsLabels.put(visLabel.getOrdinal(), label);
    }
  } finally {
    this.lock.writeLock().unlock();
  }
}
Example 2: refreshTableCacheFromWritable
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
public void refreshTableCacheFromWritable(TableName table,
    byte[] data) throws IOException {
  if (data != null && data.length > 0) {
    ListMultimap<String, TablePermission> perms;
    try {
      perms = AccessControlLists.readPermissions(data, conf);
    } catch (DeserializationException e) {
      throw new IOException(e);
    }
    if (perms != null) {
      if (Bytes.equals(table.getName(), AccessControlLists.ACL_GLOBAL_NAME)) {
        updateGlobalCache(perms);
      } else {
        updateTableCache(table, perms);
      }
    }
  } else {
    LOG.debug("Skipping permission cache refresh because writable data is empty");
  }
}
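The byte[] handed to this method is typically produced by the matching serializer in the same AccessControlLists class. A hedged round-trip sketch, assuming the writePermissionsAsBytes/readPermissions pair from stock HBase 1.x; the table name, user name, and conf are illustrative:

ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
perms.put("alice", new TablePermission(TableName.valueOf("demo_table"),
    null, Permission.Action.READ));
// Serialize with the pb magic prefix, then parse back. Corrupt or truncated
// bytes would surface in readPermissions as a DeserializationException,
// which the method above wraps into IOException.
byte[] data = AccessControlLists.writePermissionsAsBytes(perms, conf);
ListMultimap<String, TablePermission> parsed =
    AccessControlLists.readPermissions(data, conf);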
Example 3: parseFrom
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance
 * @return An instance of {@link MultipleColumnPrefixFilter} made from <code>bytes</code>
 * @throws DeserializationException
 * @see #toByteArray
 */
public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes)
    throws DeserializationException {
  FilterProtos.MultipleColumnPrefixFilter proto;
  try {
    proto = FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  int numPrefixes = proto.getSortedPrefixesCount();
  byte[][] prefixes = new byte[numPrefixes][];
  for (int i = 0; i < numPrefixes; ++i) {
    prefixes[i] = proto.getSortedPrefixes(i).toByteArray();
  }
  return new MultipleColumnPrefixFilter(prefixes);
}
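A quick round trip through this parseFrom and its toByteArray counterpart; the prefix values are arbitrary:

byte[][] prefixes = new byte[][] { Bytes.toBytes("p1"), Bytes.toBytes("p2") };
MultipleColumnPrefixFilter filter = new MultipleColumnPrefixFilter(prefixes);
byte[] pb = filter.toByteArray(); // serialize to protobuf bytes
// Feeding garbage instead of pb here would throw DeserializationException.
MultipleColumnPrefixFilter copy = MultipleColumnPrefixFilter.parseFrom(pb);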
Example 4: readTableDescriptor
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
private static HTableDescriptor readTableDescriptor(FileSystem fs,
    FileStatus status) throws IOException {
  int len = Ints.checkedCast(status.getLen());
  byte[] content = new byte[len];
  FSDataInputStream fsDataInputStream = fs.open(status.getPath());
  try {
    fsDataInputStream.readFully(content);
  } finally {
    fsDataInputStream.close();
  }
  HTableDescriptor htd = null;
  try {
    htd = HTableDescriptor.parseFrom(content);
  } catch (DeserializationException e) {
    throw new IOException("content=" + Bytes.toShort(content), e);
  }
  return htd;
}
Example 5: verifyRegionState
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * Verifies that the specified region is in the specified state in ZooKeeper.
 * <p>
 * Returns true if the region is in transition and in the specified state in
 * ZooKeeper. Returns false if the region does not exist in ZK or is in
 * a different state.
 * <p>
 * The method sync()s with ZK, so it yields an up-to-date result, but it is
 * a slow read.
 * @param zkw
 * @param region
 * @param expectedState
 * @return true if region exists and is in expected state
 * @throws DeserializationException
 */
static boolean verifyRegionState(ZooKeeperWatcher zkw, HRegionInfo region,
    EventType expectedState) throws KeeperException, DeserializationException {
  String encoded = region.getEncodedName();
  String node = ZKAssign.getNodeName(zkw, encoded);
  zkw.sync(node);
  // Read existing data of the node
  byte[] existingBytes = null;
  try {
    existingBytes = ZKUtil.getDataAndWatch(zkw, node);
  } catch (KeeperException.NoNodeException nne) {
    return false;
  } catch (KeeperException e) {
    throw e;
  }
  if (existingBytes == null) return false;
  RegionTransition rt = RegionTransition.parseFrom(existingBytes);
  return rt.getEventType().equals(expectedState);
}
Example 6: testVersion
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
@Test
public void testVersion() throws DeserializationException, IOException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final FileSystem fs = htu.getTestFileSystem();
  final Path rootdir = htu.getDataTestDir();
  assertNull(FSUtils.getVersion(fs, rootdir));
  // Write out old format version file. See if we can read it in and convert.
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FSDataOutputStream s = fs.create(versionFile);
  final String version = HConstants.FILE_SYSTEM_VERSION;
  s.writeUTF(version);
  s.close();
  assertTrue(fs.exists(versionFile));
  FileStatus[] status = fs.listStatus(versionFile);
  assertNotNull(status);
  assertTrue(status.length > 0);
  String newVersion = FSUtils.getVersion(fs, rootdir);
  assertEquals(version.length(), newVersion.length());
  assertEquals(version, newVersion);
  // File will have been converted. Exercise the pb format.
  assertEquals(version, FSUtils.getVersion(fs, rootdir));
  FSUtils.checkVersion(fs, rootdir, true);
}
Example 7: OpenRegion
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
private void OpenRegion(Server server, RegionServerServices rss,
    HTableDescriptor htd, HRegionInfo hri, OpenRegionCoordination coordination)
    throws IOException, NodeExistsException, KeeperException, DeserializationException {
  // Create the OFFLINE node, which is what the Master sets before sending the OPEN RPC.
  ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
  OpenRegionCoordination.OpenRegionDetails ord =
      coordination.getDetailsForNonCoordinatedOpening();
  OpenRegionHandler openHandler =
      new OpenRegionHandler(server, rss, hri, htd, -1, coordination, ord);
  rss.getRegionsInTransitionInRS().put(hri.getEncodedNameAsBytes(), Boolean.TRUE);
  openHandler.process();
  // This parse is not used?
  RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName()));
  // Delete the node, which is what the Master does after the region is opened.
  ZKAssign.deleteNode(server.getZooKeeper(), hri.getEncodedName(),
      EventType.RS_ZK_REGION_OPENED, server.getServerName());
}
Example 8: parseFrom
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
 * @return An instance of {@link ClusterId} made from <code>bytes</code>
 * @throws DeserializationException
 * @see #toByteArray()
 */
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
    ClusterIdProtos.ClusterId cid = null;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      cid = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(cid);
  } else {
    // Presume it was written out this way, the old way.
    return new ClusterId(Bytes.toString(bytes));
  }
}
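A round-trip sketch covering both branches, assuming stock HBase behavior where ClusterId's no-arg constructor generates a fresh UUID and toString() returns the UUID string:

ClusterId id = new ClusterId();                                    // new random cluster id
ClusterId fromPb = ClusterId.parseFrom(id.toByteArray());          // pb branch
ClusterId fromOld = ClusterId.parseFrom(Bytes.toBytes(id.toString())); // legacy string branch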
Example 9: parseStateFrom
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param bytes Content of a state znode.
 * @return State parsed from the passed bytes.
 * @throws DeserializationException
 */
private static ZooKeeperProtos.ReplicationState.State parseStateFrom(final byte[] bytes)
    throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(bytes);
  int pblen = ProtobufUtil.lengthOfPBMagic();
  ZooKeeperProtos.ReplicationState.Builder builder =
      ZooKeeperProtos.ReplicationState.newBuilder();
  ZooKeeperProtos.ReplicationState state;
  try {
    ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    state = builder.build();
    return state.getState();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
}
Example 10: parsePeerFrom
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param bytes Content of a peer znode.
 * @return ClusterKey parsed from the passed bytes.
 * @throws DeserializationException
 */
private static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
    throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.ReplicationPeer.Builder builder =
        ZooKeeperProtos.ReplicationPeer.newBuilder();
    ZooKeeperProtos.ReplicationPeer peer;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      peer = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(peer);
  } else {
    if (bytes.length > 0) {
      return new ReplicationPeerConfig().setClusterKey(Bytes.toString(bytes));
    }
    return new ReplicationPeerConfig().setClusterKey("");
  }
}
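The else branch preserves compatibility with pre-protobuf peer znodes, which stored the bare cluster key as UTF-8 text. A sketch of what such legacy bytes look like; the quorum address is made up:

// Old-style peer znode content: just the cluster key string.
byte[] legacy = Bytes.toBytes("zk1.example.com:2181:/hbase");
// Since parsePeerFrom is private, only callers inside the class see it;
// for these bytes it effectively returns
// new ReplicationPeerConfig().setClusterKey("zk1.example.com:2181:/hbase").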
Example 11: parseFrom
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
 * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
 * @throws DeserializationException
 * @throws IOException
 * @see #toByteArray()
 */
public static HTableDescriptor parseFrom(final byte[] bytes)
    throws DeserializationException, IOException {
  if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
    return (HTableDescriptor) Writables.getWritable(bytes, new HTableDescriptor());
  }
  int pblen = ProtobufUtil.lengthOfPBMagic();
  TableSchema.Builder builder = TableSchema.newBuilder();
  TableSchema ts;
  try {
    ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    ts = builder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return convert(ts);
}
Example 12: parseWALPositionFrom
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param bytes Content of a WAL position znode.
 * @return The current WAL position.
 * @throws DeserializationException
 */
public static long parseWALPositionFrom(final byte[] bytes) throws DeserializationException {
  if (bytes == null) {
    throw new DeserializationException("Unable to parse null WAL position.");
  }
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.ReplicationHLogPosition.Builder builder =
        ZooKeeperProtos.ReplicationHLogPosition.newBuilder();
    ZooKeeperProtos.ReplicationHLogPosition position;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      position = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return position.getPosition();
  } else {
    if (bytes.length > 0) {
      return Bytes.toLong(bytes);
    }
    return 0;
  }
}
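A usage sketch covering all three input shapes, assuming this helper is reachable as ZKUtil.parseWALPositionFrom as in stock HBase; znodeBytes is a hypothetical pb-encoded payload read from the position znode:

long fromPb = ZKUtil.parseWALPositionFrom(znodeBytes);             // pb branch
long fromLegacy = ZKUtil.parseWALPositionFrom(Bytes.toBytes(42L)); // old raw-long format, yields 42
long fromEmpty = ZKUtil.parseWALPositionFrom(new byte[0]);         // empty znode, yields 0
// A null argument throws DeserializationException, per the guard above.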
Example 13: parseRegionStoreSequenceIds
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param bytes Content of serialized data of RegionStoreSequenceIds
 * @return a RegionStoreSequenceIds object
 * @throws DeserializationException
 */
public static RegionStoreSequenceIds parseRegionStoreSequenceIds(final byte[] bytes)
    throws DeserializationException {
  if (bytes == null || !ProtobufUtil.isPBMagicPrefix(bytes)) {
    throw new DeserializationException("Unable to parse RegionStoreSequenceIds.");
  }
  RegionStoreSequenceIds.Builder regionSequenceIdsBuilder =
      ClusterStatusProtos.RegionStoreSequenceIds.newBuilder();
  int pblen = ProtobufUtil.lengthOfPBMagic();
  RegionStoreSequenceIds storeIds = null;
  try {
    ProtobufUtil.mergeFrom(regionSequenceIdsBuilder, bytes, pblen, bytes.length - pblen);
    storeIds = regionSequenceIdsBuilder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return storeIds;
}
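Unlike the WAL-position parser in Example 12, this one has no legacy fallback: anything without the pb magic prefix is rejected outright. A defensive-use sketch; znodeBytes is hypothetical content read from ZooKeeper:

try {
  RegionStoreSequenceIds ids = parseRegionStoreSequenceIds(znodeBytes);
  long lastFlushed = ids.getLastFlushedSequenceId();
} catch (DeserializationException e) {
  // Null input or content without the pb magic prefix lands here;
  // callers typically log and skip the znode.
}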
Example 14: testPb
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
@Test
public void testPb() throws DeserializationException, IOException {
  HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
  final int v = 123;
  htd.setMaxFileSize(v);
  htd.setDurability(Durability.ASYNC_WAL);
  htd.setReadOnly(true);
  htd.setRegionReplication(2);
  byte[] bytes = htd.toByteArray();
  HTableDescriptor deserializedHtd = HTableDescriptor.parseFrom(bytes);
  assertEquals(htd, deserializedHtd);
  assertEquals(v, deserializedHtd.getMaxFileSize());
  assertTrue(deserializedHtd.isReadOnly());
  assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability());
  assertEquals(2, deserializedHtd.getRegionReplication());
}
Example 15: parseFrom
import org.apache.hadoop.hbase.exceptions.DeserializationException; // import the required package/class
/**
 * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance
 * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from <code>bytes</code>
 * @throws DeserializationException
 * @see #toByteArray
 */
public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes)
    throws DeserializationException {
  FilterProtos.FirstKeyValueMatchingQualifiersFilter proto;
  try {
    proto = FilterProtos.FirstKeyValueMatchingQualifiersFilter.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  TreeSet<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  for (ByteString qualifier : proto.getQualifiersList()) {
    qualifiers.add(qualifier.toByteArray());
  }
  return new FirstKeyValueMatchingQualifiersFilter(qualifiers);
}
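A round trip mirroring Example 3, but with a qualifier set; the qualifier values are arbitrary:

TreeSet<byte[]> quals = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
quals.add(Bytes.toBytes("q1"));
quals.add(Bytes.toBytes("q2"));
FirstKeyValueMatchingQualifiersFilter f = new FirstKeyValueMatchingQualifiersFilter(quals);
// Serialize, then parse back; bad bytes would throw DeserializationException.
FirstKeyValueMatchingQualifiersFilter copy =
    FirstKeyValueMatchingQualifiersFilter.parseFrom(f.toByteArray());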