本文整理汇总了Java中org.apache.hadoop.hbase.protobuf.generated.HBaseProtos类的典型用法代码示例。如果您正苦于以下问题:Java HBaseProtos类的具体用法?Java HBaseProtos怎么用?Java HBaseProtos使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
HBaseProtos类属于org.apache.hadoop.hbase.protobuf.generated包,在下文中一共展示了HBaseProtos类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: refreshNodes
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Refreshes the namespace cache from the supplied ZooKeeper nodes.
 * Empty nodes are skipped; each remaining node's payload is parsed as a
 * protobuf NamespaceDescriptor and cached under the descriptor's name.
 *
 * @param nodes znode path/data pairs to refresh from
 * @throws IOException if a node payload cannot be parsed as a NamespaceDescriptor
 */
private void refreshNodes(List<ZKUtil.NodeAndData> nodes) throws IOException {
  for (ZKUtil.NodeAndData node : nodes) {
    if (node.isEmpty()) {
      continue;
    }
    String znodePath = node.getNode();
    String nsName = ZKUtil.getNodeName(znodePath);
    byte[] payload = node.getData();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Updating namespace cache from node " + nsName + " with data: "
          + Bytes.toStringBinary(payload));
    }
    NamespaceDescriptor descriptor = ProtobufUtil.toNamespaceDescriptor(
        HBaseProtos.NamespaceDescriptor.parseFrom(payload));
    cache.put(descriptor.getName(), descriptor);
  }
}
示例2: hasMinimumVersion
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Checks whether the version reported in {@code versionInfo} is at least
 * {@code major}.{@code minor}. Missing version components are treated as 0.
 *
 * @param versionInfo client-reported version info; may be null
 * @param major required major version
 * @param minor required minor version (only compared when majors are equal)
 * @return true if the reported version meets the minimum; false when
 *         {@code versionInfo} is null or the version string is not numeric
 */
public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo,
                                        int major,
                                        int minor) {
  if (versionInfo == null) {
    return false;
  }
  try {
    String[] parts = versionInfo.getVersion().split("\\.");
    int reportedMajor = parts.length > 0 ? Integer.parseInt(parts[0]) : 0;
    if (reportedMajor != major) {
      // A strictly greater major always satisfies the minimum.
      return reportedMajor > major;
    }
    int reportedMinor = parts.length > 1 ? Integer.parseInt(parts[1]) : 0;
    return reportedMinor >= minor;
  } catch (NumberFormatException e) {
    // Non-numeric version component: cannot prove the minimum is met.
    return false;
  }
}
示例3: deserializeStateData
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Restores this procedure's state (user info, table descriptor, and the
 * regions to create) from a delimited CreateTableStateData protobuf message.
 *
 * @param stream serialized procedure state
 * @throws IOException if the stream cannot be read or parsed
 */
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);
  final MasterProcedureProtos.CreateTableStateData data =
      MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(data.getUserInfo());
  hTableDescriptor = HTableDescriptor.convert(data.getTableSchema());
  final int regionCount = data.getRegionInfoCount();
  if (regionCount == 0) {
    // No serialized regions: leave the list null rather than empty.
    newRegions = null;
  } else {
    newRegions = new ArrayList<HRegionInfo>(regionCount);
    for (HBaseProtos.RegionInfo regionInfo : data.getRegionInfoList()) {
      newRegions.add(HRegionInfo.convert(regionInfo));
    }
  }
}
示例4: deserializeStateData
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Restores this procedure's state (user info, table identity, split
 * preservation flag, and regions) from a delimited TruncateTableStateData
 * protobuf message.
 *
 * @param stream serialized procedure state
 * @throws IOException if the stream cannot be read or parsed
 */
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);
  final MasterProcedureProtos.TruncateTableStateData data =
      MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(data.getUserInfo());
  if (data.hasTableSchema()) {
    // A full schema was serialized; derive the table name from it.
    hTableDescriptor = HTableDescriptor.convert(data.getTableSchema());
    tableName = hTableDescriptor.getTableName();
  } else {
    tableName = ProtobufUtil.toTableName(data.getTableName());
  }
  preserveSplits = data.getPreserveSplits();
  final int regionCount = data.getRegionInfoCount();
  if (regionCount == 0) {
    // No serialized regions: leave the list null rather than empty.
    regions = null;
  } else {
    regions = new ArrayList<HRegionInfo>(regionCount);
    for (HBaseProtos.RegionInfo regionInfo : data.getRegionInfoList()) {
      regions.add(HRegionInfo.convert(regionInfo));
    }
  }
}
示例5: deserializeStateData
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Restores this procedure's state (user info, table name, and regions)
 * from a delimited DeleteTableStateData protobuf message.
 *
 * @param stream serialized procedure state
 * @throws IOException if the stream cannot be read or parsed
 */
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);
  final MasterProcedureProtos.DeleteTableStateData data =
      MasterProcedureProtos.DeleteTableStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(data.getUserInfo());
  tableName = ProtobufUtil.toTableName(data.getTableName());
  final int regionCount = data.getRegionInfoCount();
  if (regionCount == 0) {
    // No serialized regions: leave the list null rather than empty.
    regions = null;
  } else {
    regions = new ArrayList<HRegionInfo>(regionCount);
    for (HBaseProtos.RegionInfo regionInfo : data.getRegionInfoList()) {
      regions.add(HRegionInfo.convert(regionInfo));
    }
  }
}
示例6: list
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Scans the namespace table and returns every namespace descriptor found,
 * sorted by the standard namespace-descriptor comparator.
 *
 * @return a sorted set of all namespace descriptors
 * @throws IOException if the scan or protobuf parsing fails
 */
public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
  NavigableSet<NamespaceDescriptor> descriptors =
      Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
  ResultScanner scanner =
      getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
  try {
    for (Result row : scanner) {
      // The descriptor protobuf lives in the info:d column of each row.
      byte[] serialized = CellUtil.cloneValue(row.getColumnLatestCell(
          HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
          HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
      descriptors.add(ProtobufUtil.toNamespaceDescriptor(
          HBaseProtos.NamespaceDescriptor.parseFrom(serialized)));
    }
  } finally {
    scanner.close();
  }
  return descriptors;
}
示例7: doWork
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Takes a snapshot of the configured table. The snapshot type defaults to
 * FLUSH and may be overridden via {@code snapshotType}.
 *
 * @return 0 on success, -1 on any failure
 * @throws Exception never propagated directly; failures map to -1
 */
@Override
protected int doWork() throws Exception {
  Connection connection = null;
  Admin admin = null;
  try {
    connection = ConnectionFactory.createConnection(getConf());
    admin = connection.getAdmin();
    // Default to a FLUSH snapshot unless the caller specified a type.
    HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
    if (snapshotType != null) {
      // BUG FIX: the requested type must come from snapshotType, not
      // snapshotName — parsing the name here made any explicit type
      // request fail (or silently pick the wrong type).
      type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotType.toUpperCase());
    }
    admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
  } catch (Exception e) {
    // Tool-style contract: any failure is reported as a non-zero exit code.
    return -1;
  } finally {
    if (admin != null) {
      admin.close();
    }
    if (connection != null) {
      connection.close();
    }
  }
  return 0;
}
示例8: createServerLoadProto
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Builds a synthetic ServerLoad protobuf containing two region loads with
 * fixed, deterministic metrics — intended for tests of load reporting.
 *
 * @return a ServerLoad message with two populated RegionLoad entries
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier firstRegion = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier secondRegion = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  ClusterStatusProtos.RegionLoad firstLoad = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(firstRegion)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setStorefileIndexSizeMB(42)
      .setRootIndexSizeKB(201)
      // Request counts pinned to MAX_VALUE to exercise overflow handling.
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad secondLoad = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(secondRegion)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setStorefileIndexSizeMB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(firstLoad)
      .addRegionLoads(secondLoad)
      .build();
}
示例9: toScanMetrics
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Deserializes scan metrics from protobuf bytes. Unparseable input is
 * treated as "no metrics" and yields an empty ScanMetrics rather than an
 * exception (best-effort by design).
 *
 * @param bytes serialized MapReduceProtos.ScanMetrics message
 * @return a ScanMetrics populated with every fully-specified name/value pair
 */
public static ScanMetrics toScanMetrics(final byte[] bytes) {
  MapReduceProtos.ScanMetrics parsed;
  try {
    parsed = MapReduceProtos.ScanMetrics.PARSER.parseFrom(bytes);
  } catch (InvalidProtocolBufferException ignored) {
    // Deliberately swallowed: malformed input simply means no counters to add.
    parsed = null;
  }
  ScanMetrics metrics = new ScanMetrics();
  if (parsed != null) {
    for (HBaseProtos.NameInt64Pair pair : parsed.getMetricsList()) {
      // Only copy pairs where both the name and the value are present.
      if (pair.hasName() && pair.hasValue()) {
        metrics.setCounter(pair.getName(), pair.getValue());
      }
    }
  }
  return metrics;
}
示例10: toTimeUnit
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Convert a protocol buffer TimeUnit to a client TimeUnit.
 *
 * @param proto the protobuf TimeUnit to convert
 * @return the corresponding {@link TimeUnit}
 * @throws RuntimeException if {@code proto} has no client-side equivalent
 */
public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) {
  // One-to-one mapping between the protobuf and client enums.
  if (proto == HBaseProtos.TimeUnit.NANOSECONDS) {
    return TimeUnit.NANOSECONDS;
  }
  if (proto == HBaseProtos.TimeUnit.MICROSECONDS) {
    return TimeUnit.MICROSECONDS;
  }
  if (proto == HBaseProtos.TimeUnit.MILLISECONDS) {
    return TimeUnit.MILLISECONDS;
  }
  if (proto == HBaseProtos.TimeUnit.SECONDS) {
    return TimeUnit.SECONDS;
  }
  if (proto == HBaseProtos.TimeUnit.MINUTES) {
    return TimeUnit.MINUTES;
  }
  if (proto == HBaseProtos.TimeUnit.HOURS) {
    return TimeUnit.HOURS;
  }
  if (proto == HBaseProtos.TimeUnit.DAYS) {
    return TimeUnit.DAYS;
  }
  throw new RuntimeException("Invalid TimeUnit " + proto);
}
示例11: toProtoTimeUnit
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
* Convert a client TimeUnit to a protocol buffer TimeUnit
* @param timeUnit
* @return the converted protocol buffer TimeUnit
*/
/**
 * Convert a client TimeUnit to a protocol buffer TimeUnit.
 *
 * @param timeUnit the client TimeUnit to convert
 * @return the corresponding protobuf TimeUnit
 * @throws RuntimeException if {@code timeUnit} has no protobuf equivalent
 */
public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) {
  // One-to-one mapping between the client and protobuf enums.
  if (timeUnit == TimeUnit.NANOSECONDS) {
    return HBaseProtos.TimeUnit.NANOSECONDS;
  }
  if (timeUnit == TimeUnit.MICROSECONDS) {
    return HBaseProtos.TimeUnit.MICROSECONDS;
  }
  if (timeUnit == TimeUnit.MILLISECONDS) {
    return HBaseProtos.TimeUnit.MILLISECONDS;
  }
  if (timeUnit == TimeUnit.SECONDS) {
    return HBaseProtos.TimeUnit.SECONDS;
  }
  if (timeUnit == TimeUnit.MINUTES) {
    return HBaseProtos.TimeUnit.MINUTES;
  }
  if (timeUnit == TimeUnit.HOURS) {
    return HBaseProtos.TimeUnit.HOURS;
  }
  if (timeUnit == TimeUnit.DAYS) {
    return HBaseProtos.TimeUnit.DAYS;
  }
  throw new RuntimeException("Invalid TimeUnit " + timeUnit);
}
示例12: listNamespaceDescriptors
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
* List available namespace descriptors
* @return List of descriptors
* @throws IOException
*/
/**
 * List available namespace descriptors by asking the master over RPC.
 *
 * @return all namespace descriptors known to the master
 * @throws IOException if the master call fails
 */
@Override
public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
  return executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
    @Override
    public NamespaceDescriptor[] call(int callTimeout) throws Exception {
      PayloadCarryingRpcController rpcController = rpcControllerFactory.newController();
      rpcController.setCallTimeout(callTimeout);
      List<HBaseProtos.NamespaceDescriptor> protoList =
          master.listNamespaceDescriptors(rpcController,
              ListNamespaceDescriptorsRequest.newBuilder().build())
              .getNamespaceDescriptorList();
      // Convert each protobuf descriptor to the client-side type.
      NamespaceDescriptor[] descriptors = new NamespaceDescriptor[protoList.size()];
      for (int i = 0; i < protoList.size(); i++) {
        descriptors[i] = ProtobufUtil.toNamespaceDescriptor(protoList.get(i));
      }
      return descriptors;
    }
  });
}
示例13: listTableNamesByNamespace
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
* Get list of table names by namespace
* @param name namespace name
* @return The list of table names in the namespace
* @throws IOException
*/
/**
 * Get the list of table names belonging to a namespace, via the master.
 *
 * @param name namespace name
 * @return the table names in the namespace
 * @throws IOException if the master call fails
 */
@Override
public TableName[] listTableNamesByNamespace(final String name) throws IOException {
  return executeCallable(new MasterCallable<TableName[]>(getConnection()) {
    @Override
    public TableName[] call(int callTimeout) throws Exception {
      PayloadCarryingRpcController rpcController = rpcControllerFactory.newController();
      rpcController.setCallTimeout(callTimeout);
      List<HBaseProtos.TableName> protoNames =
          master.listTableNamesByNamespace(rpcController,
              ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build())
              .getTableNameList();
      // Convert each protobuf table name to the client-side type.
      TableName[] names = new TableName[protoNames.size()];
      for (int i = 0; i < protoNames.size(); i++) {
        names[i] = ProtobufUtil.toTableName(protoNames.get(i));
      }
      return names;
    }
  });
}
示例14: assertSnapshotRequestIsValid
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
* Check to make sure that the description of the snapshot requested is valid
* @param snapshot description of the snapshot
* @throws IllegalArgumentException if the name of the snapshot or the name of the table to
* snapshot are not valid names.
*/
/**
 * Check that the requested snapshot description is valid.
 *
 * @param snapshot description of the snapshot
 * @throws IllegalArgumentException if the snapshot name or the table name is
 *     not a legal name, or if the table is a system table
 */
public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot)
    throws IllegalArgumentException {
  // A snapshot name must obey the same rules as a table qualifier.
  TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()), true);
  if (!snapshot.hasTable()) {
    return;
  }
  // valueOf implicitly validates the table name.
  TableName tableName = TableName.valueOf(snapshot.getTable());
  if (tableName.isSystemTable()) {
    throw new IllegalArgumentException("System table snapshots are not allowed");
  }
}
示例15: convert
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; //导入依赖的package包/类
/**
 * Serializes this filter into its protobuf representation. Family and
 * qualifier are only set when non-null; all other fields are always copied.
 *
 * @return the protobuf form of this SingleColumnValueFilter
 */
FilterProtos.SingleColumnValueFilter convert() {
  FilterProtos.SingleColumnValueFilter.Builder proto =
      FilterProtos.SingleColumnValueFilter.newBuilder();
  if (columnFamily != null) {
    proto.setColumnFamily(ByteStringer.wrap(columnFamily));
  }
  if (columnQualifier != null) {
    proto.setColumnQualifier(ByteStringer.wrap(columnQualifier));
  }
  // The client CompareOp enum and the protobuf CompareType share names.
  proto.setCompareOp(CompareType.valueOf(compareOp.name()));
  proto.setComparator(ProtobufUtil.toComparator(comparator));
  proto.setFilterIfMissing(filterIfMissing);
  proto.setLatestVersionOnly(latestVersionOnly);
  return proto.build();
}