

Java HTableDescriptor.convert Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.convert. If you are wondering what HTableDescriptor.convert does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The sections below present 10 code examples of the HTableDescriptor.convert method, sorted by popularity by default.
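Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two directions of convert in HBase 1.x: the instance method serializes an HTableDescriptor into a protobuf HBaseProtos.TableSchema, and the static overload rebuilds a descriptor from that message, which is the form used throughout the examples that follow. The table name "demo" and column family "cf" are purely illustrative.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class ConvertRoundTrip {
  public static void main(String[] args) {
    // Build a descriptor for an illustrative table "demo" with one column family "cf".
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    htd.addFamily(new HColumnDescriptor("cf"));

    // Instance method: HTableDescriptor -> protobuf TableSchema.
    HBaseProtos.TableSchema schema = htd.convert();

    // Static method: protobuf TableSchema -> HTableDescriptor (the direction used in the examples below).
    HTableDescriptor restored = HTableDescriptor.convert(schema);

    System.out.println(restored.getTableName());  // prints "demo"
  }
}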

Example 1: deserializeStateData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);

  MasterProcedureProtos.CreateTableStateData state =
    MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
  hTableDescriptor = HTableDescriptor.convert(state.getTableSchema());
  if (state.getRegionInfoCount() == 0) {
    newRegions = null;
  } else {
    newRegions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
    for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
      newRegions.add(HRegionInfo.convert(hri));
    }
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 18 | Source: CreateTableProcedure.java

Example 2: deserializeStateData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);

  MasterProcedureProtos.TruncateTableStateData state =
    MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
  if (state.hasTableSchema()) {
    hTableDescriptor = HTableDescriptor.convert(state.getTableSchema());
    tableName = hTableDescriptor.getTableName();
  } else {
    tableName = ProtobufUtil.toTableName(state.getTableName());
  }
  preserveSplits = state.getPreserveSplits();
  if (state.getRegionInfoCount() == 0) {
    regions = null;
  } else {
    regions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
    for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
      regions.add(HRegionInfo.convert(hri));
    }
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 24 | Source: TruncateTableProcedure.java

Example 3: getHTableDescriptor

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Connects to the master to get the table descriptor.
 * @param tableName table name
 * @throws IOException if the connection to master fails or if the table
 *  is not found.
 * @deprecated Use {@link Admin#getTableDescriptor(TableName)} instead
 */
@Deprecated
@Override
public HTableDescriptor getHTableDescriptor(final TableName tableName)
throws IOException {
  if (tableName == null) return null;
  MasterKeepAliveConnection master = getKeepAliveMasterService();
  GetTableDescriptorsResponse htds;
  try {
    GetTableDescriptorsRequest req =
      RequestConverter.buildGetTableDescriptorsRequest(tableName);
    htds = master.getTableDescriptors(null, req);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  } finally {
    master.close();
  }
  if (!htds.getTableSchemaList().isEmpty()) {
    return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
  }
  throw new TableNotFoundException(tableName.getNameAsString());
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 29 | Source: ConnectionManager.java

Example 4: deserializeStateData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);

  MasterProcedureProtos.ModifyColumnFamilyStateData modifyCFMsg =
      MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo());
  tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName());
  cfDescriptor = HColumnDescriptor.convert(modifyCFMsg.getColumnfamilySchema());
  if (modifyCFMsg.hasUnmodifiedTableSchema()) {
    unmodifiedHTableDescriptor = HTableDescriptor.convert(modifyCFMsg.getUnmodifiedTableSchema());
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 14 | Source: ModifyColumnFamilyProcedure.java

Example 5: deserializeStateData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);

  MasterProcedureProtos.AddColumnFamilyStateData addCFMsg =
      MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo());
  tableName = ProtobufUtil.toTableName(addCFMsg.getTableName());
  cfDescriptor = HColumnDescriptor.convert(addCFMsg.getColumnfamilySchema());
  if (addCFMsg.hasUnmodifiedTableSchema()) {
    unmodifiedHTableDescriptor = HTableDescriptor.convert(addCFMsg.getUnmodifiedTableSchema());
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 14 | Source: AddColumnFamilyProcedure.java

Example 6: deserializeStateData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);
  MasterProcedureProtos.DeleteColumnFamilyStateData deleteCFMsg =
      MasterProcedureProtos.DeleteColumnFamilyStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(deleteCFMsg.getUserInfo());
  tableName = ProtobufUtil.toTableName(deleteCFMsg.getTableName());
  familyName = deleteCFMsg.getColumnfamilyName().toByteArray();

  if (deleteCFMsg.hasUnmodifiedTableSchema()) {
    unmodifiedHTableDescriptor = HTableDescriptor.convert(deleteCFMsg.getUnmodifiedTableSchema());
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 14 | Source: DeleteColumnFamilyProcedure.java

Example 7: deserializeStateData

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void deserializeStateData(final InputStream stream) throws IOException {
  super.deserializeStateData(stream);

  MasterProcedureProtos.ModifyTableStateData modifyTableMsg =
      MasterProcedureProtos.ModifyTableStateData.parseDelimitedFrom(stream);
  user = MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo());
  modifiedHTableDescriptor = HTableDescriptor.convert(modifyTableMsg.getModifiedTableSchema());
  deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify();

  if (modifyTableMsg.hasUnmodifiedTableSchema()) {
    unmodifiedHTableDescriptor =
        HTableDescriptor.convert(modifyTableMsg.getUnmodifiedTableSchema());
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 16 | Source: ModifyTableProcedure.java

Example 8: createTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
throws ServiceException {
  HTableDescriptor hTableDescriptor = HTableDescriptor.convert(req.getTableSchema());
  byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
  try {
    long procId =
        master.createTable(hTableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
    return CreateTableResponse.newBuilder().setProcId(procId).build();
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 14 | Source: MasterRpcServices.java

Example 9: readFields

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Override
public void readFields(DataInput in) throws IOException {
  int len = in.readInt();
  byte[] buf = new byte[len];
  in.readFully(buf);
  TableSnapshotRegionSplit split = TableSnapshotRegionSplit.PARSER.parseFrom(buf);
  this.htd = HTableDescriptor.convert(split.getTable());
  this.regionInfo = HRegionInfo.convert(split.getRegion());
  List<String> locationsList = split.getLocationsList();
  this.locations = locationsList.toArray(new String[locationsList.size()]);

  this.scan = Bytes.toString(Bytes.readByteArray(in));
  this.restoreDir = Bytes.toString(Bytes.readByteArray(in));
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 15 | Source: TableSnapshotInputFormatImpl.java

Example 10: getHTableDescriptorArray

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf
 *
 * @param proto the GetTableDescriptorsResponse
 * @return HTableDescriptor[]
 */
public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) {
  if (proto == null) return null;

  HTableDescriptor[] ret = new HTableDescriptor[proto.getTableSchemaCount()];
  for (int i = 0; i < proto.getTableSchemaCount(); ++i) {
    ret[i] = HTableDescriptor.convert(proto.getTableSchema(i));
  }
  return ret;
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 16 | Source: ProtobufUtil.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.convert examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use must follow the corresponding project's License. Do not reproduce without permission.