This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Put.addImmutable. If you are wondering what exactly Put.addImmutable does, how to use it, or where to find examples of it, the curated method examples below may help. You can also read further about its enclosing class, org.apache.hadoop.hbase.client.Put.
The following presents 15 code examples of Put.addImmutable, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help surface better Java code samples.
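Before diving into the individual examples, here is a minimal, self-contained sketch of the typical call pattern. The sketch makes some assumptions not drawn from the examples below: the table name demo_table, column family cf, qualifier q, and row key row-1 are placeholders, and an HBase 1.x client is assumed (addImmutable was deprecated in HBase 2.0 in favor of Cell-based APIs).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AddImmutableDemo {
  public static void main(String[] args) throws Exception {
    // demo_table, cf, q and row-1 are placeholder names for illustration only.
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo_table"))) {
      Put put = new Put(Bytes.toBytes("row-1"));
      // Per its Javadoc, addImmutable expects that the caller will not modify
      // the passed byte arrays after the call (hence "immutable").
      put.addImmutable(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}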
Example 1: call
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
@Override
public Boolean call() throws Exception {
  // Table implements Closeable, so we use the try-with-resources construct here.
  // https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html
  try (Table t = connection.getTable(tableName)) {
    byte[] value = Bytes.toBytes(Double.toString(ThreadLocalRandom.current().nextDouble()));
    int rows = 30;
    // List to hold the batch of Puts
    ArrayList<Put> puts = new ArrayList<>(rows);
    for (int i = 0; i < rows; i++) {
      byte[] rk = Bytes.toBytes(ThreadLocalRandom.current().nextLong());
      Put p = new Put(rk);
      p.addImmutable(FAMILY, QUAL, value);
      puts.add(p);
    }
    // Now that we've assembled the batch, push it to HBase.
    t.put(puts);
  }
  return true;
}
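Because Example 1 implements Callable<Boolean>, it is presumably submitted to a thread pool by its surrounding harness. A hedged sketch of such a driver follows; BatchWriter is a hypothetical class wrapping the call() above and receiving the Connection and TableName through its constructor:

// BatchWriter is a hypothetical Callable<Boolean> whose call() is the method above.
ExecutorService pool = Executors.newFixedThreadPool(4);
List<Future<Boolean>> futures = new ArrayList<>();
for (int i = 0; i < 4; i++) {
  futures.add(pool.submit(new BatchWriter(connection, tableName)));
}
for (Future<Boolean> f : futures) {
  f.get(); // surfaces any write failure as an ExecutionException
}
pool.shutdown();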
Example 2: migrateSplitIfNecessary
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
static void migrateSplitIfNecessary(final Result r, final Put p, final byte[] which)
    throws IOException {
  byte[] hriSplitBytes = getBytes(r, which);
  if (!isMigrated(hriSplitBytes)) {
    // This will 'migrate' the HRI from 0.92.x and 0.94.x to 0.96+ by reading the
    // Writable serialization
    HRegionInfo hri = parseFrom(hriSplitBytes);
    p.addImmutable(HConstants.CATALOG_FAMILY, which, hri.toByteArray());
  }
}
Example 3: addSystemLabel
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
protected void addSystemLabel(Region region, Map<String, Integer> labels,
    Map<String, List<Integer>> userAuths) throws IOException {
  if (!labels.containsKey(SYSTEM_LABEL)) {
    Put p = new Put(Bytes.toBytes(SYSTEM_LABEL_ORDINAL));
    p.addImmutable(LABELS_TABLE_FAMILY, LABEL_QUALIFIER, Bytes.toBytes(SYSTEM_LABEL));
    region.put(p);
    labels.put(SYSTEM_LABEL, SYSTEM_LABEL_ORDINAL);
  }
}
Example 4: addUserPermission
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Stores a new user permission grant in the access control lists table.
 * @param conf the configuration
 * @param userPerm the details of the permission to be granted
 * @throws IOException in the case of an error accessing the metadata table
 */
static void addUserPermission(Configuration conf, UserPermission userPerm)
    throws IOException {
  Permission.Action[] actions = userPerm.getActions();
  byte[] rowKey = userPermissionRowKey(userPerm);
  Put p = new Put(rowKey);
  byte[] key = userPermissionKey(userPerm);

  if ((actions == null) || (actions.length == 0)) {
    String msg = "No actions associated with user '" + Bytes.toString(userPerm.getUser()) + "'";
    LOG.warn(msg);
    throw new IOException(msg);
  }

  byte[] value = new byte[actions.length];
  for (int i = 0; i < actions.length; i++) {
    value[i] = actions[i].code();
  }
  p.addImmutable(ACL_LIST_FAMILY, key, value);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Writing permission with rowKey " +
        Bytes.toString(rowKey) + " " +
        Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
  }
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(ACL_TABLE_NAME)) {
    table.put(p);
  }
}
Example 5: makePutFromRegionInfo
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Generates and returns a Put containing the region info for the catalog table
 * and the servers.
 * @param regionInfo the region to build the Put for
 * @param favoredNodeList the favored nodes to record for the region
 * @return the Put, or null if favoredNodeList is null
 */
static Put makePutFromRegionInfo(HRegionInfo regionInfo, List<ServerName> favoredNodeList)
    throws IOException {
  Put put = null;
  if (favoredNodeList != null) {
    put = MetaTableAccessor.makePutFromRegionInfo(regionInfo);
    byte[] favoredNodes = getFavoredNodes(favoredNodeList);
    put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
        EnvironmentEdgeManager.currentTime(), favoredNodes);
    LOG.info("Create the region " + regionInfo.getRegionNameAsString() +
        " with favored nodes " + Bytes.toString(favoredNodes));
  }
  return put;
}
Example 6: upsert
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
private void upsert(Table table, NamespaceDescriptor ns) throws IOException {
  validateTableAndRegionCount(ns);
  Put p = new Put(Bytes.toBytes(ns.getName()));
  p.addImmutable(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
      HTableDescriptor.NAMESPACE_COL_DESC_BYTES,
      ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
  table.put(p);
  try {
    zkNamespaceManager.update(ns);
  } catch (IOException ex) {
    String msg = "Failed to update namespace information in ZK. Aborting.";
    LOG.fatal(msg, ex);
    masterServices.abort(msg, ex);
  }
}
Example 7: addEmptyLocation
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
private static Put addEmptyLocation(final Put p, int replicaId) {
  p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId), null);
  p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId),
      null);
  p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId), null);
  return p;
}
Example 8: addLocation
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
  p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
      Bytes.toBytes(sn.getHostAndPort()));
  p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
      Bytes.toBytes(sn.getStartcode()));
  p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
      Bytes.toBytes(openSeqNum));
  return p;
}
Example 9: addEmptyLocation
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
private static Put addEmptyLocation(final Put p, int replicaId) {
  p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId), null);
  p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId),
      null);
  p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId), null);
  return p;
}
Example 10: addDaughtersToPut
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Adds split daughters to the Put
 */
public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
  if (splitA != null) {
    put.addImmutable(
        HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
  }
  if (splitB != null) {
    put.addImmutable(
        HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
  }
  return put;
}
Example 11: mergeRegions
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
/**
 * Merges the two regions into one in an atomic operation. Deletes the two
 * merging regions in hbase:meta and adds the merged region with the information of
 * the two merging regions.
 * @param connection connection we're using
 * @param mergedRegion the merged region
 * @param regionA the first region being merged
 * @param regionB the second region being merged
 * @param sn the location of the merged region
 * @param regionReplication the number of replicas configured for the region
 * @param masterSystemTime the time of the merge as observed by the master
 * @throws IOException if updating hbase:meta fails
 */
public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
    HRegionInfo regionA, HRegionInfo regionB, ServerName sn, int regionReplication,
    long masterSystemTime)
    throws IOException {
  Table meta = getMetaHTable(connection);
  try {
    HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
    // Use the maximum of what the master passed us vs. local time.
    long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
    // Put for the merged (parent) region
    Put putOfMerged = makePutFromRegionInfo(copyOfMerged, time);
    putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
        regionA.toByteArray());
    putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
        regionB.toByteArray());
    // Deletes for the merging regions
    Delete deleteA = makeDeleteFromRegionInfo(regionA, time);
    Delete deleteB = makeDeleteFromRegionInfo(regionB, time);
    // The merged region is a new region, so openSeqNum = 1 is fine.
    addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId());
    // Add empty locations for region replicas of the merged region so that the number
    // of replicas can be cached whenever the primary region is looked up from meta.
    for (int i = 1; i < regionReplication; i++) {
      addEmptyLocation(putOfMerged, i);
    }
    byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
        + HConstants.DELIMITER);
    multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
  } finally {
    meta.close();
  }
}
Example 12: addLocation
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
public static Put addLocation(final Put p, final ServerName sn, long openSeqNum,
    long time, int replicaId) {
  if (time <= 0) {
    time = EnvironmentEdgeManager.currentTime();
  }
  p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), time,
      Bytes.toBytes(sn.getHostAndPort()));
  p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), time,
      Bytes.toBytes(sn.getStartcode()));
  p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), time,
      Bytes.toBytes(openSeqNum));
  return p;
}
Example 13: addEmptyLocation
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
public static Put addEmptyLocation(final Put p, int replicaId) {
  long now = EnvironmentEdgeManager.currentTime();
  p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now, null);
  p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now, null);
  p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now, null);
  return p;
}
Example 14: addRegionInfo
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
private static Put addRegionInfo(final Put p, final HRegionInfo hri)
    throws IOException {
  p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      hri.toByteArray());
  return p;
}
Example 15: updateBinary
import org.apache.hadoop.hbase.client.Put; // import the class the method depends on
Response updateBinary(final byte[] message, final HttpHeaders headers,
    final boolean replace) {
  servlet.getMetrics().incrementRequests(1);
  if (servlet.isReadOnly()) {
    servlet.getMetrics().incrementFailedPutRequests(1);
    return Response.status(Response.Status.FORBIDDEN)
      .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
      .build();
  }
  Table table = null;
  try {
    byte[] row = rowspec.getRow();
    byte[][] columns = rowspec.getColumns();
    byte[] column = null;
    if (columns != null) {
      column = columns[0];
    }
    long timestamp = HConstants.LATEST_TIMESTAMP;
    List<String> vals = headers.getRequestHeader("X-Row");
    if (vals != null && !vals.isEmpty()) {
      row = Bytes.toBytes(vals.get(0));
    }
    vals = headers.getRequestHeader("X-Column");
    if (vals != null && !vals.isEmpty()) {
      column = Bytes.toBytes(vals.get(0));
    }
    vals = headers.getRequestHeader("X-Timestamp");
    if (vals != null && !vals.isEmpty()) {
      timestamp = Long.parseLong(vals.get(0));
    }
    if (column == null) {
      servlet.getMetrics().incrementFailedPutRequests(1);
      return Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
        .build();
    }
    Put put = new Put(row);
    byte[][] parts = KeyValue.parseColumn(column);
    if (parts.length != 2) {
      return Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
        .build();
    }
    put.addImmutable(parts[0], parts[1], timestamp, message);
    table = servlet.getTable(tableResource.getName());
    table.put(put);
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + put.toString());
    }
    servlet.getMetrics().incrementSucessfulPutRequests(1);
    return Response.ok().build();
  } catch (Exception e) {
    servlet.getMetrics().incrementFailedPutRequests(1);
    return processException(e);
  } finally {
    if (table != null) {
      try {
        table.close();
      } catch (IOException ioe) {
        LOG.debug(ioe);
      }
    }
  }
}
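Example 15 is the server side of HBase's REST gateway. A hedged sketch of exercising such an endpoint from plain Java follows; the host, port (8080 is a common default for the REST server), table, row, and column in the URL are placeholders, not values taken from the example:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class RestBinaryPutDemo {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint: http://<rest-host>:<port>/<table>/<row>/<family:qualifier>
    URL url = new URL("http://localhost:8080/demo_table/row-1/cf:q");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);
    // The handler above stores the raw request body as the cell value.
    conn.setRequestProperty("Content-Type", "application/octet-stream");
    try (OutputStream out = conn.getOutputStream()) {
      out.write("some-value".getBytes("UTF-8"));
    }
    System.out.println("HTTP status: " + conn.getResponseCode());
    conn.disconnect();
  }
}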