This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Durability. If you are wondering what the Durability class does, or how and where to use it, the curated examples below should help.
The Durability class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
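Before the examples, here is a minimal self-contained sketch of the pattern they all share: choose a Durability level for a mutation before writing it. It targets the same HBase 1.x-era client API the examples below use; the table name, column family, and qualifier are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DurabilityDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo_table"))) { // hypothetical table
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SYNC_WAL waits for the WAL sync before acking; SKIP_WAL trades
      // durability for write speed, as several examples below do.
      put.setDurability(Durability.SYNC_WAL);
      table.put(put);
    }
  }
}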
Example 1: testRow
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  byte[] row = format(i);
  Put put = new Put(row);
  byte[] value = generateData(this.rand, ROW_LENGTH);
  if (useTags) {
    byte[] tag = generateData(this.rand, TAG_LENGTH);
    Tag[] tags = new Tag[noOfTags];
    for (int n = 0; n < noOfTags; n++) {
      Tag t = new Tag((byte) n, tag);
      tags[n] = t;
    }
    KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP,
        value, tags);
    put.add(kv);
  } else {
    put.add(FAMILY_NAME, QUALIFIER_NAME, value);
  }
  put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
Example 2: write
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
/**
 * Writes an action (Put or Delete) to the specified table.
 *
 * @param tableName
 *          the table being updated.
 * @param action
 *          the update, either a put or a delete.
 * @throws IllegalArgumentException
 *           if the action is not a put or a delete.
 */
@Override
public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException {
  BufferedMutator mutator = getBufferedMutator(tableName);
  // The actions are not immutable, so we defensively copy them
  if (action instanceof Put) {
    Put put = new Put((Put) action);
    put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL
        : Durability.SKIP_WAL);
    mutator.mutate(put);
  } else if (action instanceof Delete) {
    Delete delete = new Delete((Delete) action);
    mutator.mutate(delete);
  } else {
    throw new IllegalArgumentException(
        "action must be either Delete or Put");
  }
}
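This writer is normally driven from a MapReduce task rather than called directly. A hedged usage sketch, assuming a Reducer context wired to this multi-table output format (the table name, row key, and column names below are hypothetical):

// Inside a Reducer: the output key selects the target table,
// the output value is the mutation the writer above will apply.
ImmutableBytesWritable outTable = new ImmutableBytesWritable(Bytes.toBytes("my_table"));
Put put = new Put(Bytes.toBytes("row1"));
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
context.write(outTable, put);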
Example 3: toDurability
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
/**
 * Convert a protobuf Durability into a client Durability
 */
public static Durability toDurability(
    final ClientProtos.MutationProto.Durability proto) {
  switch (proto) {
    case USE_DEFAULT:
      return Durability.USE_DEFAULT;
    case SKIP_WAL:
      return Durability.SKIP_WAL;
    case ASYNC_WAL:
      return Durability.ASYNC_WAL;
    case SYNC_WAL:
      return Durability.SYNC_WAL;
    case FSYNC_WAL:
      return Durability.FSYNC_WAL;
    default:
      return Durability.USE_DEFAULT;
  }
}
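A serialization utility like this usually needs the opposite direction too. A minimal sketch of the inverse conversion, simply mirroring the switch above (hedged; the real ProtobufUtil method may differ in details):

public static ClientProtos.MutationProto.Durability toDurability(final Durability d) {
  switch (d) {
    case USE_DEFAULT: return ClientProtos.MutationProto.Durability.USE_DEFAULT;
    case SKIP_WAL:    return ClientProtos.MutationProto.Durability.SKIP_WAL;
    case ASYNC_WAL:   return ClientProtos.MutationProto.Durability.ASYNC_WAL;
    case SYNC_WAL:    return ClientProtos.MutationProto.Durability.SYNC_WAL;
    case FSYNC_WAL:   return ClientProtos.MutationProto.Durability.FSYNC_WAL;
    default:          return ClientProtos.MutationProto.Durability.USE_DEFAULT;
  }
}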
Example 4: testRow
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  byte[] row = getRandomRow(this.rand, opts.totalRows);
  Put put = new Put(row);
  for (int column = 0; column < opts.columns; column++) {
    byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column);
    byte[] value = generateData(this.rand, getValueLength(this.rand));
    if (opts.useTags) {
      byte[] tag = generateData(this.rand, TAG_LENGTH);
      Tag[] tags = new Tag[opts.noOfTags];
      for (int n = 0; n < opts.noOfTags; n++) {
        Tag t = new Tag((byte) n, tag);
        tags[n] = t;
      }
      KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP,
          value, tags);
      put.add(kv);
      updateValueSize(kv.getValueLength());
    } else {
      put.add(FAMILY_NAME, qualifier, value);
      updateValueSize(value.length);
    }
  }
  put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
  mutator.mutate(put);
}
Example 5: createMultiRegionsWithPBSerialization
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
int createMultiRegionsWithPBSerialization(final Configuration c,
    final TableName tableName,
    byte[][] startKeys) throws IOException {
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);
  List<HRegionInfo> newRegions
      = new ArrayList<HRegionInfo>(startKeys.length);
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
    Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
    put.setDurability(Durability.SKIP_WAL);
    meta.put(put);
    LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());
    newRegions.add(hri);
    count++;
  }
  meta.close();
  return count;
}
Example 6: createLocalHRegion
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
/**
 * @param tableName
 * @param startKey
 * @param stopKey
 * @param callingMethod
 * @param conf
 * @param isReadOnly
 * @param durability
 * @param wal
 * @param families
 * @throws IOException
 * @return A region on which you must call
 *         {@link HRegion#closeHRegion(HRegion)} when done.
 */
public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
    WAL wal, byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.setReadOnly(isReadOnly);
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Keep all cell versions (max versions set to Integer.MAX_VALUE).
    hcd.setMaxVersions(Integer.MAX_VALUE);
    htd.addFamily(hcd);
  }
  htd.setDurability(durability);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
  return createLocalHRegion(info, htd, wal);
}
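The javadoc contract above matters in practice: the caller owns the returned region. A hedged usage sketch, assuming a test-utility instance `util` plus `conf` and `wal` already set up (table and family names are hypothetical):

HRegion region = util.createLocalHRegion(Bytes.toBytes("test_table"),
    HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, "myTest",
    conf, false, Durability.SYNC_WAL, wal, Bytes.toBytes("cf"));
try {
  // ... exercise the region: puts, gets, flushes ...
} finally {
  // Per the javadoc above, the caller must close the region when done.
  HRegion.closeHRegion(region);
}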
Example 7: insertData
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
private static int insertData(TableName tableName, String column, double prob) throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
        + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
Example 8: writeTestDataBatch
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
static void writeTestDataBatch(Configuration conf, TableName tableName,
    int batchId) throws Exception {
  LOG.debug("Writing test data batch " + batchId);
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    Put put = new Put(getRowKey(batchId, i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      put.add(CF_BYTES, getQualifier(j),
          getValue(batchId, i, j));
    }
    put.setDurability(Durability.SKIP_WAL);
    puts.add(put);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(tableName)) {
    table.put(puts);
  }
}
Example 9: testIncrementWithReturnResultsSetToFalse
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");
  // Setting up region
  final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
  byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
  final WAL wal = wals.getWAL(tableName);
  HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
  Increment inc1 = new Increment(row1);
  inc1.setReturnResults(false);
  inc1.addColumn(FAMILY, col1, 1);
  Result res = region.increment(inc1);
  assertNull(res);
}
Example 10: createHRegion
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
private HRegion createHRegion(byte[] tableName, String callingMethod,
    WAL log, Durability durability)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.setDurability(durability);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  if (FS.exists(path)) {
    if (!FS.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, CONF, htd, log);
}
Example 11: addWideContent
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
private int addWideContent(HRegion region) throws IOException {
  int count = 0;
  for (char c = 'a'; c <= 'c'; c++) {
    byte[] row = Bytes.toBytes("ab" + c);
    int i, j;
    long ts = System.currentTimeMillis();
    for (i = 0; i < 100; i++) {
      byte[] b = Bytes.toBytes(String.format("%10d", i));
      for (j = 0; j < 100; j++) {
        Put put = new Put(row);
        put.setDurability(Durability.SKIP_WAL);
        put.add(COLUMNS[rng.nextInt(COLUMNS.length)], b, ++ts, b);
        region.put(put);
        count++;
      }
    }
  }
  return count;
}
Example 12: durabilityFromThrift
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
private static Durability durabilityFromThrift(TDurability tDurability) {
  switch (tDurability.getValue()) {
    case 1: return Durability.SKIP_WAL;
    case 2: return Durability.ASYNC_WAL;
    case 3: return Durability.SYNC_WAL;
    case 4: return Durability.FSYNC_WAL;
    default: return null;
  }
}
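A hedged sketch of how such a helper is typically applied when converting a Thrift TPut into a client Put. It assumes Thrift-generated accessors like isSetDurability for the optional durability field, and that `tPut` and `put` already exist in the enclosing conversion method:

// Only override the durability if the Thrift client actually set one;
// durabilityFromThrift returns null for unrecognized enum values.
if (tPut.isSetDurability()) {
  Durability durability = durabilityFromThrift(tPut.getDurability());
  if (durability != null) {
    put.setDurability(durability);
  }
}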
Example 13: prePut
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
    WALEdit edit, Durability durability) throws IOException {
  // check the put against the stored constraints
  for (Constraint c : constraints) {
    c.check(put);
  }
  // if we made it here, then the Put is valid
}
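For context, a Constraint is a user-supplied check that the hook above runs against every Put. A minimal sketch of one (NonEmptyRowConstraint is a hypothetical class; it assumes the BaseConstraint and ConstraintException types from the org.apache.hadoop.hbase.constraint package this hook appears to belong to):

public class NonEmptyRowConstraint extends BaseConstraint {
  @Override
  public void check(Put put) throws ConstraintException {
    // Reject puts with an empty row key; the prePut hook above surfaces
    // this exception to the client before the edit reaches the WAL.
    if (put.getRow() == null || put.getRow().length == 0) {
      throw new ConstraintException("row key must not be empty");
    }
  }
}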
Example 14: prePut
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Put put, final WALEdit edit, final Durability durability)
    throws IOException {
  User user = getActiveUser();
  checkForReservedTagPresence(user, put);
  // Require WRITE permission to the table, CF, or top visible value, if any.
  // NOTE: We don't need to check the permissions for any earlier Puts
  // because we treat the ACLs in each Put as timestamped like any other
  // HBase value. A new ACL in a new Put applies to that Put. It doesn't
  // change the ACL of any previous Put. This allows simple evolution of
  // security policy over time without requiring expensive updates.
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[], ? extends Collection<Cell>> families = put.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      put.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }
  // Add cell ACLs from the operation to the cells themselves
  byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, put.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }
}
Example 15: getRegion
import org.apache.hadoop.hbase.client.Durability; // import the required package/class
private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
  WAL wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getDataTestDir().toString(), conf);
  return (HRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf,
      false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
}