This page collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Increment.setDurability. If you have been wondering what Increment.setDurability does, how to use it, or where to find examples of it, the curated method code examples here may help. You can also explore the containing class, org.apache.hadoop.hbase.client.Increment, in more depth.
Below are 9 code examples of the Increment.setDurability method, sorted by popularity by default.
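Before the examples, here is a minimal client-side sketch of where setDurability fits in an ordinary increment call. This sketch is not taken from the examples below; the connection, table name, and column names are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementDurabilityDemo {
  public static void main(String[] args) throws IOException {
    // Durability controls how this mutation uses the write-ahead log:
    // USE_DEFAULT, SKIP_WAL, ASYNC_WAL, SYNC_WAL or FSYNC_WAL.
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("counters"))) {
      Increment inc = new Increment(Bytes.toBytes("row1"));
      inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
      inc.setDurability(Durability.SYNC_WAL); // force a WAL sync for this increment
      Result result = table.increment(inc);
      System.out.println("hits = " + Bytes.toLong(result.value()));
    }
  }
}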
Example 1: testIncrWithReadOnlyTable
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
@Test
public void testIncrWithReadOnlyTable() throws Exception {
  byte[] TABLE = Bytes.toBytes("readOnlyTable");
  this.region = initHRegion(TABLE, getName(), CONF, true, Bytes.toBytes("somefamily"));
  boolean exceptionCaught = false;
  Increment inc = new Increment(Bytes.toBytes("somerow"));
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), 1L);
  try {
    region.increment(inc);
  } catch (IOException e) {
    exceptionCaught = true;
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
  assertTrue(exceptionCaught);
}
Example 2: incrementFromThrift
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
public static Increment incrementFromThrift(TIncrement in) throws IOException {
  Increment out = new Increment(in.getRow());
  for (TColumnIncrement column : in.getColumns()) {
    out.addColumn(column.getFamily(), column.getQualifier(), column.getAmount());
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetDurability()) {
    out.setDurability(durabilityFromThrift(in.getDurability()));
  }
  if (in.getCellVisibility() != null) {
    out.setCellVisibility(new CellVisibility(in.getCellVisibility().getExpression()));
  }
  return out;
}
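The durabilityFromThrift helper called above is not shown on this page. A minimal sketch of such a mapping, assuming a thrift-generated TDurability enum whose constants mirror HBase's client-side Durability values (an assumption, not the verbatim helper):

// Hypothetical sketch: translate the thrift-level enum to the client enum.
private static Durability durabilityFromThrift(TDurability tDurability) {
  switch (tDurability) {
    case SKIP_WAL:  return Durability.SKIP_WAL;
    case ASYNC_WAL: return Durability.ASYNC_WAL;
    case SYNC_WAL:  return Durability.SYNC_WAL;
    case FSYNC_WAL: return Durability.FSYNC_WAL;
    default:        return Durability.USE_DEFAULT;
  }
}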
Example 3: run
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
@Override
public void run() {
  for (int i = 0; i < numIncrements; i++) {
    try {
      Increment inc = new Increment(row);
      inc.addColumn(fam1, qual1, amount);
      inc.addColumn(fam1, qual2, amount * 2);
      inc.addColumn(fam2, qual3, amount * 3);
      inc.setDurability(Durability.ASYNC_WAL);
      region.increment(inc);
      // verify: make sure we only see completed increments
      Get g = new Get(row);
      Result result = region.get(g);
      assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 2,
          Bytes.toLong(result.getValue(fam1, qual2)));
      assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 3,
          Bytes.toLong(result.getValue(fam2, qual3)));
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
Example 4: getIncrementFromTuple
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
/**
 * Creates an HBase {@link Increment} from a Storm {@link Tuple}.
 *
 * @param tuple The {@link Tuple}
 * @param increment The amount to increment the counter by
 * @return {@link Increment}
 */
public Increment getIncrementFromTuple(final Tuple tuple, final long increment) {
  byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField));
  Increment inc = new Increment(rowKey);
  inc.setDurability(durability);
  if (columnFamilies.size() > 0) {
    for (String cf : columnFamilies.keySet()) {
      byte[] cfBytes = Bytes.toBytes(cf);
      for (String cq : columnFamilies.get(cf)) {
        byte[] val;
        try {
          val = Bytes.toBytes(tuple.getStringByField(cq));
        } catch (IllegalArgumentException ex) {
          // if cq isn't a tuple field, use cq itself as the counter qualifier
          // instead of the tuple value
          val = Bytes.toBytes(cq);
        }
        inc.addColumn(cfBytes, val, increment);
      }
    }
  }
  return inc;
}
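For context, a hedged sketch of how a Storm bolt might call getIncrementFromTuple from its execute method; the table and collector fields are assumed to be set up elsewhere in the bolt and are not part of the example above:

@Override
public void execute(Tuple tuple) {
  // build the Increment from the incoming tuple, bumping each counter by 1
  Increment inc = getIncrementFromTuple(tuple, 1L);
  try {
    table.increment(inc); // 'table' is an HBase Table held by this bolt (assumed)
  } catch (IOException e) {
    collector.reportError(e);
  }
  collector.ack(tuple);
}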
Example 5: testResettingCounters
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
@Test
public void testResettingCounters() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  byte[] table = Bytes.toBytes("table");
  byte[][] families = new byte[][] {
      Bytes.toBytes("family1"),
      Bytes.toBytes("family2"),
      Bytes.toBytes("family3")
  };
  int numQualifiers = 10;
  byte[][] qualifiers = new byte[numQualifiers][];
  for (int i = 0; i < numQualifiers; i++) qualifiers[i] = Bytes.toBytes("qf" + i);
  int numRows = 10;
  byte[][] rows = new byte[numRows][];
  for (int i = 0; i < numRows; i++) rows[i] = Bytes.toBytes("r" + i);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  for (byte[] family : families) htd.addFamily(new HColumnDescriptor(family));
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false);
  String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
  Path path = new Path(testDir);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  HRegion region = HRegion.createHRegion(hri, path, conf, htd);
  try {
    Increment odd = new Increment(rows[0]);
    odd.setDurability(Durability.SKIP_WAL);
    Increment even = new Increment(rows[0]);
    even.setDurability(Durability.SKIP_WAL);
    Increment all = new Increment(rows[0]);
    all.setDurability(Durability.SKIP_WAL);
    for (int i = 0; i < numQualifiers; i++) {
      if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
      else odd.addColumn(families[0], qualifiers[i], 1);
      all.addColumn(families[0], qualifiers[i], 1);
    }
    // increment odd qualifiers 5 times and flush
    for (int i = 0; i < 5; i++) region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE);
    region.flush(true);
    // increment even qualifiers 5 times
    for (int i = 0; i < 5; i++) region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE);
    // increment all qualifiers, should have value=6 for all
    Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE);
    assertEquals(numQualifiers, result.size());
    Cell[] kvs = result.rawCells();
    for (int i = 0; i < kvs.length; i++) {
      System.out.println(kvs[i].toString());
      assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
      assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
Example 6: run
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
@Override
public void run() {
  for (int i = 0; i < numIncrements; i++) {
    try {
      Increment inc = new Increment(row);
      inc.addColumn(fam1, qual1, amount);
      inc.addColumn(fam1, qual2, amount * 2);
      inc.addColumn(fam2, qual3, amount * 3);
      inc.setDurability(Durability.ASYNC_WAL);
      region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
      // verify: make sure we only see completed increments
      Get g = new Get(row);
      Result result = region.get(g);
      if (result != null) {
        assertTrue(result.getValue(fam1, qual1) != null);
        assertTrue(result.getValue(fam1, qual2) != null);
        assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 2,
            Bytes.toLong(result.getValue(fam1, qual2)));
        assertTrue(result.getValue(fam2, qual3) != null);
        assertEquals(Bytes.toLong(result.getValue(fam1, qual1)) * 3,
            Bytes.toLong(result.getValue(fam2, qual3)));
      }
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
Example 7: createIncrement
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
/**
 * Creates an HBase Increment for a counter.
 *
 * @param rowKey The row key.
 * @param cols The columns to include.
 * @param durability The durability of the increment.
 * @param timeToLiveMillis The time-to-live of the incremented cells, in milliseconds.
 * @return The increment to submit.
 */
private Increment createIncrement(byte[] rowKey, ColumnList cols, Durability durability, long timeToLiveMillis) {
  Increment inc = new Increment(rowKey);
  inc.setDurability(durability);
  inc.setTTL(timeToLiveMillis);
  cols.getCounters().forEach(cnt -> inc.addColumn(cnt.getFamily(), cnt.getQualifier(), cnt.getIncrement()));
  return inc;
}
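A hedged usage sketch for the helper above, assuming cols is storm-hbase's org.apache.storm.hbase.common.ColumnList (whose addCounter matches the getCounters() accessors used in the loop); the row key, family, TTL, and table are illustrative:

ColumnList cols = new ColumnList();
cols.addCounter(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
// keep counter cells for one hour (assumed TTL); write the WAL asynchronously
Increment inc = createIncrement(Bytes.toBytes("row1"), cols, Durability.ASYNC_WAL, 3600000L);
table.increment(inc);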
Example 8: testIncrementTimestampsAreMonotonic
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
@Test
public void testIncrementTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setValue(10);
  Increment inc = new Increment(row);
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(fam1, qual1, 1L);
  region.increment(inc);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  assertEquals(10L, c.getTimestamp());
  edge.setValue(1); // clock goes back
  region.increment(inc);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(10L, c.getTimestamp());
  assertEquals(2L, Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()));
}
Example 9: testResettingCounters
import org.apache.hadoop.hbase.client.Increment; // import the package/class this method depends on
@Test
public void testResettingCounters() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  byte[] table = Bytes.toBytes("table");
  byte[][] families = new byte[][] {
      Bytes.toBytes("family1"),
      Bytes.toBytes("family2"),
      Bytes.toBytes("family3")
  };
  int numQualifiers = 10;
  byte[][] qualifiers = new byte[numQualifiers][];
  for (int i = 0; i < numQualifiers; i++) qualifiers[i] = Bytes.toBytes("qf" + i);
  int numRows = 10;
  byte[][] rows = new byte[numRows][];
  for (int i = 0; i < numRows; i++) rows[i] = Bytes.toBytes("r" + i);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  for (byte[] family : families) htd.addFamily(new HColumnDescriptor(family));
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false);
  String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
  Path path = new Path(testDir);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  HRegion region = HRegion.createHRegion(hri, path, conf, htd);
  try {
    Increment odd = new Increment(rows[0]);
    odd.setDurability(Durability.SKIP_WAL);
    Increment even = new Increment(rows[0]);
    even.setDurability(Durability.SKIP_WAL);
    Increment all = new Increment(rows[0]);
    all.setDurability(Durability.SKIP_WAL);
    for (int i = 0; i < numQualifiers; i++) {
      if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
      else odd.addColumn(families[0], qualifiers[i], 1);
      all.addColumn(families[0], qualifiers[i], 1);
    }
    // increment odd qualifiers 5 times and flush
    for (int i = 0; i < 5; i++) region.increment(odd);
    region.flushcache();
    // increment even qualifiers 5 times
    for (int i = 0; i < 5; i++) region.increment(even);
    // increment all qualifiers, should have value=6 for all
    Result result = region.increment(all);
    assertEquals(numQualifiers, result.size());
    Cell[] kvs = result.rawCells();
    for (int i = 0; i < kvs.length; i++) {
      System.out.println(kvs[i].toString());
      assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
      assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}