This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Connection. If you are unsure what the Connection class does or how to use it, the curated examples below should help.
The Connection class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the class are presented below, sorted by popularity.
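Before the individual examples, here is a minimal sketch of the typical Connection lifecycle: a Connection is heavyweight and thread-safe, so it is created once and shared, while Table handles are lightweight and obtained per unit of work. The table name, family, and qualifier below are hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionLifecycleSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Create the Connection once; it manages ZooKeeper and RPC resources.
        try (Connection connection = ConnectionFactory.createConnection(conf);
                // Table handles are cheap; obtain one per unit of work.
                Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
            Result result = table.get(new Get(Bytes.toBytes("row1")));
            byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
            System.out.println("Value: " + (value == null ? "<none>" : Bytes.toString(value)));
        }
    }
}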
Example 1: main
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
public static void main(String[] argc) throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));
if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
conf.set("mapreduce.job.credentials.binary",
System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
}
Connection connection = ConnectionFactory.createConnection(conf);
Admin admin = connection.getAdmin();
System.out.println("Compacting table " + argc[0]);
TableName tableName = TableName.valueOf(argc[0]);
admin.majorCompact(tableName);
// Compare with equals(): comparing Strings with == tests reference identity, not content.
while ("MAJOR".equals(admin.getCompactionState(tableName).toString())) {
TimeUnit.SECONDS.sleep(10);
System.out.println("Compacting table " + argc[0]);
}
System.out.println("Done compacting table " + argc[0]);
}
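If the running HBase version exposes the compaction-state enum from the admin API, comparing enum values directly is cleaner than comparing strings. A sketch assuming the HBase 1.x admin API, where getCompactionState returns AdminProtos.GetRegionInfoResponse.CompactionState:

// Sketch assuming HBase 1.x; on other versions the enum type differs.
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;

while (admin.getCompactionState(tableName)
        == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) {
    TimeUnit.SECONDS.sleep(10); // poll until the major compaction finishes
}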
Example 2: configure
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
@Override
public void configure(JobConf job) {
try {
Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
TableName tableName = TableName.valueOf("exampleJobConfigurableTable");
// mandatory: TableInputFormatBase subclasses must call initializeTable()
initializeTable(connection, tableName);
byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
Bytes.toBytes("columnB") };
// optional: narrow the scan to the desired families and rows
Scan scan = new Scan();
for (byte[] family : inputColumns) {
scan.addFamily(family);
}
Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
scan.setFilter(exampleFilter);
setScan(scan);
} catch (IOException exception) {
throw new RuntimeException("Failed to initialize.", exception);
}
}
Example 3: testSocketClosed
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
@Test(expected=RetriesExhaustedException.class)
public void testSocketClosed() throws IOException, InterruptedException {
String tableName = "testSocketClosed";
TableName name = TableName.valueOf(tableName);
UTIL.createTable(name, fam1).close();
Configuration conf = new Configuration(UTIL.getConfiguration());
conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
MyRpcClientImpl.class.getName());
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
Connection connection = ConnectionFactory.createConnection(conf);
Table table = connection.getTable(name);
table.get(new Get("asd".getBytes()));
connection.close();
for (Socket socket : MyRpcClientImpl.savedSockets) {
assertTrue("Socket + " + socket + " is not closed", socket.isClosed());
}
}
Example 4: verifyUserAllowedforCheckAndDelete
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
private void verifyUserAllowedforCheckAndDelete(final User user, final byte[] row,
final byte[] q1, final byte[] value) throws IOException, InterruptedException {
user.runAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
Delete d = new Delete(row);
d.addColumn(TEST_FAMILY1, q1, 120);
t.checkAndDelete(row, TEST_FAMILY1, q1, value, d);
}
}
return null;
}
});
}
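For context, helpers like this are usually driven from a test body after the relevant ACL grants are in place. A hypothetical invocation (the user name, row, qualifier, and value literals are made up; conf and the helper come from the enclosing test class):

// Hypothetical driver for the helper above.
User user1 = User.createUserForTesting(conf, "user1", new String[0]);
verifyUserAllowedforCheckAndDelete(user1, Bytes.toBytes("r1"),
    Bytes.toBytes("q1"), Bytes.toBytes("v1"));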
Example 5: verifyUserDeniedForDeleteExactVersion
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
private void verifyUserDeniedForDeleteExactVersion(final User user, final byte[] row,
final byte[] q1, final byte[] q2) throws IOException, InterruptedException {
user.runAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
Delete d = new Delete(row, 127);
d.addColumns(TEST_FAMILY1, q1);
d.addColumns(TEST_FAMILY1, q2);
d.addFamily(TEST_FAMILY2, 129);
t.delete(d);
fail(user.getShortName() + " should not be able to delete");
} catch (Exception e) {
// expected: the delete must be rejected for this user
}
}
return null;
}
});
}
Example 6: Test
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
/**
* Note that all subclasses of this class must provide a public constructor
* that has the exact same list of arguments.
*/
Test(final Connection con, final TestOptions options, final Status status) {
this.connection = con;
this.conf = con == null ? HBaseConfiguration.create() : this.connection.getConfiguration();
this.opts = options;
this.status = status;
this.testName = this.getClass().getSimpleName();
receiverHost = SpanReceiverHost.getInstance(conf);
if (options.traceRate >= 1.0) {
this.traceSampler = Sampler.ALWAYS;
} else if (options.traceRate > 0.0) {
conf.setDouble("hbase.sampler.fraction", options.traceRate);
this.traceSampler = new ProbabilitySampler(new HBaseHTraceConfiguration(conf));
} else {
this.traceSampler = Sampler.NEVER;
}
// equivalent to sampling one of every (1 / sampleRate) rows
everyN = (int) (opts.totalRows / (opts.totalRows * opts.sampleRate));
if (options.isValueZipf()) {
this.zipf = new RandomDistribution.Zipf(this.rand, 1, options.getValueSize(), 1.1);
}
LOG.info("Sampling 1 every " + everyN + " out of " + opts.perClientRunRows + " total rows.");
}
Example 7: validateFromSnapshotFromMeta
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
private void validateFromSnapshotFromMeta(HBaseTestingUtility util, TableName table,
int numRegions, int numReplica, Connection connection) throws IOException {
SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(
connection);
snapshot.initialize();
Map<HRegionInfo, ServerName> regionToServerMap = snapshot.getRegionToRegionServerMap();
assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace
Map<ServerName, List<HRegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverToRegionMap.entrySet()) {
if (entry.getKey().equals(util.getHBaseCluster().getMaster().getServerName())) {
continue;
}
List<HRegionInfo> regions = entry.getValue();
Set<byte[]> setOfStartKeys = new HashSet<byte[]>();
for (HRegionInfo region : regions) {
byte[] startKey = region.getStartKey();
if (region.getTable().equals(table)) {
setOfStartKeys.add(startKey); //ignore other tables
LOG.info("--STARTKEY " + new String(startKey)+"--");
}
}
// the number of startkeys will be equal to the number of regions hosted in each server
// (each server will be hosting one replica of a region)
assertEquals(numRegions, setOfStartKeys.size());
}
}
Example 8: testInsertHBase
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
/**
* [ { "time": 1456293824385, "host": "09-201509070105", "ip": "127.0.0.1", "svrid":
* "D:/UAV/apache-tomcat-6.0.41::D:/eclipseProject/.metadata/.plugins/org.eclipse.wst.server.core/tmp0", "tag": "L",
* "frames": { "WebTest": [ { "content": "[CE] aaaaa" } ] } } ]
*/
@SuppressWarnings("unchecked")
public static void testInsertHBase() {
DataStoreMsg msg = new DataStoreMsg();
String rawData = DataStoreUnitTest.getData(insertJson);
msg.put(MonitorDataFrame.MessageType.Log.toString(), rawData);
msg.put(DataStoreProtocol.HBASE_TABLE_NAME, HealthManagerConstants.HBASE_TABLE_LOGDATA);
List<String> servers = DataConvertHelper.toList(zklist, ",");
DataStoreConnection obj = new DataStoreConnection(null, null, null, servers, DataStoreType.HBASE);
obj.putContext(DataStoreProtocol.HBASE_ZK_QUORUM, zklist);
obj.putContext(DataStoreProtocol.HBASE_QUERY_CACHING, caching);
obj.putContext(DataStoreProtocol.HBASE_QUERY_MAXRESULTSIZE, maxResultSize);
obj.putContext(DataStoreProtocol.HBASE_QUERY_REVERSE, true);
obj.putContext(DataStoreProtocol.HBASE_QUERY_PAGESIZE, 3000);
AbstractDataStore<Connection> store = DataStoreFactory.getInstance().build(HealthManagerConstants.DataStore_Log,
obj, new LogDataAdapter(), "");
store.start();
boolean rst = store.doInsert(msg);
store.stop();
DataStoreUnitTest.printTestResult("testInsertHBase", rst);
}
Example 9: testQueryHBase
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
/**
* { "starttime": 145629382438, "endtime": 145629382438, //optional "ip": "127.0.0.1", "svrid":
* "D:/UAV/apache-tomcat-6.0.41::D:/eclipseProject/.metadata/.plugins/org.eclipse.wst.server.core/tmp0", "appid":
* "sms" }
*/
@SuppressWarnings("unchecked")
public static void testQueryHBase() {
DataStoreMsg msg = new DataStoreMsg();
msg.put(DataStoreProtocol.HBASE_QUERY_JSON_KEY, queryJson);
List<String> servers = DataConvertHelper.toList(zklist, ",");
DataStoreConnection obj = new DataStoreConnection(null, null, null, servers, DataStoreType.HBASE);
obj.putContext(DataStoreProtocol.HBASE_ZK_QUORUM, zklist);
obj.putContext(DataStoreProtocol.HBASE_QUERY_CACHING, caching);
obj.putContext(DataStoreProtocol.HBASE_QUERY_MAXRESULTSIZE, maxResultSize);
AbstractDataStore<Connection> store = DataStoreFactory.getInstance().build(HealthManagerConstants.DataStore_Log,
obj, new LogDataAdapter(), "");
store.start();
List<String> rst = store.doQuery(msg);
store.stop();
DataStoreUnitTest.printTestResult("testqueryHBase", rst, queryJson);
}
Example 10: getConnection
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
public Connection getConnection() throws IOException {
Connection resultConn;
if (usableSize != 0) {
// reuse an idle pooled connection
resultConn = conns.remove(0);
usableSize--;
} else if (currentSize < maxSize) {
// the pool still has capacity: open a new connection
resultConn = ConnectionFactory.createConnection(conf);
currentSize++;
} else {
// pool exhausted: wait briefly, then retry recursively
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // restore the interrupt flag
logger.error("Failed to obtain an HBase connection!\n" + e.getMessage());
}
return this.getConnection();
}
return resultConn;
}
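The pool above only hands connections out; the matching release path is not shown. A hypothetical companion method (assuming the same conns, usableSize, and currentSize fields, and noting that real code would need consistent synchronization around all three):

// Hypothetical companion to getConnection(): returns a connection to the pool.
public synchronized void releaseConnection(Connection conn) {
    if (conn == null || conn.isClosed()) {
        currentSize--; // a dead connection no longer counts toward the pool size
        return;
    }
    conns.add(conn);
    usableSize++;
}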
Example 11: testAppend
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
@Test (timeout=180000)
public void testAppend() throws Exception {
AccessTestAction appendAction = new AccessTestAction() {
@Override
public Object run() throws Exception {
byte[] row = TEST_ROW;
byte[] qualifier = TEST_QUALIFIER;
Put put = new Put(row);
put.add(TEST_FAMILY, qualifier, Bytes.toBytes(1));
Append append = new Append(row);
append.add(TEST_FAMILY, qualifier, Bytes.toBytes(2));
try(Connection conn = ConnectionFactory.createConnection(conf);
Table t = conn.getTable(TEST_TABLE)) {
t.put(put);
t.append(append);
}
return null;
}
};
verifyAllowed(appendAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW,
USER_GROUP_WRITE);
verifyDenied(appendAction, USER_RO, USER_NONE, USER_GROUP_CREATE, USER_GROUP_READ,
USER_GROUP_ADMIN);
}
Example 12: testManualHBaseInsertion
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
@Test
public void testManualHBaseInsertion() throws ServiceException, IOException {
IgniteConfiguration cfg = prepareConfig(false);
IgniteConfiguration cfg2 = new IgniteConfiguration(cfg);
cfg.setGridName("first");
cfg2.setGridName("second");
String cacheName = "myCache";
try (Ignite ignite = Ignition.getOrStart(cfg); Ignite ignite2 = Ignition.getOrStart(cfg2)) {
IgniteCache<String, String> cache = ignite.getOrCreateCache(cacheName);
cache.remove("Hello");
assertNull(cache.get("Hello"));
try (Connection conn = getHBaseConnection()) {
TableName tableName = TableName.valueOf(TABLE_NAME);
try (Table table = conn.getTable(tableName)) { // close the Table as well
Serializer<Object> serializer = ObjectSerializer.INSTANCE;
Put put = new Put(serializer.serialize("Hello"));
put.addColumn(cacheName.getBytes(), QUALIFIER, serializer.serialize("World"));
table.put(put);
}
}
assertEquals("World", cache.get("Hello"));
}
}
Example 13: checkTablePerms
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
public static void checkTablePerms(HBaseTestingUtility testUtil, TableName table,
Permission... perms) throws IOException {
CheckPermissionsRequest.Builder request = CheckPermissionsRequest.newBuilder();
for (Permission p : perms) {
request.addPermission(ProtobufUtil.toPermission(p));
}
try(Connection conn = ConnectionFactory.createConnection(testUtil.getConfiguration());
Table acl = conn.getTable(table)) {
AccessControlService.BlockingInterface protocol =
AccessControlService.newBlockingStub(acl.coprocessorService(new byte[0]));
try {
protocol.checkPermissions(null, request.build());
} catch (ServiceException se) {
throw ProtobufUtil.toIOException(se); // rethrow, otherwise the failure is silently swallowed
}
}
}
Example 14: putsToMetaTable
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
/**
* Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
* @param connection connection we're using
* @param ps Put to add to hbase:meta
* @throws IOException
*/
public static void putsToMetaTable(final Connection connection, final List<Put> ps)
throws IOException {
Table t = getMetaHTable(connection);
try {
t.put(ps);
} finally {
t.close();
}
}
Example 15: verifyUserDeniedForPutMultipleVersions
import org.apache.hadoop.hbase.client.Connection; // import the required package/class
private void verifyUserDeniedForPutMultipleVersions(final User user, final byte[] row,
final byte[] q1, final byte[] q2, final byte[] value) throws IOException,
InterruptedException {
user.runAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
Put p = new Put(row);
// column q1 covers the version at timestamp 123, for which user2 does not have permission
p.addColumn(TEST_FAMILY1, q1, 124, value);
p.addColumn(TEST_FAMILY1, q2, value);
t.put(p);
fail(user.getShortName() + " should not be able to put");
} catch (Exception e) {
// expected: the put must be rejected for this user
}
}
return null;
}
});
}