This page collects typical usage examples of the Java class org.apache.hadoop.hbase.rest.client.RemoteHTable. If you are wondering what RemoteHTable is for, or how to use it, the curated class examples below should help.
The RemoteHTable class belongs to the org.apache.hadoop.hbase.rest.client package. Seven code examples of the class are shown below, sorted by popularity by default.
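The wiring is the same in every example: build a Cluster pointing at one or more HBase REST gateway hosts, wrap it in a Client, and hand that to RemoteHTable, which then exposes the familiar Get/Scan/Put interface over HTTP. Before the individual examples, here is a minimal self-contained sketch of that pattern; the host, port, table name, and row key are placeholders, not values from any example below:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class RemoteHTableMinimal {
  public static void main(String[] args) throws IOException {
    Cluster cluster = new Cluster();
    cluster.add("localhost", 8080);              // placeholder REST gateway host/port
    Client client = new Client(cluster);         // HTTP transport shared by all calls
    RemoteHTable table = new RemoteHTable(client, "testtable"); // placeholder table name
    try {
      Result result = table.get(new Get(Bytes.toBytes("row-1"))); // placeholder row key
      System.out.println(result);
    } finally {
      table.close();                             // release the client-side resources
    }
  }
}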
Example 1: validate
import org.apache.hadoop.hbase.rest.client.RemoteHTable; // import the required package/class
/**
 * Prints the imported records.
 */
public void validate() throws IOException {
  logger.info("Validating the records");
  RemoteHTable table = new RemoteHTable(hbaseConnection.getClient(), tableName);
  Scan scan = new Scan();
  ResultScanner scanner = table.getScanner(scan);
  Result result;
  int count = 5;
  logger.info("The first " + count + " records will be displayed");
  DefaultDeserializer deserializer = new DefaultDeserializer();
  try {
    while ((result = scanner.next()) != null && count-- != 0) {
      printMap(result.getMap(), " ", deserializer);
    }
  } finally {
    scanner.close();
    table.close();
  }
}
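The printMap helper is not shown in the snippet above. As a rough idea of what it might do, here is a hypothetical sketch that walks the nested map returned by Result.getMap() (family → qualifier → timestamp → value); the signature matches the call site, but the real helper presumably renders values through its project-specific DefaultDeserializer rather than Bytes.toString, so treat everything here as an assumption:

// Hypothetical sketch of the printMap helper invoked above; not part of the original example.
// Requires: java.util.Map, java.util.NavigableMap, org.apache.hadoop.hbase.util.Bytes
private void printMap(NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map,
    String indent, DefaultDeserializer deserializer) {
  for (Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> family : map.entrySet()) {
    logger.info(indent + "family: " + Bytes.toString(family.getKey()));
    for (Map.Entry<byte[], NavigableMap<Long, byte[]>> column : family.getValue().entrySet()) {
      for (Map.Entry<Long, byte[]> cell : column.getValue().entrySet()) {
        // Values are rendered with Bytes.toString here; the original helper
        // presumably routes them through its DefaultDeserializer instead.
        logger.info(indent + indent + Bytes.toString(column.getKey())
            + " @ " + cell.getKey() + " = " + Bytes.toString(cell.getValue()));
      }
    }
  }
}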
Example 2: main
import org.apache.hadoop.hbase.rest.client.RemoteHTable; // import the required package/class
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  HBaseHelper helper = HBaseHelper.getHelper(conf);
  helper.dropTable("testtable");
  helper.createTable("testtable", "colfam1");
  System.out.println("Adding rows to table...");
  helper.fillTable("testtable", 1, 100, 10, "colfam1");
  // vv RestExample
  Cluster cluster = new Cluster();
  cluster.add("localhost", 8080); // co RestExample-1-Cluster Set up a cluster list adding all known REST server hosts.
  Client client = new Client(cluster); // co RestExample-2-Client Create the client handling the HTTP communication.
  RemoteHTable table = new RemoteHTable(client, "testtable"); // co RestExample-3-Table Create a remote table instance, wrapping the REST access into a familiar interface.
  Get get = new Get(Bytes.toBytes("row-30")); // co RestExample-4-Get Perform a get operation as if it were a direct HBase connection.
  get.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("col-3"));
  Result result1 = table.get(get);
  System.out.println("Get result1: " + result1);
  Scan scan = new Scan();
  scan.setStartRow(Bytes.toBytes("row-10"));
  scan.setStopRow(Bytes.toBytes("row-15"));
  scan.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("col-5"));
  ResultScanner scanner = table.getScanner(scan); // co RestExample-5-Scan Scan the table, again, the same approach as if using the native Java API.
  for (Result result2 : scanner) {
    System.out.println("Scan row[" + Bytes.toString(result2.getRow()) + "]: " + result2);
  }
  // ^^ RestExample
}
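Note that the example above never closes the scanner or the remote table. A minimal sketch of the same scan flow with explicit cleanup, reusing the classes already imported above, might look like this (host, port, and table name are placeholders):

Cluster cluster = new Cluster();
cluster.add("localhost", 8080);        // placeholder REST host/port
Client client = new Client(cluster);
RemoteHTable table = new RemoteHTable(client, "testtable");
try {
  Scan scan = new Scan();
  scan.setStartRow(Bytes.toBytes("row-10"));
  scan.setStopRow(Bytes.toBytes("row-15"));
  ResultScanner scanner = table.getScanner(scan);
  try {
    for (Result result : scanner) {
      System.out.println(result);
    }
  } finally {
    scanner.close();                   // releases the scanner held by the REST gateway
  }
} finally {
  table.close();                       // releases the client-side resources
}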
Example 3: main
import org.apache.hadoop.hbase.rest.client.RemoteHTable; // import the required package/class
public static void main(String[] args) {
  try {
    Configuration conf = HBaseConfiguration.create();
    HBaseHelper helper = HBaseHelper.getHelper(conf);
    helper.dropTable("test_table");
    helper.createTable("test_table", "music", "wallpaper", "others");
    System.out.println("Adding rows to table...");
    helper.fillTable("test_table", 1, 100, 10, "music");
    // vv RestExample
    Cluster cluster = new Cluster();
    cluster.add("centos10-82.letv.cn", 8080); // RestExample-1-Cluster Set up a cluster list adding all known REST server hosts.
    Client client = new Client(cluster); // RestExample-2-Client Create the client handling the HTTP communication.
    RemoteHTable table = new RemoteHTable(client, "test_table"); // RestExample-3-Table Create a remote table instance, wrapping the REST access into a familiar interface.
    Get get = new Get(Bytes.toBytes("row-30")); // RestExample-4-Get Perform a get operation as if it were a direct HBase connection.
    get.addColumn(Bytes.toBytes("music"), Bytes.toBytes("col-3"));
    Result result1 = table.get(get);
    log.info("Get result1: {}", result1);
    Scan scan = new Scan();
    scan.setStartRow(Bytes.toBytes("row-10"));
    scan.setStopRow(Bytes.toBytes("row-15"));
    scan.addColumn(Bytes.toBytes("music"), Bytes.toBytes("col-5"));
    ResultScanner scanner = table.getScanner(scan); // RestExample-5-Scan Scan the table, again, the same approach as if using the native Java API.
    for (Result result2 : scanner) {
      log.info("Scan row[{}]: {}", Bytes.toString(result2.getRow()), result2);
    }
  } catch (IOException e) {
    log.error(e.getMessage(), e);
  }
}
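Assuming log is an SLF4J Logger, each {} placeholder in the message string consumes exactly one trailing argument, so the message and the argument list must line up:

// SLF4J parameterized logging: one argument per {} placeholder.
log.info("Get result1: {}", result1);
log.info("Scan row[{}]: {}", Bytes.toString(result2.getRow()), result2);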
Example 4: setUpBeforeClass
import org.apache.hadoop.hbase.rest.client.RemoteHTable; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  LOG.info("Admin Connection=" + admin.getConnection() + ", " +
      admin.getConnection().getZooKeeperWatcher());
  if (!admin.tableExists(TABLE)) {
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(COLUMN_1));
    htd.addFamily(new HColumnDescriptor(COLUMN_2));
    htd.addFamily(new HColumnDescriptor(COLUMN_3));
    admin.createTable(htd);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
    LOG.info("Table connection=" + table.getConnection() + ", " +
        admin.getConnection().getZooKeeperWatcher());
    Put put = new Put(ROW_1);
    put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
    table.put(put);
    put = new Put(ROW_2);
    put.add(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
    put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
    put.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
    table.put(put);
    table.flushCommits();
  }
  remoteTable = new RemoteHTable(
      new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())),
      TEST_UTIL.getConfiguration(), TABLE);
}
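The snippet references fixtures declared elsewhere in the test class: TEST_UTIL, REST_TEST_UTIL, LOG, remoteTable, and the TABLE/COLUMN_*/ROW_*/QUALIFIER_*/VALUE_*/TS_* constants. A plausible minimal sketch of those declarations, modeled loosely on HBase's own REST client tests, is shown below; the exact names and values are assumptions:

// Assumed fixture declarations; the actual test class defines its own values.
private static final Log LOG = LogFactory.getLog(TestRemoteTable.class); // assumed class name
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
private static final byte[] TABLE = Bytes.toBytes("TestRemoteTable");
private static final byte[] COLUMN_1 = Bytes.toBytes("a");
private static final byte[] COLUMN_2 = Bytes.toBytes("b");
private static final byte[] COLUMN_3 = Bytes.toBytes("c");
private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
private static final byte[] QUALIFIER_2 = Bytes.toBytes("2");
private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1");
private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2");
private static final long TS_2 = System.currentTimeMillis();
private static final long TS_1 = TS_2 - 60 * 60 * 1000; // one hour earlier
private static RemoteHTable remoteTable;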
Example 5: setUpBeforeClass
import org.apache.hadoop.hbase.rest.client.RemoteHTable; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (!admin.tableExists(TABLE)) {
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(COLUMN_1));
    htd.addFamily(new HColumnDescriptor(COLUMN_2));
    htd.addFamily(new HColumnDescriptor(COLUMN_3));
    admin.createTable(htd);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
    Put put = new Put(ROW_1);
    put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
    table.put(put);
    put = new Put(ROW_2);
    put.add(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
    put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
    put.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
    table.put(put);
    table.flushCommits();
  }
  remoteTable = new RemoteHTable(
      new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())),
      TEST_UTIL.getConfiguration(), TABLE);
}
Example 6: getDataFromHbaseRest
import org.apache.hadoop.hbase.rest.client.RemoteHTable; // import the required package/class
public static void getDataFromHbaseRest() {
  Cluster hbaseCluster = new Cluster(); // create the cluster object
  hbaseCluster.add("172.28.182.45", 8080); // pass the REST server IP and port
  // Create a REST client instance and get the connection
  Client restClient = new Client(hbaseCluster); // pass the cluster object to the client
  table = new RemoteHTable(restClient, "mywebproject:myclickstream"); // makes a remote call
  Get get = new Get(Bytes.toBytes("row02")); // fetches the row in question
  Result result1 = null; // initialized to null
  try {
    result1 = table.get(get); // execute the get against the remote table
    // Read the column family:qualifier values
    byte[] valueWeb = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col01"));
    byte[] valueWeb01 = result1.getValue(Bytes.toBytes("web"), Bytes.toBytes("col02"));
    byte[] valueWebData = result1.getValue(Bytes.toBytes("websitedata"), Bytes.toBytes("col01"));
    byte[] valueWebData01 = result1.getValue(Bytes.toBytes("websitedata"), Bytes.toBytes("col02"));
    String valueStr = Bytes.toString(valueWeb);
    String valueStr1 = Bytes.toString(valueWeb01);
    String valueWebdataStr = Bytes.toString(valueWebData);
    String valueWebdataStr1 = Bytes.toString(valueWebData01);
    System.out.println("GET: \n" + " web: " + valueStr + "\n web: " + valueStr1
        + "\n " + "Webdata: " + valueWebdataStr + "\n " + "Webdata: " + valueWebdataStr1);
  } catch (IOException e1) {
    e1.printStackTrace();
  } finally {
    /*
     * Release the result reference before moving on. If the object is still
     * needed elsewhere, keep it, but null it once it is no longer used; it is
     * a good programming practice.
     */
    result1 = null;
  }
  ResultScanner rsScanner = null;
  try {
    Scan s = new Scan();
    s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col01"));
    s.addColumn(Bytes.toBytes("web"), Bytes.toBytes("col02"));
    rsScanner = table.getScanner(s);
    for (Result rr = rsScanner.next(); rr != null; rr = rsScanner.next()) {
      System.out.println("Found row : " + rr);
    }
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    // Make sure you close your scanners when you are done!
    if (rsScanner != null) {
      rsScanner.close();
    }
  }
}
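The table variable above is assigned without a local declaration, so it is presumably a field of the enclosing class; a minimal assumed declaration would be:

// Assumed enclosing-class field; not shown in the original snippet.
private static RemoteHTable table;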
Example 7: testSetup
import org.apache.hadoop.hbase.rest.client.RemoteHTable; // import the required package/class
void testSetup() throws IOException {
  this.table = new RemoteHTable(new Client(cluster), conf, tableName);
}
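This setup method relies on cluster, conf, table, and tableName being fields of the enclosing test class; a plausible sketch of those declarations (host, port, and table name assumed) is:

// Assumed surrounding fields; the real test class supplies its own values.
private Cluster cluster = new Cluster().add("localhost", 8080); // placeholder REST endpoint
private Configuration conf = HBaseConfiguration.create();
private String tableName = "testtable"; // placeholder table name
private RemoteHTable table;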