本文整理匯總了Java中org.apache.hadoop.hbase.client.HBaseAdmin.getConfiguration方法的典型用法代碼示例。如果您正苦於以下問題:Java HBaseAdmin.getConfiguration方法的具體用法?Java HBaseAdmin.getConfiguration怎麽用?Java HBaseAdmin.getConfiguration使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.hbase.client.HBaseAdmin
的用法示例。
在下文中一共展示了HBaseAdmin.getConfiguration方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: isMoved
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Checks whether the given region of {@code tableName} is now hosted on the target server.
 *
 * @param admin            admin handle whose configuration is used to open the table
 * @param tableName        table that should contain the region
 * @param regionName       encoded name of the region to look up
 * @param serverNameTarget server name the region is expected to live on
 * @return {@code true} if the region is on the target server, or if the region no longer
 *         exists at all (nothing left to move); {@code false} otherwise or on any I/O error
 */
public static boolean isMoved(HBaseAdmin admin, String tableName, String regionName, String serverNameTarget) {
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    NavigableMap<HRegionInfo, ServerName> locations = table.getRegionLocations();
    for (Map.Entry<HRegionInfo, ServerName> entry : locations.entrySet()) {
      if (entry.getKey().getEncodedName().equals(regionName)) {
        return entry.getValue().getServerName().equals(serverNameTarget);
      }
    }
    // Region vanished from the table's layout entirely: treat the move as done (skip it).
    return !existsRegion(regionName, locations.keySet());
  } catch (IOException e) {
    // Best-effort check: report any I/O failure as "not moved".
    return false;
  }
}
示例2: generateHBaseDatasetCompositeKeyDate
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads one year of rows whose 16-byte row key is a
 * composite of an 8-byte timestamp (ms since epoch) followed by an 8-byte big-endian counter.
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetCompositeKeyDate(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    Date startDate = new Date(1408924800000L); // fixed start instant, ms since epoch
    long startTime = startDate.getTime();
    long MILLISECONDS_IN_A_DAY = (long) 1000 * 60 * 60 * 24;
    long MILLISECONDS_IN_A_YEAR = MILLISECONDS_IN_A_DAY * 365;
    long endTime = startTime + MILLISECONDS_IN_A_YEAR;
    long interval = MILLISECONDS_IN_A_DAY / 3; // three rows per day
    for (long ts = startTime, counter = 0; ts < endTime; ts += interval, counter++) {
      // Bytes 0-7: timestamp; bytes 8-15: big-endian counter to keep keys unique.
      byte[] rowKey = ByteBuffer.allocate(16).putLong(ts).array();
      for (int i = 0; i < 8; ++i) {
        rowKey[8 + i] = (byte) (counter >> (56 - (i * 8)));
      }
      Put p = new Put(rowKey);
      p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
}
示例3: generateHBaseDatasetCompositeKeyInt
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads rows whose 12-byte row key is a composite of a
 * 4-byte int (0, 47, 94, ... below 1000) followed by an 8-byte big-endian counter.
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    int startVal = 0;
    int stopVal = 1000;
    int interval = 47;
    long counter = 0;
    for (int i = startVal; i < stopVal; i += interval, counter++) {
      // Bytes 0-3: the int value; bytes 4-11: big-endian counter to keep keys unique.
      byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();
      for (int j = 0; j < 8; ++j) {
        rowKey[4 + j] = (byte) (counter >> (56 - (j * 8)));
      }
      Put p = new Put(rowKey);
      p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
}
示例4: generateHBaseDatasetDoubleOB
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads rows keyed by OrderedBytes-encoded doubles
 * (0.5 to 100.0 in steps of 0.75, ascending order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    for (double i = 0.5; i <= 100.00; i += 0.75) {
      // OrderedBytes float64 encoding occupies 9 bytes (1 header + 8 payload).
      byte[] bytes = new byte[9];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
          org.apache.hadoop.hbase.util.Order.ASCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例5: generateHBaseDatasetFloatOB
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads rows keyed by OrderedBytes-encoded floats
 * (0.5 to 100.0 in steps of 0.75, ascending order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetFloatOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    for (float i = (float) 0.5; i <= 100.00; i += 0.75) {
      // OrderedBytes float32 encoding occupies 5 bytes (1 header + 4 payload).
      byte[] bytes = new byte[5];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
          org.apache.hadoop.hbase.util.Order.ASCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例6: generateHBaseDatasetBigIntOB
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads 101 rows keyed by OrderedBytes-encoded longs
 * (a fixed millisecond base timestamp through base + 100, ascending order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetBigIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    long startTime = (long) 1438034423 * 1000; // seconds value scaled to milliseconds
    for (long i = startTime; i <= startTime + 100; i++) {
      // OrderedBytes int64 encoding occupies 9 bytes (1 header + 8 payload).
      byte[] bytes = new byte[9];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
          org.apache.hadoop.hbase.util.Order.ASCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例7: generateHBaseDatasetIntOB
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads 150 rows keyed by OrderedBytes-encoded ints
 * (-49 through 100, ascending order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    for (int i = -49; i <= 100; i++) {
      // OrderedBytes int32 encoding occupies 5 bytes (1 header + 4 payload).
      byte[] bytes = new byte[5];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
          org.apache.hadoop.hbase.util.Order.ASCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例8: generateHBaseDatasetDoubleOBDesc
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads rows keyed by OrderedBytes-encoded doubles
 * (0.5 to 100.0 in steps of 0.75, descending key order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    for (double i = 0.5; i <= 100.00; i += 0.75) {
      // OrderedBytes float64 encoding occupies 9 bytes (1 header + 8 payload).
      byte[] bytes = new byte[9];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
          org.apache.hadoop.hbase.util.Order.DESCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例9: generateHBaseDatasetFloatOBDesc
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads rows keyed by OrderedBytes-encoded floats
 * (0.5 to 100.0 in steps of 0.75, descending key order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetFloatOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    for (float i = (float) 0.5; i <= 100.00; i += 0.75) {
      // OrderedBytes float32 encoding occupies 5 bytes (1 header + 4 payload).
      byte[] bytes = new byte[5];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
          org.apache.hadoop.hbase.util.Order.DESCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例10: generateHBaseDatasetBigIntOBDesc
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads 101 rows keyed by OrderedBytes-encoded longs
 * (a fixed millisecond base timestamp through base + 100, descending key order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetBigIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    long startTime = (long) 1438034423 * 1000; // seconds value scaled to milliseconds
    for (long i = startTime; i <= startTime + 100; i++) {
      // OrderedBytes int64 encoding occupies 9 bytes (1 header + 8 payload).
      byte[] bytes = new byte[9];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
          org.apache.hadoop.hbase.util.Order.DESCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例11: generateHBaseDatasetIntOBDesc
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads 150 rows keyed by OrderedBytes-encoded ints
 * (-49 through 100, descending key order).
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    for (int i = -49; i <= 100; i++) {
      // OrderedBytes int32 encoding occupies 5 bytes (1 header + 4 payload).
      byte[] bytes = new byte[5];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
          new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
          org.apache.hadoop.hbase.util.Order.DESCENDING);
      Put p = new Put(bytes);
      p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
      table.put(p);
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例12: generateHBaseDataset2
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads roughly 1000 wide rows of random data: each row key
 * is a letter ('a' + region index) plus an iteration number, and each row carries 5 columns
 * ("c1".."c5" in family "f") of 5000 random bytes.
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split into this many regions when greater than 1; also the number
 *                      of distinct row-key letters written per iteration
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDataset2(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the table layout is deterministic (data is random).
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor("f"));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    int rowCount = 0;
    byte[] bytes = null;
    final int numColumns = 5;
    Random random = new Random();
    int iteration = 0;
    // May slightly overshoot 1000 rows: the inner loop always finishes its region sweep.
    while (rowCount < 1000) {
      char rowKeyChar = 'a';
      for (int i = 0; i < numberRegions; i++) {
        Put p = new Put(("" + rowKeyChar + iteration).getBytes());
        for (int j = 1; j <= numColumns; j++) {
          bytes = new byte[5000];
          random.nextBytes(bytes);
          p.add("f".getBytes(), ("c" + j).getBytes(), bytes);
        }
        table.put(p);
        ++rowKeyChar;
        ++rowCount;
      }
      ++iteration;
    }
    table.flushCommits();
  }
  admin.flush(tableName);
}
示例13: generateHBaseDatasetCompositeKeyTime
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Recreates {@code tableName} and loads one day of rows whose 16-byte row key is a composite
 * of an 8-byte timestamp followed by an 8-byte big-endian counter. Timestamps alternate
 * between a 25 ms step and a 42 s step to produce an uneven time distribution.
 *
 * @param admin         admin handle used to (re)create the table and obtain its configuration
 * @param tableName     name of the table to (re)create
 * @param numberRegions pre-split the table into this many regions when greater than 1
 * @throws Exception if any HBase operation fails
 */
public static void generateHBaseDatasetCompositeKeyTime(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation so the generated dataset is deterministic.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    // numberRegions regions require numberRegions - 1 split keys.
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  // try-with-resources: the original leaked the HTable when a put threw.
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    long startTime = 0;
    long MILLISECONDS_IN_A_SEC = (long) 1000;
    long MILLISECONDS_IN_A_DAY = MILLISECONDS_IN_A_SEC * 60 * 60 * 24;
    long endTime = startTime + MILLISECONDS_IN_A_DAY;
    long smallInterval = 25;
    long largeInterval = MILLISECONDS_IN_A_SEC * 42;
    long interval = smallInterval;
    for (long ts = startTime, counter = 0; ts < endTime; ts += interval, counter++) {
      // Bytes 0-7: timestamp; bytes 8-15: big-endian counter to keep keys unique.
      byte[] rowKey = ByteBuffer.allocate(16).putLong(ts).array();
      for (int i = 0; i < 8; ++i) {
        rowKey[8 + i] = (byte) (counter >> (56 - (i * 8)));
      }
      Put p = new Put(rowKey);
      p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
      table.put(p);
      // Alternate the step size on every row.
      interval = (interval == smallInterval) ? largeInterval : smallInterval;
    }
    table.flushCommits();
  }
}
示例14: CCIndexAdmin
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Constructs a CCIndexAdmin that wraps the given HBaseAdmin and caches its configuration.
 *
 * @param admin the underlying HBaseAdmin to delegate to; its configuration is stored in
 *              {@code this.conf} for later use (declared elsewhere in this class)
 */
public CCIndexAdmin(HBaseAdmin admin) {
this.conf = admin.getConfiguration();
this.admin = admin;
}
示例15: getRegionLocations
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Fetches the region-to-server map of {@code tableName} into the shared
 * {@code regionLocations} cache and marks the table as cached.
 *
 * @param admin     admin handle whose configuration is used to open the table
 * @param tableName table whose region locations should be cached
 * @throws IOException if the table cannot be opened or queried
 */
private static void getRegionLocations(HBaseAdmin admin, String tableName) throws IOException {
  // Open the table only long enough to snapshot its current region layout.
  try (HTable htable = new HTable(admin.getConfiguration(), tableName)) {
    NavigableMap<HRegionInfo, ServerName> locations = htable.getRegionLocations();
    regionLocations.putAll(locations);
    // Marked cached only after the locations were successfully read.
    cachedTableNames.add(tableName);
  }
}