

Java HTable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.HTable. If you are asking yourself how exactly the HTable class is used, how to work with it, or where to find examples of it in practice, the curated class code examples below may help.


The HTable class belongs to the org.apache.hadoop.hbase.client package. The sections below present 15 code examples of the HTable class, sorted by popularity by default.
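
As a quick orientation before the numbered examples, here is a minimal sketch of a basic put/get round trip against an HTable. It is not drawn from any of the projects below; it assumes the HBase 1.x client API, and the names "demo_table", "cf", "q1", and "row1" are placeholders for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableBasicUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "demo_table", "cf", and "q1" are placeholder names, not taken from the examples below.
    HTable table = new HTable(conf, TableName.valueOf("demo_table"));
    try {
      // Write one cell.
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
      table.put(put);

      // Read it back.
      Get get = new Get(Bytes.toBytes("row1"));
      Result result = table.get(get);
      System.out.println("value: "
          + Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q1"))));
    } finally {
      table.close();
    }
  }
}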

Example 1: wipeOutMeta

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
protected void wipeOutMeta() throws IOException {
  // Mess it up by blowing up meta.
  Admin admin = TEST_UTIL.getHBaseAdmin();
  Scan s = new Scan();
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(s);
  List<Delete> dels = new ArrayList<Delete>();
  for (Result r : scanner) {
    HRegionInfo info = HRegionInfo.getHRegionInfo(r);
    if (info != null && !info.getTable().getNamespaceAsString()
        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
      Delete d = new Delete(r.getRow());
      dels.add(d);
      admin.unassign(r.getRow(), true);
    }
  }
  meta.delete(dels);
  scanner.close();
  meta.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: OfflineMetaRebuildTestCore.java

Example 2: QueryByCondition2

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
public static void QueryByCondition2(String tableName) {
    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        // Query rows where column1 has the value "aaa".
        Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
        Scan s = new Scan();
        s.setFilter(filter);
        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column: " + new String(keyValue.getFamily())
                        + " ==== value: " + new String(keyValue.getValue()));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: yjp123456, Project: SparkDemo, Lines: 24, Source: MyClass.java

Example 3: setupTableWithRegionReplica

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
/**
 * Sets up a clean table with the given region replica count.
 *
 * Sets tbl, which needs to be closed after the test.
 *
 * @param tablename
 * @param replicaCount
 * @throws Exception
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  tbl.put(puts);
  tbl.flushCommits();
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestHBaseFsck.java

Example 4: getAllRecord

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
/**
 * Gets and prints all records from an existing table by scanning every row.
 *
 * @param myHbaseBtableName name of the HBase table to scan
 */
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    if (hbaseBSs != null) hbaseBSs.close(); // close the scanner
  }
}
 
Developer: PacktPublishing, Project: HBase-High-Performance-Cookbook, Lines: 31, Source: HBaseRegularClient.java

Example 5: run

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
@Override
public int run(String[] args) throws Exception {
  if (args.length != 1) {
    System.out.println("Usage : " + Delete.class.getSimpleName() + " <node to delete>");
    return 0;
  }
  byte[] val = Bytes.toBytesBinary(args[0]);

  org.apache.hadoop.hbase.client.Delete delete
    = new org.apache.hadoop.hbase.client.Delete(val);

  Table table = new HTable(getConf(), getTableName(getConf()));
  table.delete(delete);
  table.close();

  System.out.println("Delete successful");
  return 0;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: IntegrationTestBigLinkedList.java

Example 6: setUpBeforeClass

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // switch TIF to log at DEBUG level
  TEST_UTIL.enableDebug(MultiTableInputFormat.class);
  TEST_UTIL.enableDebug(MultiTableInputFormatBase.class);
  TEST_UTIL.setJobWithoutMRCluster();
  // start mini hbase cluster
  TEST_UTIL.startMiniCluster(3);
  // create and fill table
  for (int i = 0; i < 3; i++) {
    try (HTable table =
        TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME + String.valueOf(i)),
          INPUT_FAMILY, 4)) {
      TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestMultiTableInputFormat.java

Example 7: putLob

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
/**
 * Uploads an object to a LOB.
 * @param tableName Hyperbase table name
 * @param row rowkey as a String (converted to bytes)
 * @param filename file name
 * @param fileData file contents
 */
public void putLob(String tableName, String row, String filename, byte[] fileData){
    byte[] rowkey = Bytes.toBytes(row);
    try {
        HTable htable = new HTable(conf, tableName);
        Put put = new Put(rowkey);
        put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put);
        htable.flushCommits();
        htable.close();
    } catch (IOException e1) {
        e1.printStackTrace();
    }
}
 
Developer: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 23, Source: LobUtil.java

Example 8: process

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
@Override
public void process(Object[] record) throws HiveException {
    final String document = (String) stringOI.getPrimitiveJavaObject(record[0]);

    if (document == null) {
        return;
    }

    String[] tokens = document.split(",");
    String[] results = tokens[1].split(" ");

    try {
        hTable = new HTable(conf, "bi");
        Get get = new Get(Bytes.toBytes(tokens[0]));
        result = hTable.exists(get);
    } catch (Exception e) {
        e.printStackTrace();
    }

    if (!result) {
        for (String r : results) {
            forward(new Object[]{tokens[0], r});
        }
    }
}
 
Developer: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 26, Source: udtfCheck.java

Example 9: runTest

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {

  for (boolean managed : new boolean[] { true, false }) {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
          + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    if (!util.getHBaseAdmin().tableExists(tableName)) {
      util.getHBaseAdmin().createTable(htd);
    }
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());

    if (managed) {
      try (HTable table = new HTable(util.getConfiguration(), tableName)) {
        loader.doBulkLoad(dir, table);
        assertEquals(expectedRows, util.countRows(table));
      }
    } else {
      try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
          HTable table = (HTable) conn.getTable(tableName)) {
        loader.doBulkLoad(dir, table);
      }
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
            file.getPath().getName() != "DONOTERASE");
      }
    }

    util.deleteTable(tableName);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestLoadIncrementalHFiles.java

Example 10: insertData

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
private static int insertData(TableName tableName, String column, double prob) throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));

  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
        + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestScannersWithLabels.java

Example 11: jobSetup

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
@Override
protected void jobSetup(Job job) throws IOException, ImportException {
  super.jobSetup(job);

  // We shouldn't have gotten here if the bulk load dir is not set,
  // so throw an ImportException.
  if(getContext().getDestination() == null){
    throw new ImportException("Can't run HBaseBulkImportJob without a " +
        "valid destination directory.");
  }

  TableMapReduceUtil.addDependencyJars(job.getConfiguration(), Preconditions.class);
  FileOutputFormat.setOutputPath(job, getContext().getDestination());
  HTable hTable = new HTable(job.getConfiguration(), options.getHBaseTable());
  HFileOutputFormat.configureIncrementalLoad(job, hTable);
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 17, Source: HBaseBulkImportJob.java

Example 12: runIncrementalPELoad

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
private void runIncrementalPELoad(
    Configuration conf, HTable table, Path outDir)
throws Exception {
  Job job = new Job(conf, "testLocalMRIncrementalLoad");
  job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
  job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());
  setupRandomGeneratorMapper(job);
  HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(),
      table.getRegionLocator());
  FileOutputFormat.setOutputPath(job, outDir);

  Assert.assertFalse(util.getTestFileSystem().exists(outDir));

  assertEquals(table.getRegionLocator().getAllRegionLocations().size(), job.getNumReduceTasks());

  assertTrue(job.waitForCompletion(true));
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestHFileOutputFormat.java

Example 13: setUpBeforeClass

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.setTEST_FORCE_REFRESH(true);

  tables = new HTable[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestQuotaThrottle.java

Example 14: setHTable

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
/**
 * Allows subclasses to set the {@link HTable}.
 *
 * Will attempt to reuse the underlying Connection for our own needs, including
 * retrieving an Admin interface to the HBase cluster.
 *
 * @param table  The table to get the data from.
 * @throws IOException 
 * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
 */
@Deprecated
protected void setHTable(HTable table) throws IOException {
  this.table = table;
  this.connection = table.getConnection();
  try {
    this.regionLocator = table.getRegionLocator();
    this.admin = this.connection.getAdmin();
  } catch (NeedUnmanagedConnectionException exception) {
    LOG.warn("You are using an HTable instance that relies on an HBase-managed Connection. " +
        "This is usually due to directly creating an HTable, which is deprecated. Instead, you " +
        "should create a Connection object and then request a Table instance from it. If you " +
        "don't need the Table instance for your own use, you should instead use the " +
        "TableInputFormatBase.initalizeTable method directly.");
    LOG.info("Creating an additional unmanaged connection because user provided one can't be " +
        "used for administrative actions. We'll close it when we close out the table.");
    LOG.debug("Details about our failure to request an administrative interface.", exception);
    // Do we need a "copy the settings from this Connection" method? are things like the User
    // properly maintained by just looking again at the Configuration?
    this.connection = ConnectionFactory.createConnection(this.connection.getConfiguration());
    this.regionLocator = this.connection.getRegionLocator(table.getName());
    this.admin = this.connection.getAdmin();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TableInputFormatBase.java
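
The deprecation warning in Example 14 recommends creating a Connection and requesting a Table from it rather than constructing an HTable directly. A minimal sketch of that recommended pattern, assuming the HBase 1.x client API ("demo_table" and "row1" are placeholders), might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class UnmanagedConnectionUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The Connection is user-managed; try-with-resources closes both it and the Table.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      System.out.println(table.get(get).isEmpty() ? "row not found" : "row exists");
    }
  }
}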

Example 15: beforeAllTests

import org.apache.hadoop.hbase.client.HTable; // import the required package/class
@BeforeClass public static void beforeAllTests() throws Exception {
  Configuration c = TEST_UTIL.getConfiguration();
  c.setBoolean("hbase.assignment.usezk", true);
  c.setBoolean("dfs.support.append", true);
  c.setInt("hbase.regionserver.info.port", 0);
  TEST_UTIL.startMiniCluster(2);
  TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  countOfRegions = -1;
  try (RegionLocator r = t.getRegionLocator()) {
    countOfRegions = r.getStartKeys().length;
  }
  waitUntilAllRegionsAssigned();
  addToEachStartKey(countOfRegions);
  t.close();
  TEST_UTIL.getHBaseCluster().getMaster().assignmentManager.initializeHandlerTrackers();
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestZKBasedOpenCloseRegion.java


Note: The org.apache.hadoop.hbase.client.HTable class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.