

Java HConnectionManager.createConnection Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HConnectionManager.createConnection. If you are wondering how to use HConnectionManager.createConnection in Java, what it is for, or what working examples look like, the curated code samples here may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.client.HConnectionManager.


Below are 15 code examples of the HConnectionManager.createConnection method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
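
Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: build a Configuration, ask HConnectionManager.createConnection for a connection, obtain a table from it, and close both when done. This is only an illustrative sketch, assuming an HBase 0.96/0.98-era client on the classpath (this API was deprecated in favor of ConnectionFactory in later HBase versions) and an hbase-site.xml reachable from the classpath; the class name, table name "t", and row key "row1" are placeholders, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateConnectionSketch {
    public static void main(String[] args) throws IOException {
        // Reads hbase-site.xml from the classpath; quorum settings could also be set explicitly.
        Configuration conf = HBaseConfiguration.create();

        // createConnection returns a new, caller-owned connection on every call
        // (unlike the older cached getConnection), so the caller must close it.
        HConnection connection = HConnectionManager.createConnection(conf);
        try {
            // Table name "t" and row key "row1" are hypothetical.
            HTableInterface table = connection.getTable("t");
            try {
                Result result = table.get(new Get(Bytes.toBytes("row1")));
                System.out.println("Fetched row: " + result);
            } finally {
                table.close();
            }
        } finally {
            connection.close();
        }
    }
}

Because createConnection hands back an unshared connection, most of the examples below either keep it in a long-lived field or wrap it in their own lifecycle management rather than creating one per request.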

Example 1: main

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
public static void main(String[] args) {

        try {
            Configuration conf = HBaseConfiguration.create();
            // createConnection returns a new connection instance on each call, not a cached singleton
            conn = HConnectionManager.createConnection(conf);

            QueryAll("t");
        } catch (IOException e) {
            e.printStackTrace();
        }


        // createTable("wujintao");
        // insertData("wujintao");

        // QueryByCondition1("wujintao");
        // QueryByCondition2("wujintao");
        //QueryByCondition3("wujintao");
        //deleteRow("wujintao","abcdef");
        //deleteByCondition("wujintao","abcdef");
    }
 
Developer ID: yjp123456, Project: SparkDemo, Lines of code: 22, Source: MyClass.java

Example 2: open

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
/**
 * Connect to HBase
 */
public static void open() {
	try {
		config = HBaseConfiguration.create();
		conn = HConnectionManager.createConnection(config);
		admin = new HBaseAdmin(conn);
		hbase_table = conn.getTable(ISAXIndex.TABLE_NAME);
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Developer ID: ItGql, Project: SparkIsax, Lines of code: 14, Source: HBaseUtils.java

Example 3: init

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
@Override
public void init(Context context) throws IOException {
  super.init(context);
  this.conf = HBaseConfiguration.create(ctx.getConfiguration());
  decorateConf();
  this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
  this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
      maxRetriesMultiplier);
  // TODO: This connection is replication specific or we should make it particular to
  // replication and make replication specific settings such as compression or codec to use
  // passing Cells.
  this.conn = HConnectionManager.createConnection(this.conf);
  this.sleepForRetries =
      this.conf.getLong("replication.source.sleepforretries", 1000);
  this.metrics = context.getMetrics();
  // ReplicationQueueInfo parses the peerId out of the znode for us
  this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
  // per sink thread pool
  this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
    HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);

  this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS,
      new LinkedBlockingQueue<Runnable>());
  this.exec.allowCoreThreadTimeOut(true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: HBaseInterClusterReplicationEndpoint.java

Example 4: initialize

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
@Override
public void initialize(Configuration conf) throws IOException {
  this.conf = conf;
  this.hBaseAdmin = new HBaseAdmin(conf);
  this.connection = HConnectionManager.createConnection(conf);

  final TableName stateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                                          TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  LOG.info("Initializing plugin with state table {}:{}", stateTable.getNamespaceAsString(),
           stateTable.getNameAsString());
  createPruneTable(stateTable);
  this.dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return connection.getTable(stateTable);
    }
  });
}
 
Developer ID: apache, Project: incubator-tephra, Lines of code: 19, Source: HBaseTransactionPruningPlugin.java

Example 5: beforeTest

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
@Before
public void beforeTest() throws Exception {
  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  HTable table = createTable(pruneStateTable.getName(), new byte[][]{DataJanitorState.FAMILY}, false,
                             // Prune state table is a non-transactional table, hence no transaction co-processor
                             Collections.<String>emptyList());
  table.close();
  connection = HConnectionManager.createConnection(conf);

  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public HTableInterface get() throws IOException {
        return connection.getTable(pruneStateTable);
      }
    });

}
 
Developer ID: apache, Project: incubator-tephra, Lines of code: 20, Source: DataJanitorStateTest.java

Example 6: init

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
/**
 * Sets up common resources required by all clients.
 */
public void init() throws IOException {
  Injector injector = Guice.createInjector(
      new ConfigModule(conf),
      new ZKModule(),
      new DiscoveryModules().getDistributedModules(),
      new TransactionModules().getDistributedModules(),
      new TransactionClientModule()
  );

  zkClient = injector.getInstance(ZKClientService.class);
  zkClient.startAndWait();
  txClient = injector.getInstance(TransactionServiceClient.class);

  createTableIfNotExists(conf, TABLE, new byte[][]{ FAMILY });
  conn = HConnectionManager.createConnection(conf);
}
 
Developer ID: apache, Project: incubator-tephra, Lines of code: 20, Source: BalanceBooks.java

Example 7: waitForConnection

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
public synchronized static void waitForConnection(long timeout, TimeUnit timeoutUnit) {
    long before = System.currentTimeMillis();
    long after;
    long timeoutMS = TimeUnit.MILLISECONDS.convert(timeout, timeoutUnit);
    do {
        try {
            HConnection hc = HConnectionManager.createConnection(HBaseConfiguration.create());
            hc.close();
            after = System.currentTimeMillis();
            log.info("HBase server to started after about {} ms", after - before);
            return;
        } catch (IOException e) {
            log.info("Exception caught while waiting for the HBase server to start", e);
        }
        after = System.currentTimeMillis();
    } while (timeoutMS > after - before);
    after = System.currentTimeMillis();
    log.warn("HBase server did not start in {} ms", after - before);
}
 
Developer ID: graben1437, Project: titan1withtp3.1, Lines of code: 20, Source: HBaseStorageSetup.java

Example 8: init

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
@Override
public void init(Context context) throws IOException {
  super.init(context);
  this.conf = HBaseConfiguration.create(ctx.getConfiguration());
  decorateConf();
  this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
  this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
      maxRetriesMultiplier);
  // TODO: This connection is replication specific or we should make it particular to
  // replication and make replication specific settings such as compression or codec to use
  // passing Cells.
  this.conn = HConnectionManager.createConnection(this.conf);
  this.sleepForRetries =
      this.conf.getLong("replication.source.sleepforretries", 1000);
  this.metrics = context.getMetrics();
  // ReplicationQueueInfo parses the peerId out of the znode for us
  this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
}
 
Developer ID: grokcoder, Project: pbase, Lines of code: 19, Source: HBaseInterClusterReplicationEndpoint.java

Example 9: getCurrentConnection

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
private ConnectionInfo getCurrentConnection() throws IOException {
  String userName = effectiveUser.get().getUserName();
  ConnectionInfo connInfo = connections.get(userName);
  if (connInfo == null || !connInfo.updateAccessTime()) {
    Lock lock = locker.acquireLock(userName);
    try {
      connInfo = connections.get(userName);
      if (connInfo == null) {
        User user = userProvider.create(effectiveUser.get());
        HConnection conn = HConnectionManager.createConnection(conf, user);
        connInfo = new ConnectionInfo(conn, userName);
        connections.put(userName, connInfo);
      }
    } finally {
      lock.unlock();
    }
  }
  return connInfo;
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 20, Source: RESTServlet.java

Example 10: initializeAdapter

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
@Override
public boolean initializeAdapter() {

	// Initialize HBase Table
	Configuration conf = null;
	conf = HBaseConfiguration.create();
	conf.set("hbase.zookeeper.quorum", _quorum);
	conf.set("hbase.zookeeper.property.clientPort", _port);

	try {
		LOGGER.debug("=======Connecting to HBASE===========");
		LOGGER.debug("=======ZOOKEEPER = "
				+ conf.get("hbase.zookeeper.quorum"));
		HConnection connection = HConnectionManager.createConnection(conf);
		table = connection.getTable(_tableName);
		return true;
	} catch (IOException e) {
		// TODO Auto-generated catch block
		LOGGER.debug("=======Unable to Connect to HBASE===========");
		e.printStackTrace();
	}

	return false;
}
 
Developer ID: OpenSOC, Project: opensoc-streaming, Lines of code: 25, Source: ThreatHbaseAdapter.java

Example 11: createClusterConncetion

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
/**
 * Creates the cluster connection.
 * 
 * @throws IOException
 *           Signals that an I/O exception has occurred.
 */
private static void createClusterConncetion() throws IOException {
  try {
    if (connectionAvailable()) {
      return;
    }
    clusterConnection = HConnectionManager.createConnection(read());
    addShutdownHook();
    System.out.println("Created HConnection and added shutDownHook");
  } catch (IOException e) {
    LOGGER
        .error(
            "Exception occurred while creating HConnection using HConnectionManager",
            e);
    throw e;
  }
}
 
Developer ID: OpenSOC, Project: opensoc-streaming, Lines of code: 23, Source: HBaseConfigurationUtil.java

Example 12: prepare

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
@Override
public void prepare(Map stormConf, TopologyContext context,
		OutputCollector collector) {
	
	this.collector = collector;
	try {
		this.connection = HConnectionManager.createConnection(constructConfiguration());
		this.dangerousEventsTable = connection.getTable(DANGEROUS_EVENTS_TABLE_NAME);
		this.eventsCountTable = connection.getTable(EVENTS_COUNT_TABLE_NAME);	
		this.eventsTable = connection.getTable(EVENTS_TABLE_NAME);
		
	} catch (Exception e) {
		String errMsg = "Error retrievinging connection and access to dangerousEventsTable";
		LOG.error(errMsg, e);
		throw new RuntimeException(errMsg, e);
	}		
}
 
Developer ID: patw, Project: storm-sample, Lines of code: 18, Source: TruckHBaseBolt.java

Example 13: MultiHConnection

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
/**
 * Create multiple HConnection instances and initialize a thread pool executor
 * @param conf configuration
 * @param noOfConnections total number of HConnections to create
 * @throws IOException
 */
public MultiHConnection(Configuration conf, int noOfConnections)
    throws IOException {
  this.noOfConnections = noOfConnections;
  synchronized (this.hConnectionsLock) {
    hConnections = new HConnection[noOfConnections];
    for (int i = 0; i < noOfConnections; i++) {
      HConnection conn = HConnectionManager.createConnection(conf);
      hConnections[i] = conn;
    }
  }
  createBatchPool(conf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: MultiHConnection.java

Example 14: MultiThreadedAction

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
public MultiThreadedAction(LoadTestDataGenerator dataGen, Configuration conf,
                           TableName tableName,
                           String actionLetter) throws IOException {
  this.conf = conf;
  this.dataGenerator = dataGen;
  this.tableName = tableName;
  this.actionLetter = actionLetter;
  this.connection = HConnectionManager.createConnection(conf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 10, Source: MultiThreadedAction.java

Example 15: initialize

import org.apache.hadoop.hbase.client.HConnectionManager; // import the package/class this method depends on
/**
 * Initialize the Invalid List Debug Tool.
 * @param conf {@link Configuration}
 * @throws IOException when not able to create an HBase connection
 */
@Override
@SuppressWarnings("WeakerAccess")
public void initialize(final Configuration conf) throws IOException {
  LOG.debug("InvalidListPruningDebugMain : initialize method called");
  connection = HConnectionManager.createConnection(conf);
  tableName = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                         TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return connection.getTable(tableName);
    }
  });
}
 
Developer ID: apache, Project: incubator-tephra, Lines of code: 20, Source: InvalidListPruningDebugTool.java


Note: The org.apache.hadoop.hbase.client.HConnectionManager.createConnection method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's License for distribution and use; do not reproduce without permission.