

Java ConnectionFactory.createConnection Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.ConnectionFactory.createConnection. If you have been wondering what exactly ConnectionFactory.createConnection does, how to use it, or where to find examples of it in real code, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.ConnectionFactory.


Below are 15 code examples of the ConnectionFactory.createConnection method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
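
Before the collected examples, a minimal sketch of the canonical pattern they all share may help: build a Configuration, create one heavyweight, thread-safe Connection from it, derive lightweight Table instances, and close both when done. The table name "demo", column family, and row key below are placeholders, not taken from any of the cited projects.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionFactoryDemo {
  public static void main(String[] args) throws IOException {
    // Reads hbase-site.xml / hbase-default.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Connection is heavyweight and thread-safe: create it once and share it.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         // Table is lightweight and not thread-safe: one per task, closed after use.
         Table table = connection.getTable(TableName.valueOf("demo"))) {
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println(result.isEmpty() ? "no row" : Bytes.toStringBinary(result.value()));
    }
  }
}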

Example 1: removeUserPermission

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
/**
 * Removes a previously granted permission from the stored access control
 * lists.  The {@link TablePermission} being removed must exactly match what
 * is stored -- no wildcard matching is attempted. I.e., if user "bob" has
 * been granted "READ" access to the "data" table, but only to column family
 * plus qualifier "info:colA", then trying to call this method with only
 * user "bob" and the table name "data" (but without specifying the
 * column qualifier "info:colA") will have no effect.
 *
 * @param conf the configuration
 * @param userPerm the details of the permission to be revoked
 * @throws IOException if there is an error accessing the metadata table
 */
static void removeUserPermission(Configuration conf, UserPermission userPerm)
    throws IOException {
  Delete d = new Delete(userPermissionRowKey(userPerm));
  byte[] key = userPermissionKey(userPerm);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing permission "+ userPerm.toString());
  }
  d.addColumns(ACL_LIST_FAMILY, key);
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
      table.delete(d);
    }
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 30 | Source: AccessControlLists.java
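
The exact-match rule described in the javadoc above means a revoke must repeat the family and qualifier of the original grant. A hypothetical sketch of the distinction, using the HBase 1.x UserPermission constructor (the user, table, and column names are illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

// Matches a grant of READ on table "data", family "info", qualifier "colA".
UserPermission exactMatch = new UserPermission(Bytes.toBytes("bob"),
    TableName.valueOf("data"), Bytes.toBytes("info"), Bytes.toBytes("colA"),
    Permission.Action.READ);
// Omitting the qualifier yields a different stored key, so removeUserPermission
// would find nothing to delete for the grant above.
UserPermission tooCoarse = new UserPermission(Bytes.toBytes("bob"),
    TableName.valueOf("data"), (byte[]) null, (byte[]) null, Permission.Action.READ);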

Example 2: updateMetaWithFavoredNodesInfo

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
/**
 * Update the meta table with favored nodes info.
 * @param regionToFavoredNodes map from each region to its favored nodes
 * @param conf configuration used to connect to the meta table
 * @throws IOException if writing to the meta table fails
 */
public static void updateMetaWithFavoredNodesInfo(
    Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
    Configuration conf) throws IOException {
  List<Put> puts = new ArrayList<Put>();
  for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
    Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
    if (put != null) {
      puts.add(put);
    }
  }
  // Write the region assignments to the meta table.
  // TODO: The overrides above take a Connection rather than a Configuration, but that
  // Connection is a short-circuit connection, which is not going to be good in all cases,
  // e.g. when master and meta are not colocated. Fix when this favored nodes feature is
  // actually used someday.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
      metaTable.put(puts);
    }
  }
  LOG.info("Added " + puts.size() + " regions in META");
}
 
Developer: fengchen8086 | Project: ditb | Lines: 29 | Source: FavoredNodeAssignmentHelper.java

Example 3: setup

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  id = Bytes.toBytes("Job: " + context.getJobID() + " Task: " + context.getTaskAttemptID());
  Configuration conf = context.getConfiguration();
  connection = ConnectionFactory.createConnection(conf);
  instantiateHTable();
  this.width = context.getConfiguration().getInt(GENERATOR_WIDTH_KEY, WIDTH_DEFAULT);
  current = new byte[this.width][];
  int wrapMultiplier = context.getConfiguration().getInt(GENERATOR_WRAP_KEY, WRAP_DEFAULT);
  this.wrap = (long)wrapMultiplier * width;
  this.numNodes = context.getConfiguration().getLong(
      GENERATOR_NUM_ROWS_PER_MAP_KEY, (long)WIDTH_DEFAULT * WRAP_DEFAULT);
  if (this.numNodes < this.wrap) {
    this.wrap = this.numNodes;
  }
  this.multipleUnevenColumnFamilies = isMultiUnevenColumnFamilies(context.getConfiguration());
}
 
Developer: fengchen8086 | Project: ditb | Lines: 18 | Source: IntegrationTestBigLinkedList.java
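
setup() above opens a Connection that lives for the whole map task, so the mapper must release it when the task ends. A sketch of the matching cleanup(), assuming only the connection field shown above (the real IntegrationTestBigLinkedList mapper also flushes its buffered table writes first):

@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  // Close the cluster connection opened in setup(); a leaked Connection keeps
  // its ZooKeeper session and RPC threads alive for the rest of the JVM's life.
  if (connection != null) {
    connection.close();
  }
}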

Example 4: testScanForSuperUserWithFewerLabelAuths

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
@Test
public void testScanForSuperUserWithFewerLabelAuths() throws Throwable {
  String[] auths = { SECRET };
  String user = "admin";
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    VisibilityClient.setAuths(conn, auths, user);
  }
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL
      + "&!" + PRIVATE, SECRET + "&!" + PRIVATE);
  PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Table t = connection.getTable(table.getName())) {
        ResultScanner scanner = t.getScanner(s);
        Result[] result = scanner.next(5);
        assertTrue(result.length == 2);
      }
      return null;
    }
  };
  SUPERUSER.runAs(scanAction);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 26 | Source: TestVisibilityLabelsWithACL.java

Example 5: testTokenAuth

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
private void testTokenAuth(Class<? extends RpcClient> rpcImplClass) throws IOException,
    ServiceException {
  TEST_UTIL.getConfiguration().set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
    rpcImplClass.getName());
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Table table = conn.getTable(TableName.META_TABLE_NAME)) {
    CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
    WhoAmIResponse response = service.whoAmI(null, WhoAmIRequest.getDefaultInstance());
    assertEquals(USERNAME, response.getUsername());
    assertEquals(AuthenticationMethod.TOKEN.name(), response.getAuthMethod());
    try {
      service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance());
    } catch (ServiceException e) {
      AccessDeniedException exc = (AccessDeniedException) ProtobufUtil.getRemoteException(e);
      assertTrue(exc.getMessage().contains(
        "Token generation only allowed for Kerberos authenticated clients"));
    }
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 22 | Source: TestGenerateDelegationToken.java

Example 6: testNamespaceUserGrant

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
@Test (timeout=180000)
public void testNamespaceUserGrant() throws Exception {
  AccessTestAction getAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        return t.get(new Get(TEST_ROW));
      }
    }
  };

  String namespace = TEST_TABLE.getNamespaceAsString();

  // Grant namespace READ to USER_NONE; this should supersede any table permissions.
  grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ);
  // Now USER_NONE should be able to read
  verifyAllowed(getAction, USER_NONE);

  // Revoke namespace READ from USER_NONE
  revokeFromNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ);
  verifyDenied(getAction, USER_NONE);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 24 | Source: TestAccessController.java

Example 7: testLotsOfRegionReplicas

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
/**
 * Tests the case where we are creating a table with a lot of regions and replicas. Opening region
 * replicas should not block handlers on RS indefinitely.
 */
@Test (timeout = 120000)
public void testLotsOfRegionReplicas() throws IOException {
  int numRegions = NB_SERVERS * 20;
  int regionReplication = 10;
  String tableName = htd.getTableName().getNameAsString() + "2";
  htd = HTU.createTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);

  // don't care about the splits themselves too much
  byte[] startKey = Bytes.toBytes("aaa");
  byte[] endKey = Bytes.toBytes("zzz");
  byte[][] splits = HTU.getRegionSplitStartKeys(startKey, endKey, numRegions);
  HTU.getHBaseAdmin().createTable(htd, startKey, endKey, numRegions);

  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(htd.getTableName())) {

    for (int i = 1; i < splits.length; i++) {
      for (int j = 0; j < regionReplication; j++) {
        Get get = new Get(splits[i]);
        get.setConsistency(Consistency.TIMELINE);
        get.setReplicaId(j);
        table.get(get); // this should not block. Regions should be coming online
      }
    }
  }

  HTU.deleteTableIfAny(TableName.valueOf(tableName));
}
 
Developer: fengchen8086 | Project: ditb | Lines: 34 | Source: TestRegionReplicaFailover.java

Example 8: runTest

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
private void runTest(final Class<? extends Test> cmd, TestOptions opts) throws IOException,
    InterruptedException, ClassNotFoundException {
  // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do
  // the TestOptions introspection for us and dump the output in a readable format.
  LOG.info(cmd.getSimpleName() + " test run options=" + MAPPER.writeValueAsString(opts));
  try(Connection conn = ConnectionFactory.createConnection(getConf());
      Admin admin = conn.getAdmin()) {
    checkTable(admin, opts);
  }
  if (opts.nomapred) {
    doLocalClients(opts, getConf());
  } else {
    doMapReduce(opts, getConf());
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 16 | Source: PerformanceEvaluation.java

Example 9: testRegionReplicaReplicationForFlushAndCompaction

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
@Test (timeout = 240000)
public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception {
  // Tests a table with region replication 3. Writes some data, and causes flushes and
  // compactions. Verifies that the data is readable from the replicas. Note that this
  // does not test whether the replicas actually pick up flushed files and apply compaction
  // to their stores
  int regionReplication = 3;
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationForFlushAndCompaction");
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);

  try {
    // load the data to the table

    for (int i = 0; i < 6000; i += 1000) {
      LOG.info("Writing data from " + i + " to " + (i+1000));
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, i, i+1000);
      LOG.info("flushing table");
      HTU.flush(tableName);
      LOG.info("compacting table");
      HTU.compact(tableName, false);
    }

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    connection.close();
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 34 | Source: TestRegionReplicaReplicationEndpoint.java
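
Example 9 closes the table and connection with an explicit try/finally; the try-with-resources form used by most other examples here is equivalent. A sketch of the same open/load/close skeleton:

try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
     Table table = connection.getTable(tableName)) {
  // Resources declared later close first, so the Table is closed before
  // the Connection that produced it.
  HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
}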

Example 10: testVisibilityLabelsWithDeleteFamilyVersion

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
@Test
public void testVisibilityLabelsWithDeleteFamilyVersion() throws Exception {
  setAuths();
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  long[] ts = new long[] { 123L, 125L };
  try (Table table = createTableAndWriteDataWithLabels(tableName, ts,
      CONFIDENTIAL + "|" + TOPSECRET, SECRET)) {
    PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tableName)) {
          Delete d = new Delete(row1);
          d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL));
          d.deleteFamilyVersion(fam, 123L);
          table.delete(d);
        } catch (Throwable t) {
          throw new IOException(t);
        }
        return null;
      }
    };
    SUPERUSER.runAs(actiona);

    TEST_UTIL.getHBaseAdmin().flush(tableName);
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 1);
    CellScanner cellScanner = next[0].cellScanner();
    cellScanner.advance();
    Cell current = cellScanner.current();
    assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(),
        current.getRowLength(), row2, 0, row2.length));
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 38 | Source: TestVisibilityLabelsWithDeletes.java

Example 11: AggregationClient

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
/**
 * Constructor taking a Configuration object.
 * @param cfg configuration used to create the underlying cluster Connection
 */
public AggregationClient(Configuration cfg) {
  try {
    // Create a connection on construction. Will use it making each of the calls below.
    this.connection = ConnectionFactory.createConnection(cfg);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 13 | Source: AggregationClient.java
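
Because the constructor above owns a Connection for the client's whole lifetime, a caller should create one AggregationClient, run its coprocessor-backed calls, and close it. A hypothetical usage sketch, assuming the HBase 1.x AggregationClient (which implements Closeable) and that the AggregateImplementation coprocessor is loaded on the target table; the table and family names are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

// rowCount declares "throws Throwable", hence this helper's signature.
static long countRows(Configuration conf) throws Throwable {
  try (AggregationClient aggregationClient = new AggregationClient(conf)) {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf")); // aggregation calls expect a single column family
    return aggregationClient.rowCount(TableName.valueOf("demo"),
        new LongColumnInterpreter(), scan);
  } // close() releases the Connection created in the constructor
}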

Example 12: testSecondaryRegionWithEmptyRegion

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
/**
 * Tests that for a newly created table with region replicas and no data, the secondary
 * region replicas are available to read immediately.
 */
@Test(timeout = 60000)
public void testSecondaryRegionWithEmptyRegion() throws IOException {
  // Create a new table with region replication, don't put any data. Test that the secondary
  // region replica is available to read.
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(htd.getTableName())) {

    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);
    get.setReplicaId(1);
    table.get(get); // this should not block
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 18 | Source: TestRegionReplicaFailover.java

Example 13: testVisibilityLabelsForUserWithNoAuths

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
@Test
public void testVisibilityLabelsForUserWithNoAuths() throws Throwable {
  String user = "admin";
  String[] auths = { SECRET };
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    VisibilityClient.clearAuths(conn, auths, user); // Removing all auths if any.
    VisibilityClient.setAuths(conn, auths, "user1");
  }
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName, SECRET);
  SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER1.getShortName(), tableName,
    null, null, Permission.Action.READ);
  SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName,
    null, null, Permission.Action.READ);
  PrivilegedExceptionAction<Void> getAction = new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Get g = new Get(row1);
      g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Table t = connection.getTable(table.getName())) {
        Result result = t.get(g);
        assertTrue(result.isEmpty());
      }
      return null;
    }
  };
  NORMAL_USER2.runAs(getAction);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 29 | Source: TestVisibilityLabelsWithACL.java

Example 14: getConnection

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
public static Connection getConnection() throws IOException {
    return ConnectionFactory.createConnection(getConfiguration());
}
 
Developer: TFdream | Project: hbase-tutorials | Lines: 5 | Source: HBaseConnectionUtils.java
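
The utility above hands ownership of the Connection to its caller, and nothing closes it automatically; a typical caller therefore wraps it in try-with-resources. A hypothetical sketch, reusing the HBaseConnectionUtils class above with a placeholder table name:

try (Connection connection = HBaseConnectionUtils.getConnection();
     Table table = connection.getTable(TableName.valueOf("demo"))) {
  // Both the Table and the Connection are closed when this block exits.
  Result result = table.get(new Get(Bytes.toBytes("row1")));
  System.out.println("empty? " + result.isEmpty());
}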

Example 15: setUp

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the class this method depends on
/**
 * Test setup: creates a randomly named table with a single column family.
 * @throws Exception if connecting to the cluster or creating the table fails
 */
@BeforeClass
public void setUp() throws Exception {
    final String name = "testTable" + new Random().nextInt(5000);
    tableName = TableName.valueOf(name);
    final HConfig config = HConfig.newBuilder()
                                  .retryCount(5)
                                  .retryBackoff(3000)
                                  .scanBatchSize(50)
                                  .scanCacheSize(50)
                                  .zkQuorum("localhost:2181")
                                  .connectionThreads(4)
                                  .metaLookupThreads(2)
                                  .metaOperationTimeout(5000)
                                  .metricsEnabled(true)
                                  .operationTimeout(5000)
                                  .perRegionMaxTasks(20)
                                  .perServerMaxTasks(40)
                                  .rpcTimeout(7000)
                                  .scannerTimeout(20000)
                                  .threadPoolMaxTasks(100)
                                  .zkSessionTimeout(15000)
                                  .znode("/hbase")
                                  .build();
    final Connection connection = ConnectionFactory.createConnection(config.asConfiguration());

    this.testTable = connection.getTable(this.tableName);
    admin = connection.getAdmin();
    final HColumnDescriptor cfTestDesc
            = new HColumnDescriptor(TEST_CF)
            .setBloomFilterType(BloomType.ROW)
            .setCompactionCompressionType(Compression.Algorithm.SNAPPY)
            .setCompressionType(Compression.Algorithm.SNAPPY)
            .setDataBlockEncoding(DataBlockEncoding.PREFIX)
            .setVersions(1, 1);
    final HTableDescriptor descriptor
            = new HTableDescriptor(tableName)
            .setCompactionEnabled(true)
            .setDurability(Durability.SYNC_WAL)
            .addFamily(cfTestDesc);
    admin.createTable(descriptor);
}
 
Developer: i-knowledge | Project: hbase-client | Lines: 46 | Source: BatchIt.java
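
HConfig is a builder from the i-knowledge/hbase-client project, not part of the stock HBase API. As an assumption (the actual mapping lives inside HConfig), several of its settings correspond to standard HBase client configuration keys, so a plain-Configuration equivalent of part of the setup above might look like:

Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", "localhost:2181");      // zkQuorum(...)
conf.set("zookeeper.znode.parent", "/hbase");              // znode(...)
conf.setInt("hbase.client.retries.number", 5);             // retryCount(...)
conf.setInt("hbase.client.pause", 3000);                   // retryBackoff(...)
conf.setInt("hbase.rpc.timeout", 7000);                    // rpcTimeout(...)
conf.setInt("hbase.client.operation.timeout", 5000);       // operationTimeout(...)
conf.setInt("zookeeper.session.timeout", 15000);           // zkSessionTimeout(...)
conf.setInt("hbase.client.scanner.timeout.period", 20000); // scannerTimeout(...)
Connection connection = ConnectionFactory.createConnection(conf);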

