

Java ConnectionFactory Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.client.ConnectionFactory. If you have been wondering what ConnectionFactory is for, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.


The ConnectionFactory class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
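Before working through the examples, it helps to have the canonical pattern in mind: ConnectionFactory.createConnection builds a heavyweight, thread-safe Connection that should be created once and shared, and lightweight Table handles are then requested from it per task. Below is a minimal sketch of that create-use-close pattern; the table name, row key, and class name are placeholders for illustration, not drawn from the examples.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionFactoryBasics {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The Connection is heavyweight and thread-safe: create once, share, close at shutdown.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         // Table handles are lightweight and not thread-safe: one per task, closed promptly.
         Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
      Result result = table.get(new Get(Bytes.toBytes("someRow")));
      System.out.println("Row present: " + !result.isEmpty());
    }
  }
}

Most of the fifteen examples below are variations on this pattern, usually managed with try-with-resources exactly as shown.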

Example 1: main

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
public static void main(String[] argc) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));

  if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
    conf.set("mapreduce.job.credentials.binary",
             System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
  }

  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();

  System.out.println("Compacting table " + argc[0]);
  TableName tableName = TableName.valueOf(argc[0]);
  admin.majorCompact(tableName);
  // Compare with equals(): == on Strings tests reference identity, so the loop body would never run.
  while ("MAJOR".equals(admin.getCompactionState(tableName).toString())) {
    TimeUnit.SECONDS.sleep(10);
    System.out.println("Compacting table " + argc[0]);
  }
  System.out.println("Done compacting table " + argc[0]);
}
 
Developer: cbaenziger, Project: Oozie_MajorCompaction_Example, Lines: 22, Source: MajorCompaction.java
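The polling loop in Example 1 compares the compaction state's string form. Comparing enum constants directly avoids string handling altogether; here is a minimal sketch, assuming the HBase 1.x API in which Admin.getCompactionState returns AdminProtos.GetRegionInfoResponse.CompactionState (the helper class and method are hypothetical, named here only for illustration):

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;

public final class CompactionWait {
  // Hypothetical helper: block until the table's major compaction finishes,
  // polling every 10 seconds like the loop in Example 1.
  public static void awaitMajorCompaction(Admin admin, TableName tableName)
      throws IOException, InterruptedException {
    while (admin.getCompactionState(tableName) == CompactionState.MAJOR) {
      TimeUnit.SECONDS.sleep(10);
    }
  }
}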

Example 2: verifyUserDeniedForDeleteExactVersion

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
private void verifyUserDeniedForDeleteExactVersion(final User user, final byte[] row,
    final byte[] q1, final byte[] q2) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Delete d = new Delete(row, 127);
          d.addColumns(TEST_FAMILY1, q1);
          d.addColumns(TEST_FAMILY1, q2);
          d.addFamily(TEST_FAMILY2, 129);
          t.delete(d);
          fail(user.getShortName() + " should not have been able to delete");
        } catch (Exception e) {
          // expected: the delete is rejected for this user
        }
      }
      return null;
    }
  });
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestCellACLWithMultipleVersions.java

Example 3: verifyUserAllowedforCheckAndDelete

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
private void verifyUserAllowedforCheckAndDelete(final User user, final byte[] row,
    final byte[] q1, final byte[] value) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Delete d = new Delete(row);
          d.addColumn(TEST_FAMILY1, q1, 120);
          t.checkAndDelete(row, TEST_FAMILY1, q1, value, d);
        }
      }
      return null;
    }
  });
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestCellACLWithMultipleVersions.java

Example 4: configure

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    TableName tableName = TableName.valueOf("exampleJobConfigurableTable");
    // mandatory
    initializeTable(connection, tableName);
    byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
    //optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to initialize.", exception);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestTableInputFormat.java

Example 5: verifyUserDeniedForCheckAndDelete

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
private void verifyUserDeniedForCheckAndDelete(final User user, final byte[] row,
    final byte[] value) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Delete d = new Delete(row);
          d.addColumns(TEST_FAMILY1, TEST_Q1);
          t.checkAndDelete(row, TEST_FAMILY1, TEST_Q1, value, d);
          fail(user.getShortName() + " should not be allowed to do checkAndDelete");
        } catch (Exception e) {
          // expected: checkAndDelete is rejected for this user
        }
      }
      return null;
    }
  });
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestCellACLWithMultipleVersions.java

Example 6: testSocketClosed

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
@Test(expected=RetriesExhaustedException.class)
public void testSocketClosed() throws IOException, InterruptedException {
  String tableName = "testSocketClosed";
  TableName name = TableName.valueOf(tableName);
  UTIL.createTable(name, fam1).close();

  Configuration conf = new Configuration(UTIL.getConfiguration());
  conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
    MyRpcClientImpl.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf(tableName));
  table.get(new Get("asd".getBytes()));
  connection.close();
  for (Socket socket : MyRpcClientImpl.savedSockets) {
    assertTrue("Socket + " +  socket + " is not closed", socket.isClosed());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestRpcClientLeaks.java

Example 7: initialize

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
    Bytes.toBytes("columnB") };
  //optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestTableInputFormat.java

Example 8: addLabels

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
public static void addLabels() throws Exception {
  PrivilegedExceptionAction<VisibilityLabelsResponse> action =
      new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
    public VisibilityLabelsResponse run() throws Exception {
      String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, COPYRIGHT, ACCENT,
          UNICODE_VIS_TAG, UC1, UC2 };
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        VisibilityClient.addLabels(conn, labels);
      } catch (Throwable t) {
        throw new IOException(t);
      }
      return null;
    }
  };
  SUPERUSER.runAs(action);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestVisibilityLabels.java

Example 9: bulkLoadHFile

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
private void bulkLoadHFile(
    TableName tableName,
    byte[] family,
    byte[] qualifier,
    byte[][][] hfileRanges,
    int numRowsPerRange) throws Exception {

  Path familyDir = new Path(loadPath, Bytes.toString(family));
  fs.mkdirs(familyDir);
  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    createHFile(new Path(familyDir, "hfile_"+(hfileIdx++)),
        family, qualifier, from, to, numRowsPerRange);
  }
  //set global read so RegionServer can move it
  setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx"));

  try (Connection conn = ConnectionFactory.createConnection(conf);
       HTable table = (HTable)conn.getTable(tableName)) {
    TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
    loader.doBulkLoad(loadPath, table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestAccessController.java

Example 10: testSecondaryRegionWithNonEmptyRegion

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
/**
 * Tests that when the primary region holds some data, reopening the region replicas
 * (e.g. by disabling and re-enabling the table) keeps the replicas readable.
 * @throws IOException
 */
@Test(timeout = 60000)
public void testSecondaryRegionWithNonEmptyRegion() throws IOException {
  // Create a new table with region replication and load some data
  // then disable and enable the table again and verify the data from the secondary
  try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
      Table table = connection.getTable(htd.getTableName())) {

    HTU.loadNumericRows(table, fam, 0, 1000);

    HTU.getHBaseAdmin().disableTable(htd.getTableName());
    HTU.getHBaseAdmin().enableTable(htd.getTableName());

    HTU.verifyNumericRows(table, fam, 0, 1000, 1);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestRegionReplicaFailover.java

Example 11: loadAll

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
/**
 * Load all permissions from the region server holding {@code _acl_},
 * primarily intended for testing purposes.
 */
static Map<byte[], ListMultimap<String,TablePermission>> loadAll(
    Configuration conf) throws IOException {
  Map<byte[], ListMultimap<String,TablePermission>> allPerms =
      new TreeMap<byte[], ListMultimap<String,TablePermission>>(Bytes.BYTES_RAWCOMPARATOR);

  // do a full scan of _acl_, filtering on only first table region rows

  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);

  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(ACL_TABLE_NAME);
       ResultScanner scanner = table.getScanner(scan)) {
    for (Result row : scanner) {
      ListMultimap<String,TablePermission> resultPerms = parsePermissions(row.getRow(), row);
      allPerms.put(row.getRow(), resultPerms);
    }
  }

  return allPerms;
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: AccessControlLists.java

Example 12: getPermissions

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
/**
 * Reads user permission assignments stored in the <code>l:</code> column
 * family of the first table row in <code>_acl_</code>.
 *
 * <p>
 * See {@link AccessControlLists class documentation} for the key structure
 * used for storage.
 * </p>
 */
static ListMultimap<String, TablePermission> getPermissions(Configuration conf,
    byte[] entryName) throws IOException {
  if (entryName == null) entryName = ACL_GLOBAL_NAME;

  // for normal user tables, we just read the table row from _acl_
  ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
      Get get = new Get(entryName);
      get.addFamily(ACL_LIST_FAMILY);
      Result row = table.get(get);
      if (!row.isEmpty()) {
        perms = parsePermissions(entryName, row);
      } else {
        LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
            + Bytes.toString(entryName));
      }
    }
  }

  return perms;
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: AccessControlLists.java

Example 13: checkGlobalPerms

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
public static void checkGlobalPerms(HBaseTestingUtility testUtil, Permission.Action... actions)
    throws IOException {
  Permission[] perms = new Permission[actions.length];
  for (int i = 0; i < actions.length; i++) {
    perms[i] = new Permission(actions[i]);
  }
  CheckPermissionsRequest.Builder request = CheckPermissionsRequest.newBuilder();
  for (Action a : actions) {
    request.addPermission(AccessControlProtos.Permission.newBuilder()
        .setType(AccessControlProtos.Permission.Type.Global)
        .setGlobalPermission(
            AccessControlProtos.GlobalPermission.newBuilder()
                .addAction(ProtobufUtil.toPermissionAction(a)).build()));
  }
  try(Connection conn = ConnectionFactory.createConnection(testUtil.getConfiguration());
      Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME)) {
    BlockingRpcChannel channel = acl.coprocessorService(new byte[0]);
    AccessControlService.BlockingInterface protocol =
      AccessControlService.newBlockingStub(channel);
    try {
      protocol.checkPermissions(null, request.build());
    } catch (ServiceException se) {
      // Convert and propagate; discarding the converted exception would hide failures.
      throw ProtobufUtil.toIOException(se);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: SecureTestUtil.java

Example 14: addLabels

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
public static void addLabels() throws Exception {
  PrivilegedExceptionAction<VisibilityLabelsResponse> action =
      new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
    @Override
    public VisibilityLabelsResponse run() throws Exception {
      String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE };
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        VisibilityClient.addLabels(conn, labels);
      } catch (Throwable t) {
        throw new IOException(t);
      }
      return null;
    }
  };
  SUPERUSER.runAs(action);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestVisibilityLabelsWithDeletes.java

Example 15: setHTable

import org.apache.hadoop.hbase.client.ConnectionFactory; // import the required package/class
/**
 * Allows subclasses to set the {@link HTable}.
 *
 * Will attempt to reuse the underlying Connection for our own needs, including
 * retrieving an Admin interface to the HBase cluster.
 *
 * @param table  The table to get the data from.
 * @throws IOException 
 * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
 */
@Deprecated
protected void setHTable(HTable table) throws IOException {
  this.table = table;
  this.connection = table.getConnection();
  try {
    this.regionLocator = table.getRegionLocator();
    this.admin = this.connection.getAdmin();
  } catch (NeedUnmanagedConnectionException exception) {
    LOG.warn("You are using an HTable instance that relies on an HBase-managed Connection. " +
        "This is usually due to directly creating an HTable, which is deprecated. Instead, you " +
        "should create a Connection object and then request a Table instance from it. If you " +
        "don't need the Table instance for your own use, you should instead use the " +
        "TableInputFormatBase.initalizeTable method directly.");
    LOG.info("Creating an additional unmanaged connection because user provided one can't be " +
        "used for administrative actions. We'll close it when we close out the table.");
    LOG.debug("Details about our failure to request an administrative interface.", exception);
    // Do we need a "copy the settings from this Connection" method? are things like the User
    // properly maintained by just looking again at the Configuration?
    this.connection = ConnectionFactory.createConnection(this.connection.getConfiguration());
    this.regionLocator = this.connection.getRegionLocator(table.getName());
    this.admin = this.connection.getAdmin();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TableInputFormatBase.java


Note: The org.apache.hadoop.hbase.client.ConnectionFactory class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Please do not reproduce without permission.