

Java HTablePool Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.HTablePool. If you are wondering what the HTablePool class is for, how to use it, or where to find working examples, the curated class examples below may help.


The HTablePool class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
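Before the individual examples, here is a minimal sketch of the typical HTablePool lifecycle that all of the snippets below build on, assuming the HBase 0.92/0.94-era client API (the table name "my_table" and rowkey "row1" are placeholders): create the pool once, borrow a table per operation, and return it by closing the table. Note that HTablePool was deprecated in later releases and removed in HBase 1.0 in favor of Connection-managed tables, so this pattern applies only to these older client versions.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HTablePoolLifecycle {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HTablePool pool = new HTablePool(conf, 10); // at most 10 cached references per table name
        HTableInterface table = pool.getTable("my_table"); // placeholder table name
        try {
            Result r = table.get(new Get(Bytes.toBytes("row1"))); // placeholder rowkey
            System.out.println("row found: " + !r.isEmpty());
        } finally {
            table.close(); // from 0.92 on, closing a pooled table returns it to the pool
        }
        pool.close(); // release all pooled tables when the application shuts down
    }
}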

Example 1: QueryByCondition1

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
public static void QueryByCondition1(String tableName) {

    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    try {
        Get get = new Get("abcdef".getBytes()); // look up a single row by its rowkey
        Result r = table.get(get);
        System.out.println("Got rowkey: " + new String(r.getRow()));
        for (KeyValue keyValue : r.raw()) {
            System.out.println("column: " + new String(keyValue.getFamily())
                    + "====value: " + new String(keyValue.getValue()));
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Author: yjp123456, Project: SparkDemo, Lines: 17, Source: MyClass.java
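Note that this example (and the other SparkDemo snippets below) never returns the borrowed table to the pool. A hedged sketch of the same lookup with the try/finally discipline, assuming the 0.92+ API where closing a pooled HTableInterface returns it to the pool rather than destroying the connection (queryByRowKey is a hypothetical helper name):

public static void queryByRowKey(HTablePool pool, String tableName, byte[] rowKey)
        throws IOException {
    HTableInterface table = pool.getTable(tableName);
    try {
        Result r = table.get(new Get(rowKey));
        System.out.println("Got rowkey: " + new String(r.getRow()));
        for (KeyValue kv : r.raw()) {
            System.out.println("column: " + new String(kv.getFamily())
                    + "====value: " + new String(kv.getValue()));
        }
    } finally {
        table.close(); // return the table to the pool instead of leaking it
    }
}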

Example 2: QueryByCondition2

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
public static void QueryByCondition2(String tableName) {

    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        Filter filter = new SingleColumnValueFilter(Bytes
                .toBytes("column1"), null, CompareOp.EQUAL, Bytes
                .toBytes("aaa")); // select rows whose column1 value is "aaa"
        Scan s = new Scan();
        s.setFilter(filter);
        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("Got rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column: " + new String(keyValue.getFamily())
                        + "====value: " + new String(keyValue.getValue()));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Author: yjp123456, Project: SparkDemo, Lines: 24, Source: MyClass.java
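One subtlety worth flagging: by default a SingleColumnValueFilter also emits rows that do not contain the tested column at all, so the scan above can return rows that have no column1. A hedged variant of the same scan that restricts results to rows where column1 actually equals "aaa", and that also closes the scanner (scanners hold server-side resources):

SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("column1"), null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
filter.setFilterIfMissing(true); // skip rows that lack column1 entirely
Scan scan = new Scan();
scan.setFilter(filter);
ResultScanner rs = table.getScanner(scan);
try {
    for (Result r : rs) {
        System.out.println("Got rowkey: " + new String(r.getRow()));
    }
} finally {
    rs.close(); // always release the scanner when done
}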

Example 3: main

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
public static void main(String[] args) throws IOException {

    if (args.length != 3) {
      System.out.println(usage);
      System.exit(0);
    }

    double lon = Double.parseDouble(args[0]);
    double lat = Double.parseDouble(args[1]);
    int n = Integer.parseInt(args[2]);

    HTablePool pool = new HTablePool();
    KNNQuery q = new KNNQuery(pool);
    Queue<QueryMatch> ret = q.queryKNN(lat, lon, n);

    QueryMatch m;
    while ((m = ret.poll()) != null) {
      System.out.println(m);
    }

    pool.close();
  }
 
Author: East196, Project: maker, Lines: 23, Source: KNNQuery.java

Example 4: initConfiguration

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
private void initConfiguration() {
    if (clusterConfig.get(HbaseConf.cluster_name) == null || "".equals(clusterConfig.get(HbaseConf.cluster_name))) {
        throw new IllegalArgumentException("cluster name cannot be null or empty!");
    }

    clusterName = clusterConfig.get(HbaseConf.cluster_name);
    Configuration conf = HBaseConfiguration.create();
    conf.set(HbaseConf.hbase_quorum, clusterConfig.get(HbaseConf.hbase_quorum));
    conf.set(HbaseConf.hbase_clientPort, clusterConfig.get(HbaseConf.hbase_clientPort));
    if (null != clusterConfig.get(HbaseConf.hbase_znode_parent)) {
        conf.set(HbaseConf.hbase_znode_parent, clusterConfig.get(HbaseConf.hbase_znode_parent));
    }

    // Client-side tuning: retry up to 5 times with a 200 ms base pause between
    // retries, use a 3 s IPC ping interval, and disable Nagle's algorithm.
    conf.set("hbase.client.retries.number", "5");
    conf.set("hbase.client.pause", "200");
    conf.set("ipc.ping.interval", "3000");
    conf.setBoolean("hbase.ipc.client.tcpnodelay", true);

    if (this.checkConfiguration(clusterConfig.get(HbaseConf.cluster_name), conf)) {
        configuration = conf;
        tablePool = new HTablePool(conf, 100);
    }
}
 
Author: loye168, Project: tddl5, Lines: 25, Source: HbFactory.java

Example 5: init

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
/**
 * Init dataSource.
 */
public void init() {
    try {
        // Pin the JAXP parser implementations to the ones bundled with the JDK,
        // avoiding conflicts with other XML parsers on the classpath.
        System.setProperty("javax.xml.parsers.DocumentBuilderFactory",
                "com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl");
        System.setProperty("javax.xml.parsers.SAXParserFactory",
                "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl");

        initHbaseConfiguration();

        tablePool = new HTablePool(hbaseConfiguration, tablePoolMaxSize);
        tableFactory = new PooledHTableFactory(tablePool);

        log.info(this);

    } catch (Exception e) {
        log.error(e);
        throw new SimpleHBaseException(e);
    }
}
 
Author: xushaomin, Project: apple-data, Lines: 24, Source: HBaseDataSource.java

Example 6: main

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
/**
 * @param args
 * @creatTime 1:57:57 PM
 * @author XuYi
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
    Configuration config = HBaseConfiguration.create();
    config.set("hbase.zookeeper.quorum", "master.hadoop,slave1.hadoop,slave2.hadoop");
    pool = new HTablePool(config, 10);
//  HTable table = (HTable) pool.getTable(Bytes.toBytes("manageLog"));
//  execute(table);
//  pool.putTable(table);
//  HTable table2 = (HTable) pool.getTable(Bytes.toBytes("manageLog"));
//  execute(table2);
//  pool.putTable(table2);
    // Exercise the shared pool from 30 concurrent threads.
    for (int i = 0; i < 30; i++) {
        new Thread(new TestThread()).start();
    }
}
 
Author: Justice-love, Project: oceandata, Lines: 23, Source: Pool.java
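The TestThread class is not included in the snippet. A hypothetical sketch of what such a worker might look like, with each thread borrowing its own table from the shared static pool (an HTablePool may be shared across threads, but individual HTable instances may not; the rowkey is a placeholder):

static class TestThread implements Runnable {
    @Override
    public void run() {
        HTableInterface table = pool.getTable(Bytes.toBytes("manageLog"));
        try {
            table.get(new Get(Bytes.toBytes("someRow"))); // placeholder rowkey
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                table.close(); // return the table to the pool
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}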

Example 7: main

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "master.hadoop,slave1.hadoop,slave2.hadoop");
    HTablePool pool = new HTablePool(conf, 1, new TableFactory2());
    HTableInterface table = pool.getTable(Bytes.toBytes("test3"));

    Get get1 = new Get(Bytes.toBytes("1"));
    table.get(get1);
    System.out.println(table);

    table.close();

    // With a pool size of 1, the second getTable() should hand back the same
    // underlying table that was just returned to the pool.
    HTableInterface table2 = pool.getTable(Bytes.toBytes("test3"));
    table2.get(get1);
    System.out.println(table2);
    table2.close();
}
 
Author: Justice-love, Project: oceandata, Lines: 18, Source: PoolTest0921.java
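TableFactory2 is not shown in the snippet. A hypothetical sketch of such a factory, implementing org.apache.hadoop.hbase.client.HTableInterfaceFactory, which the pool invokes whenever it needs to create a new underlying table or dispose of one it no longer keeps:

static class TableFactory2 implements HTableInterfaceFactory {
    @Override
    public HTableInterface createHTableInterface(Configuration config, byte[] tableName) {
        try {
            System.out.println("creating table " + Bytes.toString(tableName));
            return new HTable(config, tableName);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void releaseHTableInterface(HTableInterface table) throws IOException {
        table.close(); // actually close the underlying connection
    }
}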

Example 8: HBaseDataRepository

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
public HBaseDataRepository( Configuration theConfiguration, int theTablePoolsSize ) {
	Preconditions.checkNotNull( theConfiguration, "need configuration" );
	Preconditions.checkArgument( theTablePoolsSize > 0, "need a pool size greater than 0" );
	
	configuration = theConfiguration;
	tablePool = new HTablePool( configuration, theTablePoolsSize );

	facilityManager = new SimpleFacilityManager();
	
	StorageTypeFacility storageTypeFacility = new StorageTypeFacility( );
	NameValidator typeNameValidator = new NopNameValidator( );
	NameValidator memberNameValidator = new LowerCaseEntityNameValidator( );
	JsonTranslationFacility jsonFacility = new JsonTranslationFacility( 
			new StorageTypeSource( storageTypeFacility ),
			Readability.MACHINE,
			typeNameValidator,
			memberNameValidator );
	HBaseTranslationFacility mapFacility = new HBaseTranslationFacility( storageTypeFacility, jsonFacility );

	facilityManager.addFacility( StorageTypeFacility.class, storageTypeFacility );
	facilityManager.addFacility( HBaseTranslationFacility.class, mapFacility );
	facilityManager.addFacility( DefinitionFacility.class, new HBaseDefinitionFacility( ) );
	facilityManager.addFacility( LifecycleFacility.class, new StandardLifecycleFacility<HBaseDataRepository, HBaseDataContext>( ) );
}
 
Author: Talvish, Project: Tales, Lines: 25, Source: HBaseDataRepository.java

Example 9: UserProfileExample

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
/**
 * The constructor will start by registering the schemas with the meta store
 * table in HBase, and create the required tables to run.
 */
public UserProfileExample() {
  Configuration conf = HBaseConfiguration.create();
  HTablePool pool = new HTablePool(conf, 10);
  SchemaManager schemaManager = new DefaultSchemaManager(pool);

  registerSchemas(conf, schemaManager);

  userProfileDao = new SpecificAvroDao<UserProfileModel>(pool,
      "cdk_example_user_profiles", "UserProfileModel", schemaManager);
  userActionsDao = new SpecificAvroDao<UserActionsModel>(pool,
      "cdk_example_user_profiles", "UserActionsModel", schemaManager);
  userProfileActionsDao = SpecificAvroDao.buildCompositeDaoWithEntityManager(
      pool, "cdk_example_user_profiles", UserProfileActionsModel.class,
      schemaManager);
}
 
Author: cloudera, Project: cdk, Lines: 20, Source: UserProfileExample.java

Example 10: buildCompositeDao

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
/**
 * Create a CompositeDao, which will return SpecificRecord instances
 * in a Map container.
 *
 * @param tablePool
 *          An HTablePool instance to use for connecting to HBase
 * @param tableName
 *          The table name this dao will read from and write to
 * @param subEntitySchemaStrings
 *          The Avro schema strings of the subentities that make up the
 *          composite. Each schema's full name must resolve to a
 *          SpecificRecord class on the classpath.
 * @return The CompositeDao instance.
 * @throws SchemaNotFoundException
 * @throws SchemaValidationException
 */
@SuppressWarnings("unchecked")
public static <K extends SpecificRecord, S extends SpecificRecord> Dao<
    Map<String, S>> buildCompositeDao(
    HTablePool tablePool, String tableName,
    List<String> subEntitySchemaStrings) {

  List<EntityMapper<S>> entityMappers = new ArrayList<EntityMapper<S>>();
  for (String subEntitySchemaString : subEntitySchemaStrings) {
    AvroEntitySchema subEntitySchema = parser
        .parseEntitySchema(subEntitySchemaString);
    Class<S> subEntityClass;
    try {
      subEntityClass = (Class<S>) Class.forName(subEntitySchema
          .getAvroSchema().getFullName());
    } catch (ClassNotFoundException e) {
      throw new RuntimeException(e);
    }
    entityMappers.add(SpecificAvroDao.<S> buildEntityMapper(
        subEntitySchemaString, subEntitySchemaString, 
        subEntityClass));
  }

  return new SpecificMapCompositeAvroDao<S>(tablePool, tableName, entityMappers);
}
 
Author: cloudera, Project: cdk, Lines: 45, Source: SpecificAvroDao.java

Example 11: buildCompositeDaoWithEntityManager

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
/**
 * Create a CompositeDao, which will return SpecificRecord instances
 * in a Map container.
 *
 * @param tablePool
 *          An HTablePool instance to use for connecting to HBase.
 * @param tableName
 *          The table name of the managed schema.
 * @param subEntityClasses
 *          The classes that make up the subentities.
 * @param schemaManager
 *          The SchemaManager used to create the entity mappers that power
 *          this dao.
 * @return The CompositeDao instance.
 * @throws SchemaNotFoundException
 */
public static <K extends SpecificRecord, S extends SpecificRecord> Dao<Map<String, S>> buildCompositeDaoWithEntityManager(
    HTablePool tablePool, String tableName, List<Class<S>> subEntityClasses,
    SchemaManager schemaManager) {

  List<EntityMapper<S>> entityMappers = new ArrayList<EntityMapper<S>>();
  for (Class<S> subEntityClass : subEntityClasses) {
    String entityName = getSchemaFromEntityClass(subEntityClass).getName();
    entityMappers.add(new VersionedAvroEntityMapper.Builder()
        .setSchemaManager(schemaManager).setTableName(tableName)
        .setEntityName(entityName).setSpecific(true)
        .<S> build());
  }

  return new SpecificMapCompositeAvroDao<S>(tablePool, tableName,
      entityMappers);
}
 
Author: cloudera, Project: cdk, Lines: 33, Source: SpecificAvroDao.java

Example 12: BaseEntityBatch

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
/**
 * Checks an HTable out of the HTablePool and modifies it to take advantage of
 * batch puts. This is very useful when performing many consecutive puts.
 *
 * @param clientTemplate
 *          The client template to use
 * @param entityMapper
 *          The EntityMapper to use for mapping
 * @param pool
 *          The HBase table pool
 * @param tableName
 *          The name of the HBase table
 * @param writeBufferSize
 *          The batch buffer size in bytes.
 */
public BaseEntityBatch(HBaseClientTemplate clientTemplate,
    EntityMapper<E> entityMapper, HTablePool pool, String tableName,
    long writeBufferSize) {
  this.table = pool.getTable(tableName);
  this.table.setAutoFlush(false);
  this.clientTemplate = clientTemplate;
  this.entityMapper = entityMapper;
  this.state = ReaderWriterState.NEW;

  /**
   * If the writeBufferSize is less than the currentBufferSize, then the
   * buffer will get flushed automatically by HBase. This should never happen,
   * since we're getting a fresh table out of the pool, and the writeBuffer
   * should be empty.
   */
  try {
    table.setWriteBufferSize(writeBufferSize);
  } catch (IOException e) {
    throw new DatasetIOException("Error flushing commits for table ["
        + table + "]", e);
  }
}
 
Author: cloudera, Project: cdk, Lines: 38, Source: BaseEntityBatch.java
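The constructor above turns off auto-flush so that puts accumulate in a client-side write buffer instead of costing one RPC each. A standalone, hedged sketch of the same batching pattern under the client API the example uses (the table, family, and qualifier names are placeholders):

public static void batchInsert(HTablePool pool) throws IOException {
    HTableInterface table = pool.getTable("events"); // placeholder table name
    table.setAutoFlush(false); // buffer puts client-side instead of one RPC per put
    table.setWriteBufferSize(2 * 1024 * 1024); // auto-flush at ~2 MB of buffered puts
    try {
        for (int i = 0; i < 10000; i++) {
            Put put = new Put(Bytes.toBytes("row" + i));
            put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(i)); // placeholder family/qualifier
            table.put(put); // buffered, not yet sent to the region servers
        }
        table.flushCommits(); // push any puts still sitting in the buffer
    } finally {
        table.close(); // return the table to the pool
    }
}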

Example 13: insertData

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
public static void insertData(String tableName) {
    System.out.println("start insert data ......");
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    // One Put represents one row; create another Put for a second row. Every
    // row has a unique rowkey, here the value passed to the Put constructor.
    Put put = new Put("112233bbbcccc".getBytes());
    put.add("column1".getBytes(), null, "aaa".getBytes()); // first column of this row
    put.add("column2".getBytes(), null, "bbb".getBytes()); // second column of this row
    put.add("column3".getBytes(), null, "ccc".getBytes()); // third column of this row
    try {
        table.put(put);
    } catch (IOException e) {
        e.printStackTrace();
    }
    System.out.println("end insert data ......");
}
 
Author: yjp123456, Project: SparkDemo, Lines: 16, Source: MyClass.java
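As the comment notes, each additional row needs its own Put. A hedged sketch of inserting several rows in a single client call via the List overload of put(), reusing the column layout of the example above (insertRows is a hypothetical helper and the rowkeys are placeholders):

public static void insertRows(HTable table) throws IOException {
    List<Put> puts = new ArrayList<Put>();
    for (int i = 0; i < 3; i++) {
        Put put = new Put(Bytes.toBytes("row" + i)); // one Put per row
        put.add("column1".getBytes(), null, Bytes.toBytes("value" + i));
        puts.add(put);
    }
    table.put(puts); // all three rows in one round trip
}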

Example 14: RowResultGenerator

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter) throws IllegalArgumentException, IOException {
  HTablePool pool = RESTServlet.getInstance().getTablePool(); 
  HTableInterface table = pool.getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col: rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 2 && split[1].length != 0) {
          get.addColumn(split[0], split[1]);
        } else {
          get.addFamily(split[0]);
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.list().iterator();
    }
  } catch (DoNotRetryIOException e) {
    // Warn here because Stargate will return 404 when multiple column
    // families were specified but one did not exist -- currently HBase
    // will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage, but
    // logging it here leaves a record of what happened and helps avoid
    // confusion.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 38, Source: RowResultGenerator.java

Example 15: RESTServlet

import org.apache.hadoop.hbase.client.HTablePool; // import the required package/class
/**
 * Constructor with an existing configuration.
 * @param conf existing configuration
 * @throws IOException
 */
RESTServlet(Configuration conf) throws IOException {
  this.conf = conf;
  int maxSize = conf.getInt("hbase.rest.htablepool.size", 10);
  this.pool = new HTablePool(conf, maxSize);
  this.admin = new HBaseAdmin(conf);
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 12, Source: RESTServlet.java


Note: The org.apache.hadoop.hbase.client.HTablePool class examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright over the source code; consult the corresponding project's License before redistributing or using it. Do not repost without permission.