当前位置: 首页>>代码示例>>Java>>正文


Java Region.put方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.Region.put方法的典型用法代码示例。如果您正苦于以下问题:Java Region.put方法的具体用法?Java Region.put怎么用?Java Region.put使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.regionserver.Region的用法示例。


在下文中一共展示了Region.put方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testRegionObserverScanTimeStacking

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  byte[] row = Bytes.toBytes("testRow");
  byte[] tableName = Bytes.toBytes(getClass().getName());
  byte[] family = Bytes.toBytes("A");
  byte[][] families = new byte[][] { family };

  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(tableName, getClass().getName(), conf, families);

  // Stack two observers: the high-priority one suppresses scan results,
  // the user-priority one is a no-op that must not undo the suppression.
  RegionCoprocessorHost host = region.getCoprocessorHost();
  host.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  host.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // Write a single cell so there is data the coprocessor could leak.
  Put put = new Put(row);
  put.add(family, family, family);
  region.put(put);

  // A Get goes through the scan path, so NoDataFromScan must hide the cell.
  Result result = region.get(new Get(row));
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
        + result, result.listCells());
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:24,代码来源:TestRegionObserverScannerOpenHook.java

示例2: testRegionObserverFlushTimeStacking

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);

  // Stack two observers: NoDataFromFlush (highest priority) drops all cells
  // at flush time; the empty observer verifies stacking doesn't break that.
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // put a row and flush it to disk — the flush observer should discard it
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  // Fixed copy-paste in the failure message: this test exercises the
  // NoDataFromFlush coprocessor, not NoDataFromScan.
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
        + r, r.listCells());
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:25,代码来源:TestRegionObserverScannerOpenHook.java

示例3: addSystemLabel

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
/**
 * Ensures the reserved system label exists in the labels table, persisting it
 * to the region and registering it in the in-memory label map if absent.
 */
protected void addSystemLabel(Region region, Map<String, Integer> labels,
    Map<String, List<Integer>> userAuths) throws IOException {
  // Already registered — nothing to do.
  if (labels.containsKey(SYSTEM_LABEL)) {
    return;
  }
  Put put = new Put(Bytes.toBytes(SYSTEM_LABEL_ORDINAL));
  put.addImmutable(LABELS_TABLE_FAMILY, LABEL_QUALIFIER, Bytes.toBytes(SYSTEM_LABEL));
  region.put(put);
  labels.put(SYSTEM_LABEL, SYSTEM_LABEL_ORDINAL);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:10,代码来源:DefaultVisibilityLabelServiceImpl.java

示例4: generateTestData

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
/**
 * Fills the region with {@code numRows} rows of ~1MB values, one column per
 * row, keyed off the region's start key plus the row index.
 */
private void generateTestData(Region region, int numRows) throws IOException {
  // generating 1Mb values
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(1024 * 1024, 1024 * 1024);
  byte[] startKey = region.getRegionInfo().getStartKey();
  // The original code looped j over [0, 1) — a single iteration — so the
  // one column "0" is written directly here.
  byte[] column = Bytes.toBytes(String.valueOf(0));
  for (int rowIndex = 0; rowIndex < numRows; ++rowIndex) {
    byte[] rowKey = Bytes.add(startKey, Bytes.toBytes(rowIndex));
    byte[] value = dataGenerator.generateRandomSizeValue(rowKey, column);
    Put put = new Put(rowKey);
    put.add(FAMILYNAME, column, value);
    region.put(put);
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:15,代码来源:TestSimpleRegionNormalizerOnCluster.java

示例5: createHFileInRegion

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
/**
 * Writes a single row to the given column family and flushes the region so
 * the data lands in a new store file (HFile) on disk.
 * @param region region to operate on
 * @param columnFamily family for which to add data
 * @throws IOException if the put or flush fails
 */
private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException {
  // One row is enough to force a non-empty store file.
  Put singleRow = new Put(Bytes.toBytes("row"));
  singleRow.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
  region.put(singleRow);
  // Flush the memstore so the row is materialized as an HFile.
  region.flush(true);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:15,代码来源:TestZooKeeperTableArchiveClient.java

示例6: writeTestData

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
/**
 * Writes NUM_ROWS rows, each with NUM_COLS_PER_ROW columns and timestamps
 * 1..NUM_TIMESTAMPS_PER_COL-1, flushing after every ROWS_PER_HFILE rows so
 * the data is spread across multiple store files.
 */
private void writeTestData(Region region) throws IOException {
  for (int rowIndex = 0; rowIndex < NUM_ROWS; ++rowIndex) {
    Put rowPut = new Put(Bytes.toBytes("row" + rowIndex));
    for (int colIndex = 0; colIndex < NUM_COLS_PER_ROW; ++colIndex) {
      byte[] qualifier = Bytes.toBytes("col" + colIndex);
      // Note: timestamps run 1 .. NUM_TIMESTAMPS_PER_COL - 1 (exclusive bound).
      for (long timestamp = 1; timestamp < NUM_TIMESTAMPS_PER_COL; ++timestamp) {
        byte[] cellValue =
            Bytes.toBytes("value" + rowIndex + "_" + colIndex + "_" + timestamp);
        rowPut.add(CF_BYTES, qualifier, timestamp, cellValue);
      }
    }
    region.put(rowPut);
    // Flush every ROWS_PER_HFILE rows to create a new HFile boundary.
    if ((rowIndex + 1) % ROWS_PER_HFILE == 0) {
      region.flush(true);
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:16,代码来源:TestForceCacheImportantBlocks.java

示例7: doPuts

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
/**
 * Loads NUM_ROWS rows of randomly-sized values into the region, optionally
 * attaching a visibility tag to each cell, flushing every NUM_ROWS_PER_FLUSH
 * rows. Uses ASYNC_WAL durability to speed up the test load.
 */
private void doPuts(Region region) throws IOException {
  LoadTestKVGenerator valueGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int rowIndex = 0; rowIndex < NUM_ROWS; ++rowIndex) {
    byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(rowIndex).getBytes();
    for (int colIndex = 0; colIndex < NUM_COLS_PER_ROW; ++colIndex) {
      byte[] qualifier = Bytes.toBytes(String.valueOf(colIndex));
      byte[] cellValue = valueGenerator.generateRandomSizeValue(rowKey, qualifier);
      Put put = new Put(rowKey);
      put.setDurability(Durability.ASYNC_WAL);
      if (includeTags) {
        // Wrap the value in a KeyValue carrying a single visibility tag.
        Tag[] tags = { new Tag((byte) 1, "Visibility") };
        put.add(new KeyValue(rowKey, CF_BYTES, qualifier, HConstants.LATEST_TIMESTAMP,
            cellValue, tags));
      } else {
        put.add(CF_BYTES, qualifier, cellValue);
      }
      if (VERBOSE) {
        KeyValue debugKv = new KeyValue(rowKey, CF_BYTES, qualifier, cellValue);
        System.err.println(Strings.padFront(rowIndex + "", ' ', 4) + " " + debugKv);
      }
      region.put(put);
    }
    // Flush periodically (including at rowIndex == 0) to create several HFiles.
    if (rowIndex % NUM_ROWS_PER_FLUSH == 0) {
      region.flush(true);
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:29,代码来源:TestEncodedSeekers.java

示例8: testNotCachingDataBlocksDuringCompactionInternals

import org.apache.hadoop.hbase.regionserver.Region; //导入方法依赖的package包/类
// Verifies that compaction output blocks are NOT added to the block cache:
// loads several HFiles' worth of data, clears the cache, compacts, then
// asserts the cache contains no DATA/ENCODED_DATA blocks.
private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
    throws IOException, InterruptedException {
  // Tags require HFile v3; without tags, v2 is sufficient.
  if (useTags) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  } else {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
  }
  // TODO: need to change this test if we add a cache size threshold for
  // compactions, or if we implement some other kind of intelligent logic for
  // deciding what blocks to cache-on-write on compaction.
  final String table = "CompactionCacheOnWrite";
  final String cf = "myCF";
  final byte[] cfBytes = Bytes.toBytes(cf);
  final int maxVersions = 3;
  Region region = TEST_UTIL.createTestRegion(table, 
      new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
  );
  int rowIdx = 0;
  long ts = EnvironmentEdgeManager.currentTime();
  // Write 5 batches of 500 rows, flushing after each batch so the compaction
  // below has multiple store files to merge.
  for (int iFile = 0; iFile < 5; ++iFile) {
    for (int iRow = 0; iRow < 500; ++iRow) {
      // Cubing rowIdx spreads the keys non-linearly across the key space.
      String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + 
          iRow;
      Put p = new Put(Bytes.toBytes(rowStr));
      ++rowIdx;
      for (int iCol = 0; iCol < 10; ++iCol) {
        String qualStr = "col" + iCol;
        String valueStr = "value_" + rowStr + "_" + qualStr;
        // 5 versions per column; with maxVersions=3, compaction will also
        // drop excess versions.
        for (int iTS = 0; iTS < 5; ++iTS) {
          if (useTags) {
            // Tagged path: build the KeyValue explicitly to attach the tag.
            Tag t = new Tag((byte) 1, "visibility");
            Tag[] tags = new Tag[1];
            tags[0] = t;
            KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
            p.add(kv);
          } else {
            // Untagged path: monotonically increasing explicit timestamps.
            p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
          }
        }
      }
      // ASYNC_WAL keeps the test load fast; durability is irrelevant here.
      p.setDurability(Durability.ASYNC_WAL);
      region.put(p);
    }
    region.flush(true);
  }
  // Start from an empty cache so anything found afterwards must have been
  // inserted by the compaction.
  clearBlockCache(blockCache);
  assertEquals(0, blockCache.getBlockCount());
  region.compact(false);
  LOG.debug("compactStores() returned");

  // The compaction must not have cached any data blocks (plain or encoded).
  for (CachedBlock block: blockCache) {
    assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
    assertNotEquals(BlockType.DATA, block.getBlockType());
  }
  ((HRegion)region).close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:62,代码来源:TestCacheOnWrite.java


注:本文中的org.apache.hadoop.hbase.regionserver.Region.put方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。