
Java IsolationLevel Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.client.IsolationLevel, collected from open-source projects. If you are unsure what the IsolationLevel class does, or how and where to use it, the curated examples below should help.


The IsolationLevel class belongs to the org.apache.hadoop.hbase.client package. A total of 15 code examples of the class are shown below, ordered by popularity.
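
Before diving into the examples, here is a minimal, self-contained sketch of the client-side API (the row key is made up for illustration). READ_COMMITTED, the default, only returns data that has already been committed, while READ_UNCOMMITTED also sees writes that are still in flight:

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class IsolationLevelUsage {
  public static void main(String[] args) {
    // READ_COMMITTED (the default) sees only writes below the region's
    // MVCC read point; READ_UNCOMMITTED also sees in-flight writes.
    Get get = new Get(Bytes.toBytes("row1"));
    get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);

    Scan scan = new Scan();
    scan.setIsolationLevel(IsolationLevel.READ_COMMITTED);
  }
}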

Example 1: initialize

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);


  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source: TableSnapshotInputFormatImpl.java
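
The initialize() method above is reached from a MapReduce driver. Below is a hypothetical driver-side setup, assuming the standard TableMapReduceUtil.initTableSnapshotMapperJob entry point; the snapshot name, restore directory, and mapper are placeholders. Note that the caller does not set the isolation level: the input format forces READ_UNCOMMITTED itself, because a restored snapshot region is immutable.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanJob {
  // Identity mapper: re-emits each row unchanged; a real job would derive values.
  static class SnapshotMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context ctx)
        throws java.io.IOException, InterruptedException {
      ctx.write(key, value);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-scan");
    // "my_snapshot" and the restore directory are placeholders.
    TableMapReduceUtil.initTableSnapshotMapperJob(
        "my_snapshot", new Scan(), SnapshotMapper.class,
        ImmutableBytesWritable.class, Result.class,
        job, true, new Path("/tmp/snapshot_restore"));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}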

Example 2: StoreScanner

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
  this(store, scan, scanInfo, null,
      ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED), false);
  if (dropDeletesFromRow == null) {
    matcher =
        new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint, earliestPutTs,
            oldestUnexpiredTS, now, store.getCoprocessorHost());
  } else {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
        oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
  }

  // Filter the list of scanners using Bloom filters, time range, TTL, etc.
  scanners = selectScannersFrom(scanners);

  // Seek all scanners to the initial key
  seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);

  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, store.getComparator());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source: StoreScanner.java

Example 3: preStoreScannerOpen

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
        family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: TestCoprocessorScanPolicy.java

Example 4: StoreScanner

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
                     List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
                     long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
    this(store, false, scan, null, scanInfo.getTtl(), scanInfo.getMinVersions(),
            ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
    if (dropDeletesFromRow == null) {
        matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
                earliestPutTs, oldestUnexpiredTS, now, store.getCoprocessorHost());
    } else {
        matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
                oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
    }

    // Filter the list of scanners using Bloom filters, time range, TTL, etc.
    scanners = selectScannersFrom(scanners);

    // Seek all scanners to the initial key
    seekScanners(scanners, matcher.getStartKey(), false, isParallelSeekEnabled);

    // Combine all seeked scanners with a heap
    resetKVHeap(scanners, store.getComparator());
}
 
Developer ID: grokcoder, Project: pbase, Lines: 23, Source: StoreScanner.java

Example 5: preStoreScannerOpen

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
@Override
public KeyValueScanner preStoreScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
    final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
  TableName tn = store.getTableName();
  if (!tn.isSystemTable()) {
    Long newTtl = ttls.get(store.getTableName());
    Integer newVersions = versions.get(store.getTableName());
    ScanInfo oldSI = store.getScanInfo();
    HColumnDescriptor family = store.getFamily();
    ScanInfo scanInfo = new ScanInfo(family.getName(), family.getMinVersions(),
        newVersions == null ? family.getMaxVersions() : newVersions,
        newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  } else {
    return s;
  }
}
 
Developer ID: grokcoder, Project: pbase, Lines: 21, Source: TestCoprocessorScanPolicy.java

Example 6: StoreScanner

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
  this(store, false, scan, null, scanInfo.getTtl(), scanInfo.getMinVersions(),
      ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
  if (dropDeletesFromRow == null) {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
        earliestPutTs, oldestUnexpiredTS, store.getCoprocessorHost());
  } else {
    matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
        oldestUnexpiredTS, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
  }

  // Filter the list of scanners using Bloom filters, time range, TTL, etc.
  scanners = selectScannersFrom(scanners);

  // Seek all scanners to the initial key
  seekScanners(scanners, matcher.getStartKey(), false, isParallelSeekEnabled);

  // Combine all seeked scanners with a heap
  resetKVHeap(scanners, store.getComparator());
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 23, Source: StoreScanner.java

Example 7: assertICV

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
private void assertICV(byte [] row,
                       byte[] family,
                       byte[] qualifier,
                       long amount,
                       boolean fast) throws IOException {
  // run a get and see?
  Get get = new Get(row);
  if (fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  get.addColumn(family, qualifier);
  Result result = region.get(get);
  assertEquals(1, result.size());

  Cell kv = result.rawCells()[0];
  long r = Bytes.toLong(CellUtil.cloneValue(kv));
  assertEquals(amount, r);
}
 
Developer ID: apache, Project: hbase, Lines: 17, Source: TestAtomicOperation.java

Example 8: initialize

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  TableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);


  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);
  scan.setScanMetricsEnabled(true);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
 
Developer ID: apache, Project: hbase, Lines: 19, Source: TableSnapshotInputFormatImpl.java

Example 9: SkeletonClientSideRegionScanner

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
public SkeletonClientSideRegionScanner(Configuration conf,
                                          FileSystem fs,
                                          Path rootDir,
                                          HTableDescriptor htd,
                                          HRegionInfo hri,
                                          Scan scan, String hostAndPort) throws IOException {
  if (LOG.isDebugEnabled())
    SpliceLogUtils.debug(LOG, "init for regionInfo=%s, scan=%s", hri, scan);
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  this.conf = conf;
  this.fs = fs;
  this.rootDir = rootDir;
  this.htd = htd;
  this.hri = new SpliceHRegionInfo(hri);
  this.scan = scan;
  this.hostAndPort = hostAndPort;
}
 
Developer ID: splicemachine, Project: spliceengine, Lines: 18, Source: SkeletonClientSideRegionScanner.java

Example 10: getReadpoint

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
@Override public long getReadpoint(IsolationLevel isolationLevel) {
  if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
    // This scan can read even uncommitted transactions
    return Long.MAX_VALUE;
  }
  return mvcc.getReadPoint();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 8, Source: HRegion.java
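
Example 10 is the server-side hinge between the two levels: READ_UNCOMMITTED returns Long.MAX_VALUE, so every cell passes the MVCC visibility check, while READ_COMMITTED caps reads at the current MVCC read point. The toy model below illustrates that visibility rule; the Mvcc class is a simplified stand-in for HBase's MultiVersionConcurrencyControl, and all sequence ids are invented.

import org.apache.hadoop.hbase.client.IsolationLevel;

public class ReadpointDemo {
  // Simplified stand-in for HBase's MultiVersionConcurrencyControl.
  static class Mvcc {
    long getReadPoint() { return 42; } // highest committed sequence id (invented)
  }

  static long getReadpoint(Mvcc mvcc, IsolationLevel level) {
    if (level == IsolationLevel.READ_UNCOMMITTED) {
      return Long.MAX_VALUE; // no upper bound: in-flight writes are visible too
    }
    return mvcc.getReadPoint();
  }

  public static void main(String[] args) {
    Mvcc mvcc = new Mvcc();
    long cellSeqId = 100; // a cell written but not yet committed (invented)
    for (IsolationLevel level : IsolationLevel.values()) {
      boolean visible = cellSeqId <= getReadpoint(mvcc, level);
      System.out.println(level + " sees seqId " + cellSeqId + ": " + visible);
    }
  }
}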

Example 11: getIncrementCurrentValue

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
/**
 * Do a specific Get on passed <code>columnFamily</code> and column qualifiers
 * from <code>increments</code> only.
 *
 * @param increment the Increment being applied
 * @param columnFamily the column family to read from
 * @param increments the Cells whose qualifiers should be fetched
 * @param isolation the isolation level for the internal Get; may be null
 * @return the Cells to Increment
 * @throws IOException on read failure
 */
private List<Cell> getIncrementCurrentValue(final Increment increment, byte[] columnFamily,
    final List<Cell> increments, final IsolationLevel isolation) throws IOException {
  Get get = new Get(increment.getRow());
  if (isolation != null) get.setIsolationLevel(isolation);
  for (Cell cell : increments) {
    get.addColumn(columnFamily, CellUtil.cloneQualifier(cell));
  }
  TimeRange tr = increment.getTimeRange();
  if (tr != null) {
    get.setTimeRange(tr.getMin(), tr.getMax());
  }
  return get(get, false);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source: HRegion.java

Example 12: doScan

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
public static void doScan(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  InternalScanner scanner = null;
  try {
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    scanner = region.getScanner(scan);
    result.clear();
    scanner.next(result);
  } finally {
    if (scanner != null) scanner.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: TestRowProcessorEndpoint.java
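
Since InternalScanner extends Closeable in current HBase versions, the same helper can be written more compactly with try-with-resources; a sketch, equivalent to Example 12 under that assumption:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

public static void doScanCompact(
    HRegion region, Scan scan, List<Cell> result) throws IOException {
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // InternalScanner extends Closeable, so the scanner is closed automatically.
  try (InternalScanner scanner = region.getScanner(scan)) {
    result.clear();
    scanner.next(result); // fills result with the cells of the next row
  }
}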

Example 13: preStoreScannerOpen

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
@Override
public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s)
    throws IOException {
  scan.setFilter(new NoDataFilter());
  return new StoreScanner(store, store.getScanInfo(), scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 9, Source: TestRegionObserverScannerOpenHook.java

Example 14: preStoreScannerOpen

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
@Override
public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
    final KeyValueScanner s) throws IOException {
  ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
  if (scanInfo == null) {
    // take default action
    return null;
  }
  return new StoreScanner(store, scanInfo, scan, targetCols,
    ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 13, Source: ZooKeeperScanPolicyObserver.java

Example 15: initialize

import org.apache.hadoop.hbase.client.IsolationLevel; // import the required package/class
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  // This is the user-specified root directory where the snapshot was restored
  Path tmpRootDir = new Path(conf.get(RESTORE_DIR_KEY));

  // create scan
  // TODO: mapred does not support scan as input API. Work around for now.
  if (conf.get(TableInputFormat.SCAN) != null) {
    scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN));
  } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) {
    String[] columns =
      conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" ");
    scan = new Scan();
    for (String col : columns) {
      scan.addFamily(Bytes.toBytes(col));
    }
  } else {
    throw new IllegalArgumentException("A Scan is not configured for this job");
  }

  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner = new ClientSideRegionScanner(conf, fs, tmpRootDir, htd, hri, scan, null);
}
 
Developer ID: grokcoder, Project: pbase, Lines: 33, Source: TableSnapshotInputFormatImpl.java
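
For the old mapred path in Example 15, the scan is assembled from configuration keys rather than a serialized Scan object. A hypothetical configuration snippet follows; COLUMN_LIST is the real constant referenced in the code above, while the restore-dir key literal is an assumption based on the usual value of RESTORE_DIR_KEY in TableSnapshotInputFormatImpl (verify it against your HBase version).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MapredSnapshotConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Space-separated column families; each entry becomes scan.addFamily(...)
    // in initialize() above. "cf1 cf2" is a placeholder.
    conf.set(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST, "cf1 cf2");
    // Root directory the snapshot was restored to (key literal assumed).
    conf.set("hbase.TableSnapshotInputFormat.restore.dir", "/tmp/snapshot_restore");
  }
}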


Note: the org.apache.hadoop.hbase.client.IsolationLevel class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not republish without permission.