This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.IncompatibleFilterException. If you are wondering what IncompatibleFilterException is for and how to use it, the curated class code examples below may help.
IncompatibleFilterException belongs to the org.apache.hadoop.hbase.filter package. Seven code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: setBatch
import org.apache.hadoop.hbase.filter.IncompatibleFilterException; // import the required package/class
/**
 * Set the maximum number of values to return for each call to next()
 * @param batch the maximum number of values
 */
public Scan setBatch(int batch) {
  if (this.hasFilter() && this.filter.hasFilterRow()) {
    throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" +
        " that returns true for filter.hasFilterRow");
  }
  this.batch = batch;
  return this;
}
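For context, here is a minimal, hypothetical usage sketch (not one of the collected examples) showing when this check fires. PageFilter is a standard HBase filter whose hasFilterRow() returns true, so combining it with setBatch triggers the exception; the page size and batch value are illustrative.
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.filter.PageFilter;

public class BatchVsFilterRowSketch {
  public static void main(String[] args) {
    Scan scan = new Scan();
    // PageFilter implements filterRow(), so hasFilterRow() returns true.
    scan.setFilter(new PageFilter(10));
    try {
      scan.setBatch(5); // incompatible with a row-level filter
    } catch (IncompatibleFilterException e) {
      // Expected: "Cannot set batch on a scan using a filter ..."
      System.out.println("Caught: " + e.getMessage());
    }
  }
}
Note that IncompatibleFilterException is a RuntimeException, so catching it is optional; the setBatch implementations above simply rely on it to fail fast.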
Example 2: setBatch
import org.apache.hadoop.hbase.filter.IncompatibleFilterException; // import the required package/class
/**
 * Set the maximum number of values to return for each call to next()
 * @param batch the maximum number of values
 */
public void setBatch(int batch) {
  if (this.hasFilter() && this.filter.hasFilterRow()) {
    throw new IncompatibleFilterException("Cannot set batch on a scan using a filter"
        + " that returns true for filter.hasFilterRow");
  }
  this.batch = batch;
}
Example 3: getConfiguredScanForJob
import org.apache.hadoop.hbase.filter.IncompatibleFilterException; // import the required package/class
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Optional arguments.
  // Set Scan versions
  int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1;
  s.setMaxVersions(versions);
  // Set Scan time range
  long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L;
  long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE;
  s.setTimeRange(startTime, endTime);
  // Set cache blocks
  s.setCacheBlocks(false);
  // Set raw scan (keep deleted cells)
  boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
  if (raw) {
    s.setRaw(raw);
  }
  // Set Scan column family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or PrefixFilter if applicable.
  Filter exportFilter = getExportFilter(args);
  if (exportFilter != null) {
    LOG.info("Setting Scan Filter for Export.");
    s.setFilter(exportFilter);
  }
  int batching = conf.getInt(EXPORT_BATCHING, -1);
  if (batching != -1) {
    try {
      s.setBatch(batching);
    } catch (IncompatibleFilterException e) {
      LOG.error("Batching could not be set", e);
    }
  }
  LOG.info("versions=" + versions + ", starttime=" + startTime +
      ", endtime=" + endTime + ", keepDeletedCells=" + raw);
  return s;
}
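For reference, here is a hedged sketch of how a caller might drive this method through the job Configuration. The string keys are assumptions based on the constants used above: EXPORT_BATCHING is assumed to resolve to "hbase.export.scanner.batch" and RAW_SCAN to "hbase.mapreduce.include.deleted.rows", as in the stock Export job; verify them against your HBase version.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ExportScanConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key behind EXPORT_BATCHING; batching is skipped when it is -1.
    conf.setInt("hbase.export.scanner.batch", 100);
    // Assumed key behind RAW_SCAN; makes the method call Scan.setRaw(true).
    conf.setBoolean("hbase.mapreduce.include.deleted.rows", true);
    // Argument layout expected above: [table, outputDir, versions, startTime, endTime, ...]
    String[] exportArgs = { "myTable", "/export/out", "3", "0", String.valueOf(Long.MAX_VALUE) };
    // getConfiguredScanForJob(conf, exportArgs) would then build the Scan shown above
    // (it is private, so this line is illustrative only).
  }
}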
Example 4: setBatch
import org.apache.hadoop.hbase.filter.IncompatibleFilterException; // import the required package/class
/**
 * Set the maximum number of values to return for each call to next()
 * @param batch the maximum number of values
 */
public void setBatch(int batch) {
  if (this.hasFilter() && this.filter.hasFilterRow()) {
    throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" +
        " that returns true for filter.hasFilterRow");
  }
  this.batch = batch;
}
Example 5: setBatch
import org.apache.hadoop.hbase.filter.IncompatibleFilterException; // import the required package/class
/**
 * Set the maximum number of cells to return for each call to next(). Callers should be aware
 * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}.
 * If you don't allow partial results, the number of cells in each Result must equal your
 * batch setting unless it is the last Result for the current row, which makes this method
 * useful for paging queries. If you just want to prevent OOM on the client, calling
 * setAllowPartialResults(true) is the better choice.
 * @param batch the maximum number of values
 * @see Result#mayHaveMoreCellsInRow()
 */
public Scan setBatch(int batch) {
  if (this.hasFilter() && this.filter.hasFilterRow()) {
    throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" +
        " that returns true for filter.hasFilterRow");
  }
  this.batch = batch;
  return this;
}
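The distinction the javadoc draws can be made concrete. Below is a hedged sketch (the table name is illustrative and a running cluster is assumed) of the paging behavior: with setBatch(2), a row holding five cells comes back split across Results of at most two cells each, and Result#mayHaveMoreCellsInRow() reports whether the row continues.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class BatchPagingSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("wideTable"))) {
      Scan scan = new Scan();
      scan.setBatch(2); // at most 2 cells per Result; a 5-cell row yields 3 Results
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(r.rawCells().length + " cells, more in row: "
              + r.mayHaveMoreCellsInRow());
        }
      }
    }
  }
}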
Example 6: setBatch
import org.apache.hadoop.hbase.filter.IncompatibleFilterException; // import the required package/class
/**
 * Set the maximum number of values to return for each call to next()
 *
 * @param batch
 *          the maximum number of values
 */
public void setBatch(int batch) {
  if (this.hasFilter() && this.filter.hasFilterRow()) {
    throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter"
            + " that returns true for filter.hasFilterRow");
  }
  this.batch = batch;
}
Example 7: getConfiguredScanForJob
import org.apache.hadoop.hbase.filter.IncompatibleFilterException; // import the required package/class
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Optional arguments.
  // Set Scan versions
  int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1;
  s.setMaxVersions(versions);
  // Set Scan time range
  long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L;
  long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE;
  s.setTimeRange(startTime, endTime);
  // Set cache blocks
  s.setCacheBlocks(false);
  // Set start and stop row
  if (conf.get(TableInputFormat.SCAN_ROW_START) != null) {
    s.setStartRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_START)));
  }
  if (conf.get(TableInputFormat.SCAN_ROW_STOP) != null) {
    s.setStopRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_STOP)));
  }
  // Set raw scan (keep deleted cells)
  boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
  if (raw) {
    s.setRaw(raw);
  }
  // Set Scan column family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or PrefixFilter if applicable.
  Filter exportFilter = getExportFilter(args);
  if (exportFilter != null) {
    LOG.info("Setting Scan Filter for Export.");
    s.setFilter(exportFilter);
  }
  int batching = conf.getInt(EXPORT_BATCHING, -1);
  if (batching != -1) {
    try {
      s.setBatch(batching);
    } catch (IncompatibleFilterException e) {
      LOG.error("Batching could not be set", e);
    }
  }
  LOG.info("versions=" + versions + ", starttime=" + startTime +
      ", endtime=" + endTime + ", keepDeletedCells=" + raw);
  return s;
}
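This variant additionally reads the scan bounds from TableInputFormat's public SCAN_ROW_START and SCAN_ROW_STOP configuration constants, so a caller can bound the export without touching the Scan directly. A minimal sketch, with illustrative row keys:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

public class ExportRowRangeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Picked up by the start/stop-row block above; the keys are real
    // TableInputFormat constants, the row key values are illustrative.
    conf.set(TableInputFormat.SCAN_ROW_START, "row-0001");
    conf.set(TableInputFormat.SCAN_ROW_STOP, "row-0999");
  }
}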