This article collects typical usage examples of the Java method org.apache.cassandra.thrift.SliceRange.setCount. If you are wondering what SliceRange.setCount does, how to call it, or are looking for working examples, the hand-picked code samples below should help. You can also explore further usage of the enclosing class, org.apache.cassandra.thrift.SliceRange.
The following presents 10 code examples of the SliceRange.setCount method, ordered by popularity by default.
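Before the individual examples, here is a minimal, self-contained sketch that distills the pattern most of them share: build a SliceRange spanning all columns, cap the result size with setCount, wrap it in a SlicePredicate, and pass it to get_slice. The host, port, keyspace, column family, and row key below are placeholders, not values taken from any particular example.

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class SliceRangeSetCountSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection settings; adjust to your own cluster.
        TTransport transport = new TFramedTransport(new TSocket("localhost", 9160));
        TProtocol proto = new TBinaryProtocol(transport);
        Cassandra.Client client = new Cassandra.Client(proto);
        transport.open();
        client.set_keyspace("my_keyspace"); // hypothetical keyspace

        // Empty start/finish means "all columns"; setCount caps how many come back.
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(100); // at most 100 columns for this row

        SlicePredicate predicate = new SlicePredicate();
        predicate.setSlice_range(range);

        ColumnParent parent = new ColumnParent("my_cf"); // hypothetical column family
        List<ColumnOrSuperColumn> columns = client.get_slice(
                ByteBuffer.wrap("row-key".getBytes("UTF-8")), parent, predicate, ConsistencyLevel.ONE);
        System.out.println("Fetched " + columns.size() + " columns");
        transport.close();
    }
}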
Example 1: getSlicePredicate
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
private SlicePredicate getSlicePredicate(String[] columnNameList) {
    SlicePredicate slicePredicate = new SlicePredicate();
    try {
        if (columnNameList != null) {
            List<ByteBuffer> columnNameByteBufferList = new ArrayList<ByteBuffer>();
            for (String columnName : columnNameList) {
                byte[] columnNameBytes = columnName.getBytes("UTF-8");
                columnNameByteBufferList.add(ByteBuffer.wrap(columnNameBytes));
            }
            slicePredicate.setColumn_names(columnNameByteBufferList);
        } else {
            SliceRange sliceRange = new SliceRange();
            sliceRange.setStart(new byte[0]);
            sliceRange.setFinish(new byte[0]);
            // FIXME: The default column count is 100. We should tune the value.
            sliceRange.setCount(100000);
            slicePredicate.setSlice_range(sliceRange);
        }
    } catch (UnsupportedEncodingException exc) {
        throw new StorageException("Character encoding exception with key range", exc);
    }
    return slicePredicate;
}
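A brief note on the FIXME above: in the Thrift definition of SliceRange the count field defaults to 100, so a predicate that never calls setCount silently truncates wide rows to their first 100 columns. The oversized value used here (100000) trades memory for completeness, which is exactly the trade-off the FIXME suggests tuning.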
Example 2: CasTimeReader
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
public CasTimeReader() {
    try {
        TTransport tr = new TFramedTransport(new TSocket("10.15.61.111", 9160));
        TProtocol proto = new TBinaryProtocol(tr);
        client = new Cassandra.Client(proto);
        tr.open();
        client.set_keyspace("CadalSecTest");

        predicate = new SlicePredicate();
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(10000);
        predicate.setSlice_range(range);

        columnParent = new ColumnParent();
        columnParent.setColumn_family("RecordMinute");
    } catch (Exception e) {
        System.out.println(e);
    }
}
Example 3: CasTimeBook
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
public CasTimeBook() {
    try {
        TTransport tr = new TFramedTransport(new TSocket("10.15.61.111", 9160));
        TProtocol proto = new TBinaryProtocol(tr);
        client = new Cassandra.Client(proto);
        tr.open();
        client.set_keyspace("CadalSecTest");

        predicate = new SlicePredicate();
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(10000);
        predicate.setSlice_range(range);

        columnParent = new ColumnParent();
        columnParent.setColumn_family("RecordMinute");
    } catch (Exception e) {
        System.out.println(e);
    }
}
Example 4: getSliceRange
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
private SliceRange getSliceRange(final SliceQuery slice, final int limit) {
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(slice.getSliceStart().asByteBuffer());
    sliceRange.setFinish(slice.getSliceEnd().asByteBuffer());
    sliceRange.setCount(Math.min(limit, slice.getLimit()));
    return sliceRange;
}
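This helper builds only the SliceRange; before a query can be issued it still has to be wrapped in a SlicePredicate, as the other examples here do. A minimal hypothetical companion method (not part of the original source, assuming the same SliceQuery type and imports as the example above) might look like this:

// Hypothetical wrapper around getSliceRange(); SliceQuery and the imports for
// SlicePredicate/SliceRange are assumed from the example above.
private SlicePredicate getSlicePredicate(final SliceQuery slice, final int limit) {
    final SlicePredicate predicate = new SlicePredicate();
    // setCount has already capped the result at Math.min(limit, slice.getLimit()).
    predicate.setSlice_range(getSliceRange(slice, limit));
    return predicate;
}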
Example 5: get2
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
/**
 * get -- read all columns
 *
 * @throws Exception
 */
@Test
public void get2() throws Exception {
    String KEYSPACE = "mock";
    client.set_keyspace(KEYSPACE);

    // read every column of the row
    String COLUMN_FAMILY = "student";
    ColumnParent columnParent = new ColumnParent(COLUMN_FAMILY);

    // predicate
    SlicePredicate predicate = new SlicePredicate();
    // range
    SliceRange sliceRange = new SliceRange();
    // sliceRange.setStart(ByteBufferHelper.toByteBuffer(new byte[0])); // start
    sliceRange.setStart(new byte[0]); // start
    sliceRange.setFinish(new byte[0]); // finish
    sliceRange.setCount(100); // maximum number of columns to return
    //
    predicate.setSlice_range(sliceRange);

    String ROW_KEY = "Jack";
    // result
    // key, column_parent, predicate, consistency_level
    List<ColumnOrSuperColumn> results = client.get_slice(
            ByteBufferHelper.toByteBuffer(ROW_KEY), columnParent,
            predicate, ConsistencyLevel.ONE);
    for (ColumnOrSuperColumn cos : results) {
        Column column = cos.getColumn();
        System.out.println(ROW_KEY + ", "
                + ByteHelper.toString(column.getName()) + ": "
                + ByteHelper.toString(column.getValue()) + ", "
                + column.getTimestamp());
        // Jack, art, 87, 1380788003220
        // Jack, grad, 5, 1380788003203
        // Jack, math, 97, 1380788003214
    }
}
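Because setCount(100) caps the slice at the first 100 columns, a row wider than that would be silently truncated. The usual Thrift workaround (not shown in the original example) is to page through the row, restarting each query at the last column name seen; since the start column is inclusive, every page after the first overlaps by one column, and that duplicate is skipped. A rough sketch, assuming the same client, columnParent, ByteBufferHelper, and ByteHelper as above:

// Hypothetical paging loop; client, columnParent, ByteBufferHelper and
// ByteHelper are assumed from the example above.
int pageSize = 100;
ByteBuffer start = ByteBuffer.allocate(0); // empty start = beginning of the row
boolean firstPage = true;
while (true) {
    SliceRange pageRange = new SliceRange(start, ByteBuffer.allocate(0), false, pageSize);
    SlicePredicate pagePredicate = new SlicePredicate();
    pagePredicate.setSlice_range(pageRange);
    List<ColumnOrSuperColumn> page = client.get_slice(
            ByteBufferHelper.toByteBuffer("Jack"), columnParent, pagePredicate, ConsistencyLevel.ONE);
    for (int i = 0; i < page.size(); i++) {
        if (!firstPage && i == 0) {
            continue; // the start column repeats the last column of the previous page
        }
        Column column = page.get(i).getColumn();
        System.out.println(ByteHelper.toString(column.getName()));
    }
    if (page.size() < pageSize) {
        break; // a short page means the row is exhausted
    }
    start = ByteBuffer.wrap(page.get(page.size() - 1).getColumn().getName());
    firstPage = false;
}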
Example 6: QueryFromUserChapter
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
/**
 * Query from CF "UserChapter"
 * @param userid the row key of this CF, e.g. '119115'
 */
public List<String> QueryFromUserChapter(int userid) {
    // System.out.println("------------QueryFromUserChapter--------------");
    List<String> listStr = new ArrayList<String>();
    try {
        SlicePredicate predicate = new SlicePredicate();
        SliceRange range = new SliceRange();
        range.start = this.cassandraUtil.toByteBuffer("");
        range.finish = this.cassandraUtil.toByteBuffer("");
        range.setCount(10000000);
        predicate.setSlice_range(range);

        ColumnParent parent = new ColumnParent();
        parent.column_family = "UserChapter"; // CF name

        List<ColumnOrSuperColumn> results = client.get_slice(
                this.cassandraUtil.toByteBuffer(String.valueOf(userid)), parent, predicate,
                ConsistencyLevel.ONE);
        for (ColumnOrSuperColumn result : results) {
            Column column1 = result.column;
            listStr.add(new String(column1.getName(), "UTF-8"));
        }
        return listStr;
    } catch (Exception e) {
        return listStr;
    }
}
Example 7: getSplits
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    String ks = jobConf.get(AbstractCassandraSerDe.CASSANDRA_KEYSPACE_NAME);
    String cf = jobConf.get(AbstractCassandraSerDe.CASSANDRA_CF_NAME);
    int slicePredicateSize = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_SIZE,
            AbstractCassandraSerDe.DEFAULT_SLICE_PREDICATE_SIZE);
    int sliceRangeSize = jobConf.getInt(
            AbstractCassandraSerDe.CASSANDRA_RANGE_BATCH_SIZE,
            AbstractCassandraSerDe.DEFAULT_RANGE_BATCH_SIZE);
    int splitSize = jobConf.getInt(
            AbstractCassandraSerDe.CASSANDRA_SPLIT_SIZE,
            AbstractCassandraSerDe.DEFAULT_SPLIT_SIZE);
    String cassandraColumnMapping = jobConf.get(AbstractCassandraSerDe.CASSANDRA_COL_MAPPING);
    int rpcPort = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_PORT, 9160);
    String host = jobConf.get(AbstractCassandraSerDe.CASSANDRA_HOST);
    String partitioner = jobConf.get(AbstractCassandraSerDe.CASSANDRA_PARTITIONER);

    if (cassandraColumnMapping == null) {
        throw new IOException("cassandra.columns.mapping required for Cassandra Table.");
    }

    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setReversed(false);
    range.setCount(slicePredicateSize);
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(range);

    ConfigHelper.setInputRpcPort(jobConf, "" + rpcPort);
    ConfigHelper.setInputInitialAddress(jobConf, host);
    ConfigHelper.setInputPartitioner(jobConf, partitioner);
    ConfigHelper.setInputSlicePredicate(jobConf, predicate);
    ConfigHelper.setInputColumnFamily(jobConf, ks, cf);
    ConfigHelper.setRangeBatchSize(jobConf, sliceRangeSize);
    ConfigHelper.setInputSplitSize(jobConf, splitSize);

    Job job = new Job(jobConf);
    JobContext jobContext = new JobContextImpl(job.getConfiguration(), job.getJobID());
    Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = getSplits(jobContext);
    InputSplit[] results = new InputSplit[splits.size()];
    for (int i = 0; i < splits.size(); ++i) {
        HiveCassandraStandardSplit csplit = new HiveCassandraStandardSplit(
                (ColumnFamilySplit) splits.get(i), cassandraColumnMapping, tablePaths[0]);
        csplit.setKeyspace(ks);
        csplit.setColumnFamily(cf);
        csplit.setRangeBatchSize(sliceRangeSize);
        csplit.setSplitSize(splitSize);
        csplit.setHost(host);
        csplit.setPort(rpcPort);
        csplit.setSlicePredicateSize(slicePredicateSize);
        csplit.setPartitioner(partitioner);
        csplit.setColumnMapping(cassandraColumnMapping);
        results[i] = csplit;
    }
    return results;
}
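Here setCount is not hard-coded: the cap comes from slicePredicateSize, read from the AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_SIZE job setting with DEFAULT_SLICE_PREDICATE_SIZE as the fallback, so the number of columns fetched per row during the Hadoop scan can be tuned per job without touching code. Example 8 below is the same pattern written against the older AbstractColumnSerDe constants.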
Example 8: getSplits
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    String ks = jobConf.get(AbstractColumnSerDe.CASSANDRA_KEYSPACE_NAME);
    String cf = jobConf.get(AbstractColumnSerDe.CASSANDRA_CF_NAME);
    int slicePredicateSize = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_SIZE,
            AbstractColumnSerDe.DEFAULT_SLICE_PREDICATE_SIZE);
    int sliceRangeSize = jobConf.getInt(
            AbstractColumnSerDe.CASSANDRA_RANGE_BATCH_SIZE,
            AbstractColumnSerDe.DEFAULT_RANGE_BATCH_SIZE);
    int splitSize = jobConf.getInt(
            AbstractColumnSerDe.CASSANDRA_SPLIT_SIZE,
            AbstractColumnSerDe.DEFAULT_SPLIT_SIZE);
    String cassandraColumnMapping = jobConf.get(AbstractColumnSerDe.CASSANDRA_COL_MAPPING);
    int rpcPort = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_PORT, 9160);
    String host = jobConf.get(AbstractColumnSerDe.CASSANDRA_HOST);
    String partitioner = jobConf.get(AbstractColumnSerDe.CASSANDRA_PARTITIONER);

    if (cassandraColumnMapping == null) {
        throw new IOException("cassandra.columns.mapping required for Cassandra Table.");
    }

    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setReversed(false);
    range.setCount(slicePredicateSize);
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(range);

    ConfigHelper.setInputRpcPort(jobConf, "" + rpcPort);
    ConfigHelper.setInputInitialAddress(jobConf, host);
    ConfigHelper.setInputPartitioner(jobConf, partitioner);
    ConfigHelper.setInputSlicePredicate(jobConf, predicate);
    ConfigHelper.setInputColumnFamily(jobConf, ks, cf);
    ConfigHelper.setRangeBatchSize(jobConf, sliceRangeSize);
    ConfigHelper.setInputSplitSize(jobConf, splitSize);

    Job job = new Job(jobConf);
    JobContext jobContext = new JobContext(job.getConfiguration(), job.getJobID());
    Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = getSplits(jobContext);
    InputSplit[] results = new InputSplit[splits.size()];
    for (int i = 0; i < splits.size(); ++i) {
        HiveCassandraStandardSplit csplit = new HiveCassandraStandardSplit(
                (ColumnFamilySplit) splits.get(i), cassandraColumnMapping, tablePaths[0]);
        csplit.setKeyspace(ks);
        csplit.setColumnFamily(cf);
        csplit.setRangeBatchSize(sliceRangeSize);
        csplit.setSplitSize(splitSize);
        csplit.setHost(host);
        csplit.setPort(rpcPort);
        csplit.setSlicePredicateSize(slicePredicateSize);
        csplit.setPartitioner(partitioner);
        csplit.setColumnMapping(cassandraColumnMapping);
        results[i] = csplit;
    }
    return results;
}
Example 9: GetAllInfoOfUser
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
/**
 * Get all book-page information for a given 'username'
 * @param username e.g. 'zju', 'Yanfei'
 */
public Map<String, List<String>> GetAllInfoOfUser(String username) {
    Map<String, List<String>> infoMap = new HashMap<String, List<String>>();

    SlicePredicate predicate = new SlicePredicate();
    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setCount(200000);
    predicate.setSlice_range(range);

    ColumnParent parent = new ColumnParent();
    parent.column_family = "UserBookPage";

    String bookid;
    List<String> listPage = null;
    try {
        List<ColumnOrSuperColumn> results = client.get_slice(
                this.cassandraUtil.toByteBuffer(username), parent, predicate, ConsistencyLevel.ONE);
        for (ColumnOrSuperColumn result : results) {
            listPage = new ArrayList<String>();
            SuperColumn superColumn2 = result.super_column;
            bookid = new String(superColumn2.getName(), "UTF-8"); // bookid
            List<Column> columns2 = superColumn2.getColumns();
            for (Column column : columns2) {
                String columnName = new String(column.getName(), "UTF-8");
                if (columnName.equalsIgnoreCase("times")) {
                    continue;
                } else {
                    listPage.add(new String(column.getName(), "UTF-8")); // page number
                }
            }
            infoMap.put(bookid, listPage);
        }
        return infoMap;
    } catch (Exception e) {
        e.printStackTrace();
        return infoMap;
    }
}
Example 10: getSplits
import org.apache.cassandra.thrift.SliceRange; // import the package/class this method depends on
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    final String ks = jobConf.get(AbstractCassandraSerDe.CASSANDRA_KEYSPACE_NAME);
    final String cf = jobConf.get(AbstractCassandraSerDe.CASSANDRA_CF_NAME);
    final int splitSize = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_SPLIT_SIZE,
            AbstractCassandraSerDe.DEFAULT_SPLIT_SIZE);
    final int rpcPort = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_PORT,
            Integer.parseInt(AbstractCassandraSerDe.DEFAULT_CASSANDRA_PORT));
    final String host = jobConf.get(AbstractCassandraSerDe.CASSANDRA_HOST);
    final String partitionerString = jobConf.get(AbstractCassandraSerDe.CASSANDRA_PARTITIONER);
    final String cassandraColumnMapping = jobConf.get(AbstractCassandraSerDe.CASSANDRA_COL_MAPPING);

    if (cassandraColumnMapping == null) {
        throw new IOException("cassandra.columns.mapping required for Cassandra Table.");
    }

    final Path dummyPath = new Path(ks + "/" + cf);

    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setReversed(false);
    range.setCount(Integer.MAX_VALUE);
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(range);

    ConfigHelper.setInputPartitioner(jobConf, partitionerString);
    ConfigHelper.setInputColumnFamily(jobConf, ks, cf);
    ConfigHelper.setInputSplitSize(jobConf, splitSize);
    ConfigHelper.setInputInitialAddress(jobConf, host);
    ConfigHelper.setInputSlicePredicate(jobConf, predicate);
    ConfigHelper.setInputRpcPort(jobConf, Integer.toString(rpcPort));

    ColumnFamilyInputFormat cfif = new ColumnFamilyInputFormat();
    InputSplit[] cfifSplits = cfif.getSplits(jobConf, numSplits);

    InputSplit[] results = new InputSplit[cfifSplits.length];
    for (int i = 0; i < cfifSplits.length; i++) {
        ColumnFamilySplit cfSplit = (ColumnFamilySplit) cfifSplits[i];
        SSTableSplit split = new SSTableSplit(cassandraColumnMapping, cfSplit.getStartToken(),
                cfSplit.getEndToken(), cfSplit.getLocations(), dummyPath);
        split.setKeyspace(ks);
        split.setColumnFamily(cf);
        split.setEstimatedRows(cfSplit.getLength());
        split.setPartitioner(partitionerString);
        results[i] = split;
        logger.debug("Created split: {}", split);
    }
    return results;
}
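In this last variant, setCount(Integer.MAX_VALUE) effectively removes the column cap, so every row in the scan is read in full. Contrast that with Examples 7 and 8, where the cap is the configurable slicePredicateSize, and with the FIXME in Example 1 about tuning an explicit count.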