

Java SliceRange.setStart Method Code Examples

This article collects typical usage examples of the SliceRange.setStart method from the Java class org.apache.cassandra.thrift.SliceRange. If you are wondering what SliceRange.setStart does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.cassandra.thrift.SliceRange.


A total of 15 code examples of the SliceRange.setStart method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
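Before the individual examples, here is a minimal, self-contained sketch of the pattern that recurs throughout them: an empty byte array passed to both setStart and setFinish selects a row's entire column range, and setCount caps how many columns are returned. The host, port, keyspace, column family, and row key below are illustrative placeholders, not values taken from any particular example.

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class SliceRangeSetStartDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder connection settings -- adjust for your cluster.
        TTransport transport = new TFramedTransport(new TSocket("localhost", 9160));
        Cassandra.Client client = new Cassandra.Client(new TBinaryProtocol(transport));
        transport.open();
        client.set_keyspace("MyKeyspace");

        // Empty start/finish means "from the first column to the last";
        // count bounds how many columns one call returns.
        SliceRange range = new SliceRange();
        range.setStart(new byte[0]);
        range.setFinish(new byte[0]);
        range.setCount(100);

        SlicePredicate predicate = new SlicePredicate();
        predicate.setSlice_range(range);

        // Fetch every column (up to count) of one row.
        List<ColumnOrSuperColumn> row = client.get_slice(
                ByteBuffer.wrap("someRowKey".getBytes("UTF-8")),
                new ColumnParent("MyColumnFamily"),
                predicate,
                ConsistencyLevel.ONE);
        System.out.println("Columns returned: " + row.size());

        transport.close();
    }
}

To read a bounded range instead of the whole row, pass the first and last column names (as bytes) to setStart and setFinish, as Example 4 does with a query's slice boundaries.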

Example 1: getSlicePredicate

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
private SlicePredicate getSlicePredicate(String[] columnNameList) {
    SlicePredicate slicePredicate = new SlicePredicate();
    try {
        if (columnNameList != null) {
            List<ByteBuffer> columnNameByteBufferList = new ArrayList<ByteBuffer>();
            for (String columnName: columnNameList) {
                byte[] columnNameBytes = columnName.getBytes("UTF-8");
                columnNameByteBufferList.add(ByteBuffer.wrap(columnNameBytes));
            }
            slicePredicate.setColumn_names(columnNameByteBufferList);
        } else {
            SliceRange sliceRange = new SliceRange();
            sliceRange.setStart(new byte[0]);
            sliceRange.setFinish(new byte[0]);
            // FIXME: The default column count is 100. We should tune the value.
            sliceRange.setCount(100000);
            
            slicePredicate.setSlice_range(sliceRange);
        }
    }
    catch (UnsupportedEncodingException exc) {
        throw new StorageException("Character encoding exception with key range", exc);
    }
    return slicePredicate;
}
 
Developer: opendaylight, Project: archived-net-virt-platform, Lines: 26, Source: Connection.java

Example 2: CasTimeReader

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
public CasTimeReader() {
	try {
		TTransport tr = new TFramedTransport(new TSocket("10.15.61.111",
				9160));
		TProtocol proto = new TBinaryProtocol(tr);
		client = new Cassandra.Client(proto);
		tr.open();

		client.set_keyspace("CadalSecTest");

		predicate = new SlicePredicate();
		SliceRange range = new SliceRange();
		range.setStart(new byte[0]);
		range.setFinish(new byte[0]);
		range.setCount(10000);
		predicate.setSlice_range(range);

		columnParent = new ColumnParent();
		columnParent.setColumn_family("RecordMinute");
	} catch (Exception e) {
		System.out.println(e);
	}
}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 24, Source: CasTimeReader.java

Example 3: CasTimeBook

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
public CasTimeBook() {
	try {
		TTransport tr = new TFramedTransport(new TSocket("10.15.61.111",
				9160));
		TProtocol proto = new TBinaryProtocol(tr);
		client = new Cassandra.Client(proto);
		tr.open();

		client.set_keyspace("CadalSecTest");

		predicate = new SlicePredicate();
		SliceRange range = new SliceRange();
		range.setStart(new byte[0]);
		range.setFinish(new byte[0]);
		range.setCount(10000);
		predicate.setSlice_range(range);

		columnParent = new ColumnParent();
		columnParent.setColumn_family("RecordMinute");
	} catch (Exception e) {
		System.out.println(e);
	}
}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 24, Source: CasTimeBook.java

Example 4: getSliceRange

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
private SliceRange getSliceRange(final SliceQuery slice, final int limit) {
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(slice.getSliceStart().asByteBuffer());
    sliceRange.setFinish(slice.getSliceEnd().asByteBuffer());
    sliceRange.setCount(Math.min(limit, slice.getLimit()));
    return sliceRange;
}
 
Developer: graben1437, Project: titan1withtp3.1, Lines: 8, Source: CassandraBinaryInputFormat.java

Example 5: get2

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
/**
 * get: read all columns
 *
 * @throws Exception
 */
@Test
public void get2() throws Exception {
	String KEYSPACE = "mock";
	client.set_keyspace(KEYSPACE);

	// read all columns
	String COLUMN_FAMILY = "student";
	ColumnParent columnParent = new ColumnParent(COLUMN_FAMILY);

	// predicate
	SlicePredicate predicate = new SlicePredicate();

	// range
	SliceRange sliceRange = new SliceRange();
	// sliceRange.setStart(ByteBufferHelper.toByteBuffer(new byte[0]));// start
	sliceRange.setStart(new byte[0]);// start
	sliceRange.setFinish(new byte[0]);// finish
	sliceRange.setCount(100);// number of columns to return
	//
	predicate.setSlice_range(sliceRange);

	String ROW_KEY = "Jack";
	// result
	// key, column_parent, predicate, consistency_level
	List<ColumnOrSuperColumn> results = client.get_slice(
			ByteBufferHelper.toByteBuffer(ROW_KEY), columnParent,
			predicate, ConsistencyLevel.ONE);

	for (ColumnOrSuperColumn cos : results) {
		Column column = cos.getColumn();
		System.out.println(ROW_KEY + ", "
				+ ByteHelper.toString(column.getName()) + ": "
				+ ByteHelper.toString(column.getValue()) + ", "
				+ column.getTimestamp());
		// Jack, art, 87, 1380788003220
		// Jack, grad, 5, 1380788003203
		// Jack, math, 97, 1380788003214
	}
}
 
Developer: mixaceh, Project: openyu-commons, Lines: 45, Source: CassandraThriftDMLTest.java

Example 6: main

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
public static void main(String[] args) throws UnsupportedEncodingException,
			InvalidRequestException, UnavailableException, TimedOutException,
			TException, NotFoundException {

		TTransport tr = new TSocket(HOST, PORT);
		//new default in 0.7 is framed transport
		TFramedTransport tf = new TFramedTransport(tr);
		TProtocol proto = new TBinaryProtocol(tf);
		Cassandra.Client client = new Cassandra.Client(proto);
		tf.open();
		client.set_keyspace("Keyspace1");

		String cfName = "Standard1";
		ByteBuffer userIDKey = ByteBuffer.wrap("1".getBytes()); //this is a row key

//		Clock clock = new Clock(System.currentTimeMillis());
		
		ColumnParent cp = new ColumnParent(cfName);

		//insert the name column
		log.debug("Inserting row for key {}" , userIDKey.toString());
		Column nameCol = new Column(ByteBuffer.wrap("name".getBytes(UTF8)));
		nameCol.setValue(ByteBuffer.wrap("George Clinton".getBytes()));
		client.insert(userIDKey, cp, nameCol, CL);

		//insert the Age column
		Column ageCol = new Column(ByteBuffer.wrap("name".getBytes(UTF8)));
		ageCol.setValue(ByteBuffer.wrap("69".getBytes()));
		client.insert(userIDKey, cp, ageCol, CL);
				
		log.debug("Row insert done.");

		// read just the Name column
		log.debug("Reading Name Column:");
		
		//create a representation of the Name column
		ColumnPath colPathName = new ColumnPath(cfName);
		colPathName.setColumn("name".getBytes(UTF8));
		Column col = client.get(userIDKey, colPathName,
				CL).getColumn();

		/*LOG.debug("Column name: " + new String(col.name, UTF8));
		LOG.debug("Column value: " + new String(col.value, UTF8));
		LOG.debug("Column timestamp: " + col.clock.timestamp);*/

		//create a slice predicate representing the columns to read
		//start and finish are the range of columns--here, all
		SlicePredicate predicate = new SlicePredicate();
		SliceRange sliceRange = new SliceRange();
		sliceRange.setStart(new byte[0]);
		sliceRange.setFinish(new byte[0]);
		predicate.setSlice_range(sliceRange);

		log.debug("Complete Row:");
		// read all columns in the row
		ColumnParent parent = new ColumnParent(cfName);
		List<ColumnOrSuperColumn> results = 
			client.get_slice(userIDKey, 
					parent, predicate, CL);
		
		//loop over columns, outputting values
		for (ColumnOrSuperColumn result : results) {
			Column column = result.column;
			log.info("Column: {}, Value: {}", new String(column.getName(), UTF8), new String(column.getValue(), UTF8));
		}
		tf.close();
		
		log.debug("All done.");
	}
 
Developer: lhfei, Project: hadoop-in-action, Lines: 70, Source: SimpleWriteRead.java

Example 7: findPOIByHotel

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
public List<POI> findPOIByHotel(String hotel) throws Exception {

		// /query
		SlicePredicate predicate = new SlicePredicate();
		SliceRange sliceRange = new SliceRange();
		sliceRange.setStart(hotel.getBytes());
		sliceRange.setFinish(hotel.getBytes());
		predicate.setSlice_range(sliceRange);

		// read all columns in the row
		String scFamily = "PointOfInterest";
		ColumnParent parent = new ColumnParent(scFamily);

		KeyRange keyRange = new KeyRange();
		keyRange.start_key = bytes("");
		keyRange.end_key = bytes("");

		List<POI> pois = new ArrayList<POI>();

		// instead of a simple list, we get a map whose keys are row keys
		// and the values the list of columns returned for each
		// only row key + first column are indexed
		Connector cl = new Connector();
		Cassandra.Client client = cl.connect();
		List<KeySlice> slices = client.get_range_slices(parent, predicate, keyRange, CL);

		for (KeySlice slice : slices) {
			List<ColumnOrSuperColumn> cols = slice.columns;

			POI poi = new POI();
			poi.name = new String(ByteBufferUtil.string(slice.key));

			for (ColumnOrSuperColumn cosc : cols) {
				SuperColumn sc = cosc.super_column;

				List<Column> colsInSc = sc.columns;

				for (Column c : colsInSc) {
					String colName = new String(c.name.array(), UTF8);
					if (colName.equals("desc")) {
						poi.desc = new String(c.value.array(), UTF8);
					}
					if (colName.equals("phone")) {
						poi.phone = new String(c.value.array(), UTF8);
					}
				}

				LOG.debug("Found something neat nearby: " + poi.name + ". \nDesc: " + poi.desc + ". \nPhone: "
						+ poi.phone);
				pois.add(poi);
			}
		}

		cl.close();
		return pois;
	}
 
Developer: lhfei, Project: hadoop-in-action, Lines: 57, Source: HotelApp.java

Example 8: findHotelByCity

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
public List<Hotel> findHotelByCity(String city, String state) throws Exception {

		LOG.debug("Seaching for hotels in " + city + ", " + state);

		String key = city + ":" + state.toUpperCase();

		// /query
		SlicePredicate predicate = new SlicePredicate();
		SliceRange sliceRange = new SliceRange();
		sliceRange.setStart(new byte[0]);
		sliceRange.setFinish(new byte[0]);
		predicate.setSlice_range(sliceRange);

		// read all columns in the row
		String columnFamily = "HotelByCity";
		ColumnParent parent = new ColumnParent(columnFamily);

		KeyRange keyRange = new KeyRange();
		keyRange.setStart_key(key.getBytes());
		keyRange.setEnd_key("".getBytes()); // just outside lexical range
		keyRange.count = 5;

		Connector cl = new Connector();
		Cassandra.Client client = cl.connect();
		List<KeySlice> keySlices = client.get_range_slices(parent, predicate, keyRange, CL);

		List<Hotel> results = new ArrayList<Hotel>();

		for (KeySlice ks : keySlices) {
			List<ColumnOrSuperColumn> coscs = ks.columns;
			LOG.debug(new String("Using key " + ks.key));

			for (ColumnOrSuperColumn cs : coscs) {

				Hotel hotel = new Hotel();
				hotel.name = ByteBufferUtil.string(cs.column.name);
				hotel.city = city;
				hotel.state = state;

				results.add(hotel);
				LOG.debug("Found hotel result for " + hotel.name);
			}
		}
		// /end query
		cl.close();

		return results;
	}
 
Developer: lhfei, Project: hadoop-in-action, Lines: 49, Source: HotelApp.java

Example 9: getSplits

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    String ks = jobConf.get(AbstractCassandraSerDe.CASSANDRA_KEYSPACE_NAME);
    String cf = jobConf.get(AbstractCassandraSerDe.CASSANDRA_CF_NAME);
    int slicePredicateSize = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_SLICE_PREDICATE_SIZE,
            AbstractCassandraSerDe.DEFAULT_SLICE_PREDICATE_SIZE);
    int sliceRangeSize = jobConf.getInt(
            AbstractCassandraSerDe.CASSANDRA_RANGE_BATCH_SIZE,
            AbstractCassandraSerDe.DEFAULT_RANGE_BATCH_SIZE);
    int splitSize = jobConf.getInt(
            AbstractCassandraSerDe.CASSANDRA_SPLIT_SIZE,
            AbstractCassandraSerDe.DEFAULT_SPLIT_SIZE);
    String cassandraColumnMapping = jobConf.get(AbstractCassandraSerDe.CASSANDRA_COL_MAPPING);
    int rpcPort = jobConf.getInt(AbstractCassandraSerDe.CASSANDRA_PORT, 9160);
    String host = jobConf.get(AbstractCassandraSerDe.CASSANDRA_HOST);
    String partitioner = jobConf.get(AbstractCassandraSerDe.CASSANDRA_PARTITIONER);

    if (cassandraColumnMapping == null) {
        throw new IOException("cassandra.columns.mapping required for Cassandra Table.");
    }

    SliceRange range = new SliceRange();
    range.setStart(new byte[0]);
    range.setFinish(new byte[0]);
    range.setReversed(false);
    range.setCount(slicePredicateSize);
    SlicePredicate predicate = new SlicePredicate();
    predicate.setSlice_range(range);

    ConfigHelper.setInputRpcPort(jobConf, "" + rpcPort);
    ConfigHelper.setInputInitialAddress(jobConf, host);
    ConfigHelper.setInputPartitioner(jobConf, partitioner);
    ConfigHelper.setInputSlicePredicate(jobConf, predicate);
    ConfigHelper.setInputColumnFamily(jobConf, ks, cf);
    ConfigHelper.setRangeBatchSize(jobConf, sliceRangeSize);
    ConfigHelper.setInputSplitSize(jobConf, splitSize);

    Job job = new Job(jobConf);
    JobContext jobContext = new JobContextImpl(job.getConfiguration(), job.getJobID());

    Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = getSplits(jobContext);
    InputSplit[] results = new InputSplit[splits.size()];

    for (int i = 0; i < splits.size(); ++i) {
        HiveCassandraStandardSplit csplit = new HiveCassandraStandardSplit(
                (ColumnFamilySplit) splits.get(i), cassandraColumnMapping, tablePaths[0]);
        csplit.setKeyspace(ks);
        csplit.setColumnFamily(cf);
        csplit.setRangeBatchSize(sliceRangeSize);
        csplit.setSplitSize(splitSize);
        csplit.setHost(host);
        csplit.setPort(rpcPort);
        csplit.setSlicePredicateSize(slicePredicateSize);
        csplit.setPartitioner(partitioner);
        csplit.setColumnMapping(cassandraColumnMapping);
        results[i] = csplit;
    }
    return results;
}
 
Developer: 2013Commons, Project: hive-cassandra, Lines: 61, Source: HiveCqlInputFormat.java

Example 10: getSplits

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
  String ks = jobConf.get(AbstractColumnSerDe.CASSANDRA_KEYSPACE_NAME);
  String cf = jobConf.get(AbstractColumnSerDe.CASSANDRA_CF_NAME);
  int slicePredicateSize = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_SLICE_PREDICATE_SIZE,
      AbstractColumnSerDe.DEFAULT_SLICE_PREDICATE_SIZE);
  int sliceRangeSize = jobConf.getInt(
      AbstractColumnSerDe.CASSANDRA_RANGE_BATCH_SIZE,
      AbstractColumnSerDe.DEFAULT_RANGE_BATCH_SIZE);
  int splitSize = jobConf.getInt(
      AbstractColumnSerDe.CASSANDRA_SPLIT_SIZE,
      AbstractColumnSerDe.DEFAULT_SPLIT_SIZE);
  String cassandraColumnMapping = jobConf.get(AbstractColumnSerDe.CASSANDRA_COL_MAPPING);
  int rpcPort = jobConf.getInt(AbstractColumnSerDe.CASSANDRA_PORT, 9160);
  String host = jobConf.get(AbstractColumnSerDe.CASSANDRA_HOST);
  String partitioner = jobConf.get(AbstractColumnSerDe.CASSANDRA_PARTITIONER);

  if (cassandraColumnMapping == null) {
    throw new IOException("cassandra.columns.mapping required for Cassandra Table.");
  }

  SliceRange range = new SliceRange();
  range.setStart(new byte[0]);
  range.setFinish(new byte[0]);
  range.setReversed(false);
  range.setCount(slicePredicateSize);
  SlicePredicate predicate = new SlicePredicate();
  predicate.setSlice_range(range);

  ConfigHelper.setInputRpcPort(jobConf, "" + rpcPort);
  ConfigHelper.setInputInitialAddress(jobConf, host);
  ConfigHelper.setInputPartitioner(jobConf, partitioner);
  ConfigHelper.setInputSlicePredicate(jobConf, predicate);
  ConfigHelper.setInputColumnFamily(jobConf, ks, cf);
  ConfigHelper.setRangeBatchSize(jobConf, sliceRangeSize);
  ConfigHelper.setInputSplitSize(jobConf, splitSize);

  Job job = new Job(jobConf);
  JobContext jobContext = new JobContext(job.getConfiguration(), job.getJobID());

  Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
  List<org.apache.hadoop.mapreduce.InputSplit> splits = getSplits(jobContext);
  InputSplit[] results = new InputSplit[splits.size()];

  for (int i = 0; i < splits.size(); ++i) {
    HiveCassandraStandardSplit csplit = new HiveCassandraStandardSplit(
        (ColumnFamilySplit) splits.get(i), cassandraColumnMapping, tablePaths[0]);
    csplit.setKeyspace(ks);
    csplit.setColumnFamily(cf);
    csplit.setRangeBatchSize(sliceRangeSize);
    csplit.setSplitSize(splitSize);
    csplit.setHost(host);
    csplit.setPort(rpcPort);
    csplit.setSlicePredicateSize(slicePredicateSize);
    csplit.setPartitioner(partitioner);
    csplit.setColumnMapping(cassandraColumnMapping);
    results[i] = csplit;
  }
  return results;
}
 
Developer: dvasilen, Project: Hive-Cassandra, Lines: 61, Source: HiveCassandraStandardColumnInputFormat.java

Example 11: QueryFromBCPRelation

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
/**
 * To query from 'BCPRelation'
 * @param bookChapterList
 */
public void QueryFromBCPRelation (List<List<String>> bookChapterList){		
	SlicePredicate predicate = new SlicePredicate();
	SliceRange range = new SliceRange();
	
	range.setStart(new byte[0]);
	range.setFinish(new byte[0]); 
	
	predicate.setSlice_range(range);
	
	ColumnParent parent = new ColumnParent();
	parent.column_family = "BCPRelation";

	try{
		for (int i = 0; i < bookChapterList.size(); ++i) {
			List<String> innerResult = new ArrayList<String>();
			
			List<String> innerList = bookChapterList.get(i);
			
			// Deal with a single book info list
			String bookId = innerList.get(0);
			List<String> chapterList = innerList.subList(1, innerList.size());
			
			innerResult.add(bookId);
			
			// Query from cassandra
			List<ColumnOrSuperColumn> results = client.get_slice(this.cassandraUtil.toByteBuffer(bookId), parent, predicate,ConsistencyLevel.ONE);
			
			// Iterator SuperColumn List
			for (ColumnOrSuperColumn result : results) {
				SuperColumn superColumn2 = result.super_column;
				List<Column> columns2 = superColumn2.getColumns();

				// Get detail information of a single chapter
				String chapterLevel = "";
				String chapterTitle = "";
				
				for (Column column : columns2) {
					String columnName = new String(column.getName(), "UTF-8");
					
					if (columnName.equalsIgnoreCase("ChapterLevel")) {
						chapterLevel = new String(column.getValue(), "UTF-8");
					} else if (columnName.equalsIgnoreCase("ChapterLabel")) {
						chapterTitle = new String(column.getValue(), "UTF-8");
					}
				}
				
				// Iterator chapterList insert into map
				for(int j = 0; j < chapterList.size(); ++j){
					if(chapterLevel.equals(chapterList.get(j))) {
						innerResult.add(chapterTitle);
					}
				}
			}
			
			this.finalResult.add(innerResult);
		}

	}catch(Exception e) {
		LOG.warn("Error when dealing bookChapterList");
		e.printStackTrace();
	}
}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 67, Source: QueryBCPRelation.java

Example 12: GetChapterLevel

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
/**
 * Processes many pages of a book to find the chapter_level info for each page.
 * @param bookNo like: 07018720
 * @param pageNoList just like: <<23>, <45>, <2>>
 * @return List of book_page: <<07018720_1.1.0.0.0>, <07018720_2.0.0.0.0>, <07018720_1.2.0.0.0>>
 */
@SuppressWarnings("unchecked")
public List<String> GetChapterLevel(String bookNo, List<Integer> pageNoList){
	
	SlicePredicate predicate = new SlicePredicate();
	SliceRange range = new SliceRange();
	
	range.setStart(new byte[0]);
	range.setFinish(new byte[0]); 
	
	predicate.setSlice_range(range);

	ColumnParent parent = new ColumnParent();
	parent.column_family = "BCPRelation";

	Map<String, String> tmpMap = new HashMap<String, String>();
	List<String> returnStrList = new ArrayList<String>();
		
	try {
		List<ColumnOrSuperColumn> results = client.get_slice(this.cassandraUtil.toByteBuffer(bookNo), parent, predicate,ConsistencyLevel.ONE);

		// Iterator SuperColumn List
		for (ColumnOrSuperColumn result : results) {
			SuperColumn superColumn2 = result.super_column;
			List<Column> columns2 = superColumn2.getColumns();

			// Get detail information of a single chapter
			int startPage = 0;
			int endPage = 0;
			String chapterLevel = "";
			
			for (Column column : columns2) {
				String columnName = new String(column.getName(), "UTF-8");
				
				if (columnName.equalsIgnoreCase("ChapterLevel")) {
					chapterLevel = new String(column.getValue(), "UTF-8");
				} else if (columnName.equalsIgnoreCase("StartPage")) {
					startPage = Integer.valueOf(new String(column.getValue(), "UTF-8")).intValue();
				} else if (columnName.equalsIgnoreCase("EndPage")) {
					endPage = Integer.valueOf(new String(column.getValue(), "UTF-8")).intValue();
				} 
			}
			
			// Iterator PageNoList insert into map
			for(int j = 0; j < pageNoList.size(); ++j){
				if(MatchRange(pageNoList.get(j), startPage, endPage)) {
					tmpMap.put(bookNo + "_" + chapterLevel, "");
				}
			}

		}
		
		// Convert Map to List
		Iterator iter = tmpMap.entrySet().iterator(); 
		while (iter.hasNext()) { 
		    Map.Entry entry = (Map.Entry)iter.next(); 
		    Object key = entry.getKey(); 
		    
		    returnStrList.add(key.toString());			    
		}
		
		return returnStrList;
		
	} catch (Exception e) {
		 e.printStackTrace();
	}
	
	return returnStrList;
}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 75, Source: ChapterLevel.java

Example 13: GetAllInfoOfUser

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
/**
 * Gets all book-page information for the given username
 * @param username e.g. 'zju' or 'Yanfei'
 */
public Map<String, List<String>> GetAllInfoOfUser(String username) {
	
	Map<String, List<String>> infoMap = new HashMap<String, List<String>>();
	
	SlicePredicate predicate = new SlicePredicate();
	SliceRange range = new SliceRange();
	
	range.setStart(new byte[0]);
	range.setFinish(new byte[0]); 
	range.setCount(200000);
	
	predicate.setSlice_range(range);

	ColumnParent parent = new ColumnParent();
	parent.column_family = "UserBookPage";

	String bookid;
	List<String> listPage = null;
	
	try {
		List<ColumnOrSuperColumn> results = client.get_slice(this.cassandraUtil.toByteBuffer(username), parent, predicate, ConsistencyLevel.ONE);
		
		for (ColumnOrSuperColumn result : results) {
			listPage = new ArrayList<String>();
			
			SuperColumn superColumn2 = result.super_column;
			
			bookid = "";
			bookid = new String(superColumn2.getName(), "UTF-8");           // bookid
			
			List<Column> columns2 = superColumn2.getColumns();
			
			for (Column column : columns2) {
				String columnName = new String(column.getName(), "UTF-8");
				
				if(columnName.equalsIgnoreCase("times")){
					continue;
				}else{
					listPage.add(new String(column.getName(), "UTF-8"));     // page-number
				}
			}
			
			infoMap.put(bookid, listPage);
		}
		
		return infoMap;
	} catch (Exception e) {
		 e.printStackTrace();
		 return infoMap;
	}

}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 57, Source: UserBookPageInfo.java

Example 14: QueryFromBCPRelation

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
/**
 * To query from 'BCPRelation'
 * @param bookChapterList
 * @return
 */
@SuppressWarnings("unchecked")
public List<String> QueryFromBCPRelation (List<String> bookChapterList){
	List<String> chapterList = new ArrayList<String>();
	
	SlicePredicate predicate = new SlicePredicate();
	SliceRange range = new SliceRange();
	
	range.setStart(new byte[0]);
	range.setFinish(new byte[0]); 
	
	predicate.setSlice_range(range);
	
	ColumnParent parent = new ColumnParent();
	parent.column_family = "BCPRelation";

	try{
		Map<String, List<String>> mapBookChapter = this.Preprocess(bookChapterList);

		for (Iterator iter = mapBookChapter.keySet().iterator(); iter.hasNext();) {  
		    Object key = iter.next();
		    Object val = mapBookChapter.get(key);
		    
		    List<ColumnOrSuperColumn> results = client.get_slice(this.cassandraUtil.toByteBuffer(key.toString()), parent, predicate,ConsistencyLevel.ONE);
		    
			// Iterator SuperColumn List
			for (ColumnOrSuperColumn result : results) {
				SuperColumn superColumn2 = result.super_column;
				List<Column> columns2 = superColumn2.getColumns();

				// Get detail information of a single chapter
				String chapterLevel = "";
				String chapterTitle = "";
				
				for (Column column : columns2) {
					String columnName = new String(column.getName(), "UTF-8");
					
					if (columnName.equalsIgnoreCase("ChapterLevel")) {
						chapterLevel = new String(column.getValue(), "UTF-8");
					} else if (columnName.equalsIgnoreCase("ChapterLabel")) {
						chapterTitle = new String(column.getValue(), "UTF-8");
					}
				}
				
				// Iterator PageNoList insert into map
				List<String> listStr = (List<String>) val;
				for(int j = 0; j < listStr.size(); ++j){
					if(chapterLevel.equals(listStr.get(j))) {
						chapterList.add(chapterTitle);
					}
				}
			}
		}  
		
		return chapterList;
	}catch(Exception e) {
		LOG.warn("Error when dealing bookChapterList");
		e.printStackTrace();
	}
	
	return chapterList;
}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 67, Source: QueryBCPRelation.java

Example 15: getChapterInfo

import org.apache.cassandra.thrift.SliceRange; // import the package/class the method depends on
public void getChapterInfo() {
			SlicePredicate predicate = new SlicePredicate();
			SliceRange range = new SliceRange();
			
			range.setStart(new byte[0]);
			range.setFinish(new byte[0]); 
			
			predicate.setSlice_range(range);

			ColumnParent parent = new ColumnParent();
			parent.column_family = "BCPRelation";

			try {
				List<ColumnOrSuperColumn> results = client.get_slice(toByteBuffer(bookNo), parent, predicate,ConsistencyLevel.ONE);

				for (ColumnOrSuperColumn result : results) {
					SuperColumn superColumn2 = result.super_column;
					
					System.out.println(new String(superColumn2.getName(), "UTF-8"));
					
					List<Column> columns2 = superColumn2.getColumns();

					String chapterName = null;
					int startPage = 0;
					int endPage = 0;
					String chapterLevel = "";
					
					for (Column column : columns2) {
						String columnName = new String(column.getName(), "UTF-8");
												
						if (columnName.equalsIgnoreCase("ChapterLevel")) {
							chapterLevel = new String(column.getValue(), "UTF-8");
						} else if (columnName.equalsIgnoreCase("StartPage")) {
							startPage = Integer.parseInt(new String(column.getValue(), "UTF-8"));
						} else if (columnName.equalsIgnoreCase("EndPage")) {
							endPage = Integer.parseInt(new String(column.getValue(), "UTF-8"));
						} else if (columnName.equalsIgnoreCase("ChapterLabel")) {
							chapterName = new String(column.getValue(), "UTF-8");
						}
					}
					
					if (MatchRange(pageNo, startPage, endPage)) { 
						   System.out.println("BookNo:" + bookNo + "   PageNo:" + pageNo + "  in[" + chapterName + "]:" 
								   + " StartPage:" + startPage + "  EndPage:" + endPage);
						   System.out.println(chapterLevel);
					}
				}
			} catch (Exception e) {
				 e.printStackTrace();
			}

//			tr.close();
		}
 
Developer: YinYanfei, Project: CadalWorkspace, Lines: 54, Source: PageChapterMatch.java


Note: The org.apache.cassandra.thrift.SliceRange.setStart method examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.