

Java Scan.setAttribute Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.hbase.client.Scan.setAttribute method in Java. If you are unsure how Scan.setAttribute is used in practice, the curated examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.client.Scan.


The following presents 14 code examples of the Scan.setAttribute method, ordered by popularity.
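
Before the examples, a note on the method itself: setAttribute(String name, byte[] value), inherited from OperationWithAttributes, attaches an arbitrary named byte[] payload to a Scan. The attribute travels with the scan request and can be read back, on the client or the server side, with getAttribute(String name). A minimal sketch of that round trip; the attribute name "my.custom.attribute" is just an illustrative placeholder:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

Scan scan = new Scan();
// Attach a named byte[] payload to the scan; the name is arbitrary.
scan.setAttribute("my.custom.attribute", Bytes.toBytes("some-value"));
// Read it back (e.g. in a coprocessor observing the scan on the server side).
byte[] value = scan.getAttribute("my.custom.attribute");
System.out.println(Bytes.toString(value)); // prints "some-value"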

Example 1: getNextScanner

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private ResultScanner getNextScanner() throws IOException {
  if (INIT_REGION_SIZE != getRegionNumber()) {
    throw new IOException(
        "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
  }
  if (regionLocationQueue.isEmpty()) return null;
  HRegionLocation regionLocation = regionLocationQueue.poll();

  Scan newScan = new Scan(rawScan);
  byte[] key = regionLocation.getRegionInfo().getStartKey();
  if (key != null && key.length > 0) newScan.setStartRow(key);
  key = regionLocation.getRegionInfo().getEndKey();
  if (key != null && key.length > 0) newScan.setStopRow(key);
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  newScan.setId(rawScan.getId());
  newScan.setCacheBlocks(rawScan.getCacheBlocks());
  newScan.setCaching(rawScan.getCaching());
  return table.getScanner(newScan);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: LocalScanner.java

Example 2: innerAddScanner

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private void innerAddScanner(HRegionLocation regionLocation) throws IOException {
  if (INIT_REGION_SIZE != getRegionNumber()) {
    throw new IOException(
        "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
  }
  Scan newScan = new Scan(rawScan);
  if (regionLocation.getRegionInfo().getStartKey() != null)
    newScan.setStartRow(regionLocation.getRegionInfo().getStartKey());
  if (regionLocation.getRegionInfo().getEndKey() != null)
    newScan.setStopRow(regionLocation.getRegionInfo().getEndKey());
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  newScan.setFilter(rangeList.toFilterList());
  newScan.setAttribute(IndexConstants.MAX_SCAN_SCALE, Bytes.toBytes(1.0f));
  ResultScanner scanner = table.getScanner(newScan);
  synchronized (scannerList) {
    scannerList.add(scanner);
  }
  runningGet.decrementAndGet();
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: IRScannerInParallel.java

Example 3: getNextScanner

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private ResultScanner getNextScanner() throws IOException {
  if (INIT_REGION_SIZE != getRegionNumber()) {
    throw new IOException(
        "region number changed from " + INIT_REGION_SIZE + " to " + getRegionNumber());
  }
  if (regionLocationQueue.isEmpty()) return null;
  HRegionLocation regionLocation = regionLocationQueue.poll();
  Scan newScan = new Scan(rawScan);
  if (regionLocation.getRegionInfo().getStartKey() != null)
    newScan.setStartRow(regionLocation.getRegionInfo().getStartKey());
  if (regionLocation.getRegionInfo().getEndKey() != null)
    newScan.setStopRow(regionLocation.getRegionInfo().getEndKey());
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  newScan.setFilter(rangeList.toFilterList());
  newScan.setAttribute(IndexConstants.MAX_SCAN_SCALE, Bytes.toBytes(1.0f));
  newScan.setId(rawScan.getId());
  newScan.setCacheBlocks(rawScan.getCacheBlocks());
  newScan.setCaching(rawScan.getCaching());
  return table.getScanner(newScan);
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: IRScanner.java

Example 4: initScans

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Initialize the list of scans.
 *
 * @param job the MapReduce job whose configuration carries the run date
 * @return a singleton list containing the configured Scan
 */
private List<Scan> initScans(Job job) {
  Configuration conf = job.getConfiguration();
  // Get the run date: yyyy-MM-dd
  String date = conf.get(GlobalConstants.RUNNING_DATE_PARAMES);
  long startDate = TimeUtil.parseString2Long(date);
  long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;

  Scan scan = new Scan();
  // Define the start and stop row keys for the HBase scan
  scan.setStartRow(Bytes.toBytes("" + startDate));
  scan.setStopRow(Bytes.toBytes("" + endDate));

  FilterList filterList = new FilterList();
  // Columns the mapper needs to read
  String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_UUID, // user id
      EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME, // server time
      EventLogConstants.LOG_COLUMN_NAME_PLATFORM, // platform name
      EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME, // browser name
      EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION // browser version
  };
  filterList.addFilter(this.getColumnFilter(columns));

  scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
  scan.setFilter(filterList);
  return Lists.newArrayList(scan);
}
 
Developer: liuhaozzu, Project: big_data, Lines: 33, Source: ActiveUserRunner.java

Example 5: initScans

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Initialize the list of scans.
 *
 * @param job the MapReduce job whose configuration carries the run date
 * @return a singleton list containing the configured Scan
 */
private List<Scan> initScans(Job job) {
  // row key: timestamp + ....
  Configuration conf = job.getConfiguration();
  // Get the run date: yyyy-MM-dd
  String date = conf.get(GlobalConstants.RUNNING_DATE_PARAMES);
  long startDate = TimeUtil.parseString2Long(date);
  long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;

  Scan scan = new Scan();
  // Define the start and stop row keys for the HBase scan
  scan.setStartRow(Bytes.toBytes(Long.toString(startDate)));
  scan.setStopRow(Bytes.toBytes(Long.toString(endDate)));

  FilterList filterList = new FilterList();
  // Filter the data: only analyze launch events
  filterList.addFilter(new SingleColumnValueFilter(Bytes.toBytes(EventLogConstants.EVENT_LOGS_FAMILY_NAME),
      Bytes.toBytes(EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME), CompareOp.EQUAL,
      Bytes.toBytes(EventEnum.LAUNCH.alias)));
  // Columns the mapper needs to read
  String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME,
      EventLogConstants.LOG_COLUMN_NAME_UUID, EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME,
      EventLogConstants.LOG_COLUMN_NAME_PLATFORM, EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME,
      EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION };
  // scan.addColumn(family, qualifier)
  filterList.addFilter(this.getColumnFilter(columns));

  scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
  scan.setFilter(filterList);
  return Lists.newArrayList(scan);
}
 
Developer: liuhaozzu, Project: big_data, Lines: 37, Source: NewInstallUserRunner.java

Example 6: run

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Override
public int run(String[] arg0) throws Exception {
  List<Scan> mainSiteScan = new ArrayList<Scan>();

  // "scan.attributes.table.name" is the string value of Scan.SCAN_ATTRIBUTES_TABLE_NAME.
  Scan siteScan = new Scan();
  siteScan.setAttribute("scan.attributes.table.name", Bytes.toBytes("myTest07WebSite"));
  System.out.println(siteScan.getAttribute("scan.attributes.table.name"));
  mainSiteScan.add(siteScan);

  // Points at the table we created earlier that holds the site-hit data.
  Scan webSitehitScan = new Scan();
  webSitehitScan.setAttribute("scan.attributes.table.name", Bytes.toBytes("myTest07SiteHits"));
  System.out.println(webSitehitScan.getAttribute("scan.attributes.table.name"));
  mainSiteScan.add(webSitehitScan);

  Configuration conf = new Configuration();
  Job job = new Job(conf); // picks up the HBase/Hadoop server details
  job.setJarByClass(TableWebsiteJob.class); // set the job's jar by class

  TableMapReduceUtil.initTableMapperJob(
      mainSiteScan, // tables to read from
      TableWebsiteMapper.class,
      Text.class,
      IntWritable.class,
      job);
  // myTest07SiteHitsPlusWebSite is the third table, which receives the combined data.
  TableMapReduceUtil.initTableReducerJob(
      "myTest07SiteHitsPlusWebSite",
      TableWebsiteReducer.class,
      job);
  job.waitForCompletion(true);
  return 0;
}
 
Developer: PacktPublishing, Project: HBase-High-Performance-Cookbook, Lines: 33, Source: TableWebsiteJob.java

Example 7: doIndexScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private void doIndexScan() throws IOException {
  ScanRange.ScanRangeList rangeList = new ScanRange.ScanRangeList();
  FilterList filterList = new FilterList();
  CompareFilter.CompareOp startOp = CompareFilter.CompareOp.GREATER_OR_EQUAL;
  CompareFilter.CompareOp stopOp = CompareFilter.CompareOp.LESS_OR_EQUAL;
  for (int i = 0; i < indexColumnNames.length && i < scanValues.length; i++) {
    rangeList.addScanRange(new ScanRange(familyName, Bytes.toBytes(indexColumnNames[i]),
        Bytes.toBytes(scanValues[i][0]), Bytes.toBytes(scanValues[i][1]), startOp, stopOp,
        DataType.INT));
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), startOp,
            Bytes.toBytes(scanValues[i][0])));
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), stopOp,
            Bytes.toBytes(scanValues[i][1])));
  }
  Scan scan = new Scan();
  scan.setFilter(filterList);
  if (rangeList.getRanges().size() > 0) {
    scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, rangeList.toBytesAttribute());
  }
  scan.setId("LMD-scan");
  scan.setCaching(1);
  ResultScanner scanner = BaseIndexScanner.getIndexScanner(conn, relation, scan);
  Result result;
  int count = 0;
  while ((result = scanner.next()) != null) {
    count++;
    if (PRINT_RESULT) printResult(result);
  }
  scanner.close();
  System.out.println("LMDIndex scan has " + count + " records");
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: LMDTester.java

Example 8: getScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
protected Scan getScan() {
  Scan scan = new Scan();
  scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, scanRangeList.toBytesAttribute());
  scan.setFilter(scanRangeList.toFilterList());
  scan.setCacheBlocks(false);
  return scan;
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: BaseRunner.java

Example 9: testScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Tests a MR scan using specific start and stop rows.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName =
      "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" +
          (stop != null ? stop.toUpperCase() : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());

  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");

  List<Scan> scans = new ArrayList<Scan>();

  for (String tableName : TABLES) {
    Scan scan = new Scan();

    scan.addFamily(INPUT_FAMILY);
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName));

    if (start != null) {
      scan.setStartRow(Bytes.toBytes(start));
    }
    if (stop != null) {
      scan.setStopRow(Bytes.toBytes(stop));
    }

    scans.add(scan);

    LOG.info("scan before: " + scan);
  }

  runJob(jobName, c, scans);
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: MultiTableInputFormatTestBase.java

Example 10: result

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private void result(byte[] fam, byte[] row, byte[] qual, byte[] row2, Table table, byte[] value,
    byte[] value2, byte[] row1, byte[] value1) throws IOException {
  Scan s = new Scan(row);
  // If filters are used, this attribute can be checked for explicitly in the
  // filterKV method, and KVs can be filtered out when the tag of interest
  // is not found in that KV.
  s.setAttribute("visibility", Bytes.toBytes("myTag"));
  ResultScanner scanner = null;
  try {
    scanner = table.getScanner(s);
    Result next = scanner.next();

    assertTrue(Bytes.equals(next.getRow(), row));
    assertTrue(Bytes.equals(next.getValue(fam, qual), value));

    Result next2 = scanner.next();
    assertTrue(next2 != null);
    assertTrue(Bytes.equals(next2.getRow(), row1));
    assertTrue(Bytes.equals(next2.getValue(fam, qual), value1));

    next2 = scanner.next();
    assertTrue(next2 != null);
    assertTrue(Bytes.equals(next2.getRow(), row2));
    assertTrue(Bytes.equals(next2.getValue(fam, qual), value2));

  } finally {
    if (scanner != null)
      scanner.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: TestTags.java

Example 11: getScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
public static Scan getScan(String fileName) throws IOException {
  BufferedReader br = new BufferedReader(new FileReader(fileName));
  String line;
  Scan scan = new Scan();
  System.out.println("winter for scan ******");
  FilterList filterList = new FilterList();
  while ((line = br.readLine()) != null) {
    System.out.println("winter for scan : " + line);
    if (line.startsWith("#")) continue;
    // family, qualifier, type, >=, 10, <=, 1000
    // family, qualifier, type, >=, 10
    String[] splits = line.split("\t");
    byte[] family = Bytes.toBytes(splits[0]);
    byte[] qualifier = Bytes.toBytes(splits[1]);
    DataType type = DataType.valueOf(splits[2].toUpperCase());
    CompareFilter.CompareOp firstOp = parseOp(splits[3]);
    byte[] firstValue = DataType.stringToBytes(type, splits[4]);
    filterList.addFilter(new SingleColumnValueFilter(family, qualifier, firstOp, firstValue));
    if (splits.length >= 6) {
      CompareFilter.CompareOp secondOp = parseOp(splits[5].toUpperCase());
      byte[] secondValue = DataType.stringToBytes(type, splits[6]);
      filterList
          .addFilter(new SingleColumnValueFilter(family, qualifier, secondOp, secondValue));
    }
  }
  scan.setFilter(filterList);
  ScanRangeList scanRangeList = ScanRangeList.getScanRangeList(fileName);
  if (scanRangeList.getRanges().size() > 0) {
    scan.setAttribute(ScanRange.SCAN_RANGE_ATTRIBUTE_STR, scanRangeList.toBytesAttribute());
  }
  return scan;
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: ScanRange.java
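
For reference, here is a hypothetical filter file that getScan could consume, following the tab-separated format sketched in the code comments (family, qualifier, type, operator, value, and optionally a second operator/value pair; lines starting with # are skipped). The family/qualifier names and the int type label are illustrative assumptions, based on the DataType.INT usage in Example 7:

# family	qualifier	type	op	value	[op2	value2]
f	c1	int	>=	10	<=	1000
f	c2	int	>=	100

Note that ScanRangeList.getScanRangeList(fileName) parses the same file to build the scan-range attribute, presumably so the server-side index ranges stay consistent with the client-side filter list.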

Example 12: innerAddScanner

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private void innerAddScanner(HRegionLocation regionLocation) throws IOException {
  Scan newScan = new Scan(rawScan);
  if (regionLocation.getRegionInfo().getStartKey() != null)
    newScan.setStartRow(regionLocation.getRegionInfo().getStartKey());
  if (regionLocation.getRegionInfo().getEndKey() != null)
    newScan.setStopRow(regionLocation.getRegionInfo().getEndKey());
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  ResultScanner scanner = table.getScanner(newScan);
  synchronized (scannerList) {
    scannerList.add(scanner);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: LocalScannerInParallel.java

Example 13: main

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
public static void main(String[] args) {
  try {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://hadoop1:8020");
    conf.set("yarn.resourcemanager.hostname", "hadoop1");
    conf.set("hbase.zookeeper.quorum", "hadoop1,hadoop2,hadoop3");
    conf = HBaseConfiguration.create(conf);
    HTable table = new HTable(conf, "event_logs".getBytes());

    String date = "2016-03-23";
    long startDate = TimeUtil.parseString2Long(date);
    long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;

    Scan scan = new Scan();
    // Define the start and stop row keys for the HBase scan
    scan.setStartRow(Bytes.toBytes("" + startDate));
    scan.setStopRow(Bytes.toBytes("" + endDate));

    FilterList filterList = new FilterList();
    // Filter the data: only analyze launch events
    filterList.addFilter(new SingleColumnValueFilter(Bytes.toBytes(EventLogConstants.EVENT_LOGS_FAMILY_NAME),
        Bytes.toBytes(EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME), CompareOp.EQUAL,
        Bytes.toBytes(EventEnum.LAUNCH.alias)));
    // Columns the mapper needs to read
    String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME,
        EventLogConstants.LOG_COLUMN_NAME_UUID, EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME,
        EventLogConstants.LOG_COLUMN_NAME_PLATFORM, EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME,
        EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION };
    // scan.addColumn(family, qualifier)
    filterList.addFilter(getColumnFilter(columns));

    scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
    scan.setFilter(filterList);

    ResultScanner ress = table.getScanner(scan);
    for (Result res : ress) {
      Cell cell = res.getColumnLatestCell("info".getBytes(),
          EventLogConstants.LOG_COLUMN_NAME_UUID.getBytes());
      System.out.println(new String(CellUtil.cloneValue(cell)));
    }
    ress.close();
  } catch (Exception e) {
    e.printStackTrace();
  }
}
 
Developer: liuhaozzu, Project: big_data, Lines: 47, Source: TestHbase.java
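
A side note on this example: the HTable(Configuration, byte[]) constructor used above has long been deprecated and was removed in later HBase client versions. A minimal sketch of the equivalent table lookup with the Connection API (assuming an HBase 1.0+ client; the scan setup itself is unchanged):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;

// Replaces: HTable table = new HTable(conf, "event_logs".getBytes());
try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("event_logs"))) {
  ResultScanner ress = table.getScanner(scan);
  // ... iterate over the results and close the scanner as above ...
}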

Example 14: testScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Tests a MR scan using specific start and stop rows.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName =
      "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" +
          (stop != null ? stop.toUpperCase() : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  
  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");
  
  List<Scan> scans = new ArrayList<Scan>();
  
  for(int i=0; i<3; i++){
    Scan scan = new Scan();
    
    scan.addFamily(INPUT_FAMILY);
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(TABLE_NAME + i));
    
    if (start != null) {
      scan.setStartRow(Bytes.toBytes(start));
    }
    if (stop != null) {
      scan.setStopRow(Bytes.toBytes(stop));
    }
    
    scans.add(scan);
    
    LOG.info("scan before: " + scan);
  }
  
  Job job = new Job(c, jobName);

  TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  job.setReducerClass(ScanReducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job,
    new Path(TEST_UTIL.getDataTestDirOnTestFS(), job.getJobName()));
  LOG.info("Started " + job.getJobName());
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 52, Source: TestMultiTableInputFormat.java


Note: The org.apache.hadoop.hbase.client.Scan.setAttribute method examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code; do not reproduce without permission.