Java ColumnInterpreter Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.coprocessor.ColumnInterpreter. If you are wondering what the ColumnInterpreter class is for, how to use it, or where to find examples of it, the curated class code examples below should help.


The ColumnInterpreter class belongs to the org.apache.hadoop.hbase.coprocessor package. Fifteen code examples of the class are shown below, sorted by popularity by default.
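For orientation before the examples: a ColumnInterpreter tells the aggregation endpoint how to decode a raw cell into a cell-level value type T and how to promote it to a wider type S for arithmetic; the remaining type parameters are the protobuf messages that carry the interpreter's request data, cell-type values, and promoted values over the wire. The following minimal client-side sketch is not one of the collected examples: it assumes HBase 1.1+ (where AggregationClient is Closeable), the AggregateImplementation coprocessor loaded on the target table, the built-in LongColumnInterpreter (type parameters <Long, Long, EmptyMsg, LongMsg, LongMsg>), and a placeholder table "t" whose family "f" stores 8-byte longs under qualifier "q".

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class SumSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
    try (AggregationClient aggregationClient = new AggregationClient(conf)) {
      // LongColumnInterpreter decodes each 8-byte cell as a Long (cell type T)
      // and promotes it to Long (promoted type S) for the server-side sum.
      Long sum = aggregationClient.sum(TableName.valueOf("t"),
          new LongColumnInterpreter(), scan);
      System.out.println("sum = " + sum);
    }
  }
}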

Example 1: main

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
public static void main(String[] args) {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.client.retries.number", 1);
  conf.setInt("ipc.client.connect.max.retries", 1);
  
  byte[] table = Bytes.toBytes("t");
  Scan scan = new Scan();
  scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("id"));
  final ColumnInterpreter<Long, Long> columnInterpreter = new LongStrColumnInterpreter();

  try {
    AggregationClient aClient = new AggregationClient(conf);
    Long min = aClient.min(table, columnInterpreter, scan);
    System.out.println("The result is " + min);
  } catch (Throwable e) {
    e.printStackTrace();
  }
}
 
Developer ID: javachen, Project: learning-hadoop, Lines of code: 19, Source file: AggregateTest.java
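The LongStrColumnInterpreter instantiated above belongs to the javachen/learning-hadoop project and is not reproduced on this page. Purely as a hypothetical reconstruction against the old two-type-parameter interface of HBase 0.94 (ColumnInterpreter<T, S> extends Writable, matching the ColumnInterpreter<Long, Long> declaration in the example), and assuming the column stores longs as decimal strings, it might look like this:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch: interprets cells that store a long as a decimal string.
public class LongStrColumnInterpreter implements ColumnInterpreter<Long, Long> {

  @Override
  public Long getValue(byte[] colFamily, byte[] colQualifier, KeyValue kv)
      throws IOException {
    if (kv == null || kv.getValueLength() == 0) {
      return null;
    }
    try {
      return Long.parseLong(Bytes.toString(kv.getValue()));
    } catch (NumberFormatException e) {
      return null; // treat unparsable cells as missing
    }
  }

  @Override
  public Long add(Long l1, Long l2) {
    if (l1 == null ^ l2 == null) {
      return (l1 == null) ? l2 : l1;
    }
    return (l1 == null) ? null : l1 + l2;
  }

  @Override
  public Long getMaxValue() { return Long.MAX_VALUE; }

  @Override
  public Long getMinValue() { return Long.MIN_VALUE; }

  @Override
  public Long increment(Long o) { return (o == null) ? null : o + 1; }

  @Override
  public Long multiply(Long o1, Long o2) {
    return (o1 == null || o2 == null) ? null : o1 * o2;
  }

  @Override
  public Long castToReturnType(Long o) { return o; }

  @Override
  public int compare(Long l1, Long l2) {
    if (l1 == null ^ l2 == null) {
      return (l1 == null) ? -1 : 1; // nulls sort first
    }
    return (l1 == null) ? 0 : l1.compareTo(l2);
  }

  @Override
  public double divideForAvg(Long o, Long l) {
    return (o == null || l == null) ? Double.NaN : o.doubleValue() / l.doubleValue();
  }

  // ColumnInterpreter extends Writable in 0.94; a stateless interpreter has
  // nothing to serialize.
  @Override
  public void write(DataOutput out) throws IOException {}

  @Override
  public void readFields(DataInput in) throws IOException {}
}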

Example 2: median

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
public static <R, S, P extends Message, Q extends Message, T extends Message>
    CompletableFuture<R> median(AsyncTable<AdvancedScanResultConsumer> table,
    ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) {
  CompletableFuture<R> future = new CompletableFuture<>();
  sumByRegion(table, ci, scan).whenComplete((sumByRegion, error) -> {
    if (error != null) {
      future.completeExceptionally(error);
    } else if (sumByRegion.isEmpty()) {
      future.completeExceptionally(new NoSuchElementException());
    } else {
      findMedian(future, table, ci, ReflectionUtils.newInstance(scan.getClass(), scan),
        sumByRegion);
    }
  });
  return future;
}
 
Developer ID: apache, Project: hbase, Lines of code: 17, Source file: AsyncAggregationClient.java
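A hedged usage sketch for the median method above, assuming HBase 2.x (AsyncAggregationClient ships in the hbase-endpoint module and exposes its aggregations as static methods), the built-in LongColumnInterpreter, and a placeholder table "t" with family "f" and qualifier "q":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AsyncAggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class MedianSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      Scan scan = new Scan().addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
      // The future completes exceptionally with NoSuchElementException when
      // the scan matches no rows (see the sumByRegion check above).
      Long median = AsyncAggregationClient
          .median(conn.getTable(TableName.valueOf("t")),
              new LongColumnInterpreter(), scan)
          .get();
      System.out.println("median = " + median);
    }
  }
}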

Example 3: validateArgAndGetPB

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
<R, S, P extends Message, Q extends Message, T extends Message> TimeseriesAggregateRequest
    validateArgAndGetPB(Scan scan, ColumnInterpreter<R, S, P, Q, T> ci,
        boolean canFamilyBeAbsent, int intervalSeconds, Integer timestampSecondsMin,
        Integer timestampSecondsMax, String keyFilterPattern) throws IOException {
  validateParameters(scan, canFamilyBeAbsent);
  final TimeseriesAggregateRequest.Builder requestBuilder =
      TimeseriesAggregateRequest.newBuilder();
  requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName());
  P columnInterpreterSpecificData = ci.getRequestData();
  if (columnInterpreterSpecificData != null) {
    requestBuilder.setInterpreterSpecificBytes(columnInterpreterSpecificData.toByteString());
  }
  requestBuilder.setScan(ProtobufUtil.toScan(scan));

  requestBuilder.setTimeIntervalSeconds(intervalSeconds);
  if (timestampSecondsMin != null) {
    final TimeseriesRange.Builder rangeBuilder = TimeseriesRange.newBuilder();
    rangeBuilder.setKeyTimestampMin(timestampSecondsMin);
    rangeBuilder.setKeyTimestampMax(timestampSecondsMax);
    rangeBuilder.setKeyTimestampFilterPattern(keyFilterPattern);
    requestBuilder.setRange(rangeBuilder.build());
  }
  return requestBuilder.build();
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 25, Source file: TimeseriesAggregationClient.java

Example 4: wrapForTransport

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public TimeseriesAggregateProtos.TimeseriesAggregateResponse wrapForTransport(Map results, ColumnInterpreter ci) {
    Map<Long,Pair<Long,S>> avgs = results;
    TimeseriesAggregateProtos.TimeseriesAggregateResponse.Builder responseBuilder =
            TimeseriesAggregateProtos.TimeseriesAggregateResponse.newBuilder();

    for (Map.Entry<Long, Pair<Long, S>> entry : avgs.entrySet()) {
        TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.Builder valueBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.newBuilder();
        TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.Builder mapElementBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.newBuilder();
        ByteString first = ci.getProtoForPromotedType(entry.getValue().getSecond()).toByteString();
        valueBuilder.addFirstPart(first);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(entry.getValue().getFirst());
        bb.rewind();
        valueBuilder.setSecondPart(ByteString.copyFrom(bb));
        mapElementBuilder.setKey(entry.getKey());
        mapElementBuilder.setValue(valueBuilder.build());
        responseBuilder.addEntry(mapElementBuilder.build());
    }
    return responseBuilder.build();
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 23, Source file: Avg.java

Example 5: compute

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public <T, S, P extends Message, Q extends Message, R extends Message> Map<Long, S> compute(
        Map results, Cell kv, ColumnInterpreter<T, S, P, Q, R> ci, byte[] columnFamily, long timestamp,
        List<TimeRange> timeRanges) throws IOException {
    Map<Long, S> sums = results;
    T temp;
    S sum;
    for (TimeRange t : timeRanges) {
        if (t.withinTimeRange(timestamp)) {
            long minTimestamp = t.getMin();
            if (sums.containsKey(minTimestamp)) {
                sum = sums.get(minTimestamp);
            } else sum = null;
            temp = ci.getValue(columnFamily, CellUtil.cloneQualifier(kv), kv);
            if (temp != null) sum = ci.add(sum, ci.castToReturnType(temp));
            sums.put(minTimestamp, sum);
        }
    }
    return sums;
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 22, Source file: Sum.java

Example 6: wrapForTransport

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public TimeseriesAggregateProtos.TimeseriesAggregateResponse wrapForTransport(Map results, ColumnInterpreter ci) {
    Map<Long, S> sums = results;
    TimeseriesAggregateProtos.TimeseriesAggregateResponse.Builder responseBuilder =
            TimeseriesAggregateProtos.TimeseriesAggregateResponse.newBuilder();

    for (Map.Entry<Long, S> entry : sums.entrySet()) {
        TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.Builder valueBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.newBuilder();
        TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.Builder mapElementBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.newBuilder();
        valueBuilder.addFirstPart(ci.getProtoForPromotedType(entry.getValue()).toByteString());
        mapElementBuilder.setKey(entry.getKey());
        mapElementBuilder.setValue(valueBuilder.build());
        responseBuilder.addEntry(mapElementBuilder.build());
    }
    return responseBuilder.build();
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 19, Source file: Sum.java

Example 7: compute

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public <T, S, P extends Message, Q extends Message, R extends Message> Map<Long, T> compute(
        Map results, Cell kv, ColumnInterpreter<T, S, P, Q, R> ci, byte[] columnFamily, long timestamp,
        List<TimeRange> timeRanges) throws IOException {
    Map<Long, T> minimums = results;
    T temp;
    T min;
    for (TimeRange t : timeRanges) {
        if (t.withinTimeRange(timestamp)) {
            long minTimestamp = t.getMin();
            if (minimums.containsKey(minTimestamp)) {
                min = minimums.get(minTimestamp);
            } else min = null;
            temp = ci.getValue(columnFamily, CellUtil.cloneQualifier(kv), kv);
            min = (min == null || (temp != null && ci.compare(temp, min) < 0)) ? temp : min;
            minimums.put(minTimestamp, min);
        }
    }
    return minimums;
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 22, Source file: Min.java

Example 8: wrapForTransport

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public TimeseriesAggregateProtos.TimeseriesAggregateResponse wrapForTransport(Map results, ColumnInterpreter ci) {
    Map<Long, T> minimums = results;
    TimeseriesAggregateProtos.TimeseriesAggregateResponse.Builder responseBuilder =
            TimeseriesAggregateProtos.TimeseriesAggregateResponse.newBuilder();

    for (Map.Entry<Long, T> entry : minimums.entrySet()) {
        TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.Builder valueBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.newBuilder();
        TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.Builder mapElementBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.newBuilder();

        valueBuilder.addFirstPart(ci.getProtoForCellType(entry.getValue()).toByteString());

        mapElementBuilder.setKey(entry.getKey());
        mapElementBuilder.setValue(valueBuilder.build());

        responseBuilder.addEntry(mapElementBuilder.build());
    }
    return responseBuilder.build();
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 22, Source file: Min.java

Example 9: compute

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public <T, S, P extends Message, Q extends Message, R extends Message> Map compute(
        Map results, Cell kv, ColumnInterpreter<T, S, P, Q, R> ci, byte[] columnFamily, long timestamp,
        List<TimeRange> timeRanges)
        throws IOException {
    Map<Long, Long> counts = results;
    long count;
    for (TimeRange t : timeRanges) {
        if (t.withinTimeRange(timestamp)) {
            long minTimestamp = t.getMin();
            if (counts.containsKey(minTimestamp)) {
                count = counts.get(minTimestamp);
                count++;
            } else count = 1L;
            counts.put(minTimestamp, count);
        }
    }
    return counts;
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 20, Source file: Count.java

Example 10: wrapForTransport

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public TimeseriesAggregateProtos.TimeseriesAggregateResponse wrapForTransport(Map results, ColumnInterpreter ci) {
    Map<Long, Long> counts = results;
    TimeseriesAggregateProtos.TimeseriesAggregateResponse.Builder responseBuilder =
            TimeseriesAggregateProtos.TimeseriesAggregateResponse.newBuilder();

    for (Map.Entry<Long, Long> entry : counts.entrySet()) {
        TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.Builder valueBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.newBuilder();
        TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.Builder mapElementBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.newBuilder();

        valueBuilder.addFirstPart(ByteString.copyFrom(Bytes.toBytes(entry.getValue())));

        mapElementBuilder.setKey(entry.getKey());
        mapElementBuilder.setValue(valueBuilder.build());

        responseBuilder.addEntry(mapElementBuilder.build());
    }
    return responseBuilder.build();
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 22, Source file: Count.java

Example 11: compute

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public <T, S, P extends Message, Q extends Message, R extends Message> Map<Long, T> compute(
        Map results, Cell kv, ColumnInterpreter<T, S, P, Q, R> ci, byte[] columnFamily, long timestamp,
        List<TimeRange> timeRanges) throws IOException {
    Map<Long, T> maximums = results;
    T temp;
    T max;
    for (TimeRange t : timeRanges) {
        if (t.withinTimeRange(timestamp)) {
            long minTimestamp = t.getMin();
            if (maximums.containsKey(minTimestamp)) {
                max = maximums.get(minTimestamp);
            } else max = null;
            temp = ci.getValue(columnFamily, CellUtil.cloneQualifier(kv), kv);
            max = (max == null || (temp != null && ci.compare(temp, max) > 0)) ? temp : max;
            maximums.put(minTimestamp, max);
        }
    }
    return maximums;
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 22, Source file: Max.java

Example 12: wrapForTransport

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
@Override
public TimeseriesAggregateProtos.TimeseriesAggregateResponse wrapForTransport(Map results, ColumnInterpreter ci) {
    Map<Long, T> maximums = results;
    TimeseriesAggregateProtos.TimeseriesAggregateResponse.Builder responseBuilder =
            TimeseriesAggregateProtos.TimeseriesAggregateResponse.newBuilder();

    for (Map.Entry<Long, T> entry : maximums.entrySet()) {
        TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.Builder valueBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseEntry.newBuilder();
        TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.Builder mapElementBuilder =
                TimeseriesAggregateProtos.TimeseriesAggregateResponseMapEntry.newBuilder();

        valueBuilder.addFirstPart(ci.getProtoForCellType(entry.getValue()).toByteString());

        mapElementBuilder.setKey(entry.getKey());
        mapElementBuilder.setValue(valueBuilder.build());

        responseBuilder.addEntry(mapElementBuilder.build());
    }
    return responseBuilder.build();
}
 
Developer ID: juwi, Project: HBase-TAggregator, Lines of code: 22, Source file: Max.java

Example 13: max

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
/**
 * Gives the maximum value of a column for a given column family over the
 * given range. If the qualifier is null, the max of all values for the given
 * family is returned.
 * @param table table to run the aggregation against
 * @param ci the column interpreter
 * @param scan scan defining the row range
 * @return the maximum value
 * @throws Throwable
 *           The caller is expected to handle the exceptions that are thrown
 *           and propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message>
R max(final Table table, final ColumnInterpreter<R, S, P, Q, T> ci,
    final Scan scan) throws Throwable {
  final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false);
  class MaxCallBack implements Batch.Callback<R> {
    R max = null;

    R getMax() {
      return max;
    }

    @Override
    public synchronized void update(byte[] region, byte[] row, R result) {
      max = (max == null || (result != null && ci.compare(max, result) < 0)) ? result : max;
    }
  }
  MaxCallBack aMaxCallBack = new MaxCallBack();
  table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
      new Batch.Call<AggregateService, R>() {
        @Override
        public R call(AggregateService instance) throws IOException {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<AggregateResponse> rpcCallback =
              new BlockingRpcCallback<AggregateResponse>();
          instance.getMax(controller, requestArg, rpcCallback);
          AggregateResponse response = rpcCallback.get();
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          if (response.getFirstPartCount() > 0) {
            ByteString b = response.getFirstPart(0);
            Q q = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 3, b);
            return ci.getCellValueFromProto(q);
          }
          return null;
        }
      }, aMaxCallBack);
  return aMaxCallBack.getMax();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 52, Source file: AggregationClient.java
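A hedged usage sketch for the max method above, assuming an HBase 1.x classpath with the AggregateImplementation endpoint deployed on the table, the built-in LongColumnInterpreter, and a placeholder table "t" with 8-byte long values in family "f":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class MaxSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t"))) {
      AggregationClient aggregationClient = new AggregationClient(conf);
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("f")); // no qualifier: max over all values in the family
      Long max = aggregationClient.max(table, new LongColumnInterpreter(), scan);
      System.out.println("max = " + max);
    }
  }
}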

Example 14: rowCount

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
/**
 * Gives the row count by summing up the individual results obtained from the
 * regions. If the qualifier is null, a FirstKeyValueFilter is used to
 * optimize the operation. If a qualifier is provided, that filter cannot be
 * used: it may set the flag to skip to the next row even though the value
 * read does not match the given qualifier, so that row would wrongly go
 * uncounted.
 * @param table table to run the aggregation against
 * @param ci the column interpreter
 * @param scan scan defining the row range
 * @return the row count
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message>
long rowCount(final Table table,
    final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
  final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true);
  class RowNumCallback implements Batch.Callback<Long> {
    private final AtomicLong rowCountL = new AtomicLong(0);

    public long getRowNumCount() {
      return rowCountL.get();
    }

    @Override
    public void update(byte[] region, byte[] row, Long result) {
      rowCountL.addAndGet(result.longValue());
    }
  }
  RowNumCallback rowNum = new RowNumCallback();
  table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
      new Batch.Call<AggregateService, Long>() {
        @Override
        public Long call(AggregateService instance) throws IOException {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<AggregateResponse> rpcCallback =
              new BlockingRpcCallback<AggregateResponse>();
          instance.getRowNum(controller, requestArg, rpcCallback);
          AggregateResponse response = rpcCallback.get();
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
          ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
          bb.rewind();
          return bb.getLong();
        }
      }, rowNum);
  return rowNum.getRowNumCount();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 51, Source file: AggregationClient.java

Example 15: validateArgAndGetPB

import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; // import the dependent package/class
<R, S, P extends Message, Q extends Message, T extends Message> AggregateRequest
validateArgAndGetPB(Scan scan, ColumnInterpreter<R,S,P,Q,T> ci, boolean canFamilyBeAbsent)
    throws IOException {
  validateParameters(scan, canFamilyBeAbsent);
  final AggregateRequest.Builder requestBuilder =
      AggregateRequest.newBuilder();
  requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName());
  P columnInterpreterSpecificData = ci.getRequestData();
  if (columnInterpreterSpecificData != null) {
    requestBuilder.setInterpreterSpecificBytes(columnInterpreterSpecificData.toByteString());
  }
  requestBuilder.setScan(ProtobufUtil.toScan(scan));
  return requestBuilder.build();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source file: AggregationClient.java


Note: The org.apache.hadoop.hbase.coprocessor.ColumnInterpreter class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code; do not repost without permission.