

Java CellUtil.cloneFamily Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.CellUtil.cloneFamily. If you are wondering what CellUtil.cloneFamily does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.CellUtil.


A total of 15 code examples of the CellUtil.cloneFamily method are shown below, sorted by popularity by default.
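
For readers new to the API: CellUtil.cloneFamily(Cell) copies the column-family bytes out of a Cell's backing array into a freshly allocated byte[], so the result stays valid even after the underlying buffer is reused. The short, self-contained sketch below is not taken from the examples that follow; the class name CloneFamilyDemo is made up for illustration.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CloneFamilyDemo {
  public static void main(String[] args) {
    // KeyValue implements Cell; build one with row, family, qualifier and value.
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    // cloneFamily copies the family bytes into a new array, detached from the cell's buffer.
    byte[] family = CellUtil.cloneFamily(cell);
    System.out.println(Bytes.toString(family)); // prints "cf"
  }
}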

Example 1: recoverClusteringResult

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
public static Result recoverClusteringResult(Result raw, byte[] family, byte[] qualifier) {
  if (raw == null) return null;
  byte[][] indexColumn = IndexPutParser.parseIndexRowKey(raw.getRow());
  List<KeyValue> list = new ArrayList<>(raw.listCells().size() + 1);
  for (Cell cell : raw.listCells()) {
    byte[] tag = cell.getTagsArray();
    if (tag != null && tag.length > KeyValue.MAX_TAGS_LENGTH) tag = null;
    KeyValue kv =
        new KeyValue(indexColumn[0], CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
            cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
            CellUtil.cloneValue(cell), tag);
    list.add(kv);
  }
  list.add(new KeyValue(indexColumn[0], family, qualifier, indexColumn[1]));
  Collections.sort(list, KeyValue.COMPARATOR);
  return new Result(list);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source file: BaseIndexScanner.java

Example 2: resultToString

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
protected String resultToString(Result result) {
  StringBuilder sb = new StringBuilder();
  sb.append("{").append(keyToString(result.getRow())).append(":");
  for (Cell cell : result.listCells()) {
    byte[] f = CellUtil.cloneFamily(cell);
    byte[] q = CellUtil.cloneQualifier(cell);
    RangeDescription range = rangeMap.get(Bytes.add(f, q));
    sb.append("[").append(Bytes.toString(f)).append(":").append(Bytes.toString(q)).append("->");
    if (notPrintingSet.contains(q)) sb.append("skipped random value");
    else sb.append(DataType.byteToString(range.dataType, CellUtil.cloneValue(cell)));
    sb.append("]");
  }
  sb.append("}");
  return sb.toString();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 16, Source file: BaseRunner.java

Example 3: addDeleteMarker

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
/**
 * Advanced use only.
 * Add an existing delete marker to this Delete object.
 * @param kv An existing KeyValue of type "delete".
 * @return this for invocation chaining
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public Delete addDeleteMarker(Cell kv) throws IOException {
  // TODO: Deprecate and rename 'add' so it matches how we add KVs to Puts.
  if (!CellUtil.isDelete(kv)) {
    throw new IOException("The recently added KeyValue is not of type "
        + "delete. Rowkey: " + Bytes.toStringBinary(this.row));
  }
  if (Bytes.compareTo(this.row, 0, row.length, kv.getRowArray(),
      kv.getRowOffset(), kv.getRowLength()) != 0) {
    throw new WrongRowIOException("The row in " + kv.toString() +
      " doesn't match the original one " +  Bytes.toStringBinary(this.row));
  }
  byte [] family = CellUtil.cloneFamily(kv);
  List<Cell> list = familyMap.get(family);
  if (list == null) {
    list = new ArrayList<Cell>();
  }
  list.add(kv);
  familyMap.put(family, list);
  return this;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source file: Delete.java

Example 4: SerializableCell

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
/**
 * Copy data from {@code Cell} instance.
 *
 * @param cell
 */
public SerializableCell( Cell cell ) {
    rowKey = CellUtil.cloneRow(cell);
    family = CellUtil.cloneFamily(cell);
    qualifier = CellUtil.cloneQualifier(cell);
    value = CellUtil.cloneValue(cell);
    timestamp = cell.getTimestamp();
    type = cell.getTypeByte();
}
 
Developer ID: i-knowledge, Project: hbase-client, Lines: 14, Source file: SerializableCell.java

Example 5: deleteFromHBase

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
public static TDelete deleteFromHBase(Delete in) {
  TDelete out = new TDelete(ByteBuffer.wrap(in.getRow()));

  List<TColumn> columns = new ArrayList<TColumn>();
  long rowTimestamp = in.getTimeStamp();
  if (rowTimestamp != HConstants.LATEST_TIMESTAMP) {
    out.setTimestamp(rowTimestamp);
  }

  // Map<family, List<KeyValue>>
  for (Map.Entry<byte[], List<org.apache.hadoop.hbase.Cell>> familyEntry:
      in.getFamilyCellMap().entrySet()) {
    TColumn column = new TColumn(ByteBuffer.wrap(familyEntry.getKey()));
    for (org.apache.hadoop.hbase.Cell cell: familyEntry.getValue()) {
      byte[] family = CellUtil.cloneFamily(cell);
      byte[] qualifier = CellUtil.cloneQualifier(cell);
      long timestamp = cell.getTimestamp();
      if (family != null) {
        column.setFamily(family);
      }
      if (qualifier != null) {
        column.setQualifier(qualifier);
      }
      if (timestamp != HConstants.LATEST_TIMESTAMP) {
        column.setTimestamp(timestamp);
      }
    }
    columns.add(column);
  }
  out.setColumns(columns);

  return out;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source file: ThriftUtilities.java

Example 6: filterCellByStore

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
private void filterCellByStore(Entry logEntry) {
  Map<byte[], Long> maxSeqIdInStores =
      regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
  if (maxSeqIdInStores == null || maxSeqIdInStores.isEmpty()) {
    return;
  }
  // Create the array list for the cells that aren't filtered.
  // We make the assumption that most cells will be kept.
  ArrayList<Cell> keptCells = new ArrayList<Cell>(logEntry.getEdit().getCells().size());
  for (Cell cell : logEntry.getEdit().getCells()) {
    if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
      keptCells.add(cell);
    } else {
      byte[] family = CellUtil.cloneFamily(cell);
      Long maxSeqId = maxSeqIdInStores.get(family);
      // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
      // or the master was crashed before and we can not get the information.
      if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getLogSeqNum()) {
        keptCells.add(cell);
      }
    }
  }

  // Anything in the keptCells array list is still live.
  // So rather than removing the cells from the array list
  // which would be an O(n^2) operation, we just replace the list
  logEntry.getEdit().setCells(keptCells);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source file: WALSplitter.java

Example 7: rollbackMemstore

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
/**
 * Remove all the keys listed in the map from the memstore. This method is called when a
 * Put/Delete has updated memstore but subsequently fails to update the wal. This method is then
 * invoked to rollback the memstore.
 */
private void rollbackMemstore(List<Cell> memstoreCells) {
  int kvsRolledback = 0;

  for (Cell cell : memstoreCells) {
    byte[] family = CellUtil.cloneFamily(cell);
    Store store = getStore(family);
    store.rollback(cell);
    kvsRolledback++;
  }
  LOG.debug("rollbackMemstore rolled back " + kvsRolledback);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 17, Source file: HRegion.java

Example 8: assertNResult

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
private void assertNResult(Result result, byte [] row,
    byte [][] families, byte [][] qualifiers, byte [][] values,
    int [][] idxs)
throws Exception {
  assertTrue("Expected row [" + Bytes.toString(row) + "] " +
      "Got row [" + Bytes.toString(result.getRow()) +"]",
      equals(row, result.getRow()));
  assertTrue("Expected " + idxs.length + " keys but result contains "
      + result.size(), result.size() == idxs.length);

  Cell [] keys = result.rawCells();

  for(int i=0;i<keys.length;i++) {
    byte [] family = families[idxs[i][0]];
    byte [] qualifier = qualifiers[idxs[i][1]];
    byte [] value = values[idxs[i][2]];
    Cell key = keys[i];

    byte[] famb = CellUtil.cloneFamily(key);
    byte[] qualb = CellUtil.cloneQualifier(key);
    byte[] valb = CellUtil.cloneValue(key);
    assertTrue("(" + i + ") Expected family [" + Bytes.toString(family)
        + "] " + "Got family [" + Bytes.toString(famb) + "]",
        equals(family, famb));
    assertTrue("(" + i + ") Expected qualifier [" + Bytes.toString(qualifier)
        + "] " + "Got qualifier [" + Bytes.toString(qualb) + "]",
        equals(qualifier, qualb));
    assertTrue("(" + i + ") Expected value [" + Bytes.toString(value) + "] "
        + "Got value [" + Bytes.toString(valb) + "]",
        equals(value, valb));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source file: TestFromClientSide.java

Example 9: add

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
/**
 * Add the specified KeyValue to this Put operation.  Operation assumes that
 * the passed KeyValue is immutable and its backing array will not be modified
 * for the duration of this Put.
 * @param kv individual KeyValue
 * @return this
 * @throws java.io.IOException e
 */
public Put add(Cell kv) throws IOException{
  byte [] family = CellUtil.cloneFamily(kv);
  List<Cell> list = getCellList(family);
  //Checking that the row of the kv is the same as the put
  int res = Bytes.compareTo(this.row, 0, row.length,
      kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
  if (res != 0) {
    throw new WrongRowIOException("The row in " + kv.toString() +
      " doesn't match the original one " +  Bytes.toStringBinary(this.row));
  }
  list.add(kv);
  familyMap.put(family, list);
  return this;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source file: Put.java

Example 10: getMap

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
/**
 * Map of families to all versions of its qualifiers and values.
 * <p>
 * Returns a three level Map of the form:
 * <code>Map&lt;family,Map&lt;qualifier,Map&lt;timestamp,value&gt;&gt;&gt;</code>
 * <p>
 * Note: All other map returning methods make use of this map internally.
 * @return map from families to qualifiers to versions
 */
public NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> getMap() {
  if (this.familyMap != null) {
    return this.familyMap;
  }
  if(isEmpty()) {
    return null;
  }
  this.familyMap = new TreeMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>(Bytes.BYTES_COMPARATOR);
  for(Cell kv : this.cells) {
    byte [] family = CellUtil.cloneFamily(kv);
    NavigableMap<byte[], NavigableMap<Long, byte[]>> columnMap =
      familyMap.get(family);
    if(columnMap == null) {
      columnMap = new TreeMap<byte[], NavigableMap<Long, byte[]>>
        (Bytes.BYTES_COMPARATOR);
      familyMap.put(family, columnMap);
    }
    byte [] qualifier = CellUtil.cloneQualifier(kv);
    NavigableMap<Long, byte[]> versionMap = columnMap.get(qualifier);
    if(versionMap == null) {
      versionMap = new TreeMap<Long, byte[]>(new Comparator<Long>() {
        @Override
        public int compare(Long l1, Long l2) {
          return l2.compareTo(l1);
        }
      });
      columnMap.put(qualifier, versionMap);
    }
    Long timestamp = kv.getTimestamp();
    byte [] value = CellUtil.cloneValue(kv);

    versionMap.put(timestamp, value);
  }
  return this.familyMap;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 45, Source file: Result.java

Example 11: add

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
/**
 * Add column and value to this Append operation.
 * @param cell
 * @return This instance
 */
@SuppressWarnings("unchecked")
public Append add(final Cell cell) {
  // Presume it is KeyValue for now.
  byte [] family = CellUtil.cloneFamily(cell);
  List<Cell> list = this.familyMap.get(family);
  if (list == null) {
    list  = new ArrayList<Cell>();
  }
  // find where the new entry should be placed in the List
  list.add(cell);
  this.familyMap.put(family, list);
  return this;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source file: Append.java

Example 12: add

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
/**
 * Add the specified KeyValue to this operation.
 * @param cell individual Cell
 * @return this
 * @throws java.io.IOException e
 */
public Increment add(Cell cell) throws IOException{
  byte [] family = CellUtil.cloneFamily(cell);
  List<Cell> list = getCellList(family);
  //Checking that the row of the kv is the same as the put
  int res = Bytes.compareTo(this.row, 0, row.length,
      cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
  if (res != 0) {
    throw new WrongRowIOException("The row in " + cell +
      " doesn't match the original one " +  Bytes.toStringBinary(this.row));
  }
  list.add(cell);
  familyMap.put(family, list);
  return this;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source file: Increment.java

Example 13: filterKeyValue

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
@Override
public ReturnCode filterKeyValue(Cell cell) {
  if (isSystemTable) {
    return ReturnCode.INCLUDE;
  }
  if (prevFam.getBytes() == null
      || (Bytes.compareTo(prevFam.getBytes(), prevFam.getOffset(), prevFam.getLength(),
          cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()) != 0)) {
    prevFam.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
    // Similar to VisibilityLabelFilter
    familyMaxVersions = cfVsMaxVersions.get(prevFam);
    // Family is changed. Just unset curQualifier.
    prevQual.unset();
  }
  if (prevQual.getBytes() == null
      || (Bytes.compareTo(prevQual.getBytes(), prevQual.getOffset(),
          prevQual.getLength(), cell.getQualifierArray(), cell.getQualifierOffset(),
          cell.getQualifierLength()) != 0)) {
    prevQual.set(cell.getQualifierArray(), cell.getQualifierOffset(),
        cell.getQualifierLength());
    currentVersions = 0;
  }
  currentVersions++;
  if (currentVersions > familyMaxVersions) {
    return ReturnCode.SKIP;
  }
  // XXX: Compare in place, don't clone
  byte[] family = CellUtil.cloneFamily(cell);
  byte[] qualifier = CellUtil.cloneQualifier(cell);
  switch (strategy) {
    // Filter only by checking the table or CF permissions
    case CHECK_TABLE_AND_CF_ONLY: {
      if (authManager.authorize(user, table, family, qualifier, Permission.Action.READ)) {
        return ReturnCode.INCLUDE;
      }
    }
    break;
    // Cell permissions can override table or CF permissions
    case CHECK_CELL_DEFAULT: {
      if (authManager.authorize(user, table, family, qualifier, Permission.Action.READ) ||
          authManager.authorize(user, table, cell, Permission.Action.READ)) {
        return ReturnCode.INCLUDE;
      }
    }
    break;
    default:
      throw new RuntimeException("Unhandled strategy " + strategy);
  }

  return ReturnCode.SKIP;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 52, Source file: AccessControlFilter.java

Example 14: parsePermissionRecord

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
private static Pair<String, TablePermission> parsePermissionRecord(
    byte[] entryName, Cell kv) {
  // return X given a set of permissions encoded in the permissionRecord kv.
  byte[] family = CellUtil.cloneFamily(kv);

  if (!Bytes.equals(family, ACL_LIST_FAMILY)) {
    return null;
  }

  byte[] key = CellUtil.cloneQualifier(kv);
  byte[] value = CellUtil.cloneValue(kv);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read acl: kv ["+
              Bytes.toStringBinary(key)+": "+
              Bytes.toStringBinary(value)+"]");
  }

  // check for a column family appended to the key
  // TODO: avoid the string conversion to make this more efficient
  String username = Bytes.toString(key);

  //Handle namespace entry
  if(isNamespaceEntry(entryName)) {
    return new Pair<String, TablePermission>(username,
        new TablePermission(Bytes.toString(fromNamespaceEntry(entryName)), value));
  }

  //Handle table and global entry
  //TODO global entry should be handled differently
  int idx = username.indexOf(ACL_KEY_DELIMITER);
  byte[] permFamily = null;
  byte[] permQualifier = null;
  if (idx > 0 && idx < username.length()-1) {
    String remainder = username.substring(idx+1);
    username = username.substring(0, idx);
    idx = remainder.indexOf(ACL_KEY_DELIMITER);
    if (idx > 0 && idx < remainder.length()-1) {
      permFamily = Bytes.toBytes(remainder.substring(0, idx));
      permQualifier = Bytes.toBytes(remainder.substring(idx+1));
    } else {
      permFamily = Bytes.toBytes(remainder);
    }
  }

  return new Pair<String,TablePermission>(username,
      new TablePermission(TableName.valueOf(entryName), permFamily, permQualifier, value));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 48, Source file: AccessControlLists.java

Example 15: process

import org.apache.hadoop.hbase.CellUtil; // import the package/class that the method depends on
@Override
public void process(long now, HRegion region,
    List<Mutation> mutations, WALEdit walEdit) throws IOException {

  // Override the time to avoid race-condition in the unit test caused by
  // inaccurate timer on some machines
  now = myTimer.getAndIncrement();

  // Scan both rows
  List<Cell> kvs1 = new ArrayList<Cell>();
  List<Cell> kvs2 = new ArrayList<Cell>();
  doScan(region, new Scan(row1, row1), kvs1);
  doScan(region, new Scan(row2, row2), kvs2);

  // Assert swapped
  if (swapped) {
    assertEquals(rowSize, kvs2.size());
    assertEquals(row2Size, kvs1.size());
  } else {
    assertEquals(rowSize, kvs1.size());
    assertEquals(row2Size, kvs2.size());
  }
  swapped = !swapped;

  // Add and delete keyvalues
  List<List<Cell>> kvs = new ArrayList<List<Cell>>();
  kvs.add(kvs1);
  kvs.add(kvs2);
  byte[][] rows = new byte[][]{row1, row2};
  for (int i = 0; i < kvs.size(); ++i) {
    for (Cell kv : kvs.get(i)) {
      // Delete from the current row and add to the other row
      Delete d = new Delete(rows[i]);
      KeyValue kvDelete =
          new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), 
              kv.getTimestamp(), KeyValue.Type.Delete);
      d.addDeleteMarker(kvDelete);
      Put p = new Put(rows[1 - i]);
      KeyValue kvAdd =
          new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
              now, CellUtil.cloneValue(kv));
      p.add(kvAdd);
      mutations.add(d);
      walEdit.add(kvDelete);
      mutations.add(p);
      walEdit.add(kvAdd);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 50, Source file: TestRowProcessorEndpoint.java


Note: The org.apache.hadoop.hbase.CellUtil.cloneFamily method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's License. Do not reproduce without permission.