

Java Pair Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.Pair. If you are wondering what exactly the Pair class does, how to use it, or what it looks like in real code, the curated class examples below should help.


The Pair class belongs to the org.apache.hadoop.hbase.util package. Fifteen code examples of the class are shown below, sorted by popularity by default.
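Before the examples, here is a minimal sketch of the Pair API itself as the examples below use it: a mutable two-element container with a constructor, a static newPair factory, getFirst/getSecond accessors, and setFirst/setSecond mutators. The element-wise equals shown at the end is an assumption that holds in the HBase versions these examples are drawn from.

import org.apache.hadoop.hbase.util.Pair;

// Minimal sketch of the basic Pair API used throughout the examples below.
public static void pairBasics() {
  Pair<String, Integer> p = new Pair<String, Integer>("regions", 42);
  String label = p.getFirst();              // "regions"
  int value = p.getSecond();                // 42
  p.setSecond(43);                          // Pair is mutable
  Pair<String, Integer> q = Pair.newPair("regions", 43); // static factory
  System.out.println(p.equals(q));          // true: equality is element-wise
}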

Example 1: buildBulkLoadHFileRequest

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * Create a protocol buffer bulk load request
 *
 * @param familyPaths pairs of column family name and HFile path
 * @param regionName name of the region to bulk load into
 * @param assignSeqNum whether a sequence number should be assigned to the load
 * @return a bulk load request
 */
public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
    final List<Pair<byte[], String>> familyPaths,
    final byte[] regionName, boolean assignSeqNum) {
  BulkLoadHFileRequest.Builder builder = BulkLoadHFileRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
    RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  FamilyPath.Builder familyPathBuilder = FamilyPath.newBuilder();
  for (Pair<byte[], String> familyPath: familyPaths) {
    familyPathBuilder.setFamily(ByteStringer.wrap(familyPath.getFirst()));
    familyPathBuilder.setPath(familyPath.getSecond());
    builder.addFamilyPath(familyPathBuilder.build());
  }
  builder.setAssignSeqNum(assignSeqNum);
  return builder.build();
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: RequestConverter.java
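A usage sketch for the converter above. The column-family names and staging paths are hypothetical, and regionName is assumed to come from a prior region lookup; none of this is part of the original example.

// Usage sketch (hypothetical family names and paths; regionName would come
// from a prior region lookup).
List<Pair<byte[], String>> familyPaths = new ArrayList<Pair<byte[], String>>();
familyPaths.add(new Pair<byte[], String>(Bytes.toBytes("cf1"), "/staging/cf1/hfile-0001"));
familyPaths.add(new Pair<byte[], String>(Bytes.toBytes("cf2"), "/staging/cf2/hfile-0002"));
BulkLoadHFileRequest request =
    RequestConverter.buildBulkLoadHFileRequest(familyPaths, regionName, true);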

Example 2: modifyTableSync

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * Modify a table synchronously. Waiting logic is similar to that of {@code admin.rb#alter_status}.
 */
@SuppressWarnings("serial")
public static void modifyTableSync(Admin admin, HTableDescriptor desc)
    throws IOException, InterruptedException {
  admin.modifyTable(desc.getTableName(), desc);
  Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
    setFirst(0);
    setSecond(0);
  }};
  int i = 0;
  do {
    status = admin.getAlterStatus(desc.getTableName());
    if (status.getSecond() != 0) {
      LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
        + " regions updated.");
      Thread.sleep(1000L);
    } else {
      LOG.debug("All regions updated.");
      break;
    }
  } while (status.getFirst() != 0 && i++ < 500);
  if (status.getFirst() != 0) {
    throw new IOException("Failed to update all regions even after 500 seconds.");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: HBaseTestingUtility.java
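A caller sketch, assuming an open Admin handle; the table name and the attribute being changed are hypothetical.

// Usage sketch (hypothetical table name and attribute change).
HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf("my_table"));
desc.setMaxFileSize(10L * 1024 * 1024 * 1024);    // change some table attribute
HBaseTestingUtility.modifyTableSync(admin, desc); // returns once all regions are updated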

Example 3: extractLabelsAndAuths

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
protected Pair<Map<String, Integer>, Map<String, List<Integer>>> extractLabelsAndAuths(
    List<List<Cell>> labelDetails) {
  Map<String, Integer> labels = new HashMap<String, Integer>();
  Map<String, List<Integer>> userAuths = new HashMap<String, List<Integer>>();
  for (List<Cell> cells : labelDetails) {
    for (Cell cell : cells) {
      if (Bytes.equals(cell.getQualifierArray(), cell.getQualifierOffset(),
          cell.getQualifierLength(), LABEL_QUALIFIER, 0, LABEL_QUALIFIER.length)) {
        labels.put(
            Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()),
            Bytes.toInt(cell.getRowArray(), cell.getRowOffset()));
      } else {
        // These cells belong to users who have authorization for this label
        String user = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(),
            cell.getQualifierLength());
        List<Integer> auths = userAuths.get(user);
        if (auths == null) {
          auths = new ArrayList<Integer>();
          userAuths.put(user, auths);
        }
        auths.add(Bytes.toInt(cell.getRowArray(), cell.getRowOffset()));
      }
    }
  }
  return new Pair<Map<String, Integer>, Map<String, List<Integer>>>(labels, userAuths);
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: DefaultVisibilityLabelServiceImpl.java
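The returned Pair bundles the two maps built in one pass over the cells. A sketch of unpacking it from inside the class; labelDetails would come from a scan of the labels table, and the "secret" label is hypothetical.

// Consuming the returned Pair of maps (sketch).
Pair<Map<String, Integer>, Map<String, List<Integer>>> result =
    extractLabelsAndAuths(labelDetails);
Map<String, Integer> labelToOrdinal = result.getFirst();     // label -> ordinal
Map<String, List<Integer>> userToAuths = result.getSecond(); // user -> authorized ordinals
Integer ordinal = labelToOrdinal.get("secret");              // hypothetical label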

Example 4: getKeysAndRegionsInRange

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * Get the corresponding start keys and regions for an arbitrary range of
 * keys.
 * <p>
 * @param startKey Starting row in range, inclusive
 * @param endKey Ending row in range
 * @param includeEndKey true if endKey is inclusive, false if exclusive
 * @param reload true to reload information or false to use cached information
 * @return A pair of list of start keys and list of HRegionLocations that
 *         contain the specified range
 * @throws IOException if a remote or network exception occurs
 * @deprecated This is no longer a public API
 */
@Deprecated
private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(
    final byte[] startKey, final byte[] endKey, final boolean includeEndKey,
    final boolean reload) throws IOException {
  final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW);
  if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) {
    throw new IllegalArgumentException(
      "Invalid range: " + Bytes.toStringBinary(startKey) +
      " > " + Bytes.toStringBinary(endKey));
  }
  List<byte[]> keysInRange = new ArrayList<byte[]>();
  List<HRegionLocation> regionsInRange = new ArrayList<HRegionLocation>();
  byte[] currentKey = startKey;
  do {
    HRegionLocation regionLocation = getRegionLocation(currentKey, reload);
    keysInRange.add(currentKey);
    regionsInRange.add(regionLocation);
    currentKey = regionLocation.getRegionInfo().getEndKey();
  } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW)
      && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0
          || (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0)));
  return new Pair<List<byte[]>, List<HRegionLocation>>(keysInRange,
      regionsInRange);
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: HTable.java
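The two lists in the returned Pair are parallel: the i-th start key belongs to the i-th region location. A sketch of consuming the Pair; since the method is private to HTable this would run inside the class, and startKey/endKey are assumptions.

// Walking the parallel lists held by the Pair (sketch).
Pair<List<byte[]>, List<HRegionLocation>> keysAndRegions =
    getKeysAndRegionsInRange(startKey, endKey, false, false);
List<byte[]> keys = keysAndRegions.getFirst();
List<HRegionLocation> locations = keysAndRegions.getSecond();
for (int i = 0; i < keys.size(); i++) {
  System.out.println(Bytes.toStringBinary(keys.get(i))
      + " -> " + locations.get(i).getHostnamePort());
}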

Example 5: addCallsForOtherReplicas

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
private void addCallsForOtherReplicas(
    ResultBoundedCompletionService<Pair<Result[], ScannerCallable>> cs, RegionLocations rl,
    int min, int max) {
  if (scan.getConsistency() == Consistency.STRONG) {
    return; // not scheduling on other replicas for strong consistency
  }
  for (int id = min; id <= max; id++) {
    if (currentScannerCallable.id == id) {
      continue; //this was already scheduled earlier
    }
    ScannerCallable s = currentScannerCallable.getScannerCallableForReplica(id);
    setStartRowForReplicaCallable(s);
    outstandingCallables.add(s);
    RetryingRPC retryingOnReplica = new RetryingRPC(s);
    cs.submit(retryingOnReplica, scannerTimeout, id);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: ScannerCallableWithReplicas.java

Example 6: getRegionInfo

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * Get the HRegionInfo from the cache or, if it is not cached, from the hbase:meta table
 * @param regionName the region name as a byte array
 * @return HRegionInfo for the region, or null if it cannot be found
 */
@SuppressWarnings("deprecation")
protected HRegionInfo getRegionInfo(final byte [] regionName) {
  String encodedName = HRegionInfo.encodeRegionName(regionName);
  RegionState regionState = getRegionState(encodedName);
  if (regionState != null) {
    return regionState.getRegion();
  }

  try {
    Pair<HRegionInfo, ServerName> p =
      MetaTableAccessor.getRegion(server.getConnection(), regionName);
    HRegionInfo hri = p == null ? null : p.getFirst();
    if (hri != null) {
      createRegionState(hri);
    }
    return hri;
  } catch (IOException e) {
    server.abort("Aborting because error occoured while reading "
      + Bytes.toStringBinary(regionName) + " from hbase:meta", e);
    return null;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: RegionStates.java
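The interesting Pair here comes from MetaTableAccessor.getRegion, which bundles a region with the server it is deployed on. A sketch of that lookup used directly; connection and regionName are assumptions.

// Sketch: the same Pair-returning meta lookup, used directly.
Pair<HRegionInfo, ServerName> p = MetaTableAccessor.getRegion(connection, regionName);
if (p != null) {
  HRegionInfo hri = p.getFirst(); // the region itself
  ServerName sn = p.getSecond();  // where it is deployed
  System.out.println(hri.getRegionNameAsString() + " on " + sn);
}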

Example 7: cleanMergeQualifier

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * Checks whether the specified region has merge qualifiers and, if so,
 * tries to clean them up
 * @param region the region to check
 * @return true if the specified region no longer has a merge qualifier
 * @throws IOException if reading from or updating hbase:meta fails
 */
public boolean cleanMergeQualifier(final HRegionInfo region)
    throws IOException {
  // Get merge regions if it is a merged region and already has merge
  // qualifier
  Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
      .getRegionsFromMergeQualifier(this.services.getConnection(),
        region.getRegionName());
  if (mergeRegions == null
      || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
    // It doesn't have merge qualifier, no need to clean
    return true;
  }
  // It shouldn't happen, we must insert/delete these two qualifiers together
  if (mergeRegions.getFirst() == null || mergeRegions.getSecond() == null) {
    LOG.error("Merged region " + region.getRegionNameAsString()
        + " has only one merge qualifier in META.");
    return false;
  }
  return cleanMergeRegion(region, mergeRegions.getFirst(),
      mergeRegions.getSecond());
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: CatalogJanitor.java

Example 8: regionsToAssignWithServerName

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * @param regionsInMeta regions read from hbase:meta, paired with their server names
 * @return Map from each region that is neither in transition nor assigned to its
 *         last known server.
 * @throws IOException
 */
private Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
    final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
  Map<HRegionInfo, ServerName> regionsToAssign =
      new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
  RegionStates regionStates = this.assignmentManager.getRegionStates();
  for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
    HRegionInfo hri = regionLocation.getFirst();
    ServerName sn = regionLocation.getSecond();
    if (regionStates.isRegionOffline(hri)) {
      regionsToAssign.put(hri, sn);
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Skipping assign for the region " + hri + " during enable table "
            + hri.getTable() + " because it's already in transition or assigned.");
      }
    }
  }
  return regionsToAssign;
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: EnableTableHandler.java

Example 9: regionsToAssignWithServerName

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * @param regionsInMeta regions read from hbase:meta, paired with their server names
 * @return Map from each region that is neither in transition nor assigned to its
 *         last known server.
 * @throws IOException
 */
private static Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
    final MasterProcedureEnv env,
    final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
  Map<HRegionInfo, ServerName> regionsToAssign =
      new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
  RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates();
  for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
    HRegionInfo hri = regionLocation.getFirst();
    ServerName sn = regionLocation.getSecond();
    if (regionStates.isRegionOffline(hri)) {
      regionsToAssign.put(hri, sn);
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Skipping assign for the region " + hri + " during enable table "
            + hri.getTable() + " because it's already in transition or assigned.");
      }
    }
  }
  return regionsToAssign;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: EnableTableProcedure.java

Example 10: makeMeta

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * Create a map that is keyed by meta row name and whose value is the Pair of
 * HRegionInfo and ServerName to return for that row.
 * @return Map with faked hbase:meta content in it.
 */
static SortedMap<byte [], Pair<HRegionInfo, ServerName>> makeMeta(final byte [] tableName,
    final int regionCount, final long namespaceSpan, final int serverCount) {
  // I need a comparator for meta rows so we sort properly.
  SortedMap<byte [], Pair<HRegionInfo, ServerName>> meta =
    new ConcurrentSkipListMap<byte[], Pair<HRegionInfo,ServerName>>(new MetaRowsComparator());
  HRegionInfo [] hris = makeHRegionInfos(tableName, regionCount, namespaceSpan);
  ServerName [] serverNames = makeServerNames(serverCount);
  int per = regionCount / serverCount;
  int count = 0;
  for (HRegionInfo hri: hris) {
    Pair<HRegionInfo, ServerName> p =
      new Pair<HRegionInfo, ServerName>(hri, serverNames[count++ / per]);
    meta.put(hri.getRegionName(), p);
  }
  return meta;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestClientNoCluster.java
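A usage sketch with hypothetical sizing: fake meta for 100 regions spread over 10 servers, walking the Pair values afterwards.

// Usage sketch (hypothetical table name and sizing).
SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta =
    makeMeta(Bytes.toBytes("test_table"), 100, 100000, 10);
for (Pair<HRegionInfo, ServerName> p : meta.values()) {
  System.out.println(p.getFirst().getRegionNameAsString() + " -> " + p.getSecond());
}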

Example 11: runTest

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
private void runTest(Table hTable, int cqStart, int expectedSize) throws IOException {
  // [0, 2, ?, ?, ?, ?, 0, 0, 0, 1]
  byte[] fuzzyKey = new byte[10];
  ByteBuffer buf = ByteBuffer.wrap(fuzzyKey);
  buf.clear();
  buf.putShort((short) 2);
  for (int i = 0; i < 4; i++)
    buf.put((byte) 63); // 63 = '?', filling the fuzzy (don't-care) positions
  buf.putInt(1); // writes the four-byte suffix 0, 0, 0, 1

  byte[] mask = new byte[] {0, 0, 1, 1, 1, 1, 0, 0, 0, 0};

  Pair<byte[], byte[]> pair = new Pair<byte[], byte[]>(fuzzyKey, mask);
  FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(Lists.newArrayList(pair));
  ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes(cqStart), true
          , Bytes.toBytes(4), true);
  //regular test
  runScanner(hTable, expectedSize, fuzzyRowFilter, columnRangeFilter);
  //reverse filter order test
  runScanner(hTable, expectedSize, columnRangeFilter, fuzzyRowFilter);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestFuzzyRowAndColumnRangeFilter.java
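The Pair here couples a fuzzy key with its mask. A sketch of the same construction in isolation, assuming the row layout from the comment above; in this version of the API a 0 mask byte means the position must match the key byte, and 1 means any byte is accepted.

// Sketch: fuzzy (key, mask) Pair for rows shaped <2-byte prefix><4 fuzzy bytes><4-byte suffix>.
byte[] fuzzyKey = new byte[10];
ByteBuffer.wrap(fuzzyKey).putShort((short) 2).putInt(0).putInt(1); // fuzzy bytes left as 0
byte[] mask = new byte[] {0, 0, 1, 1, 1, 1, 0, 0, 0, 0};           // 0 = fixed, 1 = any
FuzzyRowFilter filter = new FuzzyRowFilter(
    Lists.newArrayList(new Pair<byte[], byte[]>(fuzzyKey, mask)));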

Example 12: filterKeyValue

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
@Override
public ReturnCode filterKeyValue(Cell c) {
  // Try the fuzzy keys round-robin, starting from the last key that matched.
  final int startIndex = lastFoundIndex >= 0 ? lastFoundIndex : 0;
  final int size = fuzzyKeysData.size();
  for (int i = startIndex; i < size + startIndex; i++) {
    final int index = i % size;
    Pair<byte[], byte[]> fuzzyData = fuzzyKeysData.get(index);
    SatisfiesCode satisfiesCode =
        satisfies(isReversed(), c.getRowArray(), c.getRowOffset(), c.getRowLength(),
          fuzzyData.getFirst(), fuzzyData.getSecond());
    if (satisfiesCode == SatisfiesCode.YES) {
      lastFoundIndex = index;
      return ReturnCode.INCLUDE;
    }
  }
  // NOT FOUND -> seek next using hint
  lastFoundIndex = -1;
  return ReturnCode.SEEK_NEXT_USING_HINT;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: FuzzyRowFilter.java

Example 13: execWithRetries

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
private Pair<Integer, String> execWithRetries(String hostname, ServiceType service, String... cmd)
    throws IOException {
  RetryCounter retryCounter = retryCounterFactory.create();
  while (true) {
    try {
      return exec(hostname, service, cmd);
    } catch (IOException e) {
      retryOrThrow(retryCounter, e, hostname, cmd);
    }
    try {
      retryCounter.sleepUntilNextRetry();
    } catch (InterruptedException ex) {
      // log and keep retrying; the retry counter decides when to give up
      LOG.warn("Sleep interrupted: " + ex);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: HBaseClusterManager.java
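A sketch of consuming the (exit code, output) Pair from inside HBaseClusterManager; the hostname, service type constant, and command are assumptions for illustration.

// Consuming the (exit code, output) Pair (hypothetical host and command).
Pair<Integer, String> result =
    execWithRetries("host-1.example.com", ServiceType.HBASE_REGIONSERVER, "ls", "/tmp");
if (result.getFirst() != 0) {
  LOG.warn("Remote command failed with exit code " + result.getFirst());
}
System.out.println(result.getSecond()); // command output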

Example 14: exec

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
/**
 * Execute the given command on the host using SSH
 * @return pair of exit code and command output
 * @throws IOException if something goes wrong.
 */
private Pair<Integer, String> exec(String hostname, ServiceType service, String... cmd)
  throws IOException {
  LOG.info("Executing remote command: " + StringUtils.join(cmd, " ") + " , hostname:" + hostname);

  RemoteShell shell = new RemoteShell(hostname, getServiceUser(service), cmd);
  try {
    shell.execute();
  } catch (Shell.ExitCodeException ex) {
    // capture the stdout of the process as well.
    String output = shell.getOutput();
    // add output for the ExitCodeException.
    throw new Shell.ExitCodeException(ex.getExitCode(), "stderr: " + ex.getMessage()
      + ", stdout: " + output);
  }

  LOG.info("Executed remote command, exit code:" + shell.getExitCode()
      + " , output:" + shell.getOutput());

  return new Pair<Integer, String>(shell.getExitCode(), shell.getOutput());
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HBaseClusterManager.java

Example 15: listReplicationPeers

import org.apache.hadoop.hbase.util.Pair; // import the required package/class
@VisibleForTesting
List<ReplicationPeer> listReplicationPeers() {
  Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
  if (peers == null || peers.size() <= 0) {
    return null;
  }
  List<ReplicationPeer> listOfPeers = new ArrayList<ReplicationPeer>(peers.size());
  for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
    String peerId = peerEntry.getKey();
    try {
      Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf(peerId);
      Configuration peerConf = pair.getSecond();
      ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst(),
          parseTableCFsFromConfig(this.getPeerTableCFs(peerId)));
      listOfPeers.add(peer);
    } catch (ReplicationException e) {
      LOG.warn("Failed to get valid replication peers. "
          + "Error connecting to peer cluster with peerId=" + peerId + ". Error message="
          + e.getMessage());
      LOG.debug("Failure details to get valid replication peers.", e);
      continue;
    }
  }
  return listOfPeers;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: ReplicationAdmin.java
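The Pair from getPeerConf couples a peer's declarative settings with a Configuration pointing at the peer cluster. A sketch of unpacking it for a single peer from inside ReplicationAdmin; the peer id "1" is hypothetical.

// Sketch: unpacking one peer's (config, Configuration) Pair.
try {
  Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf("1");
  ReplicationPeerConfig peerConfig = pair.getFirst(); // declarative peer settings
  Configuration peerClusterConf = pair.getSecond();   // conf for connecting to the peer
  System.out.println("Peer cluster key: " + peerConfig.getClusterKey());
} catch (ReplicationException e) {
  LOG.warn("Could not read peer configuration", e);
}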


Note: the org.apache.hadoop.hbase.util.Pair class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.