

Java Pair.getSecond Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.Pair.getSecond. If you are wondering what Pair.getSecond does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore the broader usage of org.apache.hadoop.hbase.util.Pair, the class this method belongs to.


The following presents 15 code examples of the Pair.getSecond method, ordered by popularity.
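
Before the examples, here is a minimal self-contained sketch of the Pair API itself; the class name PairDemo and the sample values are illustrative only, not taken from any of the projects below:

import org.apache.hadoop.hbase.util.Pair;

public class PairDemo {
  public static void main(String[] args) {
    // A Pair bundles two values; getFirst()/getSecond() retrieve them.
    Pair<String, Integer> regionAndWeight = new Pair<String, Integer>("region-a", 42);
    System.out.println(regionAndWeight.getFirst());  // region-a
    System.out.println(regionAndWeight.getSecond()); // 42
  }
}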

Example 1: selectWeightedRandomItem

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/** Selects a random item from the given weighted items. */
public static <T> T selectWeightedRandomItem(List<Pair<T, Integer>> items) {
  int totalWeight = 0;
  for (Pair<T, Integer> pair : items) {
    totalWeight += pair.getSecond();
  }

  int cutoff = RandomUtils.nextInt(totalWeight);
  int cumulative = 0;
  T item = null;

  // warn: O(n)
  for (int i = 0; i < items.size(); i++) {
    int curWeight = items.get(i).getSecond();
    if (cutoff < cumulative + curWeight) {
      item = items.get(i).getFirst();
      break;
    }
    cumulative += curWeight;
  }

  return item;
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: PolicyBasedChaosMonkey.java
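
A hypothetical call site for the method above; the action names and weights are made up for illustration:

List<Pair<String, Integer>> actions = new ArrayList<Pair<String, Integer>>();
actions.add(new Pair<String, Integer>("restart-regionserver", 70));
actions.add(new Pair<String, Integer>("kill-regionserver", 30));
String chosen = selectWeightedRandomItem(actions); // returns "restart-regionserver" ~70% of the time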

Example 2: verifyStartEndKeys

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
void verifyStartEndKeys(Pair<byte[][], byte[][]> keys) {
  byte[][] startKeys = keys.getFirst();
  byte[][] endKeys = keys.getSecond();
  assertEquals(startKeys.length, endKeys.length);
  assertTrue("Found 0 regions for the table", startKeys.length > 0);

  assertArrayEquals("Start key for the first region is not byte[0]",
      HConstants.EMPTY_START_ROW, startKeys[0]);
  byte[] prevEndKey = HConstants.EMPTY_START_ROW;

  // ensure that we do not have any gaps
  for (int i = 0; i < startKeys.length; i++) {
    assertArrayEquals(
        "Hole in hbase:meta is detected. prevEndKey=" + Bytes.toStringBinary(prevEndKey)
            + ", regionStartKey=" + Bytes.toStringBinary(startKeys[i]), prevEndKey,
        startKeys[i]);
    prevEndKey = endKeys[i];
  }
  assertArrayEquals("End key for the last region is not byte[0]", HConstants.EMPTY_END_ROW,
      endKeys[endKeys.length - 1]);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestEndToEndSplitTransaction.java
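
The Pair<byte[][], byte[][]> argument typically comes from a RegionLocator. A sketch of a call site, assuming an existing Configuration named conf and a table "t1" (HBase 1.x client API):

try (Connection conn = ConnectionFactory.createConnection(conf);
     RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
  // getStartEndKeys() returns the Pair of parallel start-key/end-key arrays.
  verifyStartEndKeys(locator.getStartEndKeys());
}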

Example 3: getCompactionStateForRegion

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * {@inheritDoc}
 */
@Override
public CompactionState getCompactionStateForRegion(final byte[] regionName)
throws IOException {
  try {
    Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
    if (regionServerPair == null) {
      throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
    }
    if (regionServerPair.getSecond() == null) {
      throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
    }
    ServerName sn = regionServerPair.getSecond();
    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
    GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
      regionServerPair.getFirst().getRegionName(), true);
    PayloadCarryingRpcController controller = rpcControllerFactory.newController();
    // TODO: this does not do retries, it should. Set priority and timeout in controller
    GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
    return response.getCompactionState();
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: HBaseAdmin.java

Example 4: modifyTableSync

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
 */
@SuppressWarnings("serial")
public static void modifyTableSync(Admin admin, HTableDescriptor desc)
    throws IOException, InterruptedException {
  admin.modifyTable(desc.getTableName(), desc);
  Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
    setFirst(0);
    setSecond(0);
  }};
  int i = 0;
  do {
    status = admin.getAlterStatus(desc.getTableName());
    if (status.getSecond() != 0) {
      LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
        + " regions updated.");
      Thread.sleep(1000L);
    } else {
      LOG.debug("All regions updated.");
      break;
    }
  } while (status.getFirst() != 0 && i++ < 500);
  if (status.getFirst() != 0) {
    throw new IOException("Failed to update all regions even after 500 seconds.");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: HBaseTestingUtility.java
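
A hypothetical caller, assuming an Admin handle and an existing table "t1"; the descriptor change shown is arbitrary:

HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf("t1"));
desc.setMaxFileSize(1024L * 1024L * 1024L); // any schema change works here
modifyTableSync(admin, desc); // blocks until getAlterStatus reports zero pending regions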

Example 5: splitRegion

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * {@inheritDoc}
 */
@Override
public void splitRegion(final byte[] regionName, final byte[] splitPoint)
throws IOException {
  Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
  if (regionServerPair == null) {
    throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
  }
  if (regionServerPair.getFirst() != null &&
      regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
    throw new IllegalArgumentException("Can't split replicas directly. "
        + "Replicas are auto-split when their primary is split.");
  }
  if (regionServerPair.getSecond() == null) {
    throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
  }
  split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: HBaseAdmin.java

Example 6: regionsToAssignWithServerName

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * @param regionsInMeta
 * @return Map of regions neither in transition nor assigned, keyed to their last-known server.
 * @throws IOException
 */
private Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
    final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
  Map<HRegionInfo, ServerName> regionsToAssign =
      new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
  RegionStates regionStates = this.assignmentManager.getRegionStates();
  for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
    HRegionInfo hri = regionLocation.getFirst();
    ServerName sn = regionLocation.getSecond();
    if (regionStates.isRegionOffline(hri)) {
      regionsToAssign.put(hri, sn);
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Skipping assign for the region " + hri + " during enable table "
            + hri.getTable() + " because its already in tranition or assigned.");
      }
    }
  }
  return regionsToAssign;
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: EnableTableHandler.java

Example 7: regionsToAssignWithServerName

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * @param regionsInMeta
 * @return Map of regions neither in transition nor assigned, keyed to their last-known server.
 * @throws IOException
 */
private static Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
    final MasterProcedureEnv env,
    final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
  Map<HRegionInfo, ServerName> regionsToAssign =
      new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
  RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates();
  for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
    HRegionInfo hri = regionLocation.getFirst();
    ServerName sn = regionLocation.getSecond();
    if (regionStates.isRegionOffline(hri)) {
      regionsToAssign.put(hri, sn);
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Skipping assign for the region " + hri + " during enable table "
            + hri.getTable() + " because its already in tranition or assigned.");
      }
    }
  }
  return regionsToAssign;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: EnableTableProcedure.java

Example 8: listReplicationPeers

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
@VisibleForTesting
List<ReplicationPeer> listReplicationPeers() {
  Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
  if (peers == null || peers.isEmpty()) {
    return null;
  }
  List<ReplicationPeer> listOfPeers = new ArrayList<ReplicationPeer>(peers.size());
  for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
    String peerId = peerEntry.getKey();
    try {
      Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf(peerId);
      Configuration peerConf = pair.getSecond();
      ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst(),
          parseTableCFsFromConfig(this.getPeerTableCFs(peerId)));
      listOfPeers.add(peer);
    } catch (ReplicationException e) {
      LOG.warn("Failed to get valid replication peers. "
          + "Error connecting to peer cluster with peerId=" + peerId + ". Error message="
          + e.getMessage());
      LOG.debug("Failure details to get valid replication peers.", e);
      continue;
    }
  }
  return listOfPeers;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: ReplicationAdmin.java

Example 9: getHFileFromBackReference

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * Get the full path of the HFile referenced by the back reference
 *
 * @param rootDir root hbase directory
 * @param linkRefPath Link Back Reference path
 * @return full path of the referenced hfile
 */
public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
  Pair<TableName, String> p = parseBackReferenceName(linkRefPath.getName());
  TableName linkTableName = p.getFirst();
  String linkRegionName = p.getSecond();

  String hfileName = getBackReferenceFileName(linkRefPath.getParent());
  Path familyPath = linkRefPath.getParent().getParent();
  Path regionPath = familyPath.getParent();
  Path tablePath = regionPath.getParent();

  String linkName = createHFileLinkName(FSUtils.getTableName(tablePath),
          regionPath.getName(), hfileName);
  Path linkTableDir = FSUtils.getTableDir(rootDir, linkTableName);
  Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
  return new Path(new Path(regionDir, familyPath.getName()), linkName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: HFileLink.java

Example 10: getSortedTagOrdinals

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
private static void getSortedTagOrdinals(List<List<Integer>> fullTagsList, Tag tag)
    throws IOException {
  List<Integer> tagsOrdinalInSortedOrder = new ArrayList<Integer>();
  int offset = tag.getTagOffset();
  int endOffset = offset + tag.getTagLength();
  while (offset < endOffset) {
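    // Each entry is a raw varint32; the Pair carries (decoded value, bytes consumed).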
    Pair<Integer, Integer> result = StreamUtils.readRawVarint32(tag.getBuffer(), offset);
    tagsOrdinalInSortedOrder.add(result.getFirst());
    offset += result.getSecond();
  }
  Collections.sort(tagsOrdinalInSortedOrder);
  fullTagsList.add(tagsOrdinalInSortedOrder);
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: DefaultVisibilityLabelServiceImpl.java
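
The same (value, bytes-consumed) Pair pattern in isolation. Values below 128 encode as a single varint byte, so the hand-built buffer below is valid input; this is a sketch, not code from the original source:

byte[] buf = new byte[] { 3, 1, 7 };
int offset = 0;
while (offset < buf.length) {
  Pair<Integer, Integer> r = StreamUtils.readRawVarint32(buf, offset);
  System.out.println(r.getFirst()); // decoded values: 3, then 1, then 7
  offset += r.getSecond();          // advance by bytes consumed (1 each here)
}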

Example 11: buildUpdateFavoredNodesRequest

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * Create a protocol buffer UpdateFavoredNodesRequest to update a list of favorednode mappings
 * @param updateRegionInfos
 * @return a protocol buffer UpdateFavoredNodesRequest
 */
public static UpdateFavoredNodesRequest buildUpdateFavoredNodesRequest(
    final List<Pair<HRegionInfo, List<ServerName>>> updateRegionInfos) {
  UpdateFavoredNodesRequest.Builder ubuilder = UpdateFavoredNodesRequest.newBuilder();
  for (Pair<HRegionInfo, List<ServerName>> pair : updateRegionInfos) {
    RegionUpdateInfo.Builder builder = RegionUpdateInfo.newBuilder();
    builder.setRegion(HRegionInfo.convert(pair.getFirst()));
    for (ServerName server : pair.getSecond()) {
      builder.addFavoredNodes(ProtobufUtil.toServerName(server));
    }
    ubuilder.addUpdateInfo(builder.build());
  }
  return ubuilder.build();
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: RequestConverter.java
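
A hypothetical caller with made-up region and server values; HRegionInfo(TableName) and ServerName.valueOf follow the HBase 1.x API:

List<Pair<HRegionInfo, List<ServerName>>> updates =
    new ArrayList<Pair<HRegionInfo, List<ServerName>>>();
updates.add(new Pair<HRegionInfo, List<ServerName>>(
    new HRegionInfo(TableName.valueOf("t1")),
    Arrays.asList(ServerName.valueOf("host1,16020,1234567890"))));
UpdateFavoredNodesRequest request = buildUpdateFavoredNodesRequest(updates);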

Example 12: selectPartitions

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * Choose partitions between row ranges to hash to a single output file
 * Selects region boundaries that fall within the scan range, and groups them
 * into the desired number of partitions.
 */
void selectPartitions(Pair<byte[][], byte[][]> regionStartEndKeys) {
  List<byte[]> startKeys = new ArrayList<byte[]>();
  for (int i = 0; i < regionStartEndKeys.getFirst().length; i++) {
    byte[] regionStartKey = regionStartEndKeys.getFirst()[i];
    byte[] regionEndKey = regionStartEndKeys.getSecond()[i];
    
    // if the scan begins after this region ends, or ends before this region starts,
    // then drop the region; in other words:
    //   IF (scan begins before the end of this region
    //      AND scan ends after the start of this region)
    //   THEN include this region
    if ((isTableStartRow(startRow) || isTableEndRow(regionEndKey)
        || Bytes.compareTo(startRow, regionEndKey) < 0)
      && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey)
        || Bytes.compareTo(stopRow, regionStartKey) > 0)) {
      startKeys.add(regionStartKey);
    }
  }
  
  int numRegions = startKeys.size();
  if (numHashFiles == 0) {
    numHashFiles = numRegions / 100;
  }
  if (numHashFiles == 0) {
    numHashFiles = 1;
  }
  if (numHashFiles > numRegions) {
    // can't partition within regions
    numHashFiles = numRegions;
  }
  
  // choose a subset of start keys to group regions into ranges
  partitions = new ArrayList<ImmutableBytesWritable>(numHashFiles - 1);
  // skip the first start key as it is not a partition between ranges.
  for (long i = 1; i < numHashFiles; i++) {
    int splitIndex = (int) (numRegions * i / numHashFiles);
    partitions.add(new ImmutableBytesWritable(startKeys.get(splitIndex)));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 45, Source: HashTable.java

Example 13: ExportSnapshotInputSplit

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
public ExportSnapshotInputSplit(final List<Pair<SnapshotFileInfo, Long>> snapshotFiles) {
  this.files = new ArrayList<Pair<BytesWritable, Long>>(snapshotFiles.size());
  for (Pair<SnapshotFileInfo, Long> fileInfo: snapshotFiles) {
    this.files.add(new Pair<BytesWritable, Long>(
      new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond()));
    this.length += fileInfo.getSecond();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: ExportSnapshot.java

Example 14: compactRegion

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
/**
 * Compact an individual region.
 * Asynchronous operation.
 *
 * @param regionName region to compact
 * @param columnFamily column family within a table or region
 * @param major True if we are to do a major compaction.
 * @throws IOException if a remote or network exception occurs
 */
private void compactRegion(final byte[] regionName, final byte[] columnFamily, final boolean major)
throws IOException {
  Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
  if (regionServerPair == null) {
    throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
  }
  if (regionServerPair.getSecond() == null) {
    throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
  }
  compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: HBaseAdmin.java

Example 15: createEmptyRequest

import org.apache.hadoop.hbase.util.Pair; // import the class the method depends on
public StripeCompactionRequest createEmptyRequest(
    StripeInformationProvider si, CompactionRequest request) {
  // Treat as L0-ish compaction with fixed set of files, and hope for the best.
  if (si.getStripeCount() > 0) {
    return new BoundaryStripeCompactionRequest(request, si.getStripeBoundaries());
  }
  Pair<Long, Integer> targetKvsAndCount = estimateTargetKvs(
      request.getFiles(), this.config.getInitialCount());
  return new SplitStripeCompactionRequest(
      request, OPEN_KEY, OPEN_KEY, targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst());
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: StripeCompactionPolicy.java


Note: The org.apache.hadoop.hbase.util.Pair.getSecond examples in this article were compiled from open-source projects hosted on GitHub and similar platforms. The code fragments remain the copyright of their original authors; use and redistribution are subject to each project's License.