

Java Pair.getFirst Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.Pair.getFirst. If you are wondering what Pair.getFirst does, how to call it, or what real-world uses of it look like, the curated code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.util.Pair.


The following presents 15 code examples of the Pair.getFirst method, sorted by popularity by default.
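
Before turning to the project examples, here is a minimal, self-contained sketch of what Pair.getFirst (and its counterpart Pair.getSecond) returns. It is not taken from any of the projects below; the class name PairGetFirstDemo and the sample values are made up purely for illustration.

import org.apache.hadoop.hbase.util.Pair;

public class PairGetFirstDemo {
  public static void main(String[] args) {
    // Pair is a simple container for two values of possibly different types.
    Pair<String, Integer> pair = new Pair<String, Integer>("region-start-key", 42);
    String first = pair.getFirst();    // returns the first element: "region-start-key"
    Integer second = pair.getSecond(); // returns the second element: 42
    System.out.println(first + " / " + second);
  }
}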

Example 1: listLabels

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
@Override
public List<String> listLabels(String regex) throws IOException {
  assert (labelsRegion != null);
  Pair<Map<String, Integer>, Map<String, List<Integer>>> labelsAndUserAuths =
      extractLabelsAndAuths(getExistingLabelsWithAuths());
  Map<String, Integer> labels = labelsAndUserAuths.getFirst();
  labels.remove(SYSTEM_LABEL);
  if (regex != null) {
    Pattern pattern = Pattern.compile(regex);
    ArrayList<String> matchedLabels = new ArrayList<String>();
    for (String label : labels.keySet()) {
      if (pattern.matcher(label).matches()) {
        matchedLabels.add(label);
      }
    }
    return matchedLabels;
  }
  return new ArrayList<String>(labels.keySet());
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: DefaultVisibilityLabelServiceImpl.java

Example 2: testRow

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
@Override
void testRow(final int i) throws IOException {
  Pair<byte[], byte[]> startAndStopRow = getStartAndStopRow();
  Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond());
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  ResultScanner s = this.table.getScanner(scan);
  int count = 0;
  for (Result rr = null; (rr = s.next()) != null;) {
    count++;
  }

  if (i % 100 == 0) {
    LOG.info(String.format("Scan for key range %s - %s returned %s rows",
        Bytes.toString(startAndStopRow.getFirst()),
        Bytes.toString(startAndStopRow.getSecond()), count));
  }

  s.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: PerformanceEvaluation.java

Example 3: isFailedServer

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
/**
 * Check if the server should be considered failed. Also removes expired entries from the list.
 *
 * @return true if the server is in the failed servers list
 */
public synchronized boolean isFailedServer(final InetSocketAddress address) {
  if (failedServers.isEmpty()) {
    return false;
  }

  final String lookup = address.toString();
  final long now = EnvironmentEdgeManager.currentTime();

  // iterate, looking for the search entry and cleaning expired entries
  Iterator<Pair<Long, String>> it = failedServers.iterator();
  while (it.hasNext()) {
    Pair<Long, String> cur = it.next();
    if (cur.getFirst() < now) {
      it.remove();
    } else {
      if (lookup.equals(cur.getSecond())) {
        return true;
      }
    }
  }

  return false;
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: FailedServers.java

Example 4: cleanMergeQualifier

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
/**
 * Checks if the specified region has merge qualifiers and, if so, tries to
 * clean them up.
 * @param region the merged region to check
 * @return true if the specified region no longer has a merge qualifier
 * @throws IOException
 */
public boolean cleanMergeQualifier(final HRegionInfo region)
    throws IOException {
  // Get merge regions if it is a merged region and already has merge
  // qualifier
  Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
      .getRegionsFromMergeQualifier(this.services.getConnection(),
        region.getRegionName());
  if (mergeRegions == null
      || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
    // It doesn't have merge qualifier, no need to clean
    return true;
  }
  // It shouldn't happen, we must insert/delete these two qualifiers together
  if (mergeRegions.getFirst() == null || mergeRegions.getSecond() == null) {
    LOG.error("Merged region " + region.getRegionNameAsString()
        + " has only one merge qualifier in META.");
    return false;
  }
  return cleanMergeRegion(region, mergeRegions.getFirst(),
      mergeRegions.getSecond());
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: CatalogJanitor.java

Example 5: FuzzyRowFilter

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
public FuzzyRowFilter(List<Pair<byte[], byte[]>> fuzzyKeysData) {
  Pair<byte[], byte[]> p;
  for (int i = 0; i < fuzzyKeysData.size(); i++) {
    p = fuzzyKeysData.get(i);
    if (p.getFirst().length != p.getSecond().length) {
      Pair<String, String> readable =
          new Pair<String, String>(Bytes.toStringBinary(p.getFirst()), Bytes.toStringBinary(p
              .getSecond()));
      throw new IllegalArgumentException("Fuzzy pair lengths do not match: " + readable);
    }
    // update mask ( 0 -> -1 (0xff), 1 -> 0)
    p.setSecond(preprocessMask(p.getSecond()));
    preprocessSearchKey(p);
  }
  this.fuzzyKeysData = fuzzyKeysData;
  this.tracker = new RowTracker();
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: FuzzyRowFilter.java

Example 6: verifyStartEndKeys

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
void verifyStartEndKeys(Pair<byte[][], byte[][]> keys) {
  byte[][] startKeys = keys.getFirst();
  byte[][] endKeys = keys.getSecond();
  assertEquals(startKeys.length, endKeys.length);
  assertTrue("Found 0 regions for the table", startKeys.length > 0);

  assertArrayEquals("Start key for the first region is not byte[0]",
      HConstants.EMPTY_START_ROW, startKeys[0]);
  byte[] prevEndKey = HConstants.EMPTY_START_ROW;

  // ensure that we do not have any gaps
  for (int i=0; i<startKeys.length; i++) {
    assertArrayEquals(
        "Hole in hbase:meta is detected. prevEndKey=" + Bytes.toStringBinary(prevEndKey)
            + " ,regionStartKey=" + Bytes.toStringBinary(startKeys[i]), prevEndKey,
        startKeys[i]);
    prevEndKey = endKeys[i];
  }
  assertArrayEquals("End key for the last region is not byte[0]", HConstants.EMPTY_END_ROW,
      endKeys[endKeys.length - 1]);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestEndToEndSplitTransaction.java

Example 7: loadAll

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
/**
 * Loads all of the permission grants stored in a region of the {@code _acl_}
 * table.
 *
 * @param aclRegion
 * @return a map of the permissions for this table.
 * @throws IOException
 */
static Map<byte[], ListMultimap<String,TablePermission>> loadAll(Region aclRegion)
  throws IOException {

  if (!isAclRegion(aclRegion)) {
    throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
  }

  Map<byte[], ListMultimap<String, TablePermission>> allPerms =
      new TreeMap<byte[], ListMultimap<String, TablePermission>>(Bytes.BYTES_RAWCOMPARATOR);

  // do a full scan of _acl_ table

  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);

  InternalScanner iScanner = null;
  try {
    iScanner = aclRegion.getScanner(scan);

    while (true) {
      List<Cell> row = new ArrayList<Cell>();

      boolean hasNext = iScanner.next(row);
      ListMultimap<String,TablePermission> perms = ArrayListMultimap.create();
      byte[] entry = null;
      for (Cell kv : row) {
        if (entry == null) {
          entry = CellUtil.cloneRow(kv);
        }
        Pair<String,TablePermission> permissionsOfUserOnTable =
            parsePermissionRecord(entry, kv);
        if (permissionsOfUserOnTable != null) {
          String username = permissionsOfUserOnTable.getFirst();
          TablePermission permissions = permissionsOfUserOnTable.getSecond();
          perms.put(username, permissions);
        }
      }
      if (entry != null) {
        allPerms.put(entry, perms);
      }
      if (!hasNext) {
        break;
      }
    }
  } finally {
    if (iScanner != null) {
      iScanner.close();
    }
  }

  return allPerms;
}
 
Developer: fengchen8086, Project: ditb, Lines: 61, Source: AccessControlLists.java

Example 8: explainTableAvailability

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
public String explainTableAvailability(TableName tableName) throws IOException {
  String msg = explainTableState(tableName) + ",";
  if (getHBaseCluster().getMaster().isAlive()) {
    Map<HRegionInfo, ServerName> assignments =
        getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
            .getRegionAssignments();
    final List<Pair<HRegionInfo, ServerName>> metaLocations =
        MetaTableAccessor
            .getTableRegionsAndLocations(getZooKeeperWatcher(), connection, tableName);
    for (Pair<HRegionInfo, ServerName> metaLocation : metaLocations) {
      HRegionInfo hri = metaLocation.getFirst();
      ServerName sn = metaLocation.getSecond();
      if (!assignments.containsKey(hri)) {
        msg += ", region " + hri
            + " not assigned, but found in meta, it expected to be on " + sn;

      } else if (sn == null) {
        msg += ",  region " + hri
            + " assigned,  but has no server in meta";
      } else if (!sn.equals(assignments.get(hri))) {
        msg += ",  region " + hri
            + " assigned,  but has different servers in meta and AM ( " +
            sn + " <> " + assignments.get(hri);
      }
    }
  }
  return msg;
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: HBaseTestingUtility.java

Example 9: preprocessSearchKey

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
private void preprocessSearchKey(Pair<byte[], byte[]> p) {
  if (!UNSAFE_UNALIGNED) {
    return;
  }
  byte[] key = p.getFirst();
  byte[] mask = p.getSecond();
  for (int i = 0; i < mask.length; i++) {
    // set non-fixed part of a search key to 0.
    if (mask[i] == 0) key[i] = 0;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: FuzzyRowFilter.java

Example 10: getProcedureResult

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
@Override
public GetProcedureResultResponse getProcedureResult(RpcController controller,
    GetProcedureResultRequest request) throws ServiceException {
  LOG.debug("Checking to see if procedure is done procId=" + request.getProcId());
  try {
    master.checkInitialized();
    GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder();

    Pair<ProcedureInfo, Procedure> v = master.getMasterProcedureExecutor()
        .getResultOrProcedure(request.getProcId());
    if (v.getFirst() != null) {
      ProcedureInfo result = v.getFirst();
      builder.setState(GetProcedureResultResponse.State.FINISHED);
      builder.setStartTime(result.getStartTime());
      builder.setLastUpdate(result.getLastUpdate());
      if (result.isFailed()) {
        builder.setException(result.getForeignExceptionMessage());
      }
      if (result.hasResultData()) {
        builder.setResult(ByteStringer.wrap(result.getResult()));
      }
      master.getMasterProcedureExecutor().removeResult(request.getProcId());
    } else {
      Procedure proc = v.getSecond();
      if (proc == null) {
        builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
      } else {
        builder.setState(GetProcedureResultResponse.State.RUNNING);
        builder.setStartTime(proc.getStartTime());
        builder.setLastUpdate(proc.getLastUpdate());
      }
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: MasterRpcServices.java

Example 11: offlineRegion

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
/**
 * Offline specified region from master's in-memory state. It will not attempt to
 * reassign the region as in unassign.
 *
 * This is a special method that should be used by experts or hbck.
 *
 */
@Override
public OfflineRegionResponse offlineRegion(RpcController controller,
    OfflineRegionRequest request) throws ServiceException {
  final byte [] regionName = request.getRegion().getValue().toByteArray();
  RegionSpecifierType type = request.getRegion().getType();
  if (type != RegionSpecifierType.REGION_NAME) {
    LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
      + " actual: " + type);
  }

  try {
    master.checkInitialized();
    Pair<HRegionInfo, ServerName> pair =
      MetaTableAccessor.getRegion(master.getConnection(), regionName);
    if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
    HRegionInfo hri = pair.getFirst();
    if (master.cpHost != null) {
      master.cpHost.preRegionOffline(hri);
    }
    LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString());
    master.assignmentManager.regionOffline(hri);
    if (master.cpHost != null) {
      master.cpHost.postRegionOffline(hri);
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
  return OfflineRegionResponse.newBuilder().build();
}
 
Developer: fengchen8086, Project: ditb, Lines: 37, Source: MasterRpcServices.java

Example 12: unassignRegion

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
@Override
public UnassignRegionResponse unassignRegion(RpcController controller,
    UnassignRegionRequest req) throws ServiceException {
  try {
    final byte [] regionName = req.getRegion().getValue().toByteArray();
    RegionSpecifierType type = req.getRegion().getType();
    final boolean force = req.getForce();
    UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build();

    master.checkInitialized();
    if (type != RegionSpecifierType.REGION_NAME) {
      LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME
        + " actual: " + type);
    }
    Pair<HRegionInfo, ServerName> pair =
      MetaTableAccessor.getRegion(master.getConnection(), regionName);
    if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
    HRegionInfo hri = pair.getFirst();
    if (master.cpHost != null) {
      if (master.cpHost.preUnassign(hri, force)) {
        return urr;
      }
    }
    LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString()
        + " in current location if it is online and reassign.force=" + force);
    master.assignmentManager.unassign(hri, force);
    if (master.assignmentManager.getRegionStates().isRegionOffline(hri)) {
      LOG.debug("Region " + hri.getRegionNameAsString()
          + " is not online on any region server, reassigning it.");
      master.assignRegion(hri);
    }
    if (master.cpHost != null) {
      master.cpHost.postUnassign(hri, force);
    }

    return urr;
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: MasterRpcServices.java

Example 13: handleHBCK

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
/**
 * Handle a ZK unassigned node transition triggered by HBCK repair tool.
 * <p>
 * This is handled in a separate code path because it breaks the normal rules.
 * @param rt
 */
@SuppressWarnings("deprecation")
private void handleHBCK(RegionTransition rt) {
  String encodedName = HRegionInfo.encodeRegionName(rt.getRegionName());
  LOG.info("Handling HBCK triggered transition=" + rt.getEventType() +
    ", server=" + rt.getServerName() + ", region=" +
    HRegionInfo.prettyPrint(encodedName));
  RegionState regionState = regionStates.getRegionTransitionState(encodedName);
  switch (rt.getEventType()) {
    case M_ZK_REGION_OFFLINE:
      HRegionInfo regionInfo;
      if (regionState != null) {
        regionInfo = regionState.getRegion();
      } else {
        try {
          byte [] name = rt.getRegionName();
          Pair<HRegionInfo, ServerName> p = MetaTableAccessor.getRegion(
            this.server.getConnection(), name);
          regionInfo = p.getFirst();
        } catch (IOException e) {
          LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
          return;
        }
      }
      LOG.info("HBCK repair is triggering assignment of region=" +
          regionInfo.getRegionNameAsString());
      // trigger assign, node is already in OFFLINE so don't need to update ZK
      assign(regionInfo, false);
      break;

    default:
      LOG.warn("Received unexpected region state from HBCK: " + rt.toString());
      break;
  }

}
 
Developer: fengchen8086, Project: ditb, Lines: 42, Source: AssignmentManager.java

Example 14: selectPartitions

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
/**
 * Choose partitions between row ranges to hash to a single output file
 * Selects region boundaries that fall within the scan range, and groups them
 * into the desired number of partitions.
 */
void selectPartitions(Pair<byte[][], byte[][]> regionStartEndKeys) {
  List<byte[]> startKeys = new ArrayList<byte[]>();
  for (int i = 0; i < regionStartEndKeys.getFirst().length; i++) {
    byte[] regionStartKey = regionStartEndKeys.getFirst()[i];
    byte[] regionEndKey = regionStartEndKeys.getSecond()[i];
    
    // if the scan begins after the end of this region, or ends before the start of this region,
    // then drop this region; in other words:
    //   IF (scan begins before the end of this region
    //      AND scan ends after the start of this region)
    //   THEN include this region
    if ((isTableStartRow(startRow) || isTableEndRow(regionEndKey)
        || Bytes.compareTo(startRow, regionEndKey) < 0)
      && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey)
        || Bytes.compareTo(stopRow, regionStartKey) > 0)) {
      startKeys.add(regionStartKey);
    }
  }
  
  int numRegions = startKeys.size();
  if (numHashFiles == 0) {
    numHashFiles = numRegions / 100;
  }
  if (numHashFiles == 0) {
    numHashFiles = 1;
  }
  if (numHashFiles > numRegions) {
    // can't partition within regions
    numHashFiles = numRegions;
  }
  
  // choose a subset of start keys to group regions into ranges
  partitions = new ArrayList<ImmutableBytesWritable>(numHashFiles - 1);
  // skip the first start key as it is not a partition between ranges.
  for (long i = 1; i < numHashFiles; i++) {
    int splitIndex = (int) (numRegions * i / numHashFiles);
    partitions.add(new ImmutableBytesWritable(startKeys.get(splitIndex)));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 45, Source: HashTable.java

Example 15: createEmptyRequest

import org.apache.hadoop.hbase.util.Pair; // import the package/class the method depends on
public StripeCompactionRequest createEmptyRequest(
    StripeInformationProvider si, CompactionRequest request) {
  // Treat as L0-ish compaction with fixed set of files, and hope for the best.
  if (si.getStripeCount() > 0) {
    return new BoundaryStripeCompactionRequest(request, si.getStripeBoundaries());
  }
  Pair<Long, Integer> targetKvsAndCount = estimateTargetKvs(
      request.getFiles(), this.config.getInitialCount());
  return new SplitStripeCompactionRequest(
      request, OPEN_KEY, OPEN_KEY, targetKvsAndCount.getSecond(), targetKvsAndCount.getFirst());
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: StripeCompactionPolicy.java


Note: The org.apache.hadoop.hbase.util.Pair.getFirst examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. When redistributing or using the code, please follow the license of the corresponding project; do not reproduce this article without permission.