

Java ZNRecord.getListFields Method Code Examples

This article collects typical usage examples of the Java method org.apache.helix.ZNRecord.getListFields, compiled from open-source code. If you are wondering what ZNRecord.getListFields does, how to call it, or want concrete examples of its use, the curated code samples below should help. You can also explore further usage examples of org.apache.helix.ZNRecord.


The sections below present 7 code examples of ZNRecord.getListFields, sorted by popularity by default. Upvote the examples you like or find useful; your ratings help the system recommend better Java examples.
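Before diving into the project examples, here is a minimal, self-contained sketch (not drawn from any project below; the record id and field names are invented for illustration) of what getListFields returns: the record's key -> list-of-strings map.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.helix.ZNRecord;

public class GetListFieldsDemo {
  public static void main(String[] args) {
    // Build a record with two list fields (id and keys invented for illustration).
    ZNRecord record = new ZNRecord("myResource");
    record.setListField("partition_0", Arrays.asList("host1", "host2"));
    record.setListField("partition_1", Arrays.asList("host2", "host3"));

    // getListFields() returns the record's full key -> list-of-values map.
    Map<String, List<String>> listFields = record.getListFields();
    for (Map.Entry<String, List<String>> entry : listFields.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}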

Example 1: runRepeatedly

import org.apache.helix.ZNRecord; // import the package/class this method depends on
/**
 * Repeatedly randomly select a task to run and report the result
 * @param numIterations
 *          Number of random tasks to run in sequence
 */
public void runRepeatedly(int numIterations) {
  logger.info("~~~~ Initial State ~~~~~");
  RebalanceStrategy strategy =
      new AutoRebalanceStrategy(RESOURCE_NAME, _partitions, _states, _maxPerNode);
  ZNRecord initialResult =
      strategy.computePartitionAssignment(_allNodes, _liveNodes, _currentMapping, null);
  _currentMapping = getMapping(initialResult.getListFields());
  logger.info(_currentMapping.toString());
  getRunResult(_currentMapping, initialResult.getListFields());
  for (int i = 0; i < numIterations; i++) {
    logger.info("~~~~ Iteration " + i + " ~~~~~");
    ZNRecord znRecord = runOnceRandomly();
    if (znRecord != null) {
      final Map<String, List<String>> listResult = znRecord.getListFields();
      final Map<String, Map<String, String>> mapResult = getMapping(listResult);
      logger.info(mapResult.toString());
      logger.info(listResult.toString());
      getRunResult(mapResult, listResult);
      _currentMapping = mapResult;
    }
  }
}
 
Developer ID: apache, Project: helix, Lines: 28, Source: TestAutoRebalanceStrategy.java

Example 2: validatePartitionAssignment

import org.apache.helix.ZNRecord; // import the package/class this method depends on
private ZNRecord validatePartitionAssignment(FakePinotLLCRealtimeSegmentManager segmentManager, int nKafkaPartitions,
    int nReplicas) {
  ZNRecord partitionAssignment;
  Map<String, List<String>> partitionToServerListMap;
  partitionAssignment = segmentManager._partitionAssignment;
  partitionToServerListMap = partitionAssignment.getListFields();
  Assert.assertEquals(partitionToServerListMap.size(), nKafkaPartitions);
  for (List<String> serverList : partitionToServerListMap.values()) {
    Assert.assertEquals(serverList.size(), nReplicas);
  }
  return partitionAssignment;
}
 
Developer ID: linkedin, Project: pinot, Lines: 13, Source: PinotLLCRealtimeSegmentManagerTest.java

Example 3: serialize

import org.apache.helix.ZNRecord; // import the package/class this method depends on
@Override
public byte[] serialize(Object data) {
  if (!(data instanceof ZNRecord)) {
    // null is NOT an instance of any class
    logger.error("Input object must be of type ZNRecord but it is " + data
        + ". Will not write to zk");
    throw new HelixException("Input object is not of type ZNRecord (was " + data + ")");
  }

  ZNRecord record = (ZNRecord) data;

  // apply retention policy
  int max = getListFieldBound(record);
  if (max < Integer.MAX_VALUE) {
    Map<String, List<String>> listMap = record.getListFields();
    for (String key : listMap.keySet()) {
      List<String> list = listMap.get(key);
      if (list.size() > max) {
        listMap.put(key, list.subList(0, max));
      }
    }
  }

  // do serialization
  ObjectMapper mapper = new ObjectMapper();
  SerializationConfig serializationConfig = mapper.getSerializationConfig();
  serializationConfig.set(SerializationConfig.Feature.INDENT_OUTPUT, true);
  serializationConfig.set(SerializationConfig.Feature.AUTO_DETECT_FIELDS, true);
  serializationConfig.set(SerializationConfig.Feature.CAN_OVERRIDE_ACCESS_MODIFIERS, true);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  byte[] serializedBytes = null;
  try {
    mapper.writeValue(baos, data);
    serializedBytes = baos.toByteArray();
    // apply compression if needed
    if (record.getBooleanField("enableCompression", false) || serializedBytes.length > ZNRecord.SIZE_LIMIT) {
      serializedBytes = GZipCompressionUtil.compress(serializedBytes);
    }
  } catch (Exception e) {
    logger.error("Exception during data serialization. Will not write to zk. Data (first 1k): "
        + new String(baos.toByteArray()).substring(0, 1024), e);
    throw new HelixException(e);
  }
  if (serializedBytes.length > ZNRecord.SIZE_LIMIT) {
    logger.error("Data size larger than 1M, ZNRecord.id: " + record.getId()
        + ". Will not write to zk. Data (first 1k): "
        + new String(serializedBytes).substring(0, 1024));
    throw new HelixException("Data size larger than 1M, ZNRecord.id: " + record.getId());
  }
  return serializedBytes;
}
 
Developer ID: apache, Project: helix, Lines: 52, Source: ZNRecordSerializer.java
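The retention step in Example 3 works because getListFields() exposes the record's live list map: trimming a list and putting it back changes what gets serialized. Below is a standalone sketch of the same trimming pattern; the bound of 2 and the field contents are invented for illustration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.helix.ZNRecord;

public class ListFieldTrimDemo {
  public static void main(String[] args) {
    ZNRecord record = new ZNRecord("demo");
    record.setListField("errors", new ArrayList<>(Arrays.asList("e1", "e2", "e3", "e4")));

    int max = 2; // arbitrary retention bound, for illustration only
    Map<String, List<String>> listMap = record.getListFields();
    for (Map.Entry<String, List<String>> entry : listMap.entrySet()) {
      List<String> list = entry.getValue();
      if (list.size() > max) {
        // Copy the sublist; subList() is only a view of the backing list.
        entry.setValue(new ArrayList<>(list.subList(0, max)));
      }
    }
    System.out.println(record.getListFields()); // prints {errors=[e1, e2]}
  }
}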

Example 4: removeListValue

import org.apache.helix.ZNRecord; // import the package/class this method depends on
private static void removeListValue(ZNRecord record, String key) {
  // Guard against null before mutating; getListFields() returns the record's
  // live list-field map, so removing the key changes the record itself.
  if (record != null && record.getListFields() != null) {
    record.getListFields().remove(key);
  }
}
 
Developer ID: apache, Project: helix, Lines: 6, Source: TestExecutor.java
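Because getListFields() returns the live internal map rather than a copy, the helper in Example 4 mutates the record directly. A quick sketch demonstrating that behavior, with made-up id and key values:

import java.util.Arrays;
import org.apache.helix.ZNRecord;

public class RemoveListValueDemo {
  public static void main(String[] args) {
    ZNRecord record = new ZNRecord("demo");
    record.setListField("obsoleteKey", Arrays.asList("a", "b"));

    // Removing through the returned map mutates the record itself.
    record.getListFields().remove("obsoleteKey");
    System.out.println(record.getListFields().containsKey("obsoleteKey")); // false
  }
}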

Example 5: validateLLCSegments

import org.apache.helix.ZNRecord; // import the package/class this method depends on
void validateLLCSegments(final String realtimeTableName, TableConfig tableConfig) {
  LOGGER.info("Validating LLC Segments for {}", realtimeTableName);
  Map<String, String> streamConfigs = tableConfig.getIndexingConfig().getStreamConfigs();
  ZNRecord partitionAssignment = _llcRealtimeSegmentManager.getKafkaPartitionAssignment(realtimeTableName);
  if (partitionAssignment == null) {
    LOGGER.warn("No partition assignment found for table {}", realtimeTableName);
    return;
  }
  Map<String, List<String>> partitionToHostsMap = partitionAssignment.getListFields();
  // Keep a set of kafka partitions, and remove the partition when we find a segment in CONSUMING state in
  // that partition.
  Set<Integer> nonConsumingKafkaPartitions = new HashSet<>(partitionToHostsMap.size());
  for (String partitionStr : partitionToHostsMap.keySet()) {
    nonConsumingKafkaPartitions.add(Integer.valueOf(partitionStr));
  }

  IdealState idealState =
      HelixHelper.getTableIdealState(_pinotHelixResourceManager.getHelixZkManager(), realtimeTableName);
  if (!idealState.isEnabled()) {
    // No validation to be done.
    LOGGER.info("Skipping validation for {} since it is disabled", realtimeTableName);
    return;
  }
  // Walk through all segments in the idealState, looking for one instance that is in CONSUMING state. If we find
  // one, remove the kafka partition that the segment belongs to from the kafka partition set.
  // Make sure that there are at least some LLC segments in place. If there are no LLC segments, it is possible
  // that this table is in the process of being disabled for LLC.
  Set<String> segmentIds = idealState.getPartitionSet();
  List<String> llcSegments = new ArrayList<>(segmentIds.size());
  for (String segmentId : segmentIds) {
    if (SegmentName.isLowLevelConsumerSegmentName(segmentId)) {
      llcSegments.add(segmentId);
      Map<String, String> stateMap = idealState.getInstanceStateMap(segmentId);
      Iterator<String> iterator = stateMap.values().iterator();
      // If there is at least one instance in CONSUMING state, we are good.
      boolean foundConsuming = false;
      while (iterator.hasNext() && !foundConsuming) {
        String stateString = iterator.next();
        if (stateString.equals(PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE)) {
          LOGGER.info("Found CONSUMING segment {}", segmentId);
          foundConsuming = true;
        }
      }
      if (foundConsuming) {
        LLCSegmentName llcSegmentName = new LLCSegmentName(segmentId);
        nonConsumingKafkaPartitions.remove(llcSegmentName.getPartitionId());
      }
    }
  }

  // Kafka partition set now has all the partitions that do not have any segments in CONSUMING state.
  if (!llcSegments.isEmpty()) {
    // Raise the metric only if there is at least one llc segment in the idealstate.
    _validationMetrics.updateNonConsumingPartitionCountMetric(realtimeTableName, nonConsumingKafkaPartitions.size());
    // Recreate a segment for the partitions that are missing one.
    for (Integer kafkaPartition : nonConsumingKafkaPartitions) {
      LOGGER.warn("Table {}, kafka partition {} has no segments in CONSUMING state (out of {} llc segments)",
          realtimeTableName, kafkaPartition, llcSegments.size());
    }
    if (_autoCreateOnError) {
      _llcRealtimeSegmentManager.createConsumingSegment(realtimeTableName, nonConsumingKafkaPartitions, llcSegments,
          tableConfig);
      _llcRealtimeSegmentManager.completeCommittingSegments(realtimeTableName, llcSegments);
    }
  }
  // Make this call after other validations (so that we verify that we are consistent against the existing partition
  // assignment). This call may end up changing the kafka partition assignment for the table.
  _llcRealtimeSegmentManager.updateKafkaPartitionsIfNecessary(tableConfig);
}
 
Developer ID: linkedin, Project: pinot, Lines: 70, Source: ValidationManager.java

Example 6: updateKafkaPartitionsIfNecessary

import org.apache.helix.ZNRecord; // import the package/class this method depends on
/**
 * Update the kafka partitions as necessary to accommodate changes in number of replicas, number of tenants or
 * number of kafka partitions. As new segments are assigned, they will obey the new kafka partition assignment.
 *
 * @param tableConfig tableConfig from propertystore
 */
public void updateKafkaPartitionsIfNecessary(TableConfig tableConfig) {
  final String realtimeTableName = tableConfig.getTableName();
  final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
  final Map<String, List<String>> partitionToServersMap = partitionAssignment.getListFields();
  final KafkaStreamMetadata kafkaStreamMetadata = new KafkaStreamMetadata(tableConfig.getIndexingConfig().getStreamConfigs());

  final String realtimeServerTenantName =
      ControllerTenantNameBuilder.getRealtimeTenantNameForTenant(tableConfig.getTenantConfig().getServer());
  final List<String> currentInstances = getInstances(realtimeServerTenantName);

  // Previous partition count is what we find in the Kafka partition assignment znode.
  // Get the current partition count from Kafka.
  final int prevPartitionCount = partitionToServersMap.size();
  int currentPartitionCount = -1;
  try {
    currentPartitionCount = getKafkaPartitionCount(kafkaStreamMetadata);
  } catch (Exception e) {
    LOGGER.warn("Could not get partition count for {}. Leaving kafka partition count at {}", realtimeTableName, currentPartitionCount);
    return;
  }

  // Previous instance set is what we find in the Kafka partition assignment znode (values of the map entries)
  final Set<String> prevInstances = new HashSet<>(currentInstances.size());
  for (List<String> servers : partitionToServersMap.values()) {
    prevInstances.addAll(servers);
  }

  final int prevReplicaCount = partitionToServersMap.entrySet().iterator().next().getValue().size();
  final int currentReplicaCount = Integer.valueOf(tableConfig.getValidationConfig().getReplicasPerPartition());

  boolean updateKafkaAssignment = false;

  if (!prevInstances.equals(new HashSet<String>(currentInstances))) {
    LOGGER.info("Detected change in instances for table {}", realtimeTableName);
    updateKafkaAssignment = true;
  }

  if (prevPartitionCount != currentPartitionCount) {
    LOGGER.info("Detected change in Kafka partition count for table {} from {} to {}", realtimeTableName, prevPartitionCount, currentPartitionCount);
    updateKafkaAssignment = true;
  }

  if (prevReplicaCount != currentReplicaCount) {
    LOGGER.info("Detected change in per-partition replica count for table {} from {} to {}", realtimeTableName, prevReplicaCount, currentReplicaCount);
    updateKafkaAssignment = true;
  }

  if (!updateKafkaAssignment) {
    LOGGER.info("Not updating Kafka partition assignment for table {}", realtimeTableName);
    return;
  }

  // Generate new kafka partition assignment and update the znode
  if (currentInstances.size() < currentReplicaCount) {
    LOGGER.error("Cannot have {} replicas in {} instances for {}.Not updating partition assignment", currentReplicaCount, currentInstances.size(), realtimeTableName);
    long numOfInstancesNeeded = currentReplicaCount - currentInstances.size();
    _controllerMetrics.setValueOfTableGauge(realtimeTableName, ControllerGauge.SHORT_OF_LIVE_INSTANCES, numOfInstancesNeeded);
    return;
  } else {
    _controllerMetrics.setValueOfTableGauge(realtimeTableName, ControllerGauge.SHORT_OF_LIVE_INSTANCES, 0);
  }
  ZNRecord newPartitionAssignment = generatePartitionAssignment(tableConfig, kafkaStreamMetadata.getKafkaTopicName(), currentPartitionCount, currentInstances);
  writeKafkaPartitionAssignment(realtimeTableName, newPartitionAssignment);
  LOGGER.info("Successfully updated Kafka partition assignment for table {}", realtimeTableName);
}
 
Developer ID: linkedin, Project: pinot, Lines: 72, Source: PinotLLCRealtimeSegmentManager.java

Example 7: PartitionToReplicaGroupMappingZKMetadata

import org.apache.helix.ZNRecord; // import the package/class this method depends on
public PartitionToReplicaGroupMappingZKMetadata(ZNRecord znRecord) {
  _partitionToReplicaGroupMapping = znRecord.getListFields();
  _tableName = znRecord.getId();
}
 
Developer ID: linkedin, Project: pinot, Lines: 5, Source: PartitionToReplicaGroupMappingZKMetadata.java


Note: The org.apache.helix.ZNRecord.getListFields method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.