This article collects typical usage examples of the Java method org.apache.helix.ZNRecord.getListField. If you have been wondering what ZNRecord.getListField does, how to use it, or where to find examples, the curated code samples below should help. You can also explore more usage examples of the enclosing class org.apache.helix.ZNRecord.
The following presents 13 code examples of the ZNRecord.getListField method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
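Before the examples, here is a minimal sketch of the list-field API itself. The record id and field names below are hypothetical, and the snippet assumes only a Helix dependency on the classpath:

import java.util.Arrays;
import java.util.List;
import org.apache.helix.ZNRecord;

public class GetListFieldDemo {
  public static void main(String[] args) {
    // A ZNRecord carries an id plus simple, list, and map fields.
    ZNRecord record = new ZNRecord("myResource"); // hypothetical record id
    record.setListField("myResource_0", Arrays.asList("host1", "host2"));

    // getListField returns the stored list, or null when the key is absent.
    List<String> preferences = record.getListField("myResource_0");
    System.out.println(preferences);                    // [host1, host2]
    System.out.println(record.getListField("missing")); // null
  }
}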
Example 1: convertListFields
import org.apache.helix.ZNRecord; // import the class the method depends on
public static List<ZNRecordRow> convertListFields(ZNRecord record) {
List<ZNRecordRow> result = new ArrayList<ZNRecordRow>();
for (String key : record.getListFields().keySet()) {
int order = 0;
for (String value : record.getListField(key)) {
ZNRecordRow row = new ZNRecordRow();
row.putField(ZNRECORD_ID, record.getId());
row.putField(LIST_KEY, key);
row.putField(LIST_VALUE, value);
row.putField(LIST_VALUE_INDEX, "" + order);
order++;
result.add(row);
}
}
return result;
}
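Example 1 flattens every list field of a ZNRecord into rows: one ZNRecordRow per list element, carrying the record id, the list key, the element's value, and its index within the list.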
Example 2: getFinalDelayedMapping
import org.apache.helix.ZNRecord; // import the class the method depends on
private ZNRecord getFinalDelayedMapping(IdealState idealState, ZNRecord newIdealMapping,
ZNRecord newActiveMapping, Set<String> liveInstances, int numReplica, int minActiveReplica) {
if (minActiveReplica >= numReplica) {
return newIdealMapping;
}
ZNRecord finalMapping = new ZNRecord(idealState.getResourceName());
for (String partition : newIdealMapping.getListFields().keySet()) {
List<String> idealList = newIdealMapping.getListField(partition);
List<String> activeList = newActiveMapping.getListField(partition);
List<String> liveList = new ArrayList<>();
int activeReplica = 0;
for (String ins : activeList) {
if (liveInstances.contains(ins)) {
activeReplica++;
liveList.add(ins);
}
}
if (activeReplica >= minActiveReplica) {
finalMapping.setListField(partition, activeList);
} else {
List<String> candidates = new ArrayList<String>(idealList);
candidates.removeAll(activeList);
for (String liveIns : candidates) {
liveList.add(liveIns);
if (liveList.size() >= minActiveReplica) {
break;
}
}
finalMapping.setListField(partition, liveList);
}
}
return finalMapping;
}
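Example 2 computes the final mapping for a delayed rebalance: a partition keeps its active assignment as long as at least minActiveReplica of its instances are live; otherwise the live subset is topped up with candidates from the ideal assignment until the minimum is met.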
Example 3: getSingleValue
import org.apache.helix.ZNRecord; // import the class the method depends on
private static String getSingleValue(ZNRecord record, ZnodePropertyType type, String key) {
if (record == null || key == null) {
return null;
}
String value = null;
String[] keyParts = key.split("/");
switch (type) {
case SIMPLE:
value = record.getSimpleField(key);
break;
case LIST:
List<String> list = record.getListField(keyParts[0]);
if (list == null) {
logger.warn("invalid key for list field: " + key + ", map for key part-1 doesn't exist");
return null;
}
int idx = Integer.parseInt(keyParts[1]);
value = list.get(idx);
break;
case MAP:
Map<String, String> map = record.getMapField(keyParts[0]);
if (map == null) {
logger.warn("invalid key for map field: " + key + ", map for key part-1 doesn't exist");
return null;
}
value = map.get(keyParts[1]);
break;
default:
break;
}
return value;
}
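Examples 3 through 5 share a composite-key convention: a LIST field entry is addressed as "listName/index" and a MAP field entry as "mapName/subKey". Here is a small sketch of the same parsing using only public ZNRecord calls; the field names are hypothetical:

import java.util.Arrays;
import org.apache.helix.ZNRecord;

public class CompositeKeyDemo {
  public static void main(String[] args) {
    ZNRecord record = new ZNRecord("demo");
    record.setListField("preferenceList", Arrays.asList("host1", "host2", "host3"));

    // "preferenceList/1" addresses index 1 of the list field.
    String[] keyParts = "preferenceList/1".split("/");
    String value = record.getListField(keyParts[0]).get(Integer.parseInt(keyParts[1]));
    System.out.println(value); // host2
  }
}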
Example 4: setSingleValue
import org.apache.helix.ZNRecord; // import the class the method depends on
private static void setSingleValue(ZNRecord record, ZnodePropertyType type, String key,
String value) {
String[] keyParts = key.split("/");
switch (type) {
case SIMPLE:
record.setSimpleField(key, value);
break;
case LIST:
List<String> list = record.getListField(keyParts[0]);
if (list == null) {
logger.warn("invalid key for list field: " + key + ", value for key part-1 doesn't exist");
return;
}
int idx = Integer.parseInt(keyParts[1]);
list.set(idx, value);
break;
case MAP:
Map<String, String> map = record.getMapField(keyParts[0]);
if (map == null) {
logger.warn("invalid key for map field: " + key + ", value for key part-1 doesn't exist");
return;
}
map.put(keyParts[1], value);
break;
default:
break;
}
}
Example 5: removeSingleValue
import org.apache.helix.ZNRecord; // import the class the method depends on
private static void removeSingleValue(ZNRecord record, ZnodePropertyType type, String key) {
if (record == null) {
return;
}
String[] keyParts = key.split("/");
switch (type) {
case SIMPLE:
record.getSimpleFields().remove(key);
break;
case LIST:
List<String> list = record.getListField(keyParts[0]);
if (list == null) {
logger.warn("invalid key for list field: " + key + ", value for key part-1 doesn't exist");
return;
}
int idx = Integer.parseInt(keyParts[1]);
list.remove(idx);
break;
case MAP:
Map<String, String> map = record.getMapField(keyParts[0]);
if (map == null) {
logger.warn("invalid key for map field: " + key + ", value for key part-1 doesn't exist");
return;
}
map.remove(keyParts[1]);
break;
default:
break;
}
}
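Examples 4 and 5 apply the same key convention as Example 3 (see the sketch above) to overwrite or remove a single list or map entry in place.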
Example 6: removeInstanceFromPreferences
import org.apache.helix.ZNRecord; // import the class the method depends on
/**
* Update an ideal state so that a partition has an instance removed from its preference list
* @param accessor data accessor used to read and write the ideal state
* @param instanceName instance to remove from the preference list
* @param resourceName resource whose ideal state is updated
* @param partitionName partition whose preference list is updated
*/
private void removeInstanceFromPreferences(HelixDataAccessor accessor, final String instanceName,
final String resourceName, final String partitionName) {
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
String idealStatePath = keyBuilder.idealStates(resourceName).getPath();
synchronized (_prefListHistory) {
// Updater for ideal state
final List<String> prefList = Lists.newLinkedList();
DataUpdater<ZNRecord> idealStateUpdater = new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord currentData) {
List<String> preferenceList = currentData.getListField(partitionName);
int numReplicas =
Integer.valueOf(currentData.getSimpleField(IdealStateProperty.REPLICAS.toString()));
List<String> newPrefList =
removeInstanceFromPreferenceList(preferenceList, instanceName, numReplicas);
currentData.setListField(partitionName, newPrefList);
prefList.clear();
prefList.addAll(newPrefList);
return currentData;
}
};
List<DataUpdater<ZNRecord>> updaters = Lists.newArrayList();
updaters.add(idealStateUpdater);
accessor.updateChildren(Arrays.asList(idealStatePath), updaters, AccessOption.PERSISTENT);
_prefListHistory.add(prefList);
}
}
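Note the update pattern: the DataUpdater is handed to accessor.updateChildren, so the preference-list edit is applied as a read-modify-write against the ideal-state znode instead of a blind overwrite.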
Example 7: addInstanceToPreferences
import org.apache.helix.ZNRecord; // import the class the method depends on
/**
* Update an ideal state so that the given partitions have a new instance appended at the tail of
* their preference lists
* @param accessor data accessor used to read and write the ideal state
* @param instanceName instance to append to each preference list
* @param resourceName resource whose ideal state is updated
* @param partitions partitions whose preference lists are updated
*/
private void addInstanceToPreferences(HelixDataAccessor accessor, final String instanceName,
final String resourceName, final List<String> partitions) {
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
String idealStatePath = keyBuilder.idealStates(resourceName).getPath();
synchronized (_prefListHistory) {
// Updater for ideal state
final List<String> prefList = Lists.newLinkedList();
DataUpdater<ZNRecord> idealStateUpdater = new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord currentData) {
for (String partitionName : partitions) {
List<String> preferenceList = currentData.getListField(partitionName);
int numReplicas =
Integer.valueOf(currentData.getSimpleField(IdealStateProperty.REPLICAS.toString()));
List<String> newPrefList =
addInstanceToPreferenceList(preferenceList, instanceName, numReplicas);
currentData.setListField(partitionName, newPrefList);
prefList.clear();
prefList.addAll(newPrefList);
}
return currentData;
}
};
// Send update requests together
List<DataUpdater<ZNRecord>> updaters = Lists.newArrayList();
updaters.add(idealStateUpdater);
accessor.updateChildren(Arrays.asList(idealStatePath), updaters, AccessOption.PERSISTENT);
_prefListHistory.add(prefList);
}
}
Example 8: testUpdateConfigFields
import org.apache.helix.ZNRecord; // import the class the method depends on
@Test(dependsOnMethods = "testAddConfigFields")
public void testUpdateConfigFields() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = _clusters.iterator().next();
ClusterConfig config = getClusterConfigFromRest(cluster);
ZNRecord record = config.getRecord();
String key = record.getSimpleFields().keySet().iterator().next();
String value = record.getSimpleField(key);
record.getSimpleFields().clear();
record.setSimpleField(key, value + "--updated");
key = record.getListFields().keySet().iterator().next();
List<String> list = record.getListField(key);
list.remove(0);
list.add("newValue--updated");
record.getListFields().clear();
record.setListField(key, list);
key = record.getMapFields().keySet().iterator().next();
Map<String, String> map = record.getMapField(key);
Iterator<Map.Entry<String, String>> it = map.entrySet().iterator();
it.next();
it.remove();
map.put("newKey--updated", "newValue--updated");
record.getMapFields().clear();
record.setMapField(key, map);
ClusterConfig prevConfig = getClusterConfigFromRest(cluster);
updateClusterConfigFromRest(cluster, config, Command.update);
ClusterConfig newConfig = getClusterConfigFromRest(cluster);
prevConfig.getRecord().update(config.getRecord());
Assert.assertEquals(newConfig, prevConfig,
"cluster config from response: " + newConfig + " vs cluster config actually: " + prevConfig);
}
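The final assertion leans on ZNRecord.update(ZNRecord), which overlays the argument's simple, list, and map entries key by key onto the receiver, which is exactly the merge the REST update command is expected to perform.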
Example 9: testDeleteConfigFields
import org.apache.helix.ZNRecord; // import the class the method depends on
@Test(dependsOnMethods = "testUpdateConfigFields")
public void testDeleteConfigFields() throws IOException {
System.out.println("Start test :" + TestHelper.getTestMethodName());
String cluster = _clusters.iterator().next();
ClusterConfig config = getClusterConfigFromRest(cluster);
ZNRecord record = config.getRecord();
String simpleKey = record.getSimpleFields().keySet().iterator().next();
String value = record.getSimpleField(simpleKey);
record.getSimpleFields().clear();
record.setSimpleField(simpleKey, value);
String listKey = record.getListFields().keySet().iterator().next();
List<String> list = record.getListField(listKey);
record.getListFields().clear();
record.setListField(listKey, list);
String mapKey = record.getMapFields().keySet().iterator().next();
Map<String, String> map = record.getMapField(mapKey);
record.getMapFields().clear();
record.setMapField(mapKey, map);
ClusterConfig prevConfig = getClusterConfigFromRest(cluster);
updateClusterConfigFromRest(cluster, config, Command.delete);
ClusterConfig newConfig = getClusterConfigFromRest(cluster);
Assert.assertFalse(newConfig.getRecord().getSimpleFields().containsKey(simpleKey),
"Failed to delete key " + simpleKey + " from cluster config");
Assert.assertFalse(newConfig.getRecord().getListFields().containsKey(listKey),
"Failed to delete key " + listKey + " from cluster config");
Assert.assertFalse(newConfig.getRecord().getMapFields().containsKey(mapKey),
"Failed to delete key " + mapKey + " from cluster config");
prevConfig.getRecord().subtract(config.getRecord());
Assert.assertEquals(newConfig, prevConfig,
"cluster config from response: " + newConfig + " vs cluster config actually: "
+ prevConfig);
}
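Here the mirror-image helper ZNRecord.subtract(ZNRecord) removes the argument's entries from the receiver, yielding the expected post-delete config for the comparison.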
Example 10: updateFlushThresholdForSegmentMetadata
import org.apache.helix.ZNRecord; // import the class the method depends on
void updateFlushThresholdForSegmentMetadata(LLCRealtimeSegmentZKMetadata segmentZKMetadata,
ZNRecord partitionAssignment, int tableFlushSize) {
// If config does not have a flush threshold, use the default.
if (tableFlushSize < 1) {
tableFlushSize = KafkaHighLevelStreamProviderConfig.getDefaultMaxRealtimeRowsCount();
}
// Gather list of instances for this partition
Object2IntMap<String> partitionCountForInstance = new Object2IntLinkedOpenHashMap<>();
String segmentPartitionId = new LLCSegmentName(segmentZKMetadata.getSegmentName()).getPartitionRange();
for (String instanceName : partitionAssignment.getListField(segmentPartitionId)) {
partitionCountForInstance.put(instanceName, 0);
}
// Find the maximum number of partitions served for each instance that is serving this segment
int maxPartitionCountPerInstance = 1;
for (Map.Entry<String, List<String>> partitionAndInstanceList : partitionAssignment.getListFields().entrySet()) {
for (String instance : partitionAndInstanceList.getValue()) {
if (partitionCountForInstance.containsKey(instance)) {
int partitionCountForThisInstance = partitionCountForInstance.getInt(instance);
partitionCountForThisInstance++;
partitionCountForInstance.put(instance, partitionCountForThisInstance);
if (maxPartitionCountPerInstance < partitionCountForThisInstance) {
maxPartitionCountPerInstance = partitionCountForThisInstance;
}
}
}
}
// Configure the segment size flush limit based on the maximum number of partitions allocated to a replica
int segmentFlushSize = (int) (((float) tableFlushSize) / maxPartitionCountPerInstance);
segmentZKMetadata.setSizeThresholdToFlushSegment(segmentFlushSize);
}
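As a worked example (numbers invented for illustration): with tableFlushSize = 20000 and the busiest instance in this partition's replica set serving 4 partitions, each segment is configured to flush at 20000 / 4 = 5000 rows.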
Example 11: completeCommittingSegmentsInternal
import org.apache.helix.ZNRecord; // import the class the method depends on
private void completeCommittingSegmentsInternal(String realtimeTableName,
Map<Integer, MinMaxPriorityQueue<LLCSegmentName>> partitionToLatestSegments) {
IdealState idealState = getTableIdealState(realtimeTableName);
Set<String> segmentNamesIS = idealState.getPartitionSet();
final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
for (Map.Entry<Integer, MinMaxPriorityQueue<LLCSegmentName>> entry : partitionToLatestSegments.entrySet()) {
final LLCSegmentName segmentName = entry.getValue().pollFirst();
final String segmentId = segmentName.getSegmentName();
final int partitionId = entry.getKey();
if (!segmentNamesIS.contains(segmentId)) {
LOGGER.info("{}:Repairing segment for partition {}. Segment {} not found in idealstate", realtimeTableName,
partitionId, segmentId);
List<String> newInstances = partitionAssignment.getListField(Integer.toString(partitionId));
LOGGER.info("{}: Assigning segment {} to {}", realtimeTableName, segmentId, newInstances);
// TODO Re-write num-partitions in metadata if needed.
// If there was a prev segment in the same partition, then we need to fix it to be ONLINE.
LLCSegmentName prevSegmentName = entry.getValue().pollLast();
String prevSegmentNameStr = null;
if (prevSegmentName != null) {
prevSegmentNameStr = prevSegmentName.getSegmentName();
}
updateIdealState(realtimeTableName, newInstances, prevSegmentNameStr, segmentId);
}
}
}
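If a committing segment is missing from the ideal state, the repair path looks up the partition's replica set via getListField on the Kafka partition assignment, re-adds the segment there, and passes along the previous segment of the partition so it can be fixed to ONLINE.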
Example 12: getListValue
import org.apache.helix.ZNRecord; // import the class the method depends on
private static List<String> getListValue(ZNRecord record, String key) {
if (record == null) {
return null;
}
return record.getListField(key);
}
Example 13: createConsumingSegment
import org.apache.helix.ZNRecord; // import the class the method depends on
/**
* Create a consuming segment for the Kafka partitions that are missing one.
*
* @param realtimeTableName name of the realtime table (e.g. "table_REALTIME")
* @param nonConsumingPartitions set of Kafka partition ids that do not have a consuming segment
* @param llcSegments list of segment names in the ideal state, as last observed
* @param tableConfig table config, used here for the Kafka stream settings
*/
public void createConsumingSegment(final String realtimeTableName, final Set<Integer> nonConsumingPartitions,
final List<String> llcSegments, final TableConfig tableConfig) {
final KafkaStreamMetadata kafkaStreamMetadata = new KafkaStreamMetadata(tableConfig.getIndexingConfig().getStreamConfigs());
final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
final HashMap<Integer, LLCSegmentName> ncPartitionToLatestSegment = new HashMap<>(nonConsumingPartitions.size());
final int nReplicas = partitionAssignment.getListField("0").size(); // Number of replicas (should be same for all partitions)
// For each non-consuming partition, find the latest segment (i.e. segment with highest seq number) for that partition.
// (null if there is none).
for (String segmentId : llcSegments) {
LLCSegmentName segmentName = new LLCSegmentName(segmentId);
int partitionId = segmentName.getPartitionId();
if (nonConsumingPartitions.contains(partitionId)) {
LLCSegmentName hashedSegName = ncPartitionToLatestSegment.get(partitionId);
if (hashedSegName == null || hashedSegName.getSequenceNumber() < segmentName.getSequenceNumber()) {
ncPartitionToLatestSegment.put(partitionId, segmentName);
}
}
}
// For each non-consuming partition, create a segment with a sequence number one higher than the latest segment.
// If there are no segments, then this is the first segment, so create the new segment with sequence number
// STARTING_SEQUENCE_NUMBER.
// Pick the starting offset of the new segment depending on the end offset of the prev segment (if available
// and completed), or the table configuration (smallest/largest).
for (int partition : nonConsumingPartitions) {
try {
LLCSegmentName latestSegment = ncPartitionToLatestSegment.get(partition);
long startOffset;
int nextSeqNum;
List<String> instances = partitionAssignment.getListField(Integer.toString(partition));
if (latestSegment == null) {
// No segment yet in this partition; create a new one with a starting offset per the table config.
nextSeqNum = STARTING_SEQUENCE_NUMBER;
LOGGER.info("Creating CONSUMING segment for {} partition {} with seq {}", realtimeTableName, partition,
nextSeqNum);
String consumerStartOffsetSpec = kafkaStreamMetadata.getKafkaConsumerProperties()
.get(CommonConstants.Helix.DataSource.Realtime.Kafka.AUTO_OFFSET_RESET);
startOffset = getKafkaPartitionOffset(kafkaStreamMetadata, consumerStartOffsetSpec, partition);
LOGGER.info("Found kafka offset {} for table {} for partition {}", startOffset, realtimeTableName, partition);
} else {
nextSeqNum = latestSegment.getSequenceNumber() + 1;
LOGGER.info("Creating CONSUMING segment for {} partition {} with seq {}", realtimeTableName, partition,
nextSeqNum);
// To begin with, set startOffset to the oldest available offset in kafka. Fix it to be the one we want,
// depending on what the prev segment had.
startOffset = getKafkaPartitionOffset(kafkaStreamMetadata, "smallest", partition);
LOGGER.info("Found kafka offset {} for table {} for partition {}", startOffset, realtimeTableName, partition);
startOffset = getBetterStartOffsetIfNeeded(realtimeTableName, partition, latestSegment, startOffset,
nextSeqNum);
}
createSegment(realtimeTableName, nReplicas, partition, nextSeqNum, instances, startOffset, partitionAssignment);
} catch (Exception e) {
LOGGER.error("Exception creating CONSUMING segment for {} partition {}", realtimeTableName, partition, e);
}
}
}
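Note that the replica count is read once from partition "0"'s instance list and assumed uniform across all partitions, as the inline comment states.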