This page collects typical usage examples of the Java method org.apache.helix.model.IdealState.setPartitionState. If you have been wondering what IdealState.setPartitionState does, how to call it, or want to see it used in real code, the curated method examples below should help. You can also explore the enclosing class org.apache.helix.model.IdealState
for further usage examples.
Fifteen code examples of IdealState.setPartitionState are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code samples.
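Before the examples, here is a minimal, self-contained sketch of what setPartitionState does; the resource, partition, and instance names are invented for illustration. Each call records one partition → instance → state entry in the IdealState's backing ZNRecord, which Helix treats as the target assignment in CUSTOMIZED rebalance mode.
import java.util.Map;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;

public class SetPartitionStateSketch {
  public static void main(String[] args) {
    IdealState idealState = new IdealState("myResource"); // hypothetical resource name
    idealState.setStateModelDefRef("OnlineOffline");
    idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED);
    idealState.setNumPartitions(1);
    idealState.setReplicas("2");
    // One call per (partition, instance) pair; the state string must belong
    // to the referenced state model definition.
    idealState.setPartitionState("myResource_0", "Server_localhost_0", "ONLINE");
    idealState.setPartitionState("myResource_0", "Server_localhost_1", "ONLINE");
    // The mapping lives in the map fields of the backing ZNRecord.
    Map<String, String> stateMap = idealState.getInstanceStateMap("myResource_0");
    System.out.println(stateMap); // {Server_localhost_0=ONLINE, Server_localhost_1=ONLINE}
  }
}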
Example 1: handleBrokerResource
import org.apache.helix.model.IdealState; // import the package/class this method depends on
private void handleBrokerResource(AbstractTableConfig tableConfig) {
  try {
    String brokerTenant =
        ControllerTenantNameBuilder.getBrokerTenantNameForTenant(tableConfig.getTenantConfig().getBroker());
    if (_helixAdmin.getInstancesInClusterWithTag(_helixClusterName, brokerTenant).isEmpty()) {
      throw new RuntimeException("broker tenant : " + tableConfig.getTenantConfig().getBroker() + " is not existed!");
    }
    LOGGER.info("Trying to update BrokerDataResource IdealState!");
    final IdealState idealState =
        _helixAdmin.getResourceIdealState(_helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE);
    // Only proceed if the broker resource ideal state exists; mark every tagged broker ONLINE for the table.
    if (idealState != null) {
      String tableName = tableConfig.getTableName();
      for (String instanceName : _helixAdmin.getInstancesInClusterWithTag(_helixClusterName, brokerTenant)) {
        idealState.setPartitionState(tableName, instanceName, BrokerOnlineOfflineStateModel.ONLINE);
      }
      _helixAdmin
          .setResourceIdealState(_helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
    }
  } catch (final Exception e) {
    LOGGER.warn("Caught exception while creating broker", e);
  }
}
Example 2: addSegmentToIdealState
import org.apache.helix.model.IdealState; // import the package/class this method depends on
/**
 * Add the new specified segment to the idealState of the specified table in the specified cluster.
 *
 * @param helixManager The HelixManager object to access the helix cluster.
 * @param tableName Name of the table to which the new segment is to be added.
 * @param segmentName Name of the new segment to be added.
 * @param getInstancesForSegment Callable returning the list of instances where the segment should be uploaded.
 */
public static void addSegmentToIdealState(HelixManager helixManager, String tableName, final String segmentName,
    final Callable<List<String>> getInstancesForSegment) {
  Function<IdealState, IdealState> updater = new Function<IdealState, IdealState>() {
    @Override
    public IdealState apply(IdealState idealState) {
      List<String> targetInstances = null;
      try {
        targetInstances = getInstancesForSegment.call();
      } catch (Exception e) {
        LOGGER.error("Unable to get new instances for segment uploading.");
        return null;
      }
      for (final String instance : targetInstances) {
        idealState.setPartitionState(segmentName, instance, ONLINE);
      }
      idealState.setNumPartitions(idealState.getNumPartitions() + 1);
      return idealState;
    }
  };
  updateIdealState(helixManager, tableName, updater, DEFAULT_RETRY_POLICY);
}
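As a quick orientation, here is a hypothetical call site for the helper above. The table name, segment name, and instance names are invented, and helixManager is assumed to be an already-connected HelixManager for the target cluster.
final List<String> hostingInstances = Arrays.asList("Server_localhost_0", "Server_localhost_1");
addSegmentToIdealState(helixManager, "myTable_OFFLINE", "myTable_OFFLINE_0",
    new Callable<List<String>>() {
      @Override
      public List<String> call() {
        // A real deployment would consult its instance-assignment strategy here.
        return hostingInstances;
      }
    });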
Example 3: doInitialAssignment
import org.apache.helix.model.IdealState; // import the package/class this method depends on
public IdealState doInitialAssignment(String clusterName, List<String> instanceNames,
    int replicationFactor) {
  IdealState idealState = new IdealState(MySQLConstants.MASTER_SLAVE_RESOURCE_NAME);
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED);
  if (instanceNames.size() % replicationFactor != 0) {
    LOG.error(String.format(
        "Number of instances (%s) in the cluster must be a multiple of replication factor (%s)",
        instanceNames.size(), replicationFactor));
    return null;
  }
  int numSlices = instanceNames.size() / replicationFactor;
  idealState.setNumPartitions(numSlices);
  idealState.setReplicas(String.valueOf(replicationFactor));
  Collections.sort(instanceNames);
  for (int i = 0; i < numSlices; i++) {
    for (int j = 0; j < replicationFactor; j++) {
      idealState.setPartitionState(MySQLConstants.MASTER_SLAVE_RESOURCE_NAME + "_" + i,
          instanceNames.get(i * replicationFactor + j), (j == 0) ? "MASTER" : "SLAVE");
    }
  }
  LOG.info("Creating initial assignment \n" + idealState);
  _helixAdmin.setResourceIdealState(clusterName, MySQLConstants.MASTER_SLAVE_RESOURCE_NAME,
      idealState);
  return idealState;
}
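To make the effect of the nested loop above concrete, the following self-contained sketch repeats the same assignment logic for four invented instances and a replication factor of 2; the resource and instance names are placeholders, not taken from the example.
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;

public class InitialAssignmentSketch {
  public static void main(String[] args) {
    List<String> instances = Arrays.asList("mysql_1", "mysql_2", "mysql_3", "mysql_4");
    int replicationFactor = 2;
    int numSlices = instances.size() / replicationFactor; // 2 slices of 2 replicas each
    IdealState idealState = new IdealState("MyDB"); // placeholder resource name
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED);
    idealState.setNumPartitions(numSlices);
    idealState.setReplicas(String.valueOf(replicationFactor));
    Collections.sort(instances);
    for (int i = 0; i < numSlices; i++) {
      for (int j = 0; j < replicationFactor; j++) {
        // The first replica of each slice becomes MASTER, the remaining ones SLAVE.
        idealState.setPartitionState("MyDB_" + i, instances.get(i * replicationFactor + j),
            (j == 0) ? "MASTER" : "SLAVE");
      }
    }
    System.out.println(idealState.getInstanceStateMap("MyDB_0")); // {mysql_1=MASTER, mysql_2=SLAVE}
    System.out.println(idealState.getInstanceStateMap("MyDB_1")); // {mysql_3=MASTER, mysql_4=SLAVE}
  }
}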
Example 4: createIdealState
import org.apache.helix.model.IdealState; // import the package/class this method depends on
protected IdealState createIdealState(String resourceGroupName, String instanceGroupTag,
    List<String> instanceNames, int numPartition, int replica, String rebalanceMode,
    String stateModelDef) {
  IdealState is = _gSetupTool
      .createIdealStateForResourceGroup(resourceGroupName, instanceGroupTag, numPartition,
          replica, rebalanceMode, stateModelDef);
  // Set up the initial partition->instance mapping.
  int nodeIdx = 0;
  int numNode = instanceNames.size();
  assert (numNode >= replica);
  for (int i = 0; i < numPartition; i++) {
    String partitionName = resourceGroupName + "_" + i;
    for (int j = 0; j < replica; j++) {
      is.setPartitionState(partitionName, instanceNames.get((nodeIdx + j) % numNode),
          OnlineOfflineSMD.States.ONLINE.toString());
    }
    nodeIdx++;
  }
  return is;
}
Example 5: dropSegmentFromIdealStateFor
import org.apache.helix.model.IdealState; // import the package/class this method depends on
/**
 * Removing a segment also requires recomputing the ideal state.
 *
 * @param tableName name of the table the segment belongs to
 * @param segmentId segment to be dropped
 * @param helixAdmin Helix admin handle
 * @param helixClusterName name of the Helix cluster
 * @return the updated IdealState with every replica of the segment marked DROPPED
 */
public synchronized static IdealState dropSegmentFromIdealStateFor(String tableName, String segmentId,
    HelixAdmin helixAdmin, String helixClusterName) {
  final IdealState currentIdealState = helixAdmin.getResourceIdealState(helixClusterName, tableName);
  final Set<String> currentInstanceSet = currentIdealState.getInstanceSet(segmentId);
  if (!currentInstanceSet.isEmpty() && currentIdealState.getPartitionSet().contains(segmentId)) {
    for (String instanceName : currentIdealState.getInstanceSet(segmentId)) {
      currentIdealState.setPartitionState(segmentId, instanceName, "DROPPED");
    }
  } else {
    throw new RuntimeException("Cannot found segmentId - " + segmentId + " in table - " + tableName);
  }
  return currentIdealState;
}
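Note that the helper only mutates the IdealState in memory; a hypothetical caller (cluster, table, and segment names invented, helixAdmin assumed to be an existing HelixAdmin handle) still has to write it back, for example:
IdealState updated =
    dropSegmentFromIdealStateFor("myTable_OFFLINE", "myTable_OFFLINE_0", helixAdmin, "myCluster");
helixAdmin.setResourceIdealState("myCluster", "myTable_OFFLINE", updated);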
Example 6: registerSegmentMetadata
import org.apache.helix.model.IdealState; // import the package/class this method depends on
private void registerSegmentMetadata(SegmentMetadata segmentMetadata) {
  // Put the segment metadata into the property store.
  OfflineSegmentZKMetadata offlineSegmentZKMetadata = new OfflineSegmentZKMetadata();
  ZKMetadataUtils.updateSegmentMetadata(offlineSegmentZKMetadata, segmentMetadata);
  ZKMetadataProvider.setOfflineSegmentZKMetadata(_pinotHelixResourceManager.getPropertyStore(),
      offlineSegmentZKMetadata);
  // Put the segment into the ideal state.
  IdealState idealState = _helixAdmin.getResourceIdealState(HELIX_CLUSTER_NAME, _offlineTableName);
  idealState.setPartitionState(segmentMetadata.getName(), "Server_localhost_0", "ONLINE");
  idealState.setPartitionState(segmentMetadata.getName(), "Server_localhost_1", "ONLINE");
  _helixAdmin.setResourceIdealState(HELIX_CLUSTER_NAME, _offlineTableName, idealState);
}
Example 7: main
import org.apache.helix.model.IdealState; // import the package/class this method depends on
public static void main(String[] args) {
  IdealState idealState = new IdealState("id");
  idealState.setPartitionState("p0", "i001", "MASTER");
  System.out.println(idealState);
  ZNRecord record = new ZNRecord(idealState.getId());
  record.setSimpleFields(idealState.getRecord().getSimpleFields());
  record.setMapFields(idealState.getRecord().getMapFields());
  IdealState newIdealState = new IdealState(record);
  newIdealState.setPartitionState("p0", "i001", "SLAVE");
  System.out.println(idealState);
  System.out.println(newIdealState);
}
Example 8: addClusterToGrandCluster
import org.apache.helix.model.IdealState; // import the package/class this method depends on
@Override
public void addClusterToGrandCluster(String clusterName, String grandCluster) {
  if (!ZKUtil.isClusterSetup(grandCluster, _zkClient)) {
    throw new HelixException("Grand cluster " + grandCluster + " is not setup yet");
  }
  if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
    throw new HelixException("Cluster " + clusterName + " is not setup yet");
  }
  IdealState idealState = new IdealState(clusterName);
  idealState.setNumPartitions(1);
  idealState.setStateModelDefRef("LeaderStandby");
  List<String> controllers = getInstancesInCluster(grandCluster);
  if (controllers.size() == 0) {
    throw new HelixException("Grand cluster " + grandCluster + " has no instances");
  }
  idealState.setReplicas(Integer.toString(controllers.size()));
  Collections.shuffle(controllers);
  idealState.getRecord().setListField(clusterName, controllers);
  idealState.setPartitionState(clusterName, controllers.get(0), "LEADER");
  for (int i = 1; i < controllers.size(); i++) {
    idealState.setPartitionState(clusterName, controllers.get(i), "STANDBY");
  }
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(grandCluster, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  accessor.setProperty(keyBuilder.idealStates(idealState.getResourceName()), idealState);
}
Example 9: addInstanceToBrokerIdealState
import org.apache.helix.model.IdealState; // import the package/class this method depends on
private void addInstanceToBrokerIdealState(String brokerTenantTag, String instanceName) {
  IdealState tableIdealState =
      _helixAdmin.getResourceIdealState(_helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE);
  for (String tableNameWithType : tableIdealState.getPartitionSet()) {
    TableConfig tableConfig = ZKMetadataProvider.getTableConfig(_propertyStore, tableNameWithType);
    Preconditions.checkNotNull(tableConfig);
    String brokerTag =
        ControllerTenantNameBuilder.getBrokerTenantNameForTenant(tableConfig.getTenantConfig().getBroker());
    if (brokerTag.equals(brokerTenantTag)) {
      tableIdealState.setPartitionState(tableNameWithType, instanceName, BrokerOnlineOfflineStateModel.ONLINE);
    }
  }
  _helixAdmin.setResourceIdealState(_helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE,
      tableIdealState);
}
Example 10: addLLCRealtimeSegmentsInIdealState
import org.apache.helix.model.IdealState; // import the package/class this method depends on
private IdealState addLLCRealtimeSegmentsInIdealState(final IdealState idealState, Map<String, List<String>> idealStateEntries) {
  for (Map.Entry<String, List<String>> entry : idealStateEntries.entrySet()) {
    final String segmentId = entry.getKey();
    final Map<String, String> stateMap = idealState.getInstanceStateMap(segmentId);
    if (stateMap != null) {
      // Replace the segment's existing instance-state mapping if it already exists.
      stateMap.clear();
    }
    for (String instanceName : entry.getValue()) {
      idealState.setPartitionState(segmentId, instanceName, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    }
  }
  return idealState;
}
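The helper expects a map from segment name to the instances that should consume it. A hypothetical input is sketched below; the segment and instance names are invented, idealState is assumed to be the table's current IdealState, and "CONSUMING" is assumed to be the literal value behind the CONSUMING_STATE constant.
Map<String, List<String>> idealStateEntries = new HashMap<String, List<String>>();
idealStateEntries.put("myTable__0__0__20170101T0000Z", Arrays.asList("Server_1", "Server_2"));
idealStateEntries.put("myTable__1__0__20170101T0000Z", Arrays.asList("Server_2", "Server_3"));
addLLCRealtimeSegmentsInIdealState(idealState, idealStateEntries);
// Each listed instance ends up in CONSUMING state for its segment, e.g.:
// idealState.getInstanceStateMap("myTable__0__0__20170101T0000Z")
//   -> {Server_1=CONSUMING, Server_2=CONSUMING}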
Example 11: testUpdateHelixForSegmentClosing
import org.apache.helix.model.IdealState; // import the package/class this method depends on
@Test
public void testUpdateHelixForSegmentClosing() throws Exception {
  final IdealState idealState = PinotTableIdealStateBuilder.buildEmptyKafkaConsumerRealtimeIdealStateFor(
      "someTable_REALTIME", 17);
  final String s1 = "S1";
  final String s2 = "S2";
  final String s3 = "S3";
  String[] instanceArr = {s1, s2, s3};
  final String oldSegmentNameStr = "oldSeg";
  final String newSegmentNameStr = "newSeg";
  idealState.setPartitionState(oldSegmentNameStr, s1,
      PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealState.setPartitionState(oldSegmentNameStr, s2,
      PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealState.setPartitionState(oldSegmentNameStr, s3,
      PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  PinotLLCRealtimeSegmentManager.updateForNewRealtimeSegment(idealState, Arrays.asList(instanceArr),
      oldSegmentNameStr, newSegmentNameStr);
  // Now verify that the old segment state is ONLINE in the ideal state and the new segment state is CONSUMING.
  Map<String, String> oldsegStateMap = idealState.getInstanceStateMap(oldSegmentNameStr);
  Assert.assertEquals(oldsegStateMap.get(s1), PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  Assert.assertEquals(oldsegStateMap.get(s2), PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  Assert.assertEquals(oldsegStateMap.get(s3), PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  Map<String, String> newsegStateMap = idealState.getInstanceStateMap(newSegmentNameStr);
  Assert.assertEquals(newsegStateMap.get(s1), PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  Assert.assertEquals(newsegStateMap.get(s2), PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  Assert.assertEquals(newsegStateMap.get(s3), PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
}
Example 12: addSegmentToIdealState
import org.apache.helix.model.IdealState; // import the package/class this method depends on
/**
 * Add the new specified segment to the idealState of the specified table in the specified cluster.
 *
 * @param helixManager The HelixManager object to access the helix cluster.
 * @param tableName Name of the table to which the new segment is to be added.
 * @param segmentName Name of the new segment to be added.
 * @param getInstancesForSegment Callable returning the list of instances where the segment should be uploaded.
 */
public static void addSegmentToIdealState(HelixManager helixManager, final String tableName, final String segmentName,
    final Callable<List<String>> getInstancesForSegment) {
  Function<IdealState, IdealState> updater = new Function<IdealState, IdealState>() {
    @Override
    public IdealState apply(IdealState idealState) {
      List<String> targetInstances = null;
      try {
        targetInstances = getInstancesForSegment.call();
      } catch (Exception e) {
        LOGGER.error("Unable to get new instances for uploading segment {}, table {}", segmentName, tableName, e);
        return null;
      }
      if (targetInstances == null || targetInstances.size() == 0) {
        LOGGER.warn("No instances assigned for segment {}, table {}", segmentName, tableName);
      } else {
        for (final String instance : targetInstances) {
          idealState.setPartitionState(segmentName, instance, ONLINE);
        }
      }
      idealState.setNumPartitions(idealState.getNumPartitions() + 1);
      return idealState;
    }
  };
  updateIdealState(helixManager, tableName, updater, DEFAULT_RETRY_POLICY);
}
Example 13: addNewRealtimeSegmentToIdealState
import org.apache.helix.model.IdealState; // import the package/class this method depends on
public static IdealState addNewRealtimeSegmentToIdealState(String segmentId, IdealState state, String instanceName) {
  state.setPartitionState(segmentId, instanceName, ONLINE);
  state.setNumPartitions(state.getNumPartitions() + 1);
  return state;
}
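A hypothetical way to exercise the one-liner above (resource, segment, and instance names invented; the ONLINE constant is assumed to hold the literal state name "ONLINE"):
IdealState state = new IdealState("myTable_REALTIME");
state.setNumPartitions(0);
state = addNewRealtimeSegmentToIdealState("myTable_REALTIME_0", state, "Server_localhost_0");
// state.getInstanceStateMap("myTable_REALTIME_0") -> {Server_localhost_0=ONLINE}
// state.getNumPartitions()                        -> 1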
Example 14: handleMessageUsingScheduledTaskQueue
import org.apache.helix.model.IdealState; // import the package/class this method depends on
void handleMessageUsingScheduledTaskQueue(Criteria recipientCriteria, Message messageTemplate,
    String controllerMsgId) {
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  Builder keyBuilder = accessor.keyBuilder();
  Map<String, String> sendSummary = new HashMap<String, String>();
  sendSummary.put("MessageCount", "0");
  Map<InstanceType, List<Message>> messages =
      _manager.getMessagingService().generateMessage(recipientCriteria, messageTemplate);
  // Calculate tasks and put them into the ideal state of the SCHEDULER_TASK_QUEUE resource.
  // The list fields hold the destination node, while the Message parameters are stored in the
  // map fields. Task throttling can be done on the SCHEDULER_TASK_QUEUE resource.
  if (messages.size() > 0) {
    String taskQueueName = _message.getRecord().getSimpleField(SCHEDULER_TASK_QUEUE);
    if (taskQueueName == null) {
      throw new HelixException("SchedulerTaskMessage need to have " + SCHEDULER_TASK_QUEUE
          + " specified.");
    }
    IdealState newAddedScheduledTasks = new IdealState(taskQueueName);
    newAddedScheduledTasks.setBucketSize(TASKQUEUE_BUCKET_NUM);
    newAddedScheduledTasks.setStateModelDefRef(SCHEDULER_TASK_QUEUE);
    synchronized (_manager) {
      int existingTopPartitionId = 0;
      IdealState currentTaskQueue =
          _manager.getHelixDataAccessor().getProperty(
              accessor.keyBuilder().idealStates(newAddedScheduledTasks.getId()));
      if (currentTaskQueue != null) {
        existingTopPartitionId = findTopPartitionId(currentTaskQueue) + 1;
      }
      List<Message> taskMessages = (List<Message>) (messages.values().toArray()[0]);
      for (Message task : taskMessages) {
        String partitionId = taskQueueName + "_" + existingTopPartitionId;
        existingTopPartitionId++;
        String instanceName = task.getTgtName();
        newAddedScheduledTasks.setPartitionState(partitionId, instanceName, "COMPLETED");
        task.getRecord().setSimpleField(instanceName, "COMPLETED");
        task.getRecord().setSimpleField(CONTROLLER_MSG_ID, controllerMsgId);
        List<String> priorityList = new LinkedList<String>();
        priorityList.add(instanceName);
        newAddedScheduledTasks.getRecord().setListField(partitionId, priorityList);
        newAddedScheduledTasks.getRecord().setMapField(partitionId,
            task.getRecord().getSimpleFields());
        _logger.info("Scheduling for controllerMsg " + controllerMsgId + " , sending task "
            + partitionId + " " + task.getMsgId() + " to " + instanceName);
        if (_logger.isDebugEnabled()) {
          _logger.debug(task.getRecord().getSimpleFields().toString());
        }
      }
      _manager.getHelixDataAccessor().updateProperty(
          accessor.keyBuilder().idealStates(newAddedScheduledTasks.getId()),
          newAddedScheduledTasks);
      sendSummary.put("MessageCount", "" + taskMessages.size());
    }
  }
  // Record the number of messages sent into the scheduler message status updates.
  ZNRecord statusUpdate =
      accessor.getProperty(
          keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.name(),
              _message.getMsgId())).getRecord();
  statusUpdate.getMapFields().put("SentMessageCount", sendSummary);
  accessor.updateProperty(keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.name(),
      _message.getMsgId()), new StatusUpdate(statusUpdate));
}
Example 15: testDisableResourceInCustomMode
import org.apache.helix.model.IdealState; // import the package/class this method depends on
@Test
public void testDisableResourceInCustomMode() throws Exception {
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
      "localhost", // participant name prefix
      "TestDB", // resource name prefix
      1, // resources
      PARTITION_NUM, // partitions per resource
      N, // number of nodes
      2, // replicas
      "MasterSlave", RebalanceMode.CUSTOMIZED, true); // do rebalance
  // Set up a custom ideal state.
  BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_gZkClient);
  HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0"));
  idealState.setPartitionState("TestDB0_0", "localhost_12918", "SLAVE");
  idealState.setPartitionState("TestDB0_0", "localhost_12919", "SLAVE");
  accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
  ClusterControllerManager controller =
      new ClusterControllerManager(ZK_ADDR, clusterName, "controller");
  controller.syncStart();
  // Start the participants.
  MockParticipantManager participants[] = new MockParticipantManager[N];
  for (int i = 0; i < N; i++) {
    String instanceName = "localhost_" + (12918 + i);
    participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
    participants[i].syncStart();
  }
  boolean result =
      ClusterStateVerifier
          .verifyByZkCallback(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR,
              clusterName));
  Assert.assertTrue(result);
  // Disable TestDB0.
  enableResource(clusterName, false);
  checkExternalView(clusterName);
  // Re-enable TestDB0.
  enableResource(clusterName, true);
  result =
      ClusterStateVerifier.verifyByPolling(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(
          ZK_ADDR, clusterName));
  Assert.assertTrue(result);
  // Clean up.
  controller.syncStop();
  for (int i = 0; i < N; i++) {
    participants[i].syncStop();
  }
  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}