

Java HelixAdmin.setResourceIdealState Method Code Examples

This article collects typical usage examples of the Java method org.apache.helix.HelixAdmin.setResourceIdealState. If you are wondering what HelixAdmin.setResourceIdealState does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.helix.HelixAdmin.


A total of 8 code examples of HelixAdmin.setResourceIdealState are shown below, sorted by popularity by default.
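
Before diving into the examples, here is a minimal, self-contained sketch of the common pattern: read the current IdealState, modify it, and write it back with setResourceIdealState. The ZooKeeper address, cluster name, and resource name below are placeholder values chosen for illustration; they do not come from any of the projects quoted in this article.

import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.IdealState;

public class SetIdealStateSketch {
  public static void main(String[] args) {
    // Placeholder ZooKeeper address and names; replace with your own cluster settings.
    HelixAdmin admin = new ZKHelixAdmin("localhost:2181");
    String clusterName = "MyCluster";
    String resourceName = "MyResource";

    // Read the resource's current ideal state, adjust it, and push it back to the cluster.
    IdealState idealState = admin.getResourceIdealState(clusterName, resourceName);
    idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
    admin.setResourceIdealState(clusterName, resourceName, idealState);

    admin.close();
  }
}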

Example 1: assignIdealStates

import org.apache.helix.HelixAdmin; // import the class this method depends on
private static void assignIdealStates(HelixManager helixManager,
    Map<String, IdealState> idealStatesFromAssignment) {
  HelixAdmin helixAdmin = helixManager.getClusterManagmentTool();
  String helixClusterName = helixManager.getClusterName();
  for (String topic : idealStatesFromAssignment.keySet()) {
    IdealState idealState = idealStatesFromAssignment.get(topic);
    helixAdmin.setResourceIdealState(helixClusterName, topic, idealState);
  }
}
 
Developer ID: uber, Project: uReplicator, Lines of code: 10, Source file: AutoRebalanceLiveInstanceChangeListener.java

Example 2: testEnableDisablePartitions

import org.apache.helix.HelixAdmin; // import the class this method depends on
@Test
public void testEnableDisablePartitions() throws InterruptedException {
  HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
  admin.enablePartition(false, CLUSTER_NAME, (PARTICIPANT_PREFIX + "_" + _startPort),
      WorkflowGenerator.DEFAULT_TGT_DB, Arrays.asList(new String[] { "TestDB_0", "TestDB_2" }));

  IdealState idealState =
      admin.getResourceIdealState(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB);
  List<String> preferenceList =
      Arrays.asList(new String[] { "localhost_12919", "localhost_12918" });
  for (String partitionName : idealState.getPartitionSet()) {
    idealState.setPreferenceList(partitionName, preferenceList);
  }
  idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
  admin.setResourceIdealState(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, idealState);

  String workflowName = TestHelper.getTestMethodName();
  Workflow.Builder builder = new Workflow.Builder(workflowName);
  JobConfig.Builder jobBuilder =
      new JobConfig.Builder().setWorkflow(workflowName).setCommand(MockTask.TASK_COMMAND)
          .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB)
          .setTargetPartitionStates(Collections.singleton("SLAVE"));
  builder.addJob("JOB", jobBuilder);
  _driver.start(builder.build());
  Thread.sleep(2000L);
  JobContext jobContext =
      _driver.getJobContext(TaskUtil.getNamespacedJobName(workflowName, "JOB"));
  Assert.assertEquals(jobContext.getPartitionState(0), null);
  Assert.assertEquals(jobContext.getPartitionState(1), TaskPartitionState.COMPLETED);
  Assert.assertEquals(jobContext.getPartitionState(2), null);
}
 
Developer ID: apache, Project: helix, Lines of code: 32, Source file: TestZkHelixAdmin.java

Example 3: startAdmin

import org.apache.helix.HelixAdmin; // import the class this method depends on
void startAdmin() throws Exception {
  HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);

  // create cluster
  System.out.println("Creating cluster: " + clusterName);
  admin.addCluster(clusterName, true);

  // add MasterSlave state mode definition
  admin.addStateModelDef(clusterName, "MasterSlave", new StateModelDefinition(
      generateConfigForMasterSlave()));

  // ideal-state znrecord
  ZNRecord record = new ZNRecord(resourceName);
  record.setSimpleField("IDEAL_STATE_MODE", "AUTO");
  record.setSimpleField("NUM_PARTITIONS", "1");
  record.setSimpleField("REPLICAS", "2");
  record.setSimpleField("STATE_MODEL_DEF_REF", "MasterSlave");
  record.setListField(resourceName, Arrays.asList("node1", "node2"));

  admin.setResourceIdealState(clusterName, resourceName, new IdealState(record));

  ConstraintItemBuilder builder = new ConstraintItemBuilder();

  // limit one transition message at a time across the entire cluster
  builder.addConstraintAttribute("MESSAGE_TYPE", "STATE_TRANSITION")
  // .addConstraintAttribute("INSTANCE", ".*") // un-comment this line if using instance-level
  // constraint
      .addConstraintAttribute("CONSTRAINT_VALUE", "1");
  admin.setConstraint(clusterName, ClusterConstraints.ConstraintType.MESSAGE_CONSTRAINT,
      "constraint1", builder.build());
}
 
Developer ID: apache, Project: helix, Lines of code: 32, Source file: TestMessageThrottle2.java

Example 4: createHelixClusterIfNeeded

import org.apache.helix.HelixAdmin; // import the class this method depends on
public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath) {
  final HelixAdmin admin = new ZKHelixAdmin(zkPath);

  if (admin.getClusters().contains(helixClusterName)) {
    LOGGER.info("cluster already exist, skipping it.. ********************************************* ");
    return;
  }

  LOGGER.info("Creating a new cluster, as the helix cluster : " + helixClusterName
      + " was not found ********************************************* ");
  admin.addCluster(helixClusterName, false);

  LOGGER.info("Enable auto join.");
  final HelixConfigScope scope =
      new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(helixClusterName).build();

  final Map<String, String> props = new HashMap<String, String>();
  props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
  //we need only one segment to be loaded at a time
  props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS, String.valueOf(1));

  admin.setConfig(scope, props);

  LOGGER.info("Adding state model definition named : "
      + PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL
      + " generated using : " + PinotHelixSegmentOnlineOfflineStateModelGenerator.class.toString()
      + " ********************************************** ");

  admin.addStateModelDef(helixClusterName,
      PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL,
      PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());

  LOGGER.info("Adding state model definition named : "
      + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL
      + " generated using : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.class.toString()
      + " ********************************************** ");

  admin.addStateModelDef(helixClusterName,
      PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL,
      PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());

  LOGGER.info("Adding empty ideal state for Broker!");
  HelixHelper.updateResourceConfigsFor(new HashMap<String, String>(), CommonConstants.Helix.BROKER_RESOURCE_INSTANCE,
      helixClusterName, admin);
  IdealState idealState =
      PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(admin, helixClusterName);
  admin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
  LOGGER.info("New Cluster setup completed... ********************************************** ");
}
 
Developer ID: Hanmourang, Project: Pinot, Lines of code: 50, Source file: HelixSetupUtils.java

Example 5: scheduleSingleJob

import org.apache.helix.HelixAdmin; // import the class this method depends on
/**
 * Posts new job to cluster
 */
private void scheduleSingleJob(String jobResource, JobConfig jobConfig) {
  HelixAdmin admin = _manager.getClusterManagmentTool();

  IdealState jobIS = admin.getResourceIdealState(_manager.getClusterName(), jobResource);
  if (jobIS != null) {
    LOG.info("Job " + jobResource + " idealstate already exists!");
    return;
  }

  // Set up job resource based on partitions from target resource
  TaskUtil.createUserContent(_manager.getHelixPropertyStore(), jobResource,
      new ZNRecord(TaskUtil.USER_CONTENT_NODE));
  int numIndependentTasks = jobConfig.getTaskConfigMap().size();

  int numPartitions = numIndependentTasks;
  if (numPartitions == 0) {
    IdealState targetIs =
        admin.getResourceIdealState(_manager.getClusterName(), jobConfig.getTargetResource());
    if (targetIs == null) {
      LOG.warn("Target resource does not exist for job " + jobResource);
      // do not need to fail here, the job will be marked as failure immediately when job starts running.
    } else {
      numPartitions = targetIs.getPartitionSet().size();
    }
  }

  admin.addResource(_manager.getClusterName(), jobResource, numPartitions,
      TaskConstants.STATE_MODEL_NAME);

  HelixDataAccessor accessor = _manager.getHelixDataAccessor();

  // Set the job configuration
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  HelixProperty resourceConfig = new HelixProperty(jobResource);
  resourceConfig.getRecord().getSimpleFields().putAll(jobConfig.getResourceConfigMap());
  Map<String, TaskConfig> taskConfigMap = jobConfig.getTaskConfigMap();
  if (taskConfigMap != null) {
    for (TaskConfig taskConfig : taskConfigMap.values()) {
      resourceConfig.getRecord().setMapField(taskConfig.getId(), taskConfig.getConfigMap());
    }
  }
  accessor.setProperty(keyBuilder.resourceConfig(jobResource), resourceConfig);

  // Push out new ideal state based on number of target partitions
  IdealStateBuilder builder = new CustomModeISBuilder(jobResource);
  builder.setRebalancerMode(IdealState.RebalanceMode.TASK);
  builder.setNumReplica(1);
  builder.setNumPartitions(numPartitions);
  builder.setStateModel(TaskConstants.STATE_MODEL_NAME);

  if (jobConfig.getInstanceGroupTag() != null) {
    builder.setNodeGroup(jobConfig.getInstanceGroupTag());
  }

  if (jobConfig.isDisableExternalView()) {
    builder.disableExternalView();
  }

  jobIS = builder.build();
  for (int i = 0; i < numPartitions; i++) {
    jobIS.getRecord().setListField(jobResource + "_" + i, new ArrayList<String>());
    jobIS.getRecord().setMapField(jobResource + "_" + i, new HashMap<String, String>());
  }
  jobIS.setRebalancerClassName(JobRebalancer.class.getName());
  admin.setResourceIdealState(_manager.getClusterName(), jobResource, jobIS);
}
 
Developer ID: apache, Project: helix, Lines of code: 70, Source file: WorkflowRebalancer.java

Example 6: testInvalidReplica2

import org.apache.helix.HelixAdmin; // import the class this method depends on
void testInvalidReplica2() throws Exception {
  HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);

  // create cluster
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  String db = "TestDB";

  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));

  // System.out.println("Creating cluster: " + clusterName);
  admin.addCluster(clusterName, true);

  // add MasterSlave state mode definition
  admin.addStateModelDef(clusterName, "MasterSlave", new StateModelDefinition(
      StateModelConfigGenerator.generateConfigForMasterSlave()));

  // Add nodes to the cluster
  int n = 3;
  System.out.println("Adding " + n + " participants to the cluster");
  for (int i = 0; i < n; i++) {
    int port = 12918 + i;
    InstanceConfig instanceConfig = new InstanceConfig("localhost_" + port);
    instanceConfig.setHostName("localhost");
    instanceConfig.setPort("" + port);
    instanceConfig.setInstanceEnabled(true);
    admin.addInstance(clusterName, instanceConfig);
    // System.out.println("\t Added participant: " + instanceConfig.getInstanceName());
  }

  // construct ideal-state manually
  IdealState idealState = new IdealState(db);
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(2);
  idealState.setReplicas("" + 2); // should be 3
  idealState.setStateModelDefRef("MasterSlave");
  idealState.getRecord().setListField("TestDB_0",
      Arrays.asList("localhost_12918", "localhost_12919", "localhost_12920"));
  idealState.getRecord().setListField("TestDB_1",
      Arrays.asList("localhost_12919", "localhost_12918", "localhost_12920"));

  admin.setResourceIdealState(clusterName, "TestDB", idealState);

  // start participants
  MockParticipantManager[] participants = new MockParticipantManager[n];
  for (int i = 0; i < n; i++) {
    String instanceName = "localhost_" + (12918 + i);

    participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
    participants[i].syncStart();
  }

  ClusterControllerManager controller =
      new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
  controller.syncStart();

  boolean result =
      ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
          clusterName));
  Assert.assertTrue(result);

  // make sure localhost_12919 is master on TestDB_1
  HelixDataAccessor accessor = controller.getHelixDataAccessor();
  Builder keyBuilder = accessor.keyBuilder();
  ExternalView extView = accessor.getProperty(keyBuilder.externalView(db));
  Map<String, String> stateMap = extView.getStateMap(db + "_1");
  Assert
      .assertEquals(
          stateMap.get("localhost_12919"),
          "MASTER",
          "localhost_12919 should be MASTER even though replicas is set to 2, since we generate message based on target-state priority");

  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
 
Developer ID: apache, Project: helix, Lines of code: 76, Source file: TestInvalidAutoIdealState.java

Example 7: testResourceTaggedFirst

import org.apache.helix.HelixAdmin; // import the class this method depends on
/**
 * Ensure that no assignments happen when there are no tagged nodes, but the resource is tagged
 */
@Test
public void testResourceTaggedFirst() throws Exception {
  final int NUM_PARTICIPANTS = 10;
  final int NUM_PARTITIONS = 4;
  final int NUM_REPLICAS = 2;
  final String RESOURCE_NAME = "TestDB0";
  final String TAG = "ASSIGNABLE";

  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));

  // Set up cluster
  TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
      "localhost", // participant name prefix
      "TestDB", // resource name prefix
      1, // resources
      NUM_PARTITIONS, // partitions per resource
      NUM_PARTICIPANTS, // number of nodes
      NUM_REPLICAS, // replicas
      "MasterSlave", RebalanceMode.FULL_AUTO, // use FULL_AUTO mode to test node tagging
      true); // do rebalance

  // tag the resource
  HelixAdmin helixAdmin = new ZKHelixAdmin(ZK_ADDR);
  IdealState idealState = helixAdmin.getResourceIdealState(clusterName, RESOURCE_NAME);
  idealState.setInstanceGroupTag(TAG);
  helixAdmin.setResourceIdealState(clusterName, RESOURCE_NAME, idealState);

  // start controller
  ClusterControllerManager controller =
      new ClusterControllerManager(ZK_ADDR, clusterName, "controller");
  controller.syncStart();

  // start participants
  MockParticipantManager[] participants = new MockParticipantManager[NUM_PARTICIPANTS];
  for (int i = 0; i < NUM_PARTICIPANTS; i++) {
    final String instanceName = "localhost_" + (12918 + i);

    participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
    participants[i].syncStart();
  }

  Thread.sleep(1000);
  boolean result =
      ClusterStateVerifier.verifyByZkCallback(new EmptyZkVerifier(clusterName, RESOURCE_NAME));
  Assert.assertTrue(result, "External view and current state must be empty");

  // cleanup
  for (int i = 0; i < NUM_PARTICIPANTS; i++) {
    participants[i].syncStop();
  }
  controller.syncStop();
}
 
Developer ID: apache, Project: helix, Lines of code: 59, Source file: TestFullAutoNodeTagging.java

Example 8: createHelixClusterIfNeeded

import org.apache.helix.HelixAdmin; // import the class this method depends on
public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath, boolean isUpdateStateModel) {
  final HelixAdmin admin = new ZKHelixAdmin(zkPath);
  final String segmentStateModelName = PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL;

  if (admin.getClusters().contains(helixClusterName)) {
    LOGGER.info("cluster already exists ********************************************* ");
    if (isUpdateStateModel) {
      final StateModelDefinition curStateModelDef = admin.getStateModelDef(helixClusterName, segmentStateModelName);
      List<String> states = curStateModelDef.getStatesPriorityList();
      if (states.contains(PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE)) {
        LOGGER.info("State model {} already updated to contain CONSUMING state", segmentStateModelName);
        return;
      } else {
        LOGGER.info("Updating {} to add states for low level kafka consumers", segmentStateModelName);
        StateModelDefinition newStateModelDef = PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition();
        ZkClient zkClient = new ZkClient(zkPath);
        zkClient.waitUntilConnected(20, TimeUnit.SECONDS);
        zkClient.setZkSerializer(new ZNRecordSerializer());
        HelixDataAccessor accessor = new ZKHelixDataAccessor(helixClusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
        PropertyKey.Builder keyBuilder = accessor.keyBuilder();
        accessor.setProperty(keyBuilder.stateModelDef(segmentStateModelName), newStateModelDef);
        LOGGER.info("Completed updating statemodel {}", segmentStateModelName);
        zkClient.close();
      }
    }
    return;
  }

  LOGGER.info("Creating a new cluster, as the helix cluster : " + helixClusterName
      + " was not found ********************************************* ");
  admin.addCluster(helixClusterName, false);

  LOGGER.info("Enable auto join.");
  final HelixConfigScope scope =
      new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(helixClusterName).build();

  final Map<String, String> props = new HashMap<String, String>();
  props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
  //we need only one segment to be loaded at a time
  props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS, String.valueOf(1));

  admin.setConfig(scope, props);

  LOGGER.info("Adding state model {} (with CONSUMED state) generated using {} **********************************************",
      segmentStateModelName , PinotHelixSegmentOnlineOfflineStateModelGenerator.class.toString());

  // If this is a fresh cluster we are creating, then the cluster will see the CONSUMING state in the
  // state model. But then the servers will never be asked to go to that STATE (whether they have the code
  // to handle it or not) until we complete the feature using low-level kafka consumers and turn the feature on.
  admin.addStateModelDef(helixClusterName, segmentStateModelName,
      PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());

  LOGGER.info("Adding state model definition named : "
      + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL
      + " generated using : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.class.toString()
      + " ********************************************** ");

  admin.addStateModelDef(helixClusterName,
      PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL,
      PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());

  LOGGER.info("Adding empty ideal state for Broker!");
  HelixHelper.updateResourceConfigsFor(new HashMap<String, String>(), CommonConstants.Helix.BROKER_RESOURCE_INSTANCE,
      helixClusterName, admin);
  IdealState idealState =
      PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(admin, helixClusterName);
  admin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
  initPropertyStorePath(helixClusterName, zkPath);
  LOGGER.info("New Cluster setup completed... ********************************************** ");
}
 
Developer ID: linkedin, Project: pinot, Lines of code: 71, Source file: HelixSetupUtils.java


Note: The org.apache.helix.HelixAdmin.setResourceIdealState examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their original authors, and copyright of the source code remains with those authors; consult each project's License before distributing or using it. Do not reproduce without permission.