This article collects typical usage examples of the Java method org.apache.helix.HelixAdmin.addInstance. If you have been wondering what HelixAdmin.addInstance does or how to use it, the curated code samples below should help. You can also explore further usages of the enclosing class, org.apache.helix.HelixAdmin.
The sections below present 8 code examples of HelixAdmin.addInstance, sorted by popularity by default.
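Before the collected examples, here is a minimal, self-contained sketch of the basic addInstance flow: create a ZooKeeper-backed admin, create a cluster, describe a participant with an InstanceConfig, and register it. It mirrors the pattern used in Examples 2, 6 and 7 below; the ZooKeeper address, cluster name and instance name are placeholders, not values from any particular deployment.

import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.InstanceConfig;

public class AddInstanceSketch {
  public static void main(String[] args) {
    // Placeholder ZooKeeper address and names; substitute your own.
    HelixAdmin admin = new ZKHelixAdmin("localhost:2181");
    String clusterName = "DemoCluster";
    admin.addCluster(clusterName, true);

    // Describe the participant and register it with the cluster.
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
    instanceConfig.setHostName("localhost");
    instanceConfig.setPort("12918");
    instanceConfig.setInstanceEnabled(true);
    admin.addInstance(clusterName, instanceConfig);

    // Release the underlying ZooKeeper connection when done.
    admin.close();
  }
}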
Example 1: addInstance
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
@PUT
@Path("{instanceName}")
public Response addInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName, String content) {
  HelixAdmin admin = getHelixAdmin();
  ZNRecord record;
  try {
    record = toZNRecord(content);
  } catch (IOException e) {
    _logger.error("Failed to deserialize user's input " + content + ", Exception: " + e);
    return badRequest("Input is not a valid ZNRecord!");
  }
  try {
    admin.addInstance(clusterId, new InstanceConfig(record));
  } catch (Exception ex) {
    _logger.error("Error in adding an instance: " + instanceName, ex);
    return serverError(ex);
  }
  return OK();
}
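Example 1 calls a toZNRecord(content) helper whose implementation is not shown in the snippet. As a hedged illustration only (a hypothetical helper, not the project's actual code), such a helper could be written on top of Helix's stock ZNRecordSerializer, which parses the JSON payload into a ZNRecord; package locations may differ across Helix versions.

import java.io.IOException;
import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;

// Hypothetical helper class, for illustration only.
final class ZNRecordJson {
  private static final ZNRecordSerializer SERIALIZER = new ZNRecordSerializer();

  static ZNRecord toZNRecord(String content) throws IOException {
    try {
      // ZNRecordSerializer deserializes a JSON byte payload into a ZNRecord.
      ZNRecord record = (ZNRecord) SERIALIZER.deserialize(content.getBytes());
      if (record == null) {
        throw new IOException("Empty or unparsable ZNRecord payload");
      }
      return record;
    } catch (RuntimeException e) {
      throw new IOException("Input could not be parsed as a ZNRecord: " + content, e);
    }
  }
}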
Example 2: testEnableDisablePartitions
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
@Test
public void testEnableDisablePartitions() {
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  String instanceName = "TestInstance";
  String testResourcePrefix = "TestResource";
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
  admin.addCluster(clusterName, true);
  admin.addInstance(clusterName, new InstanceConfig(instanceName));
  // Test disabling partitions of the instance, per resource
  admin.enablePartition(false, clusterName, instanceName, testResourcePrefix + "0",
      Arrays.asList(new String[]{"1", "2"}));
  admin.enablePartition(false, clusterName, instanceName, testResourcePrefix + "1",
      Arrays.asList(new String[]{"2", "3", "4"}));
  InstanceConfig instanceConfig = admin.getInstanceConfig(clusterName, instanceName);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "0").size(), 2);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "1").size(), 3);
  // Test disabling a partition across resources
  // TODO: Remove this part once setInstanceEnabledForPartition(partition, enabled) is removed
  instanceConfig.setInstanceEnabledForPartition("10", false);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "0").size(), 3);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "1").size(), 4);
}
Example 3: testRebuildBrokerResourceWhenBrokerAdded
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
@Test
public void testRebuildBrokerResourceWhenBrokerAdded() throws Exception {
  // Check that the first table we added doesn't need to be rebuilt (the case where the ideal
  // state brokers and the brokers in the broker resource are the same).
  String partitionName = _offlineTableConfig.getTableName();
  HelixAdmin helixAdmin = _helixManager.getClusterManagmentTool(); // note: the Helix API method is really spelled this way
  IdealState idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // Ensure that the broker resource is not rebuilt.
  Assert.assertTrue(idealState.getInstanceSet(partitionName)
      .equals(_pinotHelixResourceManager.getAllInstancesForBrokerTenant(
          ControllerTenantNameBuilder.DEFAULT_TENANT_NAME)));
  _pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags(partitionName);
  // Add another table that needs to be rebuilt
  TableConfig offlineTableConfigTwo =
      new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE).setTableName(TEST_TABLE_TWO).build();
  _pinotHelixResourceManager.addTable(offlineTableConfigTwo);
  String partitionNameTwo = offlineTableConfigTwo.getTableName();
  // Add a new broker manually such that the ideal state is not updated, and ensure that the
  // broker resource gets rebuilt
  final String brokerId = "Broker_localhost_2";
  InstanceConfig instanceConfig = new InstanceConfig(brokerId);
  instanceConfig.setInstanceEnabled(true);
  instanceConfig.setHostName("Broker_localhost");
  instanceConfig.setPort("2");
  helixAdmin.addInstance(HELIX_CLUSTER_NAME, instanceConfig);
  helixAdmin.addInstanceTag(HELIX_CLUSTER_NAME, instanceConfig.getInstanceName(),
      ControllerTenantNameBuilder.getBrokerTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // Assert that the two don't match before the call to rebuild the broker resource.
  Assert.assertTrue(!idealState.getInstanceSet(partitionNameTwo)
      .equals(_pinotHelixResourceManager.getAllInstancesForBrokerTenant(
          ControllerTenantNameBuilder.DEFAULT_TENANT_NAME)));
  _pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags(partitionNameTwo);
  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // Assert that the two do match after the rebuild.
  Assert.assertTrue(idealState.getInstanceSet(partitionNameTwo)
      .equals(_pinotHelixResourceManager.getAllInstancesForBrokerTenant(
          ControllerTenantNameBuilder.DEFAULT_TENANT_NAME)));
}
Example 4: addNewDataNodes
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
/**
 * Add nodes from the static cluster map that are not already present in Helix, ignoring those
 * that are already there. This is to make upgrades smooth.
 *
 * Replica/partition information is not updated by this method; it is updated when replicas and
 * partitions are added.
 *
 * At this time, node removals are not dealt with.
 */
private void addNewDataNodes() {
  for (Datacenter dc : staticClusterMap.hardwareLayout.getDatacenters()) {
    HelixAdmin dcAdmin = adminForDc.get(dc.getName());
    for (DataNode node : dc.getDataNodes()) {
      String instanceName = getInstanceName(node);
      if (!dcAdmin.getInstancesInCluster(clusterName).contains(instanceName)) {
        InstanceConfig instanceConfig = new InstanceConfig(instanceName);
        instanceConfig.setHostName(node.getHostname());
        instanceConfig.setPort(Integer.toString(node.getPort()));
        // populate mountPath -> Disk information.
        Map<String, Map<String, String>> diskInfos = new HashMap<>();
        for (Disk disk : node.getDisks()) {
          Map<String, String> diskInfo = new HashMap<>();
          diskInfo.put(ClusterMapUtils.DISK_CAPACITY_STR, Long.toString(disk.getRawCapacityInBytes()));
          diskInfo.put(ClusterMapUtils.DISK_STATE, ClusterMapUtils.AVAILABLE_STR);
          // Note: An instance config has to contain the information for each disk about the replicas it hosts.
          // This information will be initialized to the empty string - but will be updated whenever the partition
          // is added to the cluster.
          diskInfo.put(ClusterMapUtils.REPLICAS_STR, "");
          diskInfos.put(disk.getMountPath(), diskInfo);
        }
        // Add all instance configuration.
        instanceConfig.getRecord().setMapFields(diskInfos);
        if (node.hasSSLPort()) {
          instanceConfig.getRecord().setSimpleField(ClusterMapUtils.SSLPORT_STR, Integer.toString(node.getSSLPort()));
        }
        instanceConfig.getRecord().setSimpleField(ClusterMapUtils.DATACENTER_STR, node.getDatacenterName());
        instanceConfig.getRecord().setSimpleField(ClusterMapUtils.RACKID_STR, Long.toString(node.getRackId()));
        instanceConfig.getRecord().setListField(ClusterMapUtils.SEALED_STR, new ArrayList<String>());
        // Finally, add this node to the DC.
        dcAdmin.addInstance(clusterName, instanceConfig);
      }
    }
    System.out.println("Added all new nodes in datacenter " + dc.getName());
  }
}
Example 5: setupCluster
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
public static List<InstanceConfig> setupCluster(String zkAddr, String clusterName, int numNodes) {
  ZkClient zkclient = null;
  List<InstanceConfig> instanceConfigs = new ArrayList<InstanceConfig>();
  List<String> instanceNames = new ArrayList<String>();
  try {
    HelixAdmin admin = new ZKHelixAdmin(zkAddr);
    ClusterAdmin clusterAdmin = new ClusterAdmin(admin);
    // add cluster, always recreate
    admin.dropCluster(clusterName);
    clusterAdmin.createCluster(clusterName);
    // add nodes
    for (int i = 0; i < numNodes; i++) {
      String port = String.valueOf(MYSQL_PORTS[i]);
      String host = MYSQL_HOSTS[i];
      String serverId = host + "_" + port;
      instanceNames.add(serverId);
      InstanceConfig config = new InstanceConfig(serverId);
      config.setHostName(host);
      config.setPort(port);
      config.setInstanceEnabled(true);
      config.getRecord().setSimpleField(MySQLConstants.MYSQL_PORT, port);
      config.getRecord().setSimpleField(MySQLConstants.MYSQL_SUPER_USER, "monty");
      config.getRecord().setSimpleField(MySQLConstants.MYSQL_SUPER_PASSWORD, "some_pass");
      admin.addInstance(clusterName, config);
      instanceConfigs.add(config);
    }
    // add the resource "MasterSlaveAssignment", which maintains the master/slave mappings
    clusterAdmin.doInitialAssignment(clusterName, instanceNames, 2);
    // Create database
    clusterAdmin.createDatabase(clusterName, "MyDB", 6, "");
    // Add a table to the database
    clusterAdmin.createTable(clusterName, "MyDB", "MyTable", " ( col1 INT, col2 INT ) ");
  } finally {
    if (zkclient != null) {
      zkclient.close();
    }
  }
  return instanceConfigs;
}
Example 6: testInvalidReplica2
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
void testInvalidReplica2() throws Exception {
  HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR);
  // create cluster
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  String db = "TestDB";
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  // System.out.println("Creating cluster: " + clusterName);
  admin.addCluster(clusterName, true);
  // add MasterSlave state model definition
  admin.addStateModelDef(clusterName, "MasterSlave", new StateModelDefinition(
      StateModelConfigGenerator.generateConfigForMasterSlave()));
  // Add nodes to the cluster
  int n = 3;
  System.out.println("Adding " + n + " participants to the cluster");
  for (int i = 0; i < n; i++) {
    int port = 12918 + i;
    InstanceConfig instanceConfig = new InstanceConfig("localhost_" + port);
    instanceConfig.setHostName("localhost");
    instanceConfig.setPort("" + port);
    instanceConfig.setInstanceEnabled(true);
    admin.addInstance(clusterName, instanceConfig);
    // System.out.println("\t Added participant: " + instanceConfig.getInstanceName());
  }
  // construct ideal-state manually
  IdealState idealState = new IdealState(db);
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(2);
  idealState.setReplicas("" + 2); // should be 3
  idealState.setStateModelDefRef("MasterSlave");
  idealState.getRecord().setListField("TestDB_0",
      Arrays.asList("localhost_12918", "localhost_12919", "localhost_12920"));
  idealState.getRecord().setListField("TestDB_1",
      Arrays.asList("localhost_12919", "localhost_12918", "localhost_12920"));
  admin.setResourceIdealState(clusterName, "TestDB", idealState);
  // start participants
  MockParticipantManager[] participants = new MockParticipantManager[n];
  for (int i = 0; i < n; i++) {
    String instanceName = "localhost_" + (12918 + i);
    participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
    participants[i].syncStart();
  }
  ClusterControllerManager controller =
      new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
  controller.syncStart();
  boolean result =
      ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
          clusterName));
  Assert.assertTrue(result);
  // make sure localhost_12919 is master on TestDB_1
  HelixDataAccessor accessor = controller.getHelixDataAccessor();
  Builder keyBuilder = accessor.keyBuilder();
  ExternalView extView = accessor.getProperty(keyBuilder.externalView(db));
  Map<String, String> stateMap = extView.getStateMap(db + "_1");
  Assert.assertEquals(stateMap.get("localhost_12919"), "MASTER",
      "localhost_12919 should be MASTER even though replicas is set to 2, since we generate message based on target-state priority");
  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
Example 7: testGetResourcesWithTag
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
@Test
public void testGetResourcesWithTag() {
  String TEST_TAG = "TestTAG";
  final String clusterName = getShortClassName();
  String rootPath = "/" + clusterName;
  if (_gZkClient.exists(rootPath)) {
    _gZkClient.deleteRecursive(rootPath);
  }
  HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
  tool.addCluster(clusterName, true);
  Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient));
  tool.addStateModelDef(clusterName, "OnlineOffline",
      new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline()));
  for (int i = 0; i < 4; i++) {
    String instanceName = "host" + i + "_9999";
    InstanceConfig config = new InstanceConfig(instanceName);
    config.setHostName("host" + i);
    config.setPort("9999");
    // set tag to two instances
    if (i < 2) {
      config.addTag(TEST_TAG);
    }
    tool.addInstance(clusterName, config);
    tool.enableInstance(clusterName, instanceName, true);
    String path = PropertyPathBuilder.instance(clusterName, instanceName);
    AssertJUnit.assertTrue(_gZkClient.exists(path));
  }
  for (int i = 0; i < 4; i++) {
    String resourceName = "database_" + i;
    IdealState is = new IdealState(resourceName);
    is.setStateModelDefRef("OnlineOffline");
    is.setNumPartitions(2);
    is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
    is.setReplicas("1");
    is.enable(true);
    if (i < 2) {
      is.setInstanceGroupTag(TEST_TAG);
    }
    tool.addResource(clusterName, resourceName, is);
  }
  List<String> allResources = tool.getResourcesInCluster(clusterName);
  List<String> resourcesWithTag = tool.getResourcesInClusterWithTag(clusterName, TEST_TAG);
  AssertJUnit.assertEquals(allResources.size(), 4);
  AssertJUnit.assertEquals(resourcesWithTag.size(), 2);
}
Example 8: testGetInstances
import org.apache.helix.HelixAdmin; // import the package/class the method depends on
@Test
public void testGetInstances() throws IOException {
  final String clusterName = "TestTagAwareness_testGetResources";
  final String[] TAGS = {
      "tag1", "tag2"
  };
  final String URL_BASE =
      "http://localhost:" + ADMIN_PORT + "/clusters/" + clusterName + "/instances";
  _gSetupTool.addCluster(clusterName, true);
  HelixAdmin admin = _gSetupTool.getClusterManagementTool();
  // Add 4 participants, each with different tag characteristics
  InstanceConfig instance1 = new InstanceConfig("localhost_1");
  instance1.addTag(TAGS[0]);
  admin.addInstance(clusterName, instance1);
  InstanceConfig instance2 = new InstanceConfig("localhost_2");
  instance2.addTag(TAGS[1]);
  admin.addInstance(clusterName, instance2);
  InstanceConfig instance3 = new InstanceConfig("localhost_3");
  instance3.addTag(TAGS[0]);
  instance3.addTag(TAGS[1]);
  admin.addInstance(clusterName, instance3);
  InstanceConfig instance4 = new InstanceConfig("localhost_4");
  admin.addInstance(clusterName, instance4);
  // Now make a REST call for all instances
  Reference resourceRef = new Reference(URL_BASE);
  Request request = new Request(Method.GET, resourceRef);
  Response response = _gClient.handle(request);
  ListInstancesWrapper responseWrapper =
      ClusterRepresentationUtil.JsonToObject(ListInstancesWrapper.class,
          response.getEntityAsText());
  Map<String, List<String>> tagInfo = responseWrapper.tagInfo;
  // Ensure tag ownership is reported correctly
  Assert.assertTrue(tagInfo.containsKey(TAGS[0]));
  Assert.assertTrue(tagInfo.containsKey(TAGS[1]));
  Assert.assertTrue(tagInfo.get(TAGS[0]).contains("localhost_1"));
  Assert.assertFalse(tagInfo.get(TAGS[0]).contains("localhost_2"));
  Assert.assertTrue(tagInfo.get(TAGS[0]).contains("localhost_3"));
  Assert.assertFalse(tagInfo.get(TAGS[0]).contains("localhost_4"));
  Assert.assertFalse(tagInfo.get(TAGS[1]).contains("localhost_1"));
  Assert.assertTrue(tagInfo.get(TAGS[1]).contains("localhost_2"));
  Assert.assertTrue(tagInfo.get(TAGS[1]).contains("localhost_3"));
  Assert.assertFalse(tagInfo.get(TAGS[1]).contains("localhost_4"));
}