This article collects typical usage examples of the Java class org.apache.helix.model.HelixConfigScope.ConfigScopeProperty. If you are wondering what ConfigScopeProperty is for and how to use it, the curated code examples below should help.
ConfigScopeProperty is a nested enum of org.apache.helix.model.HelixConfigScope. Fifteen code examples of the class are shown below, ordered by popularity by default.
Example 1: getClusterConfig
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
/**
 * Get the ClusterConfig of the given cluster.
 *
 * @param clusterName name of the cluster
 * @return the ClusterConfig, or null if no cluster-level config record exists
 */
public ClusterConfig getClusterConfig(String clusterName) {
if (!ZKUtil.isClusterSetup(clusterName, zkClient)) {
throw new HelixException("fail to get config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No config found at " + scope.getZkPath());
return null;
}
return new ClusterConfig(record);
}
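For orientation, ConfigScopeProperty defines the level a config scope targets. Besides the CLUSTER scope used above, the examples on this page also build RESOURCE and PARTICIPANT scopes, and Helix additionally supports a PARTITION scope. The snippet below is a minimal sketch, not taken from the examples; the cluster, resource, instance and partition names are placeholders.
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

// Placeholder names; one scope per ConfigScopeProperty level.
HelixConfigScope cluster = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER)
    .forCluster("myCluster").build();
HelixConfigScope resource = new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE)
    .forCluster("myCluster").forResource("myDB").build();
HelixConfigScope participant = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT)
    .forCluster("myCluster").forParticipant("localhost_12918").build();
HelixConfigScope partition = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTITION)
    .forCluster("myCluster").forResource("myDB").forPartition("myDB_0").build();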
Example 2: updateResourceConfig
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
private void updateResourceConfig(String clusterName, String resourceName,
ResourceConfig resourceConfig, boolean overwrite) {
if (!ZKUtil.isClusterSetup(clusterName, zkClient)) {
throw new HelixException("fail to setup config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE).forCluster(clusterName)
.forResource(resourceName).build();
String zkPath = scope.getZkPath();
if (overwrite) {
ZKUtil.createOrReplace(zkClient, zkPath, resourceConfig.getRecord(), true);
} else {
ZKUtil.createOrUpdate(zkClient, zkPath, resourceConfig.getRecord(), true, true);
}
}
Example 3: getInstanceConfig
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
/**
 * Get the InstanceConfig of the given instance in the given cluster.
 *
 * @param clusterName name of the cluster
 * @param instanceName name of the instance (participant)
 * @return the InstanceConfig, or null if no config record exists for the instance
 */
public InstanceConfig getInstanceConfig(String clusterName, String instanceName) {
if (!ZKUtil.isInstanceSetup(zkClient, clusterName, instanceName, InstanceType.PARTICIPANT)) {
throw new HelixException(
"fail to get config. instance: " + instanceName + " is NOT setup in cluster: "
+ clusterName);
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT).forCluster(clusterName)
.forParticipant(instanceName).build();
ZNRecord record = getConfigZnRecord(scope);
if (record == null) {
LOG.warn("No config found at " + scope.getZkPath());
return null;
}
return new InstanceConfig(record);
}
Example 4: updateInstanceConfig
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
private void updateInstanceConfig(String clusterName, String instanceName,
InstanceConfig instanceConfig, boolean overwrite) {
if (!ZKUtil.isClusterSetup(clusterName, zkClient)) {
throw new HelixException("fail to setup config. cluster: " + clusterName + " is NOT setup.");
}
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT).forCluster(clusterName)
.forParticipant(instanceName).build();
String zkPath = scope.getZkPath();
if (overwrite) {
ZKUtil.createOrReplace(zkClient, zkPath, instanceConfig.getRecord(), true);
} else {
ZKUtil.createOrUpdate(zkClient, zkPath, instanceConfig.getRecord(), true, true);
}
}
Example 5: build
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
public ConfigScope build(String scopePairs) {
String[] scopes = scopePairs.split("[\\s,]+");
for (String scope : scopes) {
try {
int idx = scope.indexOf('=');
if (idx == -1) {
LOG.error("Invalid scope string: " + scope);
continue;
}
String scopeStr = scope.substring(0, idx);
String value = scope.substring(idx + 1);
ConfigScopeProperty scopeProperty = ConfigScopeProperty.valueOf(scopeStr);
_scopeMap.put(scopeProperty, value);
} catch (Exception e) {
LOG.error("Invalid scope string: " + scope);
continue;
}
}
return build();
}
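A hedged usage sketch for the method above: each scope pair names a ConfigScopeProperty and its value, separated by '=', with pairs delimited by commas or whitespace. The builder type and the cluster/resource names below are assumptions for illustration.
// Hypothetical invocation; a malformed pair without '=' would be logged and skipped.
ConfigScope scope = new ConfigScopeBuilder().build("CLUSTER=myCluster, RESOURCE=testDB");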
Example 6: getConfig
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
/**
 * Get configuration values for the given scope.
 * @param type config scope type, e.g. CLUSTER, RESOURCE, etc.
 * @param scopeArgsCsv csv-formatted scope args, e.g. myCluster,testDB
 * @param keysCsv csv-formatted keys, e.g. k1,k2
 * @return JSON-formatted key-value pairs, e.g. {k1=v1,k2=v2}
 */
public String getConfig(ConfigScopeProperty type, String scopeArgsCsv, String keysCsv) {
String[] scopeArgs = scopeArgsCsv.split("[\\s,]");
HelixConfigScope scope = new HelixConfigScopeBuilder(type, scopeArgs).build();
String[] keys = keysCsv.split("[\\s,]");
Map<String, String> keyValueMap = _admin.getConfig(scope, Arrays.asList(keys));
ZNRecord record = new ZNRecord(type.toString());
record.getSimpleFields().putAll(keyValueMap);
ZNRecordSerializer serializer = new ZNRecordSerializer();
return new String(serializer.serialize(record));
}
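A hedged call sketch for the helper above, following the CSV formats described in its Javadoc; the cluster, resource and key names are placeholders.
// Hypothetical call: read keys k1 and k2 from the RESOURCE-level config of testDB in myCluster.
String json = getConfig(ConfigScopeProperty.RESOURCE, "myCluster,testDB", "k1,k2");
// json is a serialized ZNRecord whose simple fields carry the requested key-value pairs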
Example 7: onBecomeOnlineFromOffline
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
@Transition(to = "ONLINE", from = "OFFLINE")
public void onBecomeOnlineFromOffline(Message message, NotificationContext context)
throws Exception {
LOG.debug(_workerId + " becomes ONLINE from OFFLINE for " + _partition);
ConfigAccessor clusterConfig = context.getManager().getConfigAccessor();
HelixManager manager = context.getManager();
HelixConfigScope clusterScope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(
manager.getClusterName()).build();
String json = clusterConfig.get(clusterScope, message.getResourceName());
Dag.Node node = Dag.Node.fromJson(json);
Set<String> parentIds = node.getParentIds();
String resourceName = message.getResourceName();
int numPartitions = node.getNumPartitions();
Task task = _taskFactory.createTask(resourceName, parentIds, manager, _taskResultStore);
manager.addExternalViewChangeListener(task);
LOG.debug("Starting task for " + _partition + "...");
int partitionNum = Integer.parseInt(_partition.split("_")[1]);
task.execute(resourceName, numPartitions, partitionNum);
LOG.debug("Task for " + _partition + " done");
}
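The transition above looks up the DAG node definition from a CLUSTER-scope key named after the resource. As a complement, here is a hedged sketch, not part of the example, of how such an entry could be written beforehand via ConfigAccessor; the resource name and JSON payload are placeholders.
ConfigAccessor configAccessor = manager.getConfigAccessor();
HelixConfigScope clusterScope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER)
    .forCluster(manager.getClusterName()).build();
String nodeJson = "{...}"; // placeholder JSON describing the DAG node
configAccessor.set(clusterScope, "myResource", nodeJson); // key matches the resource name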
Example 8: addConfiguration
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
private static void addConfiguration(ClusterSetup setup, String baseDir, String clusterName,
String instanceName) throws IOException {
Map<String, String> properties = new HashMap<String, String>();
HelixConfigScopeBuilder builder = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT);
HelixConfigScope instanceScope =
builder.forCluster(clusterName).forParticipant(instanceName).build();
properties.put("change_log_dir", baseDir + instanceName + "/translog");
properties.put("file_store_dir", baseDir + instanceName + "/filestore");
properties.put("check_point_dir", baseDir + instanceName + "/checkpoint");
setup.getClusterManagementTool().setConfig(instanceScope, properties);
FileUtils.deleteDirectory(new File(properties.get("change_log_dir")));
FileUtils.deleteDirectory(new File(properties.get("file_store_dir")));
FileUtils.deleteDirectory(new File(properties.get("check_point_dir")));
new File(properties.get("change_log_dir")).mkdirs();
new File(properties.get("file_store_dir")).mkdirs();
new File(properties.get("check_point_dir")).mkdirs();
}
Example 9: createHelixClusterIfNeeded
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath) {
final HelixAdmin admin = new ZKHelixAdmin(zkPath);
if (admin.getClusters().contains(helixClusterName)) {
LOGGER.info(
"cluster already exist, skipping it.. ********************************************* ");
return;
}
LOGGER.info("Creating a new cluster, as the helix cluster : " + helixClusterName
+ " was not found ********************************************* ");
admin.addCluster(helixClusterName, false);
LOGGER.info("Enable mirror maker machines auto join.");
final HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER)
.forCluster(helixClusterName).build();
final Map<String, String> props = new HashMap<String, String>();
props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS,
String.valueOf(100));
admin.setConfig(scope, props);
LOGGER.info("Adding state model definition named : OnlineOffline generated using : "
+ OnlineOfflineStateModel.class.toString()
+ " ********************************************** ");
// add state model definition
admin.addStateModelDef(helixClusterName, "OnlineOffline", OnlineOfflineStateModel.build());
LOGGER.info("New Cluster setup completed... ********************************************** ");
}
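Because the cluster config above enables ALLOW_PARTICIPANT_AUTO_JOIN, participants may register themselves on connect. The following is a minimal sketch under that assumption; the instance name is a placeholder, and a state model factory for the OnlineOffline model would still need to be registered before connecting.
HelixManager participant = HelixManagerFactory.getZKHelixManager(
    helixClusterName, "workerInstance_0", InstanceType.PARTICIPANT, zkPath);
// register an OnlineOffline state model factory on participant.getStateMachineEngine() here
participant.connect(); // with auto-join enabled, the instance config is created if it does not exist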
Example 10: disableAutoBalancing
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
public void disableAutoBalancing() {
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(_helixClusterName)
.build();
Map<String, String> properties = new HashMap<String, String>();
properties.put(AUTO_BALANCING, DISABLE);
_helixAdmin.setConfig(scope, properties);
}
Example 11: enableAutoBalancing
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
public void enableAutoBalancing() {
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(_helixClusterName)
.build();
Map<String, String> properties = new HashMap<String, String>();
properties.put(AUTO_BALANCING, ENABLE);
_helixAdmin.setConfig(scope, properties);
}
Example 12: isAutoBalancingEnabled
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
public boolean isAutoBalancingEnabled() {
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(_helixClusterName)
.build();
Map<String, String> config = _helixAdmin.getConfig(scope, Arrays.asList(AUTO_BALANCING));
if (config.containsKey(AUTO_BALANCING) && config.get(AUTO_BALANCING).equals(DISABLE)) {
return false;
}
return true;
}
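Examples 10-12 depend on AUTO_BALANCING, ENABLE and DISABLE constants that are defined elsewhere in the project and not shown here. The declarations below are only an assumption of their shape for readability; the real key and values may differ.
// Assumed for illustration only; not the project's actual definitions.
private static final String AUTO_BALANCING = "AutoBalancing";
private static final String ENABLE = "enable";
private static final String DISABLE = "disable";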
Example 13: setShuttingDownStatus
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
private void setShuttingDownStatus(boolean shuttingDown) {
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT, _helixClusterName).forParticipant(_instanceId)
.build();
Map<String, String> propToUpdate = new HashMap<String, String>();
propToUpdate.put(CommonConstants.Helix.IS_SHUTDOWN_IN_PROGRESS, String.valueOf(shuttingDown));
_helixAdmin.setConfig(scope, propToUpdate);
}
Example 14: start
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
public void start() throws Exception {
_manager =
HelixManagerFactory.getZKHelixManager(_cluster, "connectionURLProvider",
InstanceType.SPECTATOR, _zkAddress);
_manager.connect();
_manager.addExternalViewChangeListener(_routingTableProvider);
_manager.addConfigChangeListener(this, ConfigScopeProperty.RESOURCE);
}
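Since the spectator above registers itself as a RESOURCE-scope config listener, the enclosing class is expected to provide the matching callback. A hedged sketch of that callback, assuming the ScopedConfigChangeListener contract:
@Override
public void onConfigChange(List<HelixProperty> configs, NotificationContext context) {
  for (HelixProperty config : configs) {
    // react to changed resource configs, e.g. refresh cached connection URLs
  }
}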
Example 15: createTable
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty; // import the required package/class
public void createTable(String clusterName, String dbName, String table, String tableSpec) {
LOG.info("Creating table:" + table + " with table_spec: " + tableSpec + " in dbName:" + dbName);
IdealState dbIdealState = _helixAdmin.getResourceIdealState(clusterName, dbName);
int numPartitions = dbIdealState.getNumPartitions();
String dbTableName = dbName + "." + table;
AutoModeISBuilder builder = new AutoModeISBuilder(dbTableName);
builder.setRebalancerMode(RebalanceMode.SEMI_AUTO);
builder.setNumPartitions(numPartitions);
builder.setNumReplica(Integer.parseInt(dbIdealState.getReplicas()));
builder.setStateModel("OnlineOffline");
builder.setStateModelFactoryName("TableTransitionHandlerFactory");
for (String dbPartitionName : dbIdealState.getPartitionSet()) {
String tablePartitionName = dbPartitionName + "." + table;
Set<String> instanceSet = dbIdealState.getInstanceSet(dbPartitionName);
builder.add(tablePartitionName);
String[] instanceNames = new String[instanceSet.size()];
instanceSet.toArray(instanceNames);
builder.assignPreferenceList(tablePartitionName, instanceNames);
}
IdealState idealState = builder.build();
// before setting the idealstate, set the configuration
Map<String, String> properties = new HashMap<String, String>();
properties.put("table_spec", tableSpec);
properties.put("type", "TABLE");
HelixConfigScope scope =
new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE).forCluster(clusterName)
.forResource(dbTableName).build();
_helixAdmin.setConfig(scope, properties);
_helixAdmin.setResourceIdealState(clusterName, dbTableName, idealState);
}