This article collects and summarizes typical usage examples of the Java class org.apache.helix.task.TaskConstants. If you are wondering what TaskConstants is, how to use it, or how others have used it, the curated class code examples below may help.
The TaskConstants class belongs to the org.apache.helix.task package. Ten code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: resetActiveTaskCount
import org.apache.helix.task.TaskConstants; // import the required package/class
/**
 * Reset the RUNNING/INIT task counts used by the JobRebalancer
 */
public void resetActiveTaskCount(CurrentStateOutput currentStateOutput) {
  // initialize the per-participant map with all live instances
  for (String liveInstance : getLiveInstances().keySet()) {
    _participantActiveTaskCount.put(liveInstance, 0);
  }
  // Active tasks == tasks in INIT or RUNNING state, counted from both pending and current states
  fillActiveTaskCount(currentStateOutput.getPartitionCountWithPendingState(TaskConstants.STATE_MODEL_NAME,
      TaskPartitionState.INIT.name()), _participantActiveTaskCount);
  fillActiveTaskCount(currentStateOutput.getPartitionCountWithPendingState(TaskConstants.STATE_MODEL_NAME,
      TaskPartitionState.RUNNING.name()), _participantActiveTaskCount);
  fillActiveTaskCount(currentStateOutput.getPartitionCountWithCurrentState(TaskConstants.STATE_MODEL_NAME,
      TaskPartitionState.INIT.name()), _participantActiveTaskCount);
  fillActiveTaskCount(currentStateOutput.getPartitionCountWithCurrentState(TaskConstants.STATE_MODEL_NAME,
      TaskPartitionState.RUNNING.name()), _participantActiveTaskCount);
}
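The fillActiveTaskCount helper is not shown in this example. A minimal sketch of what it plausibly does, assuming the per-instance partition-count maps returned by CurrentStateOutput (this is a hypothetical reconstruction, not the original implementation):

import java.util.Map;

// Hypothetical sketch: merge the per-instance counts for one state into the
// running per-participant totals, ignoring instances that are not live.
private void fillActiveTaskCount(Map<String, Integer> additionalTaskCount,
    Map<String, Integer> activeTaskCount) {
  for (Map.Entry<String, Integer> entry : additionalTaskCount.entrySet()) {
    // only live instances were seeded with 0 in resetActiveTaskCount
    if (activeTaskCount.containsKey(entry.getKey())) {
      activeTaskCount.put(entry.getKey(), activeTaskCount.get(entry.getKey()) + entry.getValue());
    }
  }
}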
Example 2: createWorkflows
import org.apache.helix.task.TaskConstants; // import the required package/class
protected Map<String, Workflow> createWorkflows(String cluster, int numWorkflows) {
  Map<String, Workflow> workflows = new HashMap<>();
  for (int i = 0; i < numWorkflows; i++) {
    Workflow.Builder workflow = new Workflow.Builder(WORKFLOW_PREFIX + i);
    int j = 0;
    for (JobConfig.Builder job : createJobs(cluster, WORKFLOW_PREFIX + i, 3)) {
      workflow.addJob(JOB_PREFIX + j++, job);
    }
    workflows.put(WORKFLOW_PREFIX + i, workflow.build());
    WorkflowContext workflowContext = TaskTestUtil
        .buildWorkflowContext(WORKFLOW_PREFIX + i, TaskState.IN_PROGRESS,
            System.currentTimeMillis(), TaskState.COMPLETED, TaskState.COMPLETED,
            TaskState.IN_PROGRESS);
    _baseAccessor.set(String.format("/%s/%s%s/%s/%s", cluster, PropertyType.PROPERTYSTORE.name(),
        TaskConstants.REBALANCER_CONTEXT_ROOT, WORKFLOW_PREFIX + i, TaskConstants.CONTEXT_NODE),
        workflowContext.getRecord(), AccessOption.PERSISTENT);
    _configAccessor.setResourceConfig(cluster, WORKFLOW_PREFIX + i, workflow.getWorkflowConfig());
  }
  return workflows;
}
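Because the context is written to the standard rebalancer-context path, it can also be read back through the task framework instead of a raw ZK path. A minimal sketch, assuming a connected HelixManager named _manager (an assumption, not part of the snippet):

import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.WorkflowContext;

// Read the context of the first workflow back via the TaskDriver and check its state.
WorkflowContext readBack = new TaskDriver(_manager).getWorkflowContext(WORKFLOW_PREFIX + 0);
Assert.assertEquals(readBack.getWorkflowState(), TaskState.IN_PROGRESS);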
Example 3: createJobs
import org.apache.helix.task.TaskConstants; // import the required package/class
protected Set<JobConfig.Builder> createJobs(String cluster, String workflowName, int numJobs) {
  Set<JobConfig.Builder> jobCfgs = new HashSet<>();
  for (int i = 0; i < numJobs; i++) {
    JobConfig.Builder job =
        new JobConfig.Builder().setCommand("DummyCommand").setTargetResource("RESOURCE")
            .setWorkflow(workflowName);
    jobCfgs.add(job);
    JobContext jobContext = TaskTestUtil
        .buildJobContext(System.currentTimeMillis(), System.currentTimeMillis() + 1,
            TaskPartitionState.COMPLETED);
    _baseAccessor.set(String.format("/%s/%s%s/%s/%s", cluster, PropertyType.PROPERTYSTORE.name(),
        TaskConstants.REBALANCER_CONTEXT_ROOT, workflowName + "_" + JOB_PREFIX + i,
        TaskConstants.CONTEXT_NODE), jobContext.getRecord(), AccessOption.PERSISTENT);
    _configAccessor.setResourceConfig(cluster, workflowName + "_" + JOB_PREFIX + i, job.build());
  }
  return jobCfgs;
}
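The same read-back check works for jobs; note that job resources are namespaced as "<workflowName>_<jobName>". A sketch under the same _manager assumption as above:

import org.apache.helix.task.JobContext;
import org.apache.helix.task.TaskDriver;

// Fetch the persisted job context by its namespaced name and make sure it exists.
JobContext jobCtx = new TaskDriver(_manager).getJobContext(workflowName + "_" + JOB_PREFIX + 0);
Assert.assertNotNull(jobCtx);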
Example 4: cleanUpTaskQueue
import org.apache.helix.task.TaskConstants; // import the required package/class
/**
 * Clean up the task queue for the given task type.
 *
 * @param taskType Task type
 */
public synchronized void cleanUpTaskQueue(@Nonnull String taskType) {
  // NOTE: There is a Helix bug that prevents task contexts from being removed properly,
  // so we need to remove them explicitly
  // TODO: Remove this extra logic once the Helix bug is fixed
  String helixJobQueueName = getHelixJobQueueName(taskType);
  Set<String> helixJobsBeforeCleaningUp = _taskDriver.getWorkflowConfig(helixJobQueueName).getJobDag().getAllNodes();
  if (helixJobsBeforeCleaningUp.isEmpty()) {
    return;
  }
  LOGGER.info("Cleaning up task queue: {} for task type: {}", helixJobQueueName, taskType);
  _taskDriver.cleanupJobQueue(helixJobQueueName);
  // Explicitly remove the task contexts of the jobs that were cleaned up
  Set<String> helixJobsAfterCleaningUp = _taskDriver.getWorkflowConfig(helixJobQueueName).getJobDag().getAllNodes();
  for (String helixJobName : helixJobsBeforeCleaningUp) {
    if (!helixJobsAfterCleaningUp.contains(helixJobName)) {
      _propertyStore.remove(TaskConstants.REBALANCER_CONTEXT_ROOT + "/" + helixJobName, AccessOption.PERSISTENT);
    }
  }
}
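The getHelixJobQueueName helper is not shown here. A plausible sketch, assuming the queue name is simply the task type with a fixed prefix (the prefix value is an assumption):

// Hypothetical helper: map a task type to its Helix job queue name.
private static final String TASK_QUEUE_PREFIX = "TaskQueue_";

public static String getHelixJobQueueName(String taskType) {
  return TASK_QUEUE_PREFIX + taskType;
}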
Example 5: registerHealthReportTasks
import org.apache.helix.task.TaskConstants; // import the required package/class
/**
 * Register {@link HelixHealthReportAggregatorTask}s for appropriate {@link AmbryHealthReport}s.
 * @param engine the {@link StateMachineEngine} to register the task state model with.
 * @param healthReports the {@link List} of {@link AmbryHealthReport}s that may require the registration of
 *                      corresponding {@link HelixHealthReportAggregatorTask}s.
 */
private void registerHealthReportTasks(StateMachineEngine engine, List<AmbryHealthReport> healthReports) {
  Map<String, TaskFactory> taskFactoryMap = new HashMap<>();
  for (final AmbryHealthReport healthReport : healthReports) {
    if (healthReport.getAggregateIntervalInMinutes() != Utils.Infinite_Time) {
      // register a cluster-wide aggregation task for the health report
      taskFactoryMap.put(
          String.format("%s_%s", HelixHealthReportAggregatorTask.TASK_COMMAND_PREFIX, healthReport.getReportName()),
          new TaskFactory() {
            @Override
            public Task createNewTask(TaskCallbackContext context) {
              return new HelixHealthReportAggregatorTask(context, healthReport.getAggregateIntervalInMinutes(),
                  healthReport.getReportName(), healthReport.getQuotaStatsFieldName());
            }
          });
    }
  }
  if (!taskFactoryMap.isEmpty()) {
    engine.registerStateModelFactory(TaskConstants.STATE_MODEL_NAME,
        new TaskStateModelFactory(manager, taskFactoryMap));
  }
}
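Since TaskFactory declares a single createNewTask method, the anonymous class above can be replaced with a lambda on Java 8+ (healthReport is already effectively final in the loop). An equivalent sketch of the registration:

// Equivalent registration with a lambda in place of the anonymous TaskFactory.
taskFactoryMap.put(
    String.format("%s_%s", HelixHealthReportAggregatorTask.TASK_COMMAND_PREFIX, healthReport.getReportName()),
    context -> new HelixHealthReportAggregatorTask(context, healthReport.getAggregateIntervalInMinutes(),
        healthReport.getReportName(), healthReport.getQuotaStatsFieldName()));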
Example 6: updateContext
import org.apache.helix.task.TaskConstants; // import the required package/class
/**
 * Update the context of a Workflow or Job
 */
private void updateContext(String resourceName, ZNRecord record, HelixDataAccessor accessor) {
  String path = String.format("/%s/%s%s/%s/%s", _clusterName, PropertyType.PROPERTYSTORE.name(),
      TaskConstants.REBALANCER_CONTEXT_ROOT, resourceName, TaskConstants.CONTEXT_NODE);
  accessor.getBaseDataAccessor().set(path, record, AccessOption.PERSISTENT);
  _contextMap.put(resourceName, record);
}
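To make the path construction concrete, assuming the usual constant values REBALANCER_CONTEXT_ROOT = "/TaskRebalancer" and CONTEXT_NODE = "Context" (assumptions; check TaskConstants in your Helix version), a hypothetical call resolves as follows:

// With _clusterName = "TestCluster" and resourceName = "myWorkflow", the computed path is
// "/TestCluster/PROPERTYSTORE/TaskRebalancer/myWorkflow/Context";
// workflowContext and accessor are assumed to come from the surrounding rebalancer code.
updateContext("myWorkflow", workflowContext.getRecord(), accessor);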
Example 7: testExpiry
import org.apache.helix.task.TaskConstants; // import the required package/class
@Test
public void testExpiry() throws Exception {
  String jobName = "Expiry";
  long expiry = 1000;
  Map<String, String> commandConfig = ImmutableMap.of(TIMEOUT_CONFIG, String.valueOf(100));
  JobConfig.Builder jobBuilder = JobConfig.Builder.fromMap(WorkflowGenerator.DEFAULT_JOB_CONFIG);
  jobBuilder.setJobCommandConfigMap(commandConfig);
  Workflow flow = WorkflowGenerator
      .generateSingleJobWorkflowBuilder(jobName, jobBuilder)
      .setExpiry(expiry).build();
  _driver.start(flow);
  _driver.pollForWorkflowState(jobName, TaskState.IN_PROGRESS);
  // A running workflow should have its config and context viewable through the accessor
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  PropertyKey workflowCfgKey = accessor.keyBuilder().resourceConfig(jobName);
  String workflowPropStoreKey =
      Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, jobName);
  // Ensure the context and config exist
  Assert.assertTrue(_manager.getHelixPropertyStore().exists(workflowPropStoreKey,
      AccessOption.PERSISTENT));
  Assert.assertNotSame(accessor.getProperty(workflowCfgKey), null);
  // Wait for the job to finish and expire
  _driver.pollForWorkflowState(jobName, TaskState.COMPLETED);
  Thread.sleep(expiry + 100);
  // Ensure the workflow config and context have been cleaned up by now
  Assert.assertFalse(_manager.getHelixPropertyStore().exists(workflowPropStoreKey,
      AccessOption.PERSISTENT));
  Assert.assertEquals(accessor.getProperty(workflowCfgKey), null);
}
Example 8: build
import org.apache.helix.task.TaskConstants; // import the required package/class
/**
 * Build the Task state model definition
 * @return the {@link StateModelDefinition} for the Task state model
 */
public static StateModelDefinition build() {
  StateModelDefinition.Builder builder = new StateModelDefinition.Builder(TaskConstants.STATE_MODEL_NAME);
  // initial state
  builder.initialState(TaskPartitionState.INIT.name());
  // add states and their priorities
  builder.addState(TaskPartitionState.INIT.name(), 0);
  builder.addState(TaskPartitionState.RUNNING.name(), 1);
  builder.addState(TaskPartitionState.STOPPED.name(), 2);
  builder.addState(TaskPartitionState.COMPLETED.name(), 3);
  builder.addState(TaskPartitionState.TIMED_OUT.name(), 4);
  builder.addState(TaskPartitionState.TASK_ERROR.name(), 5);
  builder.addState(TaskPartitionState.TASK_ABORTED.name(), 6);
  builder.addState(TaskPartitionState.DROPPED.name());
  // add transitions and their priorities
  builder.addTransition(TaskPartitionState.INIT.name(), TaskPartitionState.RUNNING.name(), 0);
  builder.addTransition(TaskPartitionState.RUNNING.name(), TaskPartitionState.STOPPED.name(), 1);
  builder.addTransition(TaskPartitionState.RUNNING.name(), TaskPartitionState.COMPLETED.name(), 2);
  builder.addTransition(TaskPartitionState.RUNNING.name(), TaskPartitionState.TIMED_OUT.name(), 3);
  builder.addTransition(TaskPartitionState.RUNNING.name(), TaskPartitionState.TASK_ERROR.name(), 4);
  builder.addTransition(TaskPartitionState.RUNNING.name(), TaskPartitionState.TASK_ABORTED.name(), 5);
  builder.addTransition(TaskPartitionState.STOPPED.name(), TaskPartitionState.RUNNING.name(), 6);
  // All states have a transition to DROPPED.
  builder.addTransition(TaskPartitionState.INIT.name(), TaskPartitionState.DROPPED.name(), 7);
  builder.addTransition(TaskPartitionState.RUNNING.name(), TaskPartitionState.DROPPED.name(), 8);
  builder.addTransition(TaskPartitionState.COMPLETED.name(), TaskPartitionState.DROPPED.name(), 9);
  builder.addTransition(TaskPartitionState.STOPPED.name(), TaskPartitionState.DROPPED.name(), 10);
  builder.addTransition(TaskPartitionState.TIMED_OUT.name(), TaskPartitionState.DROPPED.name(), 11);
  builder.addTransition(TaskPartitionState.TASK_ERROR.name(), TaskPartitionState.DROPPED.name(), 12);
  builder.addTransition(TaskPartitionState.TASK_ABORTED.name(), TaskPartitionState.DROPPED.name(), 13);
  // All states, except DROPPED, have a transition back to INIT.
  builder.addTransition(TaskPartitionState.RUNNING.name(), TaskPartitionState.INIT.name(), 14);
  builder.addTransition(TaskPartitionState.COMPLETED.name(), TaskPartitionState.INIT.name(), 15);
  builder.addTransition(TaskPartitionState.STOPPED.name(), TaskPartitionState.INIT.name(), 16);
  builder.addTransition(TaskPartitionState.TIMED_OUT.name(), TaskPartitionState.INIT.name(), 17);
  builder.addTransition(TaskPartitionState.TASK_ERROR.name(), TaskPartitionState.INIT.name(), 18);
  builder.addTransition(TaskPartitionState.TASK_ABORTED.name(), TaskPartitionState.INIT.name(), 19);
  return builder.build();
}
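To take effect, the built definition has to be registered with a cluster. A minimal sketch; the ZooKeeper address and cluster name are placeholders:

import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;

// Register the Task state model definition with the cluster; "localhost:2181" and
// "MyCluster" are placeholder values.
HelixAdmin admin = new ZKHelixAdmin("localhost:2181");
admin.addStateModelDef("MyCluster", TaskConstants.STATE_MODEL_NAME, build());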
Example 9: verifyState
import org.apache.helix.task.TaskConstants; // import the required package/class
@Override
protected boolean verifyState() {
  try {
    PropertyKey.Builder keyBuilder = _accessor.keyBuilder();
    // read the cluster once and verify against that snapshot
    ClusterDataCache cache = new ClusterDataCache();
    cache.refresh(_accessor);
    Map<String, IdealState> idealStates = cache.getIdealStates();
    if (idealStates == null) {
      // the ideal states are null when they have been dropped
      idealStates = Collections.emptyMap();
    }
    // filter out all resources that use the Task state model
    Iterator<Map.Entry<String, IdealState>> it = idealStates.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, IdealState> pair = it.next();
      if (pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME)) {
        it.remove();
      }
    }
    // verify live instances
    if (_expectLiveInstances != null && !_expectLiveInstances.isEmpty()) {
      Set<String> actualLiveNodes = cache.getLiveInstances().keySet();
      if (!_expectLiveInstances.equals(actualLiveNodes)) {
        return false;
      }
    }
    Map<String, ExternalView> extViews = _accessor.getChildValuesMap(keyBuilder.externalViews());
    if (extViews == null) {
      extViews = Collections.emptyMap();
    }
    // filter resources if requested
    if (_resources != null && !_resources.isEmpty()) {
      idealStates.keySet().retainAll(_resources);
      extViews.keySet().retainAll(_resources);
    }
    // if an external view exists but the ideal state doesn't,
    // add an empty ideal state for the resource
    for (String resource : extViews.keySet()) {
      if (!idealStates.containsKey(resource)) {
        idealStates.put(resource, new IdealState(resource));
      }
    }
    for (String resourceName : idealStates.keySet()) {
      ExternalView extView = extViews.get(resourceName);
      IdealState idealState = idealStates.get(resourceName);
      if (extView == null) {
        if (idealState.isExternalViewDisabled()) {
          continue;
        } else {
          LOG.debug("externalView for " + resourceName + " is not available");
          return false;
        }
      }
      boolean result = verifyExternalView(cache, extView, idealState);
      if (!result) {
        return false;
      }
    }
    return true;
  } catch (Exception e) {
    LOG.error("exception in verification", e);
    return false;
  }
}
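On Java 8+, the iterator-based filtering of Task resources in the middle of verifyState can be written more compactly with removeIf. A behavior-preserving sketch of that fragment:

// Drop every ideal state whose resource uses the Task state model.
idealStates.entrySet().removeIf(
    entry -> entry.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME));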
Example 10: stopAndDeleteQueue
import org.apache.helix.task.TaskConstants; // import the required package/class
@Test
public void stopAndDeleteQueue() throws Exception {
  final String queueName = TestHelper.getTestMethodName();
  // Create a queue
  System.out.println("START " + queueName + " at " + new Date(System.currentTimeMillis()));
  WorkflowConfig wfCfg = new WorkflowConfig.Builder(queueName).setExpiry(2, TimeUnit.MINUTES).build();
  JobQueue qCfg = new JobQueue.Builder(queueName).fromMap(wfCfg.getResourceConfigMap()).build();
  _driver.createQueue(qCfg);
  // Enqueue 2 jobs
  Set<String> master = Sets.newHashSet("MASTER");
  JobConfig.Builder job1 =
      new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
          .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(master);
  String job1Name = "masterJob";
  LOG.info("Enqueuing job1: " + job1Name);
  _driver.enqueueJob(queueName, job1Name, job1);
  Set<String> slave = Sets.newHashSet("SLAVE");
  JobConfig.Builder job2 =
      new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
          .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(slave);
  String job2Name = "slaveJob";
  LOG.info("Enqueuing job2: " + job2Name);
  _driver.enqueueJob(queueName, job2Name, job2);
  String namespacedJob1 = String.format("%s_%s", queueName, job1Name);
  _driver.pollForJobState(queueName, namespacedJob1, TaskState.COMPLETED);
  String namespacedJob2 = String.format("%s_%s", queueName, job2Name);
  _driver.pollForJobState(queueName, namespacedJob2, TaskState.COMPLETED);
  // Stop the queue
  _driver.stop(queueName);
  boolean result =
      ClusterStateVerifier.verifyByPolling(new ClusterStateVerifier.BestPossAndExtViewZkVerifier(
          ZK_ADDR, CLUSTER_NAME));
  Assert.assertTrue(result);
  // Delete the queue
  _driver.delete(queueName);
  // Wait until all statuses are cleaned up
  result = TestHelper.verify(new TestHelper.Verifier() {
    @Override
    public boolean verify() throws Exception {
      HelixDataAccessor accessor = _manager.getHelixDataAccessor();
      PropertyKey.Builder keyBuilder = accessor.keyBuilder();
      // check the paths for resource-config, ideal-state, external-view and property-store
      List<String> paths =
          Lists.newArrayList(keyBuilder.resourceConfigs().getPath(),
              keyBuilder.idealStates().getPath(),
              keyBuilder.externalViews().getPath(),
              PropertyPathBuilder.propertyStore(CLUSTER_NAME) + TaskConstants.REBALANCER_CONTEXT_ROOT);
      for (String path : paths) {
        List<String> childNames = accessor.getBaseDataAccessor().getChildNames(path, 0);
        for (String childName : childNames) {
          if (childName.startsWith(queueName)) {
            return false;
          }
        }
      }
      return true;
    }
  }, 30 * 1000);
  Assert.assertTrue(result);
  System.out.println("END " + queueName + " at " + new Date(System.currentTimeMillis()));
}