This article collects typical usage examples of the Java class azkaban.flow.CommonJobProperties. If you are wondering what CommonJobProperties is for and how to use it, the curated examples below may help.
The CommonJobProperties class belongs to the azkaban.flow package. A total of 15 code examples of the class are shown below, ordered by popularity by default.
Example 1: getKrb5ccname
import azkaban.flow.CommonJobProperties; // import the required package/class
/**
* <pre>
* This method extracts the kerberos ticket cache file name from the jobprops.
* This method will ensure that each job execution will have its own kerberos ticket cache file
* Given that the code only sets an environment variable, the number of files created corresponds
* to the number of processes that are doing kinit in their flow, which should not be an inordinately
* high number.
* </pre>
*
* @return file name: the kerberos ticket cache file to use
*/
private String getKrb5ccname(Props jobProps) {
String effectiveUser = getEffectiveUser(jobProps);
String projectName =
jobProps.getString(CommonJobProperties.PROJECT_NAME).replace(" ", "_");
String flowId =
jobProps.getString(CommonJobProperties.FLOW_ID).replace(" ", "_");
String jobId =
jobProps.getString(CommonJobProperties.JOB_ID).replace(" ", "_");
// execId should be an int and should never contain a space
String execId = jobProps.getString(CommonJobProperties.EXEC_ID);
String krb5ccname =
String.format("/tmp/krb5cc__%s__%s__%s__%s__%s", projectName, flowId,
jobId, execId, effectiveUser);
return krb5ccname;
}
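The returned path is typically handed to the spawned process through the standard KRB5CCNAME environment variable, which libkrb5 consults for the credential cache. A minimal sketch of that hand-off (the method shape is illustrative, not the exact Azkaban API):

// Illustrative sketch: expose the per-execution ticket cache to the child process.
private Map<String, String> getEnvironmentVariables(Props jobProps) {
  Map<String, String> env = new HashMap<>();
  // Each execution gets its own cache file, so concurrent kinits do not collide.
  env.put("KRB5CCNAME", getKrb5ccname(jobProps));
  return env;
}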
Example 2: setUp
import azkaban.flow.CommonJobProperties; // import the required package/class
@Before
public void setUp() throws IOException {
File workingDir = temp.newFolder("TestProcess");
props = new Props();
props.put(AbstractProcessJob.WORKING_DIR, workingDir.getCanonicalPath());
props.put("type", "command");
props.put("fullPath", ".");
props.put(CommonJobProperties.PROJECT_NAME, "test_project");
props.put(CommonJobProperties.FLOW_ID, "test_flow");
props.put(CommonJobProperties.JOB_ID, "test_job");
props.put(CommonJobProperties.EXEC_ID, "123");
props.put(CommonJobProperties.SUBMIT_USER, "test_user");
job = new ProcessJob("TestProcess", props, props, log);
}
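A natural follow-on, not part of the original example, is a test that actually runs the configured job. This sketch assumes the standard "command" property of the command job type and the run() method inherited from the job base class:

@Test
public void testRunEchoCommand() throws Exception {
  // "command" is the property the command job type executes; the value is arbitrary.
  props.put("command", "echo hello");
  // A trivial command should complete without throwing.
  job.run();
}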
Example 3: setUp
import azkaban.flow.CommonJobProperties; // import the required package/class
@Before
public void setUp() throws IOException {
File workingDir = temp.newFolder("testJavaProcess");
// Initialize job
props = new Props();
props.put(AbstractProcessJob.WORKING_DIR, workingDir.getCanonicalPath());
props.put("type", "java");
props.put("fullPath", ".");
props.put(CommonJobProperties.PROJECT_NAME, "test_project");
props.put(CommonJobProperties.FLOW_ID, "test_flow");
props.put(CommonJobProperties.JOB_ID, "test_job");
props.put(CommonJobProperties.EXEC_ID, "123");
props.put(CommonJobProperties.SUBMIT_USER, "test_user");
job = new JavaProcessJob("testJavaProcess", props, props, log);
}
Example 4: getDependent
import azkaban.flow.CommonJobProperties; // import the required package/class
private void getDependent(HashMap<String, ProjectNode> nodeMap) {
for(ProjectNode node : nodeMap.values()) {
Props props = node.getProps();
if(props == null)
continue;
List<String> dependencyList = props.getStringList(CommonJobProperties.DEPENDENCIES, (List<String>)null);
if(dependencyList != null) {
for(String dependency : dependencyList) {
dependency = dependency == null ? null : dependency.trim();
if(dependency == null || dependency.isEmpty())
continue;
ProjectNode dependencyNode = nodeMap.get(dependency);
if(dependencyNode != null) {
dependencyNode.addDependent(node);
node.addDependencies(dependencyNode);
}
}
}
}
}
Example 5: addCommonFlowProperties
import azkaban.flow.CommonJobProperties; // import the required package/class
public static Props addCommonFlowProperties(Props parentProps, final ExecutableFlowBase flow) {
Props props = new Props(parentProps);
props.put(CommonJobProperties.FLOW_ID, flow.getFlowId());
props.put(CommonJobProperties.EXEC_ID, flow.getExecutionId());
props.put(CommonJobProperties.PROJECT_ID, flow.getProjectId());
props.put(CommonJobProperties.PROJECT_VERSION, flow.getVersion());
props.put(CommonJobProperties.FLOW_UUID, UUID.randomUUID().toString());
DateTime loadTime = new DateTime();
props.put(CommonJobProperties.FLOW_START_TIMESTAMP, loadTime.toString());
props.put(CommonJobProperties.FLOW_START_YEAR, loadTime.toString("yyyy"));
props.put(CommonJobProperties.FLOW_START_MONTH, loadTime.toString("MM"));
props.put(CommonJobProperties.FLOW_START_DAY, loadTime.toString("dd"));
props.put(CommonJobProperties.FLOW_START_HOUR, loadTime.toString("HH"));
props.put(CommonJobProperties.FLOW_START_MINUTE, loadTime.toString("mm"));
props.put(CommonJobProperties.FLOW_START_SECOND, loadTime.toString("ss"));
props.put(CommonJobProperties.FLOW_START_MILLISSECOND, loadTime.toString("SSS"));
props.put(CommonJobProperties.FLOW_START_TIMEZONE, loadTime.toString("ZZZZ"));
return props;
}
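Downstream jobs can read the injected values to build run-scoped locations. A small usage sketch, assuming the properties above were merged into the job's Props (the output path itself is hypothetical):

// Build a dated output directory from the flow start timestamp components.
String outputDir = String.format("/data/output/%s/%s/%s",
    props.getString(CommonJobProperties.FLOW_START_YEAR),
    props.getString(CommonJobProperties.FLOW_START_MONTH),
    props.getString(CommonJobProperties.FLOW_START_DAY));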
Example 6: cancel
import azkaban.flow.CommonJobProperties; // import the required package/class
/**
* This cancel method, in addition to the default canceling behavior, also
* kills the MR jobs launched by this job on Hadoop
*/
@Override
public void cancel() throws InterruptedException {
super.cancel();
info("Cancel called. Killing the launched MR jobs on the cluster");
String azExecId = jobProps.getString(CommonJobProperties.EXEC_ID);
final String logFilePath =
String.format("%s/_job.%s.%s.log", getWorkingDirectory(), azExecId,
getId());
info("log file path is: " + logFilePath);
HadoopJobUtils.proxyUserKillAllSpawnedHadoopJobs(logFilePath, jobProps,
tokenFile, getLog());
}
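proxyUserKillAllSpawnedHadoopJobs works by scanning that log file for the YARN application IDs the job printed while running. A rough sketch of the ID extraction (the regex and method shape are assumptions, not the exact HadoopJobUtils implementation):

// Hypothetical sketch: collect YARN application IDs mentioned in the job log.
private static Set<String> findApplicationIds(String logFilePath) throws IOException {
  Pattern appIdPattern = Pattern.compile("application_\\d+_\\d+");
  Set<String> ids = new HashSet<>();
  try (BufferedReader reader = new BufferedReader(new FileReader(logFilePath))) {
    String line;
    while ((line = reader.readLine()) != null) {
      Matcher matcher = appIdPattern.matcher(line);
      while (matcher.find()) {
        ids.add(matcher.group());
      }
    }
  }
  return ids;
}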
Example 7: HadoopSparkJob
import azkaban.flow.CommonJobProperties; // import the required package/class
public HadoopSparkJob(String jobid, Props sysProps, Props jobProps, Logger log) {
super(jobid, sysProps, jobProps, log);
getJobProps().put(CommonJobProperties.JOB_ID, jobid);
shouldProxy = getSysProps().getBoolean(ENABLE_PROXYING, false);
getJobProps().put(ENABLE_PROXYING, Boolean.toString(shouldProxy));
obtainTokens = getSysProps().getBoolean(OBTAIN_BINARY_TOKEN, false);
if (shouldProxy) {
getLog().info("Initiating hadoop security manager.");
try {
hadoopSecurityManager =
HadoopJobUtils.loadHadoopSecurityManager(getSysProps(), log);
} catch (RuntimeException e) {
throw new RuntimeException("Failed to get hadoop security manager!" + e);
}
}
}
Example 8: cancel
import azkaban.flow.CommonJobProperties; // import the required package/class
/**
* This cancel method, in addition to the default canceling behavior, also
* kills the Spark job on Hadoop
*/
@Override
public void cancel() throws InterruptedException {
super.cancel();
info("Cancel called. Killing the Spark job on the cluster");
String azExecId = jobProps.getString(CommonJobProperties.EXEC_ID);
final String logFilePath =
String.format("%s/_job.%s.%s.log", getWorkingDirectory(), azExecId,
getId());
info("log file path is: " + logFilePath);
HadoopJobUtils.proxyUserKillAllSpawnedHadoopJobs(logFilePath, jobProps,
tokenFile, getLog());
}
Example 9: HadoopPigJob
import azkaban.flow.CommonJobProperties; // import the required package/class
public HadoopPigJob(String jobid, Props sysProps, Props jobProps, Logger log)
throws IOException {
super(jobid, sysProps, jobProps, log);
HADOOP_SECURE_PIG_WRAPPER = HadoopSecurePigWrapper.class.getName();
getJobProps().put(CommonJobProperties.JOB_ID, jobid);
shouldProxy =
getSysProps().getBoolean(HadoopSecurityManager.ENABLE_PROXYING, false);
getJobProps().put(HadoopSecurityManager.ENABLE_PROXYING,
Boolean.toString(shouldProxy));
obtainTokens =
getSysProps().getBoolean(HadoopSecurityManager.OBTAIN_BINARY_TOKEN,
false);
userPigJar = getJobProps().getBoolean("use.user.pig.jar", false);
if (shouldProxy) {
getLog().info("Initiating hadoop security manager.");
try {
hadoopSecurityManager =
HadoopJobUtils.loadHadoopSecurityManager(getSysProps(), log);
} catch (RuntimeException e) {
throw new RuntimeException("Failed to get hadoop security manager!" + e);
}
}
}
Example 10: cancel
import azkaban.flow.CommonJobProperties; // import the required package/class
/**
* This cancel method, in addition to the default canceling behavior, also
* kills the MR jobs launched by Pig on Hadoop
*/
@Override
public void cancel() throws InterruptedException {
super.cancel();
info("Cancel called. Killing the Pig launched MR jobs on the cluster");
String azExecId = jobProps.getString(CommonJobProperties.EXEC_ID);
final String logFilePath =
String.format("%s/_job.%s.%s.log", getWorkingDirectory(), azExecId,
getId());
info("log file path is: " + logFilePath);
HadoopJobUtils.proxyUserKillAllSpawnedHadoopJobs(logFilePath, jobProps,
tokenFile, getLog());
}
Example 11: addHadoopProperties
import azkaban.flow.CommonJobProperties; // import the required package/class
private static void addHadoopProperties(Props props) {
String[] propsToInject = new String[]{
CommonJobProperties.EXEC_ID,
CommonJobProperties.FLOW_ID,
CommonJobProperties.JOB_ID,
CommonJobProperties.PROJECT_NAME,
CommonJobProperties.PROJECT_VERSION,
CommonJobProperties.EXECUTION_LINK,
CommonJobProperties.JOB_LINK,
CommonJobProperties.WORKFLOW_LINK,
CommonJobProperties.JOBEXEC_LINK,
CommonJobProperties.ATTEMPT_LINK,
CommonJobProperties.OUT_NODES,
CommonJobProperties.IN_NODES,
CommonJobProperties.PROJECT_LAST_CHANGED_DATE,
CommonJobProperties.PROJECT_LAST_CHANGED_BY,
CommonJobProperties.SUBMIT_USER
};
for(String propertyName : propsToInject) {
addHadoopProperty(props, propertyName);
}
addHadoopWorkflowProperty(props, WORKFLOW_ID_CONFIG);
}
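The addHadoopProperty helper is not shown in this excerpt. A plausible implementation, assuming it re-keys each Azkaban property under an inject prefix that is later copied into the Hadoop Configuration (the prefix name is an assumption):

// Assumed shape of the helper: stage the property under an inject prefix
// (e.g. "azkaban.inject.") so it can be picked up by the Hadoop Configuration.
private static final String INJECT_PREFIX = "azkaban.inject.";

private static void addHadoopProperty(Props props, String propertyName) {
  String value = props.get(propertyName);
  if (value != null) {
    props.put(INJECT_PREFIX + propertyName, value);
  }
}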
Example 12: HadoopHiveJob
import azkaban.flow.CommonJobProperties; // import the required package/class
public HadoopHiveJob(String jobid, Props sysProps, Props jobProps, Logger log)
throws IOException {
super(jobid, sysProps, jobProps, log);
getJobProps().put(CommonJobProperties.JOB_ID, jobid);
shouldProxy = getSysProps().getBoolean(HadoopSecurityManager.ENABLE_PROXYING, false);
getJobProps().put(HadoopSecurityManager.ENABLE_PROXYING, Boolean.toString(shouldProxy));
obtainTokens = getSysProps().getBoolean(HadoopSecurityManager.OBTAIN_BINARY_TOKEN, false);
debug = getJobProps().getBoolean("debug", false);
if (shouldProxy) {
getLog().info("Initiating hadoop security manager.");
try {
hadoopSecurityManager = HadoopJobUtils.loadHadoopSecurityManager(getSysProps(), log);
} catch (RuntimeException e) {
throw new RuntimeException("Failed to get hadoop security manager!" + e);
}
}
}
Example 13: GobblinHadoopJob
import azkaban.flow.CommonJobProperties; // import the required package/class
public GobblinHadoopJob(String jobid, Props sysProps, Props jobProps, Logger log) {
super(jobid, sysProps, jobProps, log);
initializePresets();
jobProps.put(HadoopJavaJob.JOB_CLASS, "gobblin.azkaban.AzkabanJobLauncher");
jobProps.put("job.name", jobProps.get(CommonJobProperties.JOB_ID));
jobProps.put("launcher.type", "MAPREDUCE"); //Azkaban only supports MR mode
jobProps.put("fs.uri", sysProps.get("fs.uri")); //Azkaban should only support HDFS
//If gobblin jars are in HDFS pass HDFS path to Gobblin, otherwise pass local file system path.
if (sysProps.containsKey(GobblinConstants.GOBBLIN_HDFS_JOB_JARS_KEY)) {
jobProps.put(GobblinConstants.GOBBLIN_HDFS_JOB_JARS_KEY, sysProps.getString(GobblinConstants.GOBBLIN_HDFS_JOB_JARS_KEY));
} else {
jobProps.put(GobblinConstants.GOBBLIN_JOB_JARS_KEY, sysProps.get("jobtype.classpath"));
}
loadPreset();
transformProperties();
getLog().info("Job properties for Gobblin: " + printableJobProperties(jobProps));
}
Example 14: testProxyUserNotAuthorized
import azkaban.flow.CommonJobProperties; // import the required package/class
@Test
public void testProxyUserNotAuthorized() throws IOException, URISyntaxException {
String notAuthorized = "not_white_listed";
for (String authorized : whitelisted) {
Props props = new Props();
props.put(PROXY_USER_KEY, notAuthorized);
props.put(CommonJobProperties.SUBMIT_USER, authorized);
try {
whitelist.validateWhitelisted(props);
Assert.fail("Should throw UnsupportedOperationException");
} catch (Exception e) {
Assert.assertTrue(e instanceof UnsupportedOperationException);
}
}
}
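For contrast, a companion test sketch for the authorized path (not part of the original example): a whitelisted proxy user is expected to validate without throwing:

@Test
public void testProxyUserAuthorized() throws IOException, URISyntaxException {
  for (String authorized : whitelisted) {
    Props props = new Props();
    props.put(PROXY_USER_KEY, authorized);
    props.put(CommonJobProperties.SUBMIT_USER, authorized);
    // Expected: no exception when the proxy user is whitelisted (or is the submitter).
    whitelist.validateWhitelisted(props);
  }
}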
Example 15: ProcessJob
import azkaban.flow.CommonJobProperties; // import the required package/class
public ProcessJob(final String jobId, final Props sysProps,
final Props jobProps, final Logger log) {
super(jobId, sysProps, jobProps, log);
// This is in line with what other job types (hadoopJava, spark, pig, hive) do.
jobProps.put(CommonJobProperties.JOB_ID, jobId);
}