本文整理汇总了Java中org.apache.kylin.job.engine.JobEngineConfig类的典型用法代码示例。如果您正苦于以下问题:Java JobEngineConfig类的具体用法?Java JobEngineConfig怎么用?Java JobEngineConfig使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
JobEngineConfig类属于org.apache.kylin.job.engine包,在下文中一共展示了JobEngineConfig类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: before
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Test fixture setup: prepares the deployment environment, boots the default
 * scheduler with a ZooKeeper-backed job lock, and purges leftover cubing jobs
 * so each run starts from a clean slate.
 */
public void before() throws Exception {
    deployEnv();

    final KylinConfig config = KylinConfig.getInstanceFromEnv();
    jobService = ExecutableManager.getInstance(config);

    scheduler = DefaultScheduler.createInstance();
    scheduler.init(new JobEngineConfig(config), new ZookeeperJobLock());
    if (!scheduler.hasStarted()) {
        throw new RuntimeException("scheduler has not been started");
    }

    cubeManager = CubeManager.getInstance(config);

    // Remove stale CubingJob entries left over from previous runs.
    for (final String id : jobService.getAllJobIds()) {
        if (jobService.getJob(id) instanceof CubingJob) {
            jobService.deleteJob(id);
        }
    }

    cubeDescManager = CubeDescManager.getInstance(config);
}
示例2: addInMemCubingSteps
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Adds the in-memory cubing step to the job: a single MapReduce task that
 * builds all cuboids of the segment at once and writes them under
 * {@code cuboidRootPath}.
 */
protected void addInMemCubingSteps(final CubingJob result, String jobId, String cuboidRootPath) {
    final MapReduceExecutable inMemStep = new MapReduceExecutable();
    inMemStep.setName(ExecutableConstants.STEP_NAME_BUILD_IN_MEM_CUBE);

    final String cubeName = seg.getRealization().getName();
    final StringBuilder params = new StringBuilder();
    // Parameter order is preserved: conf suffix first, then exec-cmd args.
    appendMapReduceParameters(params, JobEngineConfig.IN_MEM_JOB_CONF_SUFFIX);
    appendExecCmdParameters(params, BatchConstants.ARG_CUBE_NAME, cubeName);
    appendExecCmdParameters(params, BatchConstants.ARG_SEGMENT_ID, seg.getUuid());
    appendExecCmdParameters(params, BatchConstants.ARG_OUTPUT, cuboidRootPath);
    appendExecCmdParameters(params, BatchConstants.ARG_JOB_NAME, "Kylin_Cube_Builder_" + cubeName);
    appendExecCmdParameters(params, BatchConstants.ARG_CUBING_JOB_ID, jobId);

    inMemStep.setMapReduceParams(params.toString());
    inMemStep.setMapReduceJobClass(getInMemCuboidJob());
    result.addTask(inMemStep);
}
示例3: createInMemCubingStep
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Builds the MapReduce step that derives in-memory cuboids from an existing
 * base cuboid (input: base cuboid path, output: in-mem cuboid path), saving
 * source/cube size counters on completion.
 */
private MapReduceExecutable createInMemCubingStep(String jobId, CuboidModeEnum cuboidMode, String cuboidRootPath) {
    final MapReduceExecutable step = new MapReduceExecutable();
    step.setName(ExecutableConstants.STEP_NAME_BUILD_IN_MEM_CUBE);

    final String cubeName = seg.getRealization().getName();
    final StringBuilder params = new StringBuilder();
    // Parameter order is preserved: conf suffix first, then exec-cmd args.
    appendMapReduceParameters(params, JobEngineConfig.IN_MEM_JOB_CONF_SUFFIX);
    appendExecCmdParameters(params, BatchConstants.ARG_CUBE_NAME, cubeName);
    appendExecCmdParameters(params, BatchConstants.ARG_SEGMENT_ID, seg.getUuid());
    appendExecCmdParameters(params, BatchConstants.ARG_INPUT, getBaseCuboidPath(cuboidRootPath));
    appendExecCmdParameters(params, BatchConstants.ARG_OUTPUT, getInMemCuboidPath(cuboidRootPath));
    appendExecCmdParameters(params, BatchConstants.ARG_JOB_NAME, "Kylin_Cube_Builder_" + cubeName);
    appendExecCmdParameters(params, BatchConstants.ARG_CUBING_JOB_ID, jobId);
    appendExecCmdParameters(params, BatchConstants.ARG_CUBOID_MODE, cuboidMode.toString());

    step.setMapReduceParams(params.toString());
    step.setMapReduceJobClass(InMemCuboidFromBaseCuboidJob.class);
    step.setCounterSaveAs(
            CubingJob.SOURCE_RECORD_COUNT + "," + CubingJob.SOURCE_SIZE_BYTES + "," + CubingJob.CUBE_SIZE_BYTES);
    return step;
}
示例4: generateInsertDataStatement
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Generates the Hive "INSERT OVERWRITE" statement that populates the
 * intermediate flat table for the given flat-table descriptor.
 *
 * <p>When an "advanced flat table" class is configured, delegation happens via
 * reflection to that class's static
 * {@code generateInsertDataStatement(IJoinedFlatTableDesc, JobEngineConfig)}.
 *
 * @param flatDesc descriptor of the flat table to populate
 * @return the full INSERT statement, terminated by a newline
 * @throws RuntimeException wrapping any reflective failure
 */
public static String generateInsertDataStatement(IJoinedFlatTableDesc flatDesc) {
    CubeSegment segment = ((CubeSegment) flatDesc.getSegment());
    KylinConfig kylinConfig;
    if (null == segment) {
        // No segment attached: fall back to the environment-wide config.
        kylinConfig = KylinConfig.getInstanceFromEnv();
    } else {
        kylinConfig = segment.getConfig();
    }
    if (kylinConfig.isAdvancedFlatTableUsed()) {
        try {
            Class<?> advancedFlatTable = Class.forName(kylinConfig.getAdvancedFlatTableClass());
            Method method = advancedFlatTable.getMethod("generateInsertDataStatement", IJoinedFlatTableDesc.class,
                    JobEngineConfig.class);
            // BUG FIX: the looked-up method declares TWO parameters, but the
            // original code invoked it with only one argument, which makes
            // Method.invoke throw IllegalArgumentException at runtime.
            return (String) method.invoke(null, flatDesc, new JobEngineConfig(kylinConfig));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    return "INSERT OVERWRITE TABLE " + flatDesc.getTableName() + " " + generateSelectDataStatement(flatDesc) + "\n";
}
示例5: doWork
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Deletes the configured HDFS paths on the working cluster and, when a
 * separate HBase cluster filesystem is configured, on that cluster too.
 * I/O failures are logged, appended to the step output, and reported as an
 * ERROR result rather than thrown.
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        config = new JobEngineConfig(context.getConfig());
        final List<String> pathsToDelete = getDeletePaths();

        // Always clean the main working cluster.
        dropHdfsPathOnCluster(pathsToDelete, HadoopUtil.getWorkingFileSystem());

        // Clean the HBase cluster FS as well when one is configured.
        final String hbaseClusterFs = context.getConfig().getHBaseClusterFs();
        if (StringUtils.isNotEmpty(hbaseClusterFs)) {
            dropHdfsPathOnCluster(pathsToDelete, FileSystem.get(HBaseConnection.getCurrentHBaseConfiguration()));
        }
    } catch (IOException ioe) {
        logger.error("job:" + getId() + " execute finished with exception", ioe);
        output.append("\n").append(ioe.getLocalizedMessage());
        return new ExecuteResult(ExecuteResult.State.ERROR, output.toString(), ioe);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
示例6: before
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Per-test setup: stands up sandbox metadata, deploys job artifacts, boots the
 * scheduler singleton, and clears leftover cubing jobs.
 */
@Before
public void before() throws Exception {
    HBaseMetadataTestCase.staticCreateTestMetadata(AbstractKylinTestCase.SANDBOX_TEST_DATA);
    DeployUtil.initCliWorkDir();
    DeployUtil.deployMetadata();
    DeployUtil.overrideJobJarLocations();

    final KylinConfig config = KylinConfig.getInstanceFromEnv();
    jobService = ExecutableManager.getInstance(config);

    scheduler = DefaultScheduler.getInstance();
    scheduler.init(new JobEngineConfig(config));
    if (!scheduler.hasStarted()) {
        throw new RuntimeException("scheduler has not been started");
    }

    cubeManager = CubeManager.getInstance(config);
    jobEngineConfig = new JobEngineConfig(config);

    // Drop CubingJob entries left over from previous runs.
    for (final String id : jobService.getAllJobIds()) {
        if (jobService.getJob(id) instanceof CubingJob) {
            jobService.deleteJob(id);
        }
    }
}
示例7: before
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Streaming-test setup: boots the scheduler, resolves the Kafka source config
 * for the test cube, points it at the local host with a fresh random topic,
 * and starts an embedded Kafka broker.
 */
public void before() throws Exception {
    deployEnv();

    // Fast build mode is opted into via -DfastBuildMode=true;
    // equalsIgnoreCase on the literal is null-safe, matching the original check.
    if ("true".equalsIgnoreCase(System.getProperty("fastBuildMode"))) {
        fastBuildMode = true;
        logger.info("Will use fast build mode");
    } else {
        logger.info("Will not use fast build mode");
    }

    final KylinConfig config = KylinConfig.getInstanceFromEnv();
    jobService = ExecutableManager.getInstance(config);

    scheduler = DefaultScheduler.createInstance();
    scheduler.init(new JobEngineConfig(config), new ZookeeperJobLock());
    if (!scheduler.hasStarted()) {
        throw new RuntimeException("scheduler has not been started");
    }

    cubeManager = CubeManager.getInstance(config);

    // Resolve the Kafka config attached to the cube's root fact table.
    final CubeInstance cube = CubeManager.getInstance(config).getCube(cubeName);
    final String factTable = cube.getRootFactTable();
    final StreamingConfig streamingConfig = StreamingManager.getInstance(config).getStreamingConfig(factTable);
    kafkaConfig = KafkaConfigManager.getInstance(config).getKafkaConfig(streamingConfig.getName());

    // Redirect the first broker to this machine and use a unique topic per run.
    final String topicName = UUID.randomUUID().toString();
    final BrokerConfig brokerConfig = kafkaConfig.getKafkaClusterConfigs().get(0).getBrokerConfigs().get(0);
    brokerConfig.setHost(NetworkUtils.getLocalIp());
    kafkaConfig.setTopic(topicName);
    KafkaConfigManager.getInstance(config).updateKafkaConfig(kafkaConfig);

    startEmbeddedKafka(topicName, brokerConfig);
}
示例8: initCubingJob
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
private static CubingJob initCubingJob(CubeSegment seg, String jobType, String submitter, JobEngineConfig config) {
KylinConfig kylinConfig = config.getConfig();
CubeInstance cube = seg.getCubeInstance();
List<ProjectInstance> projList = ProjectManager.getInstance(kylinConfig).findProjects(cube.getType(),
cube.getName());
if (projList == null || projList.size() == 0) {
throw new RuntimeException("Cannot find the project containing the cube " + cube.getName() + "!!!");
} else if (projList.size() >= 2) {
String msg = "Find more than one project containing the cube " + cube.getName()
+ ". It does't meet the uniqueness requirement!!! ";
if (!config.getConfig().allowCubeAppearInMultipleProjects()) {
throw new RuntimeException(msg);
} else {
logger.warn(msg);
}
}
CubingJob result = new CubingJob();
SimpleDateFormat format = new SimpleDateFormat("z yyyy-MM-dd HH:mm:ss");
format.setTimeZone(TimeZone.getTimeZone(config.getTimeZone()));
result.setDeployEnvName(kylinConfig.getDeployEnv());
result.setProjectName(projList.get(0).getName());
result.setJobType(jobType);
CubingExecutableUtil.setCubeName(seg.getCubeInstance().getName(), result.getParams());
CubingExecutableUtil.setSegmentId(seg.getUuid(), result.getParams());
CubingExecutableUtil.setSegmentName(seg.getName(), result.getParams());
result.setName(jobType + " CUBE - " + seg.getCubeInstance().getDisplayName() + " - " + seg.getName() + " - "
+ format.format(new Date(System.currentTimeMillis())));
result.setSubmitter(submitter);
result.setNotifyList(seg.getCubeInstance().getDescriptor().getNotifyList());
return result;
}
示例9: init
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Initializes the job scheduler: acquires the distributed job lock, creates
 * the worker thread pool, resumes running jobs, and schedules the periodic
 * job fetcher.
 *
 * <p>Runs only on servers whose mode is "job" or "all"; re-entry is a no-op
 * guarded by the {@code initialized} flag.
 *
 * @param jobEngineConfig engine configuration (pool sizes, poll interval, priority flag)
 * @param lock            cluster-wide job lock; must be acquired before starting
 * @throws IllegalStateException if the job lock cannot be acquired
 */
@Override
public synchronized void init(JobEngineConfig jobEngineConfig, JobLock lock) throws SchedulerException {
    jobLock = lock;

    String serverMode = jobEngineConfig.getConfig().getServerMode();
    // equalsIgnoreCase avoids the default-locale toLowerCase() of the original
    // (latent locale bug) and compares only once per candidate value.
    if (!("job".equalsIgnoreCase(serverMode) || "all".equalsIgnoreCase(serverMode))) {
        logger.info("server mode: " + serverMode + ", no need to run job scheduler");
        return;
    }
    logger.info("Initializing Job Engine ....");

    // Guard against double initialization; subsequent calls return silently.
    if (!initialized) {
        initialized = true;
    } else {
        return;
    }

    this.jobEngineConfig = jobEngineConfig;

    if (!jobLock.lockJobEngine()) {
        throw new IllegalStateException("Cannot start job scheduler due to lack of job lock");
    }

    executableManager = ExecutableManager.getInstance(jobEngineConfig.getConfig());
    //load all executable, set them to a consistent status
    fetcherPool = Executors.newScheduledThreadPool(1);
    int corePoolSize = jobEngineConfig.getMaxConcurrentJobLimit();
    // SynchronousQueue: tasks are handed directly to a worker, never queued.
    jobPool = new ThreadPoolExecutor(corePoolSize, corePoolSize, Long.MAX_VALUE, TimeUnit.DAYS,
            new SynchronousQueue<Runnable>());
    context = new DefaultContext(Maps.<String, Executable> newConcurrentMap(), jobEngineConfig.getConfig());

    executableManager.resumeAllRunningJobs();

    int pollSecond = jobEngineConfig.getPollIntervalSecond();
    logger.info("Fetching jobs every {} seconds", pollSecond);
    fetcher = jobEngineConfig.getJobPriorityConsidered() ? new FetcherRunnerWithPriority() : new FetcherRunner();
    // First fetch fires after pollSecond/10 so jobs start shortly after boot.
    fetcherPool.scheduleAtFixedRate(fetcher, pollSecond / 10, pollSecond, TimeUnit.SECONDS);
    hasStarted = true;
}
示例10: testPropertiesHotLoad
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Verifies that a JobEngineConfig instance observes property changes after a
 * hot reload of the site properties.
 */
@Test
public void testPropertiesHotLoad() throws IOException {
    final JobEngineConfig engineConfig = new JobEngineConfig(KylinConfig.getInstanceFromEnv());
    // Baseline value before the property change.
    assertEquals(10, engineConfig.getMaxConcurrentJobLimit());

    updateProperty("kylin.job.max-concurrent-jobs", "20");
    KylinConfig.getInstanceFromEnv().reloadFromSiteProperties();

    // The same instance must pick up the reloaded value.
    assertEquals(20, engineConfig.getMaxConcurrentJobLimit());
}
示例11: startScheduler
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Boots a fresh scheduler instance guarded by an in-process mock job lock
 * (suitable for single-JVM tests) and fails fast if it did not start.
 */
protected void startScheduler() throws SchedulerException {
    final JobEngineConfig engineConfig = new JobEngineConfig(KylinConfig.getInstanceFromEnv());
    scheduler = DefaultScheduler.createInstance();
    scheduler.init(engineConfig, new MockJobLock());
    if (!scheduler.hasStarted()) {
        throw new RuntimeException("scheduler has not been started");
    }
}
示例12: KafkaTableInputFormat
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Creates an input format that reads rows for the given segment from Kafka,
 * using the parser class and properties declared in the Kafka source config.
 *
 * @throws IllegalArgumentException if the configured parser cannot be
 *         instantiated reflectively
 */
public KafkaTableInputFormat(CubeSegment cubeSegment, List<TblColRef> columns, KafkaConfig kafkaConfig, JobEngineConfig conf) {
    this.cubeSegment = cubeSegment;
    this.conf = conf;
    try {
        this.streamingParser = StreamingParser.getStreamingParser(kafkaConfig.getParserName(),
                kafkaConfig.getAllParserProperties(), columns);
    } catch (ReflectiveOperationException roe) {
        throw new IllegalArgumentException(roe);
    }
}
示例13: appendMapReduceParameters
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Appends a " -conf &lt;path&gt;" argument to the builder when a Hadoop job
 * conf file is configured for MEDIUM realization capacity; IOExceptions from
 * the lookup are rethrown unchecked.
 */
private void appendMapReduceParameters(StringBuilder builder, JobEngineConfig engineConfig) {
    final String jobConfPath;
    try {
        jobConfPath = engineConfig.getHadoopJobConfFilePath(RealizationCapacity.MEDIUM);
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
    if (jobConfPath != null && !jobConfPath.isEmpty()) {
        builder.append(" -conf ").append(jobConfPath);
    }
}
示例14: testGenerateInsertSql
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Smoke-tests INSERT statement generation for the intermediate flat table;
 * the output is deterministic, so its exact length is asserted.
 */
@Test
public void testGenerateInsertSql() throws IOException {
    final JobEngineConfig engineConfig = new JobEngineConfig(KylinConfig.getInstanceFromEnv());
    final String sql = JoinedFlatTable.generateInsertDataStatement(intermediateTableDesc, fakeJobUUID, engineConfig);
    System.out.println(sql);
    assertEquals(1155, sql.length());
}
示例15: before
import org.apache.kylin.job.engine.JobEngineConfig; //导入依赖的package包/类
/**
 * Per-test setup for II (inverted index) build tests: stands up sandbox
 * metadata, boots the scheduler singleton, clears leftover II jobs, and
 * ensures the test II instance starts in DISABLED state.
 */
@Before
public void before() throws Exception {
    HBaseMetadataTestCase.staticCreateTestMetadata(AbstractKylinTestCase.SANDBOX_TEST_DATA);
    DeployUtil.initCliWorkDir();
    // DeployUtil.deployMetadata();
    DeployUtil.overrideJobJarLocations();

    final KylinConfig config = KylinConfig.getInstanceFromEnv();
    jobService = ExecutableManager.getInstance(config);

    scheduler = DefaultScheduler.getInstance();
    scheduler.init(new JobEngineConfig(config));
    if (!scheduler.hasStarted()) {
        throw new RuntimeException("scheduler has not been started");
    }

    iiManager = IIManager.getInstance(config);
    jobEngineConfig = new JobEngineConfig(config);

    // Drop IIJob entries left over from previous runs.
    for (final String id : jobService.getAllJobIds()) {
        if (jobService.getJob(id) instanceof IIJob) {
            jobService.deleteJob(id);
        }
    }

    // Make sure the test II instance is disabled before the test mutates it.
    IIInstance ii = iiManager.getII(TEST_II_NAME);
    if (ii.getStatus() != RealizationStatusEnum.DISABLED) {
        ii.setStatus(RealizationStatusEnum.DISABLED);
        iiManager.updateII(ii);
    }
}