本文整理汇总了Java中org.apache.kylin.job.execution.ExecuteResult.createError方法的典型用法代码示例。如果您正苦于以下问题:Java ExecuteResult.createError方法的具体用法?Java ExecuteResult.createError怎么用?Java ExecuteResult.createError使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.kylin.job.execution.ExecuteResult
的用法示例。
在下文中一共展示了ExecuteResult.createError方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment optimizeSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    // The segment under optimization must map back to an original READY segment.
    CubeSegment oldSegment = optimizeSegment.getCubeInstance().getOriginalSegmentToOptimize(optimizeSegment);
    Preconditions.checkNotNull(oldSegment,
            "cannot find the original segment to be optimized by " + optimizeSegment);

    // Reuse the original segment's dictionaries, snapshots and rowkey stats
    // instead of rebuilding them for the optimized segment.
    optimizeSegment.getDictionaries().putAll(oldSegment.getDictionaries());
    optimizeSegment.getSnapshots().putAll(oldSegment.getSnapshots());
    optimizeSegment.getRowkeyStats().addAll(oldSegment.getRowkeyStats());

    try {
        // Persist only the touched segment.
        CubeUpdate update = new CubeUpdate(cube);
        update.setToUpdateSegs(optimizeSegment);
        cubeManager.updateCube(update);
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult();
}
示例2: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        // Only READY_PENDING segments carry the statistics produced by the optimize jobs.
        List<CubeSegment> pendingSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        Map<Long, Long> statsByCuboid = CuboidStatsReaderUtil
                .readCuboidStatsFromSegments(recommendCuboids, pendingSegments);
        if (statsByCuboid == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        // Promote all pending segments in one shot together with the cuboid stats.
        mgr.promoteCheckpointOptimizeSegments(cube, statsByCuboid,
                pendingSegments.toArray(new CubeSegment[0]));
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
示例3: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = context.getConfig();
    // FIX: StringBuilder instead of StringBuffer — the buffer is method-local,
    // so StringBuffer's per-call synchronization is pure overhead.
    StringBuilder output = new StringBuilder();
    try {
        output.append(cleanUpIntermediateFlatTable(config));
        // Intentionally NOT dropping the Hive intermediate view here: concurrent
        // jobs may still reference it (see cleanUpHiveViewIntermediateTable).
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
示例4: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    // Input counts come from the original segment: optimization re-cubes
    // existing data, it does not re-read the source.
    CubeSegment originalSegment = cube.getOriginalSegmentToOptimize(segment);
    long sourceCount = originalSegment.getInputRecords();
    long sourceSizeBytes = originalSegment.getInputRecordsSize();

    final String jobId = CubingExecutableUtil.getCubingJobId(this.getParams());
    CubingJob cubingJob = (CubingJob) getManager().getJob(jobId);
    long cubeSizeBytes = cubingJob.findCubeSizeBytes();

    // Record build provenance and sizes on the optimized segment.
    segment.setLastBuildJobID(jobId);
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubeSizeBytes / 1024);
    segment.setInputRecords(sourceCount);
    segment.setInputRecordsSize(sourceSizeBytes);

    try {
        cubeManager.promoteNewlyOptimizeSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
示例5: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment mergedSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));
    final List<CubeSegment> toMerge = getMergingSegments(cube);
    KylinConfig kylinConf = cube.getConfig();

    // Sort so the merge helpers see segments in their natural (range) order.
    Collections.sort(toMerge);
    try {
        checkLookupSnapshotsMustIncremental(toMerge);

        // Work on a private copy so cached instances stay untouched until the
        // update is actually persisted.
        CubeInstance cubeCopy = cube.latestCopyForWrite();
        CubeSegment newSegCopy = cubeCopy.getSegmentById(mergedSegment.getUuid());

        makeDictForNewSegment(kylinConf, cubeCopy, newSegCopy, toMerge);
        makeSnapshotForNewSegment(cubeCopy, newSegCopy, toMerge);

        CubeUpdate cubeUpdate = new CubeUpdate(cubeCopy);
        cubeUpdate.setToUpdateSegs(newSegCopy);
        mgr.updateCube(cubeUpdate);
        return ExecuteResult.createSucceed();
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
}
示例6: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    final String jobId = CubingExecutableUtil.getCubingJobId(this.getParams());
    CubingJob cubingJob = (CubingJob) getManager().getJob(jobId);

    // Pull input/output statistics recorded by the cubing job's steps.
    long sourceCount = cubingJob.findSourceRecordCount();
    long sourceSizeBytes = cubingJob.findSourceSizeBytes();
    long cubeSizeBytes = cubingJob.findCubeSizeBytes();

    segment.setLastBuildJobID(jobId);
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubeSizeBytes / 1024);
    segment.setInputRecords(sourceCount);
    segment.setInputRecordsSize(sourceSizeBytes);

    try {
        // Offset-based (streaming) segments need their time range derived
        // from the consumed offsets before promotion.
        if (segment.isOffsetCube()) {
            updateTimeRange(segment);
        }
        cubeManager.promoteNewlyBuiltSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
示例7: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        logger.info("executing:" + getCmd());
        // PatternedLogger scrapes job info (e.g. MR job ids) out of the command output.
        final PatternedLogger cmdLogger = new PatternedLogger(logger);
        final Pair<Integer, String> exitAndOutput = context.getConfig().getCliCommandExecutor()
                .execute(getCmd(), cmdLogger);
        getManager().addJobInfo(getId(), cmdLogger.getInfo());

        if (exitAndOutput.getFirst() == 0) {
            return new ExecuteResult(ExecuteResult.State.SUCCEED, exitAndOutput.getSecond());
        }
        // Non-zero exit code: report the command output as the failure cause.
        return ExecuteResult.createFailed(new ShellException(exitAndOutput.getSecond()));
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
}
示例8: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final String path = getDataPath();
    try {
        // Best-effort recursive delete of the segment's HDFS working directory.
        rmdirOnHDFS(path);
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, "HDFS path " + path + " is dropped.\n");
}
示例9: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    // Work on a write copy; the cached instance is only replaced by updateCube().
    final CubeInstance cubeCopy = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()))
            .latestCopyForWrite();
    final String segmentId = CubingExecutableUtil.getSegmentId(this.getParams());
    final CubeSegment segCopy = cubeCopy.getSegmentById(segmentId);
    Preconditions.checkNotNull(segCopy, "Cube segment '" + segmentId + "' not found.");

    Segments<CubeSegment> toMerge = cubeCopy.getMergingSegments(segCopy);
    Preconditions.checkArgument(toMerge.size() > 0, "Merging segment not exist.");
    Collections.sort(toMerge);

    // The merged segment spans from the first merging segment's start
    // to the last one's end, for both the source range and the offsets.
    final CubeSegment head = toMerge.get(0);
    final CubeSegment tail = toMerge.get(toMerge.size() - 1);
    segCopy.setSegRange(new SegmentRange(head.getSegRange().start, tail.getSegRange().end));
    segCopy.setSourcePartitionOffsetStart(head.getSourcePartitionOffsetStart());
    segCopy.setSourcePartitionOffsetEnd(tail.getSourcePartitionOffsetEnd());
    segCopy.setTSRange(new TSRange(toMerge.getTSStart(), toMerge.getTSEnd()));

    CubeUpdate update = new CubeUpdate(cubeCopy);
    update.setToUpdateSegs(segCopy);
    try {
        cubeManager.updateCube(update);
        return ExecuteResult.createSucceed();
    } catch (IOException e) {
        logger.error("fail to update cube segment offset", e);
        return ExecuteResult.createError(e);
    }
}
示例10: doWork
import org.apache.kylin.job.execution.ExecuteResult; //导入方法依赖的package包/类
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    CubeSegment mergedSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));
    if (mergedSegment == null) {
        return ExecuteResult.createFailed(new SegmentNotFoundException(
                "there is no segment with id:" + CubingExecutableUtil.getSegmentId(this.getParams())));
    }
    CubingJob cubingJob = (CubingJob) getManager().getJob(CubingExecutableUtil.getCubingJobId(this.getParams()));
    long cubeSizeBytes = cubingJob.findCubeSizeBytes();

    // Collect source statistics from the segments being merged.
    List<String> mergingSegmentIds = CubingExecutableUtil.getMergingSegmentIds(this.getParams());
    if (mergingSegmentIds.isEmpty()) {
        return ExecuteResult.createFailed(new SegmentNotFoundException("there are no merging segments"));
    }
    long sourceCount = 0L;
    long sourceSize = 0L;
    for (String id : mergingSegmentIds) {
        CubeSegment segment = cube.getSegmentById(id);
        // FIX: a stale/unknown merging id previously caused an NPE below;
        // fail cleanly with the same exception type used for the merged segment.
        if (segment == null) {
            return ExecuteResult.createFailed(
                    new SegmentNotFoundException("there is no segment with id:" + id));
        }
        sourceCount += segment.getInputRecords();
        sourceSize += segment.getInputRecordsSize();
    }

    // Update the merged segment's metadata before promoting it.
    mergedSegment.setSizeKB(cubeSizeBytes / 1024);
    mergedSegment.setInputRecords(sourceCount);
    mergedSegment.setInputRecordsSize(sourceSize);
    mergedSegment.setLastBuildJobID(CubingExecutableUtil.getCubingJobId(this.getParams()));
    mergedSegment.setLastBuildTime(System.currentTimeMillis());

    try {
        cubeManager.promoteNewlyBuiltSegments(cube, mergedSegment);
        return new ExecuteResult(ExecuteResult.State.SUCCEED);
    } catch (IOException e) {
        logger.error("fail to update cube after merge", e);
        return ExecuteResult.createError(e);
    }
}