本文整理汇总了Java中org.apache.kylin.cube.common.RowKeySplitter类的典型用法代码示例。如果您正苦于以下问题:Java RowKeySplitter类的具体用法?Java RowKeySplitter怎么用?Java RowKeySplitter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
RowKeySplitter类属于org.apache.kylin.cube.common包,在下文中一共展示了RowKeySplitter类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: doSetup
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Mapper setup: resolves the segment under optimization and its original
 * counterpart, then builds the row-key splitter over the original segment
 * so existing cuboid rows can be split and filtered against the
 * recommended-cuboid set.
 */
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());
    mos = new MultipleOutputs(context);

    String name = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
    String segId = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);

    KylinConfig kylinConfig = AbstractHadoopJob.loadKylinPropsAndMetadata();
    CubeInstance cubeInstance = CubeManager.getInstance(kylinConfig).getCube(name);

    CubeSegment segmentToOptimize = cubeInstance.getSegmentById(segId);
    CubeSegment sourceSegment = cubeInstance.getOriginalSegmentToOptimize(segmentToOptimize);

    // 65 / 255 — splitter sizing constants; NOTE(review): confirm parameter semantics
    rowKeySplitter = new RowKeySplitter(sourceSegment, 65, 255);
    baseCuboid = cubeInstance.getCuboidScheduler().getBaseCuboidId();
    recommendCuboids = cubeInstance.getCuboidsRecommend();
    Preconditions.checkNotNull(recommendCuboids, "The recommend cuboid map could not be null");
}
示例2: doSetup
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Mapper setup: loads cube metadata for the configured cube/segment,
 * prepares the N-D cuboid builder, and creates a cuboid scheduler for the
 * requested cuboid mode together with the segment's row-key splitter.
 */
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());

    cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
    segmentID = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);
    String mode = context.getConfiguration().get(BatchConstants.CFG_CUBOID_MODE);

    KylinConfig kylinConfig = AbstractHadoopJob.loadKylinPropsAndMetadata();
    CubeInstance cubeInstance = CubeManager.getInstance(kylinConfig).getCube(cubeName);
    cubeDesc = cubeInstance.getDescriptor();
    cubeSegment = cubeInstance.getSegmentById(segmentID);

    ndCuboidBuilder = new NDCuboidBuilder(cubeSegment);
    // Initialize the CuboidScheduler for the configured cuboid mode.
    cuboidScheduler = CuboidSchedulerUtil.getCuboidSchedulerByMode(cubeSegment, mode);
    rowKeySplitter = new RowKeySplitter(cubeSegment, 65, 256);
}
示例3: doSetup
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Mapper setup for cuboid-shard update during optimization: rows are split
 * with the pre-optimization (original) segment's layout and re-encoded with
 * the optimized segment's row-key encoder.
 */
@Override
protected void doSetup(Context context) throws IOException {
    super.bindCurrentConfiguration(context.getConfiguration());
    mos = new MultipleOutputs(context);

    String name = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
    String segId = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);

    KylinConfig kylinConfig = AbstractHadoopJob.loadKylinPropsAndMetadata();
    CubeInstance cubeInstance = CubeManager.getInstance(kylinConfig).getCube(name);
    cubeDesc = cubeInstance.getDescriptor();
    baseCuboid = cubeInstance.getCuboidScheduler().getBaseCuboidId();

    CubeSegment optimizedSegment = cubeInstance.getSegmentById(segId);
    CubeSegment originalSegment = cubeInstance.getOriginalSegmentToOptimize(optimizedSegment);

    // Split with the old segment's layout; encode into the new segment.
    rowKeySplitter = new RowKeySplitter(originalSegment, 65, 256);
    rowKeyEncoderProvider = new RowKeyEncoderProvider(optimizedSegment);
}
示例4: init
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Loads Kylin metadata from HDFS for the configured cube/segment and wires
 * up the cuboid scheduler, the N-D cuboid builder, and the row-key splitter.
 */
public void init() {
    KylinConfig kylinConfig = AbstractHadoopJob.loadKylinConfigFromHdfs(conf, metaUrl);
    CubeInstance cube = CubeManager.getInstance(kylinConfig).getCube(cubeName);

    this.cubeSegment = cube.getSegmentById(segmentId);
    this.cubeDesc = cube.getDescriptor();
    this.cuboidScheduler = this.cubeSegment.getCuboidScheduler();

    this.ndCuboidBuilder = new NDCuboidBuilder(this.cubeSegment, new RowKeyEncoderProvider(this.cubeSegment));
    this.rowKeySplitter = new RowKeySplitter(this.cubeSegment, 65, 256);
}
示例5: setup
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Merge-mapper setup: binds the merged (target) segment by name, then
 * derives the source segment from the input file's path (the path embeds
 * the job id of the segment the file came from).
 */
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.publishConfiguration(context.getConfiguration());

    cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME).toUpperCase();
    segmentName = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_NAME).toUpperCase();

    config = AbstractHadoopJob.loadKylinPropsAndMetadata(context.getConfiguration());
    cubeManager = CubeManager.getInstance(config);
    cube = cubeManager.getCube(cubeName);
    cubeDesc = cube.getDescriptor();
    mergedCubeSegment = cube.getSegment(segmentName, SegmentStatusEnum.NEW);

    newKeyBuf = new byte[256]; // size will auto-grow

    // Decide which source segment this task's input belongs to.
    InputSplit inputSplit = context.getInputSplit();
    String filePath = ((FileSplit) inputSplit).getPath().toString();
    System.out.println("filePath:" + filePath);
    String jobID = extractJobIDFromPath(filePath);
    System.out.println("jobID:" + jobID);
    sourceCubeSegment = findSegmentWithUuid(jobID, cube);
    System.out.println(sourceCubeSegment);

    this.rowKeySplitter = new RowKeySplitter(sourceCubeSegment, 65, 255);
}
示例6: doSetup
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
// Merge-mapper setup: binds the merged (target) segment, locates the source
// segment from the input split, and precomputes which measures must be
// re-encoded because their dictionaries differ between the two segments.
@Override
protected void doSetup(Context context) throws IOException, InterruptedException {
super.bindCurrentConfiguration(context.getConfiguration());
cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME);
segmentID = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_ID);
config = AbstractHadoopJob.loadKylinPropsAndMetadata();
cubeManager = CubeManager.getInstance(config);
cube = cubeManager.getCube(cubeName);
cubeDesc = cube.getDescriptor();
mergedCubeSegment = cube.getSegmentById(segmentID);
// int colCount = cubeDesc.getRowkey().getRowKeyColumns().length;
newKeyBodyBuf = new byte[RowConstants.ROWKEY_BUFFER_SIZE];// size will auto-grow
newKeyBuf = ByteArray.allocate(RowConstants.ROWKEY_BUFFER_SIZE);
// decide which source segment this mapper's input file belongs to
FileSplit fileSplit = (FileSplit) context.getInputSplit();
IMROutput2.IMRMergeOutputFormat outputFormat = MRUtil.getBatchMergeOutputSide2(mergedCubeSegment).getOuputFormat();
sourceCubeSegment = outputFormat.findSourceSegment(fileSplit, cube);
// 65 / 255 — splitter sizing constants; NOTE(review): confirm parameter semantics
rowKeySplitter = new RowKeySplitter(sourceCubeSegment, 65, 255);
rowKeyEncoderProvider = new RowKeyEncoderProvider(mergedCubeSegment);
measureDescs = cubeDesc.getMeasures();
codec = new BufferedMeasureCodec(measureDescs);
measureObjs = new Object[measureDescs.size()];
outputValue = new Text();
dictMeasures = Lists.newArrayList();
oldDicts = Maps.newHashMap();
newDicts = Maps.newHashMap();
// For every measure, collect the dictionaries of its dictionary-encoded
// columns from both segments; if any column's dictionary changed between
// source and merged segment, the measure's values need re-encoding.
for (int i = 0; i < measureDescs.size(); i++) {
MeasureDesc measureDesc = measureDescs.get(i);
MeasureType measureType = measureDesc.getFunction().getMeasureType();
List<TblColRef> columns = measureType.getColumnsNeedDictionary(measureDesc.getFunction());
boolean needReEncode = false;
for (TblColRef col : columns) {
// handle the column where all records are null (no dictionary was built)
if (sourceCubeSegment.getDictionary(col) == null) {
continue;
}
oldDicts.put(col, sourceCubeSegment.getDictionary(col));
newDicts.put(col, mergedCubeSegment.getDictionary(col));
if (!sourceCubeSegment.getDictionary(col).equals(mergedCubeSegment.getDictionary(col))) {
needReEncode = true;
}
}
if (needReEncode) {
// pair the measure index with a fresh ingester so its values get re-encoded
dictMeasures.add(Pair.newPair(i, measureType.newIngester()));
}
}
}
示例7: NDCuboidBuilder
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Creates a builder bound to the given segment, with a row-key splitter and
 * a row-key encoder provider for that segment's encoding.
 */
public NDCuboidBuilder(CubeSegment cubeSegment) {
    this.cubeSegment = cubeSegment;
    this.rowKeyEncoderProvider = new RowKeyEncoderProvider(cubeSegment);
    this.rowKeySplitter = new RowKeySplitter(cubeSegment, 65, 256);
}
示例8: RowKeyDecoder
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Builds a decoder for row keys of the given segment, using the segment's
 * dimension-encoding map for per-column I/O.
 */
public RowKeyDecoder(CubeSegment cubeSegment) {
    this.cubeDesc = cubeSegment.getCubeDesc();
    this.values = new ArrayList<>();
    this.colIO = new RowKeyColumnIO(cubeSegment.getDimensionEncodingMap());
    this.rowKeySplitter = new RowKeySplitter(cubeSegment, 65, 255);
}
示例9: getRowKeySplitter
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/** @return the row-key splitter used by this component. */
public RowKeySplitter getRowKeySplitter() {
return rowKeySplitter;
}
示例10: setup
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Mapper setup: resolves the NEW (under-construction) segment for the
 * configured cube/segment names, then prepares the cuboid scheduler and the
 * segment's row-key splitter.
 */
@Override
protected void setup(Context context) throws IOException {
    super.publishConfiguration(context.getConfiguration());

    cubeName = context.getConfiguration().get(BatchConstants.CFG_CUBE_NAME).toUpperCase();
    segmentName = context.getConfiguration().get(BatchConstants.CFG_CUBE_SEGMENT_NAME).toUpperCase();

    KylinConfig kylinConfig = AbstractHadoopJob.loadKylinPropsAndMetadata(context.getConfiguration());
    CubeInstance cubeInstance = CubeManager.getInstance(kylinConfig).getCube(cubeName);
    CubeSegment segment = cubeInstance.getSegment(segmentName, SegmentStatusEnum.NEW);
    cubeDesc = cubeInstance.getDescriptor();

    // Initialize the CuboidScheduler over the full cube description.
    cuboidScheduler = new CuboidScheduler(cubeDesc);
    rowKeySplitter = new RowKeySplitter(segment, 65, 256);
}
示例11: RowKeyDecoder
import org.apache.kylin.cube.common.RowKeySplitter; //导入依赖的package包/类
/**
 * Builds a decoder for row keys belonging to the given segment.
 */
public RowKeyDecoder(CubeSegment cubeSegment) {
    this.cubeDesc = cubeSegment.getCubeDesc();
    this.values = new ArrayList<>();
    this.colIO = new RowKeyColumnIO(cubeSegment);
    this.rowKeySplitter = new RowKeySplitter(cubeSegment, 65, 255);
}