本文整理汇总了Java中org.apache.tajo.catalog.proto.CatalogProtos.StoreType.CSV属性的典型用法代码示例。如果您正苦于以下问题:Java StoreType.CSV属性的具体用法?Java StoreType.CSV怎么用?Java StoreType.CSV使用的例子?那么, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类org.apache.tajo.catalog.proto.CatalogProtos.StoreType
的用法示例。
在下文中一共展示了StoreType.CSV属性的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testGetTable
/** Verifies a table can be created in, looked up from, and dropped out of the catalog. */
@Test
public void testGetTable() throws Exception {
  schema1 = new Schema();
  schema1.addColumn(FieldName1, Type.BLOB);
  schema1.addColumn(FieldName2, Type.INT4);
  schema1.addColumn(FieldName3, Type.INT8);

  final String tableName = "getTable";
  final String fqName = CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, tableName);
  Path tablePath = new Path(CommonTestingUtil.getTestDir(), "table1");
  TableDesc desc = new TableDesc(fqName, schema1, StoreType.CSV, new Options(), tablePath);

  // The table must not exist before creation, must exist afterwards,
  // and must be gone again after the drop.
  assertFalse(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
  catalog.createTable(desc);
  assertTrue(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
  catalog.dropTable(fqName);
  assertFalse(catalog.existsTable(DEFAULT_DATABASE_NAME, tableName));
}
示例2: setUp
/**
 * Boots a mini catalog cluster and registers the employee/dept/score tables
 * plus the "sumtest" aggregation function used by the planner tests.
 */
@BeforeClass
public static void setUp() throws Exception {
  util = new TajoTestingCluster();
  util.startCatalogCluster();
  catalog = util.getMiniCatalogCluster().getCatalog();

  Schema employeeSchema = new Schema();
  employeeSchema.addColumn("name", Type.TEXT);
  employeeSchema.addColumn("empId", CatalogUtil.newSimpleDataType(Type.INT4));
  employeeSchema.addColumn("deptName", Type.TEXT);

  Schema deptSchema = new Schema();
  deptSchema.addColumn("deptName", Type.TEXT);
  deptSchema.addColumn("manager", Type.TEXT);

  Schema scoreSchema = new Schema();
  scoreSchema.addColumn("deptName", Type.TEXT);
  scoreSchema.addColumn("score", CatalogUtil.newSimpleDataType(Type.INT4));

  TableMeta csvMeta = CatalogUtil.newTableMeta(StoreType.CSV);
  catalog.addTable(new TableDesc("employee", employeeSchema, csvMeta, CommonTestingUtil.getTestDir()));
  catalog.addTable(new TableDesc("dept", deptSchema, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir()));
  catalog.addTable(new TableDesc("score", scoreSchema, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir()));

  catalog.createFunction(new FunctionDesc("sumtest", SumInt.class, FunctionType.AGGREGATION,
      CatalogUtil.newSimpleDataType(Type.INT4),
      CatalogUtil.newSimpleDataTypeArray(Type.INT4)));

  analyzer = new SQLAnalyzer();
  planner = new LogicalPlanner(catalog);
}
示例3: testTime
/**
 * Round-trips DATE/TIME/TIMESTAMP datums through an appender and scanner
 * for the storage formats that support temporal types here (CSV and RAW).
 */
@Test
public void testTime() throws IOException {
  if (storeType == StoreType.CSV || storeType == StoreType.RAW) {
    Schema schema = new Schema();
    schema.addColumn("col1", Type.DATE);
    schema.addColumn("col2", Type.TIME);
    schema.addColumn("col3", Type.TIMESTAMP);

    Options options = new Options();
    TableMeta meta = CatalogUtil.newTableMeta(storeType, options);

    Path tablePath = new Path(testDir, "testTime.data");
    Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
    appender.init();

    Tuple tuple = new VTuple(3);
    tuple.put(new Datum[]{
        DatumFactory.createDate("1980-04-01"),
        DatumFactory.createTime("12:34:56"),
        // BUGFIX: divide before casting. The old form
        // "(int) System.currentTimeMillis() / 1000" cast the long millis to int
        // first, overflowing to garbage before the division ever ran.
        DatumFactory.createTimeStamp((int) (System.currentTimeMillis() / 1000))
    });
    appender.addTuple(tuple);
    appender.flush();
    appender.close();

    FileStatus status = fs.getFileStatus(tablePath);
    FileFragment fragment = new FileFragment("table", tablePath, 0, status.getLen());
    Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, fragment);
    scanner.init();

    // Every datum read back must equal the datum that was written.
    Tuple retrieved;
    while ((retrieved = scanner.next()) != null) {
      for (int i = 0; i < tuple.size(); i++) {
        assertEquals(tuple.get(i), retrieved.get(i));
      }
    }
    scanner.close();
  }
}
示例4: createMockupTable
/** Builds a throwaway CSV-backed TableDesc for the given database/table name. */
private TableDesc createMockupTable(String databaseName, String tableName) throws IOException {
  schema1 = new Schema();
  schema1.addColumn(FieldName1, Type.BLOB);
  schema1.addColumn(FieldName2, Type.INT4);
  schema1.addColumn(FieldName3, Type.INT8);

  Path tablePath = new Path(CommonTestingUtil.getTestDir(), tableName);
  TableMeta csvMeta = new TableMeta(StoreType.CSV, new Options());
  return new TableDesc(
      CatalogUtil.buildFQName(databaseName, tableName), schema1, csvMeta, tablePath, true);
}
示例5: testProjection
/**
 * Writes 10k rows of (id, age, score) and scans back only (age, score),
 * verifying the projected values and that the unrequested first column
 * reads as null for formats honoring projection (RCFILE, TREVNI, CSV).
 */
@Test
public void testProjection() throws IOException {
  Schema schema = new Schema();
  schema.addColumn("id", Type.INT4);
  schema.addColumn("age", Type.INT8);
  schema.addColumn("score", Type.FLOAT4);

  TableMeta meta = CatalogUtil.newTableMeta(storeType);
  Path tablePath = new Path(testDir, "testProjection.data");
  Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
  appender.init();

  final int rowCount = 10000;
  for (int i = 0; i < rowCount; i++) {
    VTuple row = new VTuple(3);
    row.put(0, DatumFactory.createInt4(i + 1));
    row.put(1, DatumFactory.createInt8(i + 2));
    row.put(2, DatumFactory.createFloat4(i + 3));
    appender.addTuple(row);
  }
  appender.close();

  FileStatus status = fs.getFileStatus(tablePath);
  FileFragment fragment = new FileFragment("testReadAndWrite", tablePath, 0, status.getLen());

  // Project away the first column.
  Schema target = new Schema();
  target.addColumn("age", Type.INT8);
  target.addColumn("score", Type.FLOAT4);

  Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, fragment, target);
  scanner.init();

  int seen = 0;
  Tuple tuple;
  while ((tuple = scanner.next()) != null) {
    if (storeType == StoreType.RCFILE || storeType == StoreType.TREVNI || storeType == StoreType.CSV) {
      assertTrue(tuple.get(0) == null);
    }
    assertTrue(seen + 2 == tuple.get(1).asInt8());
    assertTrue(seen + 3 == tuple.get(2).asFloat4());
    seen++;
  }
  scanner.close();
  assertEquals(rowCount, seen);
}
示例6: testProjection
/**
 * Writes 10k rows of (id, age, score) and scans back only (age, score),
 * verifying the projected values; the unrequested first column must come
 * back as null or NullDatum for RCFILE, TREVNI, and CSV.
 */
@Test
public void testProjection() throws IOException {
  Schema schema = new Schema();
  schema.addColumn("id", Type.INT4);
  schema.addColumn("age", Type.INT8);
  schema.addColumn("score", Type.FLOAT4);

  TableMeta meta = CatalogUtil.newTableMeta(storeType);
  Path tablePath = new Path(testDir, "testProjection.data");
  Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
  appender.init();

  final int rowCount = 10000;
  for (int i = 0; i < rowCount; i++) {
    VTuple row = new VTuple(3);
    row.put(0, DatumFactory.createInt4(i + 1));
    row.put(1, DatumFactory.createInt8(i + 2));
    row.put(2, DatumFactory.createFloat4(i + 3));
    appender.addTuple(row);
  }
  appender.close();

  FileStatus status = fs.getFileStatus(tablePath);
  FileFragment fragment = new FileFragment("testReadAndWrite", tablePath, 0, status.getLen());

  // Project away the first column.
  Schema target = new Schema();
  target.addColumn("age", Type.INT8);
  target.addColumn("score", Type.FLOAT4);

  Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, fragment, target);
  scanner.init();

  int seen = 0;
  Tuple tuple;
  while ((tuple = scanner.next()) != null) {
    if (storeType == StoreType.RCFILE || storeType == StoreType.TREVNI || storeType == StoreType.CSV) {
      assertTrue(tuple.get(0) == null || tuple.get(0) instanceof NullDatum);
    }
    assertTrue(seen + 2 == tuple.get(1).asInt8());
    assertTrue(seen + 3 == tuple.get(2).asFloat4());
    seen++;
  }
  scanner.close();
  assertEquals(rowCount, seen);
}
示例7: testSplitCompressionData
/**
 * Writes a BZip2-compressed CSV table, splits it at a random byte offset,
 * and verifies both splits together scan back exactly the written row count.
 * Runs only for CSV, the format under test for splittable compression.
 */
@Test
public void testSplitCompressionData() throws IOException {
  if (StoreType.CSV != storeType) return;

  Schema schema = new Schema();
  schema.addColumn("id", Type.INT4);
  schema.addColumn("age", Type.INT8);

  TableMeta meta = CatalogUtil.newTableMeta(storeType);
  meta.putOption("compression.codec", BZip2Codec.class.getCanonicalName());

  Path tablePath = new Path(testDir, "SplitCompression");
  Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
  appender.enableStats();
  appender.init();

  // FIX: corrected the "extention" misspelling.
  String extension = "";
  if (appender instanceof CSVFile.CSVAppender) {
    extension = ((CSVFile.CSVAppender) appender).getExtension();
  }

  int tupleNum = 100000;
  for (int i = 0; i < tupleNum; i++) {
    VTuple vTuple = new VTuple(2);
    vTuple.put(0, DatumFactory.createInt4(i + 1));
    // FIX: uppercase long-literal suffix (25L); lowercase "25l" is easily misread as 251.
    vTuple.put(1, DatumFactory.createInt8(25L));
    appender.addTuple(vTuple);
  }
  appender.close();

  TableStats stat = appender.getStats();
  assertEquals(tupleNum, stat.getNumRows().longValue());

  tablePath = tablePath.suffix(extension);
  FileStatus status = fs.getFileStatus(tablePath);
  long fileLen = status.getLen();
  // Random split point in [1, fileLen] so both fragments are non-trivially exercised.
  long splitOffset = (long) (Math.random() * fileLen) + 1;

  FileFragment[] tablets = new FileFragment[2];
  tablets[0] = new FileFragment("SplitCompression", tablePath, 0, splitOffset);
  tablets[1] = new FileFragment("SplitCompression", tablePath, splitOffset, (fileLen - splitOffset));

  int tupleCnt = 0;
  Tuple tuple;
  Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, tablets[0], schema);
  assertTrue(scanner.isSplittable());
  scanner.init();
  while ((tuple = scanner.next()) != null) {
    tupleCnt++;
  }
  scanner.close();

  scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, tablets[1], schema);
  assertTrue(scanner.isSplittable());
  scanner.init();
  while ((tuple = scanner.next()) != null) {
    tupleCnt++;
  }
  scanner.close();

  // The two halves must cover every written row exactly once.
  assertEquals(tupleNum, tupleCnt);
}
示例8: storageCompressionTest
/**
 * Writes 100k rows with the given compression codec, scans them back, and
 * checks row counts, the CSV splittability expectation for the codec, and
 * that the compressed byte count differs from the scanned byte count.
 */
private void storageCompressionTest(StoreType storeType, Class<? extends CompressionCodec> codec) throws IOException {
  Schema schema = new Schema();
  schema.addColumn("id", Type.INT4);
  schema.addColumn("age", Type.FLOAT4);
  schema.addColumn("name", Type.TEXT);

  TableMeta meta = CatalogUtil.newTableMeta(storeType);
  meta.putOption("compression.codec", codec.getCanonicalName());
  meta.putOption("rcfile.serde", TextSerializerDeserializer.class.getName());

  String fileName = "Compression_" + codec.getSimpleName();
  Path tablePath = new Path(testDir, fileName);

  Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
  appender.enableStats();
  appender.init();

  String extension = "";
  if (appender instanceof CSVFile.CSVAppender) {
    extension = ((CSVFile.CSVAppender) appender).getExtension();
  }

  final int rowCount = 100000;
  for (int i = 0; i < rowCount; i++) {
    VTuple row = new VTuple(3);
    row.put(0, DatumFactory.createInt4(i + 1));
    row.put(1, DatumFactory.createFloat4((float) i));
    row.put(2, DatumFactory.createText(String.valueOf(i)));
    appender.addTuple(row);
  }
  appender.close();

  TableStats stat = appender.getStats();
  assertEquals(rowCount, stat.getNumRows().longValue());

  tablePath = tablePath.suffix(extension);
  FileStatus status = fs.getFileStatus(tablePath);
  FileFragment[] tablets = { new FileFragment(fileName, tablePath, 0, status.getLen()) };

  Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, tablets[0], schema);
  if (StoreType.CSV == storeType) {
    // Only codecs implementing SplittableCompressionCodec allow split scans of CSV.
    if (SplittableCompressionCodec.class.isAssignableFrom(codec)) {
      assertTrue(scanner.isSplittable());
    } else {
      assertFalse(scanner.isSplittable());
    }
  }

  scanner.init();
  int scanned = 0;
  while (scanner.next() != null) {
    scanned++;
  }
  scanner.close();

  assertEquals(rowCount, scanned);
  assertNotSame(appender.getStats().getNumBytes().longValue(), scanner.getInputStats().getNumBytes().longValue());
  assertEquals(appender.getStats().getNumRows().longValue(), scanner.getInputStats().getNumRows().longValue());
}
示例9: setUp
/**
 * Boots a mini catalog cluster with the default tablespace/database, then
 * registers the employee/dept/score tables and the "sumtest" aggregation
 * function used by the planner tests.
 */
@BeforeClass
public static void setUp() throws Exception {
  util = new TajoTestingCluster();
  util.startCatalogCluster();
  catalog = util.getMiniCatalogCluster().getCatalog();
  catalog.createTablespace(DEFAULT_TABLESPACE_NAME, "hdfs://localhost:1234/warehouse");
  catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME);

  Schema employeeSchema = new Schema();
  employeeSchema.addColumn("name", Type.TEXT);
  employeeSchema.addColumn("empid", CatalogUtil.newSimpleDataType(Type.INT4));
  employeeSchema.addColumn("deptname", Type.TEXT);

  Schema deptSchema = new Schema();
  deptSchema.addColumn("deptname", Type.TEXT);
  deptSchema.addColumn("manager", Type.TEXT);

  Schema scoreSchema = new Schema();
  scoreSchema.addColumn("deptname", Type.TEXT);
  scoreSchema.addColumn("score", CatalogUtil.newSimpleDataType(Type.INT4));

  TableMeta csvMeta = CatalogUtil.newTableMeta(StoreType.CSV);
  catalog.createTable(new TableDesc(
      CatalogUtil.buildFQName(TajoConstants.DEFAULT_DATABASE_NAME, "employee"),
      employeeSchema, csvMeta, CommonTestingUtil.getTestDir()));
  catalog.createTable(new TableDesc(
      CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "dept"),
      deptSchema, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir()));
  catalog.createTable(new TableDesc(
      CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, "score"),
      scoreSchema, StoreType.CSV, new Options(), CommonTestingUtil.getTestDir()));

  catalog.createFunction(new FunctionDesc("sumtest", SumInt.class, FunctionType.AGGREGATION,
      CatalogUtil.newSimpleDataType(Type.INT4),
      CatalogUtil.newSimpleDataTypeArray(Type.INT4)));

  analyzer = new SQLAnalyzer();
  planner = new LogicalPlanner(catalog);
}
示例10: testProjection
/**
 * Writes (id, age, score) rows and scans with a projection of (age, score);
 * verifies projected values and that the unrequested column comes back null
 * for the listed formats. AVRO additionally needs an explicit schema literal.
 */
@Test
public void testProjection() throws IOException {
  Schema schema = new Schema();
  schema.addColumn("id", Type.INT4);
  schema.addColumn("age", Type.INT8);
  schema.addColumn("score", Type.FLOAT4);

  TableMeta meta = CatalogUtil.newTableMeta(storeType);
  meta.setOptions(StorageUtil.newPhysicalProperties(storeType));
  if (storeType == StoreType.AVRO) {
    meta.putOption(StorageConstants.AVRO_SCHEMA_LITERAL, TEST_PROJECTION_AVRO_SCHEMA);
  }

  Path tablePath = new Path(testDir, "testProjection.data");
  Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
  appender.init();

  final int rowCount = 10000;
  for (int i = 0; i < rowCount; i++) {
    VTuple row = new VTuple(3);
    row.put(0, DatumFactory.createInt4(i + 1));
    row.put(1, DatumFactory.createInt8(i + 2));
    row.put(2, DatumFactory.createFloat4(i + 3));
    appender.addTuple(row);
  }
  appender.close();

  FileStatus status = fs.getFileStatus(tablePath);
  FileFragment fragment = new FileFragment("testReadAndWrite", tablePath, 0, status.getLen());

  // Project away the first column.
  Schema target = new Schema();
  target.addColumn("age", Type.INT8);
  target.addColumn("score", Type.FLOAT4);

  Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, fragment, target);
  scanner.init();

  int seen = 0;
  Tuple tuple;
  while ((tuple = scanner.next()) != null) {
    boolean dropsUnprojected =
        storeType == StoreType.RCFILE
            || storeType == StoreType.TREVNI
            || storeType == StoreType.CSV
            || storeType == StoreType.PARQUET
            || storeType == StoreType.SEQUENCEFILE
            || storeType == StoreType.AVRO;
    if (dropsUnprojected) {
      assertTrue(tuple.get(0) == null);
    }
    assertTrue(seen + 2 == tuple.get(1).asInt8());
    assertTrue(seen + 3 == tuple.get(2).asFloat4());
    seen++;
  }
  scanner.close();
  assertEquals(rowCount, seen);
}
示例11: testProjection
/**
 * Writes (id, age, score) rows and scans with a projection of (age, score);
 * verifies projected values, and that the unrequested column comes back as
 * null or NullDatum for the listed formats. AVRO needs a schema literal.
 */
@Test
public void testProjection() throws IOException {
  Schema schema = new Schema();
  schema.addColumn("id", Type.INT4);
  schema.addColumn("age", Type.INT8);
  schema.addColumn("score", Type.FLOAT4);

  TableMeta meta = CatalogUtil.newTableMeta(storeType);
  meta.setOptions(StorageUtil.newPhysicalProperties(storeType));
  if (storeType == StoreType.AVRO) {
    meta.putOption(StorageConstants.AVRO_SCHEMA_LITERAL, TEST_PROJECTION_AVRO_SCHEMA);
  }

  Path tablePath = new Path(testDir, "testProjection.data");
  Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
  appender.init();

  final int rowCount = 10000;
  for (int i = 0; i < rowCount; i++) {
    VTuple row = new VTuple(3);
    row.put(0, DatumFactory.createInt4(i + 1));
    row.put(1, DatumFactory.createInt8(i + 2));
    row.put(2, DatumFactory.createFloat4(i + 3));
    appender.addTuple(row);
  }
  appender.close();

  FileStatus status = fs.getFileStatus(tablePath);
  FileFragment fragment = new FileFragment("testReadAndWrite", tablePath, 0, status.getLen());

  // Project away the first column.
  Schema target = new Schema();
  target.addColumn("age", Type.INT8);
  target.addColumn("score", Type.FLOAT4);

  Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, fragment, target);
  scanner.init();

  int seen = 0;
  Tuple tuple;
  while ((tuple = scanner.next()) != null) {
    boolean dropsUnprojected =
        storeType == StoreType.RCFILE
            || storeType == StoreType.TREVNI
            || storeType == StoreType.CSV
            || storeType == StoreType.PARQUET
            || storeType == StoreType.AVRO;
    if (dropsUnprojected) {
      assertTrue(tuple.get(0) == null || tuple.get(0) instanceof NullDatum);
    }
    assertTrue(seen + 2 == tuple.get(1).asInt8());
    assertTrue(seen + 3 == tuple.get(2).asFloat4());
    seen++;
  }
  scanner.close();
  assertEquals(rowCount, seen);
}
示例12: storageCompressionTest
/**
 * Writes 100k rows with the given compression codec (block compression for
 * sequence files), scans them back, and checks row counts, the CSV
 * splittability expectation for the codec, and that the compressed byte
 * count differs from the scanned byte count.
 */
private void storageCompressionTest(StoreType storeType, Class<? extends CompressionCodec> codec) throws IOException {
  Schema schema = new Schema();
  schema.addColumn("id", Type.INT4);
  schema.addColumn("age", Type.FLOAT4);
  schema.addColumn("name", Type.TEXT);

  TableMeta meta = CatalogUtil.newTableMeta(storeType);
  meta.putOption("compression.codec", codec.getCanonicalName());
  meta.putOption("compression.type", SequenceFile.CompressionType.BLOCK.name());
  meta.putOption("rcfile.serde", TextSerializerDeserializer.class.getName());
  meta.putOption("sequencefile.serde", TextSerializerDeserializer.class.getName());

  String fileName = "Compression_" + codec.getSimpleName();
  Path tablePath = new Path(testDir, fileName);

  Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
  appender.enableStats();
  appender.init();

  String extension = "";
  if (appender instanceof CSVFile.CSVAppender) {
    extension = ((CSVFile.CSVAppender) appender).getExtension();
  }

  final int rowCount = 100000;
  for (int i = 0; i < rowCount; i++) {
    VTuple row = new VTuple(3);
    row.put(0, DatumFactory.createInt4(i + 1));
    row.put(1, DatumFactory.createFloat4((float) i));
    row.put(2, DatumFactory.createText(String.valueOf(i)));
    appender.addTuple(row);
  }
  appender.close();

  TableStats stat = appender.getStats();
  assertEquals(rowCount, stat.getNumRows().longValue());

  tablePath = tablePath.suffix(extension);
  FileStatus status = fs.getFileStatus(tablePath);
  FileFragment[] tablets = { new FileFragment(fileName, tablePath, 0, status.getLen()) };

  Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, tablets[0], schema);
  if (StoreType.CSV == storeType) {
    // Only codecs implementing SplittableCompressionCodec allow split scans of CSV.
    if (SplittableCompressionCodec.class.isAssignableFrom(codec)) {
      assertTrue(scanner.isSplittable());
    } else {
      assertFalse(scanner.isSplittable());
    }
  }

  scanner.init();
  int scanned = 0;
  while (scanner.next() != null) {
    scanned++;
  }
  scanner.close();

  assertEquals(rowCount, scanned);
  assertNotSame(appender.getStats().getNumBytes().longValue(), scanner.getInputStats().getNumBytes().longValue());
  assertEquals(appender.getStats().getNumRows().longValue(), scanner.getInputStats().getNumRows().longValue());
}