This article collects typical usage examples of the Java class Row from org.apache.flink.types. If you have been wondering what the Row class is for and how to use it, the curated examples below should help.
The Row class belongs to the org.apache.flink.types package. 15 code examples are shown below, ordered by popularity.
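Before the examples, a quick primer (a minimal sketch, not taken from the examples below): a Row is a fixed-arity record whose fields are accessed by position and are untyped at compile time.

// Minimal sketch of the org.apache.flink.types.Row API
Row row = new Row(3);                    // create a row with arity 3
row.setField(0, 42);                     // fields are set by position
row.setField(1, "hello");
row.setField(2, 3.14);
Object first = row.getField(0);          // read back by position (untyped)
Row literal = Row.of(42, "hello", 3.14); // static factory shorthand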
Example 1: testAggregationWithTwoCount
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testAggregationWithTwoCount() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    DataSource<Tuple2<Float, String>> input =
        env.fromElements(
            new Tuple2<>(1f, "Hello"),
            new Tuple2<>(2f, "Ciao"));

    Table table = tableEnv.fromDataSet(input);
    Table result = table.select("f0.count, f1.count");

    DataSet<Row> ds = tableEnv.toDataSet(result, Row.class);
    List<Row> results = ds.collect();
    String expected = "2,2";
    compareResultAsText(results, expected);
}
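Note: f0.count and f1.count each count the non-null values in their column, so two input rows yield "2,2".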
Example 2: getOutputTable
import org.apache.flink.types.Row; // import the required package/class
private AppendStreamTableSink<Row> getOutputTable(
        ExternalCatalogTable output) throws IOException {
    String tableType = output.tableType();
    DataSinkProvider c = DataSinkRegistry.getProvider(tableType);
    Preconditions.checkNotNull(c, "Cannot find output connectors for " + tableType);
    return c.getAppendStreamTableSink(output);
}
Example 3: testCompile
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testCompile() throws IOException {
    RowTypeInfo schema = new RowTypeInfo(
        new TypeInformation[] {BasicTypeInfo.INT_TYPE_INFO}, new String[] {"id"});
    MockExternalCatalogTable inputTable =
        new MockExternalCatalogTable(schema, Collections.singletonList(Row.of(1)));
    MockExternalCatalogTable outputTable =
        new MockExternalCatalogTable(schema, new ArrayList<>());
    SingleLevelMemoryCatalog input =
        new SingleLevelMemoryCatalog("input", Collections.singletonMap("foo", inputTable));
    SingleLevelMemoryCatalog output =
        new SingleLevelMemoryCatalog("output", Collections.singletonMap("bar", outputTable));
    JobDescriptor job = new JobDescriptor(
        Collections.singletonMap("input", input),
        Collections.emptyMap(),
        output,
        1,
        "SELECT * FROM input.foo");
    CompilationResult res = new ContainedExecutor().run(job);
    assertNull(res.remoteThrowable());
    assertNotNull(res.jobGraph());
}
Example 4: testInvalidSql
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testInvalidSql() throws IOException {
    RowTypeInfo schema = new RowTypeInfo(
        new TypeInformation[] {BasicTypeInfo.INT_TYPE_INFO}, new String[] {"id"});
    MockExternalCatalogTable inputTable =
        new MockExternalCatalogTable(schema, Collections.singletonList(Row.of(1)));
    MockExternalCatalogTable outputTable =
        new MockExternalCatalogTable(schema, new ArrayList<>());
    SingleLevelMemoryCatalog input =
        new SingleLevelMemoryCatalog("input", Collections.singletonMap("foo", inputTable));
    SingleLevelMemoryCatalog output =
        new SingleLevelMemoryCatalog("output", Collections.singletonMap("bar", outputTable));
    JobDescriptor job = new JobDescriptor(
        Collections.singletonMap("input", input),
        Collections.emptyMap(),
        output,
        1,
        "SELECT2 * FROM input.foo"); // intentionally malformed SQL
    CompilationResult res = new ContainedExecutor().run(job);
    assertNull(res.jobGraph());
    assertTrue(res.remoteThrowable() instanceof SqlParserException);
}
Example 5: testBatchTableSourceSQL
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testBatchTableSourceSQL() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    BatchTableSource csvTable = CommonTestData.getCsvTableSource();
    tableEnv.registerTableSource("persons", csvTable);

    Table result = tableEnv
        .sqlQuery("SELECT `last`, FLOOR(id), score * 2 FROM persons WHERE score < 20");

    DataSet<Row> resultSet = tableEnv.toDataSet(result, Row.class);
    List<Row> results = resultSet.collect();

    String expected = "Smith,1,24.6\n" +
        "Miller,3,15.78\n" +
        "Smith,4,0.24\n" +
        "Miller,6,13.56\n" +
        "Williams,8,4.68\n";

    compareResultAsText(results, expected);
}
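CommonTestData.getCsvTableSource() is test scaffolding; a hand-built equivalent could look like the following (a hedged sketch: the file path and column schema are assumptions inferred from the query above).

// Hedged sketch: building a CsvTableSource by hand (path and schema are assumptions)
CsvTableSource csvTable = CsvTableSource.builder()
    .path("/tmp/persons.csv")                        // hypothetical file location
    .field("first", BasicTypeInfo.STRING_TYPE_INFO)
    .field("id", BasicTypeInfo.INT_TYPE_INFO)
    .field("score", BasicTypeInfo.DOUBLE_TYPE_INFO)
    .field("last", BasicTypeInfo.STRING_TYPE_INFO)
    .build();
tableEnv.registerTableSource("persons", csvTable);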
Example 6: testNumericAutocastInArithmetic
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testNumericAutocastInArithmetic() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    DataSource<Tuple8<Byte, Short, Integer, Long, Float, Double, Long, Double>> input =
        env.fromElements(new Tuple8<>((byte) 1, (short) 1, 1, 1L, 1.0f, 1.0d, 1L, 1001.1));

    Table table = tableEnv.fromDataSet(input);

    Table result = table.select(
        "f0 + 1, f1 + 1, f2 + 1L, f3 + 1.0f, f4 + 1.0d, f5 + 1, f6 + 1.0d, f7 + f0");

    DataSet<Row> ds = tableEnv.toDataSet(result, Row.class);
    List<Row> results = ds.collect();
    String expected = "2,2,2,2.0,2.0,2.0,2.0,1002.1";
    compareResultAsText(results, expected);
}
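Note how each result is auto-cast to the wider operand type: the first three additions stay integral (byte + int, short + int, int + long print as 2), while any addition involving a float or double is widened to floating point, hence 2.0 and 1002.1.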
Example 7: testSimpleRegister
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testSimpleRegister() throws Exception {
    final String tableName = "MyTable";
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
    tableEnv.registerDataSet(tableName, ds);
    Table t = tableEnv.scan(tableName);

    Table result = t.select("f0, f1");

    DataSet<Row> resultSet = tableEnv.toDataSet(result, Row.class);
    List<Row> results = resultSet.collect();
    String expected = "1,1\n" + "2,2\n" + "3,2\n" + "4,3\n" + "5,3\n" + "6,3\n" + "7,4\n" +
        "8,4\n" + "9,4\n" + "10,4\n" + "11,5\n" + "12,5\n" + "13,5\n" + "14,5\n" + "15,5\n" +
        "16,6\n" + "17,6\n" + "18,6\n" + "19,6\n" + "20,6\n" + "21,6\n";
    compareResultAsText(results, expected);
}
Example 8: verifySplit
import org.apache.flink.types.Row; // import the required package/class
private void verifySplit(InputSplit split, int expectedIDSum) throws IOException {
    int sum = 0;
    Row row = new Row(5);
    jdbcInputFormat.open(split);
    while (!jdbcInputFormat.reachedEnd()) {
        row = jdbcInputFormat.nextRecord(row);
        int id = (int) row.getField(0);
        int testDataIndex = id - 1001;
        assertEquals(TEST_DATA[testDataIndex], row);
        sum += id;
    }
    Assert.assertEquals(expectedIDSum, sum);
}
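The jdbcInputFormat field used above is built elsewhere in the test; a typical construction looks like the following (a hedged sketch: the driver, URL, query, and column types are placeholders, not taken from the original test).

// Hedged sketch: how a JDBCInputFormat producing Rows is typically built
RowTypeInfo rowTypeInfo = new RowTypeInfo(
    BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO,
    BasicTypeInfo.DOUBLE_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO); // 5 columns, matching new Row(5)
JDBCInputFormat jdbcInputFormat = JDBCInputFormat.buildJDBCInputFormat()
    .setDrivername("org.apache.derby.jdbc.EmbeddedDriver") // assumed driver
    .setDBUrl("jdbc:derby:memory:testdb")                  // assumed URL
    .setQuery("SELECT * FROM books")                       // assumed query
    .setRowTypeInfo(rowTypeInfo)
    .finish();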
Example 9: testMap
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testMap() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    List<Tuple2<Integer, Map<String, String>>> rows = new ArrayList<>();
    rows.add(new Tuple2<>(1, Collections.singletonMap("foo", "bar")));
    rows.add(new Tuple2<>(2, Collections.singletonMap("foo", "spam")));

    TypeInformation<Tuple2<Integer, Map<String, String>>> ty = new TupleTypeInfo<>(
        BasicTypeInfo.INT_TYPE_INFO,
        new MapTypeInfo<>(BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO));

    DataSet<Tuple2<Integer, Map<String, String>>> ds1 = env.fromCollection(rows, ty);
    tableEnv.registerDataSet("t1", ds1, "a, b");

    String sqlQuery = "SELECT b['foo'] FROM t1";
    Table result = tableEnv.sqlQuery(sqlQuery);

    DataSet<Row> resultSet = tableEnv.toDataSet(result, Row.class);
    List<Row> results = resultSet.collect();
    String expected = "bar\n" + "spam\n";
    compareResultAsText(results, expected);
}
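Registering column b with a MapTypeInfo is what enables the SQL bracket syntax b['foo'] for key lookup in the query above.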
Example 10: testAsFromTupleByName
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testAsFromTupleByName() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    Table table = tableEnv.fromDataSet(CollectionDataSets.get3TupleDataSet(env), "f2");

    DataSet<Row> ds = tableEnv.toDataSet(table, Row.class);
    List<Row> results = ds.collect();
    String expected = "Hi\n" + "Hello\n" + "Hello world\n" +
        "Hello world, how are you?\n" + "I am fine.\n" + "Luke Skywalker\n" +
        "Comment#1\n" + "Comment#2\n" + "Comment#3\n" + "Comment#4\n" +
        "Comment#5\n" + "Comment#6\n" + "Comment#7\n" +
        "Comment#8\n" + "Comment#9\n" + "Comment#10\n" +
        "Comment#11\n" + "Comment#12\n" + "Comment#13\n" +
        "Comment#14\n" + "Comment#15\n";
    compareResultAsText(results, expected);
}
Example 11: testAsFromPrivateFieldsPojo
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testAsFromPrivateFieldsPojo() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    List<PrivateSmallPojo> data = new ArrayList<>();
    data.add(new PrivateSmallPojo("Peter", 28, 4000.00, "Sales"));
    data.add(new PrivateSmallPojo("Anna", 56, 10000.00, "Engineering"));
    data.add(new PrivateSmallPojo("Lucy", 42, 6000.00, "HR"));

    Table table = tableEnv
        .fromDataSet(env.fromCollection(data),
            "department AS a, " +
            "age AS b, " +
            "salary AS c, " +
            "name AS d")
        .select("a, b, c, d");

    DataSet<Row> ds = tableEnv.toDataSet(table, Row.class);
    List<Row> results = ds.collect();
    String expected =
        "Sales,28,4000.0,Peter\n" +
        "Engineering,56,10000.0,Anna\n" +
        "HR,42,6000.0,Lucy\n";
    compareResultAsText(results, expected);
}
Example 12: testIncompatibleTypes
import org.apache.flink.types.Row; // import the required package/class
@Test(expected = RuntimeException.class)
public void testIncompatibleTypes() throws IOException {
    jdbcOutputFormat = JDBCOutputFormat.buildJDBCOutputFormat()
        .setDrivername(DRIVER_CLASS)
        .setDBUrl(DB_URL)
        .setQuery(String.format(INSERT_TEMPLATE, INPUT_TABLE))
        .finish();
    jdbcOutputFormat.open(0, 1);

    Row row = new Row(5);
    row.setField(0, 4);
    row.setField(1, "hello");
    row.setField(2, "world");
    row.setField(3, 0.99);
    row.setField(4, "imthewrongtype");

    jdbcOutputFormat.writeRecord(row);
    jdbcOutputFormat.close();
}
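The String "imthewrongtype" in field 4 is what triggers the expected RuntimeException: presumably the fifth placeholder in INSERT_TEMPLATE maps to a non-string column, so the write is rejected.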
Example 13: testIntegerBiggerThan128
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testIntegerBiggerThan128() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    DataSet<Tuple3<Integer, Long, String>> input =
        env.fromElements(new Tuple3<>(300, 1L, "Hello"));
    Table table = tableEnv.fromDataSet(input, "a, b, c");

    Table result = table.filter("a = 300 ");

    DataSet<Row> ds = tableEnv.toDataSet(result, Row.class);
    List<Row> results = ds.collect();
    String expected = "300,1,Hello\n";
    compareResultAsText(results, expected);
}
Example 14: testWorkingAggregationDataTypes
import org.apache.flink.types.Row; // import the required package/class
@Test
public void testWorkingAggregationDataTypes() throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    BatchTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env, config());

    DataSource<Tuple7<Byte, Short, Integer, Long, Float, Double, String>> input =
        env.fromElements(
            new Tuple7<>((byte) 1, (short) 1, 1, 1L, 1.0f, 1.0d, "Hello"),
            new Tuple7<>((byte) 2, (short) 2, 2, 2L, 2.0f, 2.0d, "Ciao"));

    Table table = tableEnv.fromDataSet(input);

    Table result = table.select("f0.avg, f1.avg, f2.avg, f3.avg, f4.avg, f5.avg, f6.count");

    DataSet<Row> ds = tableEnv.toDataSet(result, Row.class);
    List<Row> results = ds.collect();
    String expected = "1,1,1,1,1.5,1.5,2";
    compareResultAsText(results, expected);
}
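Note how avg preserves the input type: the byte, short, int, and long columns truncate to 1 under integer division, while the float and double columns yield 1.5.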
Example 15: addSink
import org.apache.flink.types.Row; // import the required package/class
/**
 * Writes a DataStream into a Cassandra database.
 *
 * @param input input DataStream
 * @param <IN> input type
 * @return CassandraSinkBuilder, to further configure the sink
 */
public static <IN> CassandraSinkBuilder<IN> addSink(DataStream<IN> input) {
    TypeInformation<IN> typeInfo = input.getType();
    if (typeInfo instanceof TupleTypeInfo) {
        DataStream<Tuple> tupleInput = (DataStream<Tuple>) input;
        return (CassandraSinkBuilder<IN>) new CassandraTupleSinkBuilder<>(
            tupleInput, tupleInput.getType(),
            tupleInput.getType().createSerializer(tupleInput.getExecutionEnvironment().getConfig()));
    }
    if (typeInfo instanceof RowTypeInfo) {
        DataStream<Row> rowInput = (DataStream<Row>) input;
        return (CassandraSinkBuilder<IN>) new CassandraRowSinkBuilder(
            rowInput, rowInput.getType(),
            rowInput.getType().createSerializer(rowInput.getExecutionEnvironment().getConfig()));
    }
    if (typeInfo instanceof PojoTypeInfo) {
        return new CassandraPojoSinkBuilder<>(
            input, input.getType(),
            input.getType().createSerializer(input.getExecutionEnvironment().getConfig()));
    }
    if (typeInfo instanceof CaseClassTypeInfo) {
        DataStream<Product> productInput = (DataStream<Product>) input;
        return (CassandraSinkBuilder<IN>) new CassandraScalaProductSinkBuilder<>(
            productInput, productInput.getType(),
            productInput.getType().createSerializer(input.getExecutionEnvironment().getConfig()));
    }
    throw new IllegalArgumentException(
        "No support for the type of the given DataStream: " + input.getType());
}