This article collects typical usage examples of the Java method org.apache.avro.generic.GenericData.setStringType. If you are wondering what GenericData.setStringType does and how to use it in practice, the curated examples below may help. You can also read further about the enclosing class, org.apache.avro.generic.GenericData.
Two code examples of GenericData.setStringType are shown below, both taken from test code that writes Avro key/value files and reads them back.
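Before looking at the examples, here is a minimal, self-contained sketch of what GenericData.setStringType actually does. It is not taken from the examples on this page; the class name StringTypeDemo and the "hello" value are made up for illustration. The call attaches the "avro.java.string" property to a STRING schema, which tells Avro's datum readers to decode that field as java.lang.String instead of org.apache.avro.util.Utf8.

import java.io.ByteArrayOutputStream;

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;

public class StringTypeDemo {
	public static void main(String[] args) throws Exception {
		Schema stringSchema = Schema.create(Schema.Type.STRING);

		// Encode a single string value with the plain STRING schema.
		ByteArrayOutputStream out = new ByteArrayOutputStream();
		BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
		new GenericDatumWriter<CharSequence>(stringSchema).write("hello", encoder);
		encoder.flush();

		// Without setStringType, a generic reader decodes STRING as org.apache.avro.util.Utf8.
		Object asUtf8 = new GenericDatumReader<Object>(stringSchema)
			.read(null, DecoderFactory.get().binaryDecoder(out.toByteArray(), null));
		System.out.println(asUtf8.getClass()); // class org.apache.avro.util.Utf8

		// setStringType adds the "avro.java.string" property to the schema, so the
		// same bytes now decode as java.lang.String.
		GenericData.setStringType(stringSchema, GenericData.StringType.String);
		Object asString = new GenericDatumReader<Object>(stringSchema)
			.read(null, DecoderFactory.get().binaryDecoder(out.toByteArray(), null));
		System.out.println(asString.getClass()); // class java.lang.String
	}
}

This String-versus-Utf8 behavior is what both examples below rely on when they read the written part files back and compare the values against String literals.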
Example 1: testNonRollingAvroKeyValueWithCompressionWriter
import org.apache.avro.generic.GenericData; // import the package/class the method depends on
/**
 * This tests {@link AvroKeyValueSinkWriter}
 * with non-rolling output and with compression.
 */
@Test
public void testNonRollingAvroKeyValueWithCompressionWriter() throws Exception {
	final String outPath = hdfsURI + "/avro-kv-no-comp-non-rolling-out";
	final int numElements = 20;

	Map<String, String> properties = new HashMap<>();
	Schema keySchema = Schema.create(Schema.Type.INT);
	Schema valueSchema = Schema.create(Schema.Type.STRING);
	properties.put(AvroKeyValueSinkWriter.CONF_OUTPUT_KEY_SCHEMA, keySchema.toString());
	properties.put(AvroKeyValueSinkWriter.CONF_OUTPUT_VALUE_SCHEMA, valueSchema.toString());
	properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS, String.valueOf(true));
	properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS_CODEC, DataFileConstants.SNAPPY_CODEC);

	BucketingSink<Tuple2<Integer, String>> sink = new BucketingSink<Tuple2<Integer, String>>(outPath)
		.setWriter(new AvroKeyValueSinkWriter<Integer, String>(properties))
		.setBucketer(new BasePathBucketer<Tuple2<Integer, String>>())
		.setPartPrefix(PART_PREFIX)
		.setPendingPrefix("")
		.setPendingSuffix("");

	OneInputStreamOperatorTestHarness<Tuple2<Integer, String>, Object> testHarness =
		createTestSink(sink, 1, 0);

	testHarness.setProcessingTime(0L);

	testHarness.setup();
	testHarness.open();

	for (int i = 0; i < numElements; i++) {
		testHarness.processElement(new StreamRecord<>(Tuple2.of(
			i, "message #" + Integer.toString(i)
		)));
	}

	testHarness.close();

	GenericData.setStringType(valueSchema, GenericData.StringType.String);
	Schema elementSchema = AvroKeyValueSinkWriter.AvroKeyValue.getSchema(keySchema, valueSchema);

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/" + PART_PREFIX + "-0-0"));
	SpecificDatumReader<GenericRecord> elementReader = new SpecificDatumReader<>(elementSchema);
	DataFileStream<GenericRecord> dataFileStream = new DataFileStream<>(inStream, elementReader);

	for (int i = 0; i < numElements; i++) {
		AvroKeyValueSinkWriter.AvroKeyValue<Integer, String> wrappedEntry =
			new AvroKeyValueSinkWriter.AvroKeyValue<>(dataFileStream.next());
		int key = wrappedEntry.getKey();
		Assert.assertEquals(i, key);
		String value = wrappedEntry.getValue();
		Assert.assertEquals("message #" + i, value);
	}

	dataFileStream.close();
	inStream.close();
}
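Note the order in this example: setStringType is applied to valueSchema before AvroKeyValue.getSchema builds the reader schema, so the value field of each record is decoded as java.lang.String. Without that call the reader would hand back org.apache.avro.util.Utf8 instances, and the assignment to String (and the Assert.assertEquals against a String literal) in the read-back loop would fail.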
Example 2: testUserDefinedConfiguration
import org.apache.avro.generic.GenericData; // import the package/class the method depends on
/**
 * This tests a user-defined HDFS configuration.
 * @throws Exception
 */
@Test
public void testUserDefinedConfiguration() throws Exception {
	final String outPath = hdfsURI + "/string-non-rolling-with-config";
	final int numElements = 20;

	Map<String, String> properties = new HashMap<>();
	Schema keySchema = Schema.create(Schema.Type.INT);
	Schema valueSchema = Schema.create(Schema.Type.STRING);
	properties.put(AvroKeyValueSinkWriter.CONF_OUTPUT_KEY_SCHEMA, keySchema.toString());
	properties.put(AvroKeyValueSinkWriter.CONF_OUTPUT_VALUE_SCHEMA, valueSchema.toString());
	properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS, String.valueOf(true));
	properties.put(AvroKeyValueSinkWriter.CONF_COMPRESS_CODEC, DataFileConstants.SNAPPY_CODEC);

	Configuration conf = new Configuration();
	conf.set("io.file.buffer.size", "40960");

	BucketingSink<Tuple2<Integer, String>> sink = new BucketingSink<Tuple2<Integer, String>>(outPath)
		.setFSConfig(conf)
		.setWriter(new StreamWriterWithConfigCheck<Integer, String>(properties, "io.file.buffer.size", "40960"))
		.setBucketer(new BasePathBucketer<Tuple2<Integer, String>>())
		.setPartPrefix(PART_PREFIX)
		.setPendingPrefix("")
		.setPendingSuffix("");

	OneInputStreamOperatorTestHarness<Tuple2<Integer, String>, Object> testHarness =
		createTestSink(sink, 1, 0);

	testHarness.setProcessingTime(0L);

	testHarness.setup();
	testHarness.open();

	for (int i = 0; i < numElements; i++) {
		testHarness.processElement(new StreamRecord<>(Tuple2.of(
			i, "message #" + Integer.toString(i)
		)));
	}

	testHarness.close();

	GenericData.setStringType(valueSchema, GenericData.StringType.String);
	Schema elementSchema = AvroKeyValueSinkWriter.AvroKeyValue.getSchema(keySchema, valueSchema);

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/" + PART_PREFIX + "-0-0"));
	SpecificDatumReader<GenericRecord> elementReader = new SpecificDatumReader<>(elementSchema);
	DataFileStream<GenericRecord> dataFileStream = new DataFileStream<>(inStream, elementReader);

	for (int i = 0; i < numElements; i++) {
		AvroKeyValueSinkWriter.AvroKeyValue<Integer, String> wrappedEntry =
			new AvroKeyValueSinkWriter.AvroKeyValue<>(dataFileStream.next());
		int key = wrappedEntry.getKey();
		Assert.assertEquals(i, key);
		String value = wrappedEntry.getValue();
		Assert.assertEquals("message #" + i, value);
	}

	dataFileStream.close();
	inStream.close();
}
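Both examples hand the key and value schemas to AvroKeyValueSinkWriter.AvroKeyValue.getSchema only after calling setStringType on the value schema. As a rough, self-contained illustration of why that works, the sketch below builds a similar key/value record schema by hand and prints it. The record name KeyValuePairSketch is invented for the sketch, and the "key"/"value" field layout simply follows the common Avro key-value convention rather than the exact schema the sink writer produces; the point is only that the avro.java.string property set by setStringType travels with the embedded value schema into the composite record.

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;

public class KeyValueSchemaSketch {
	public static void main(String[] args) {
		Schema keySchema = Schema.create(Schema.Type.INT);
		Schema valueSchema = Schema.create(Schema.Type.STRING);

		// Mark the value schema so readers decode its strings as java.lang.String.
		GenericData.setStringType(valueSchema, GenericData.StringType.String);

		// Pair the two schemas under "key" and "value" fields of a record.
		Schema pairSchema = SchemaBuilder.record("KeyValuePairSketch")
			.fields()
			.name("key").type(keySchema).noDefault()
			.name("value").type(valueSchema).noDefault()
			.endRecord();

		// The printed JSON shows "avro.java.string": "String" on the value field,
		// because the property set by setStringType is carried along with valueSchema.
		System.out.println(pairSchema.toString(true));
	}
}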