This article collects typical usage examples of the Java class org.apache.flink.streaming.api.functions.source.SourceFunction. If you are unsure what the SourceFunction class does or how to use it, the curated examples below should help.
The SourceFunction class belongs to the org.apache.flink.streaming.api.functions.source package. 15 code examples are shown below, ordered by popularity.
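Before diving into the examples, here is a minimal, self-contained sketch of the SourceFunction contract (the class name SimpleCounterSource and its emission logic are illustrative only, not taken from the examples below): run(SourceContext) emits records until cancel() flips a flag from another thread, and emission that must be atomic with checkpointing happens under the checkpoint lock.

import org.apache.flink.streaming.api.functions.source.SourceFunction;

// Illustrative only: emits an increasing counter until the source is cancelled.
public class SimpleCounterSource implements SourceFunction<Long> {

    // volatile, because cancel() is called from a different thread than run()
    private volatile boolean running = true;

    @Override
    public void run(SourceContext<Long> ctx) throws Exception {
        long counter = 0;
        while (running) {
            // Hold the checkpoint lock so emission and checkpointing do not interleave.
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect(counter++);
            }
            Thread.sleep(100);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}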
Example 1: LogDataFetcher
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
public LogDataFetcher(SourceFunction.SourceContext<T> sourceContext,
                      RuntimeContext runtimeContext,
                      Properties configProps,
                      LogDeserializationSchema<T> deserializationSchema,
                      LogClientProxy logClient) {
    this.sourceContext = sourceContext;
    this.runtimeContext = runtimeContext;
    this.configProps = configProps;
    this.deserializationSchema = deserializationSchema;
    this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
    this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
    this.checkpointLock = sourceContext.getCheckpointLock();
    this.subscribedShardsState = new LinkedList<LogstoreShardState>();
    this.shardConsumersExecutor = createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
    this.error = new AtomicReference<Throwable>();
    this.logProject = configProps.getProperty(ConfigConstants.LOG_PROJECT);
    this.logStore = configProps.getProperty(ConfigConstants.LOG_LOGSTORE);
    this.logClient = logClient;
}
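The helper createShardConsumersThreadPool is not shown in this snippet. By analogy with the equivalent helper in Flink's Kinesis connector, a plausible sketch might look like the following (the body is an assumption, not the connector's actual code, and assumes the usual java.util.concurrent imports):

// Assumed helper: a cached thread pool whose threads carry the subtask name for debugging.
private static ExecutorService createShardConsumersThreadPool(final String subtaskName) {
    return Executors.newCachedThreadPool(new ThreadFactory() {
        private final AtomicLong threadCount = new AtomicLong(0);

        @Override
        public Thread newThread(Runnable runnable) {
            Thread thread = new Thread(runnable);
            thread.setName("shardConsumers-" + subtaskName + "-thread-" + threadCount.getAndIncrement());
            thread.setDaemon(true);
            return thread;
        }
    });
}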
Example 2: run
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
@Override
public void run(SourceFunction.SourceContext<model.LocalWeatherData> sourceContext) throws Exception {
    // The source needs to be Serializable, so the Paths are constructed here rather than in the constructor:
    final Path csvStationPath = FileSystems.getDefault().getPath(stationFilePath);
    final Path csvLocalWeatherDataPath = FileSystems.getDefault().getPath(localWeatherDataFilePath);

    // Get the Stream of LocalWeatherData elements in the CSV file:
    try (Stream<model.LocalWeatherData> stream = getLocalWeatherData(csvStationPath, csvLocalWeatherDataPath)) {
        // Use an iterator, since the SourceFunction has to break out of its main loop on cancellation:
        Iterator<model.LocalWeatherData> iterator = stream.iterator();
        // Stop emitting when the source function is cancelled by an external event:
        while (isRunning && iterator.hasNext()) {
            sourceContext.collect(iterator.next());
        }
    }
}
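The isRunning flag read by the loop implies a cancel() method elsewhere in the same class; a plausible counterpart (assumed, not part of the original snippet) would be:

@Override
public void cancel() {
    // Read by the emission loop in run(); the field should be declared volatile in the class.
    this.isRunning = false;
}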
Example 3: createProducerTopology
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
private void createProducerTopology(StreamExecutionEnvironment env, AMQSinkConfig<String> config) {
    DataStreamSource<String> stream = env.addSource(new SourceFunction<String>() {
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            for (int i = 0; i < MESSAGES_NUM; i++) {
                ctx.collect("amq-" + i);
            }
        }

        @Override
        public void cancel() {}
    });

    AMQSink<String> sink = new AMQSink<>(config);
    stream.addSink(sink);
}
Example 4: beforeTest
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
@Before
public void beforeTest() throws Exception {
    feederActorSystem = ActorSystem.create("feederActorSystem",
            getFeederActorConfig());
    sourceContext = new DummySourceContext();
    sourceThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                SourceFunction.SourceContext<Object> sourceContext =
                        new DummySourceContext();
                source.run(sourceContext);
            } catch (Exception e) {
                exception = e;
            }
        }
    });
}
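DummySourceContext is a test double defined elsewhere in this test class. A minimal sketch of what such a stub could look like, derived from the SourceFunction.SourceContext interface (the field names and collecting behavior are assumptions):

// Assumed test double: records emitted elements and otherwise does nothing.
private static class DummySourceContext implements SourceFunction.SourceContext<Object> {
    private final Object lock = new Object();
    private final List<Object> collected = new ArrayList<>();

    @Override
    public void collect(Object element) {
        collected.add(element);
    }

    @Override
    public void collectWithTimestamp(Object element, long timestamp) {
        collected.add(element);
    }

    @Override
    public void emitWatermark(Watermark mark) {
        // watermarks are ignored by this stub
    }

    @Override
    public void markAsTemporarilyIdle() {
        // idleness is ignored by this stub (method exists from Flink 1.3 on)
    }

    @Override
    public Object getCheckpointLock() {
        return lock;
    }

    @Override
    public void close() {
    }
}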
Example 5: main
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data for Flink")
            .requestBatchCount(5)
            .buildConfig();

    SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
    DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource).setParallelism(2);

    DataStream<String> dataStream = streamSource.map(new MapFunction<NiFiDataPacket, String>() {
        @Override
        public String map(NiFiDataPacket value) throws Exception {
            return new String(value.getContent(), Charset.defaultCharset());
        }
    });

    dataStream.print();
    env.execute();
}
Example 6: testKafkaConsumer
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
@Test
@SuppressWarnings("unchecked")
public void testKafkaConsumer() {
    KafkaTableSource.Builder b = getBuilder();
    configureBuilder(b);

    // assert that the table source creates the correct Kafka consumer
    KafkaTableSource observed = spy(b.build());
    StreamExecutionEnvironment env = mock(StreamExecutionEnvironment.class);
    when(env.addSource(any(SourceFunction.class))).thenReturn(mock(DataStreamSource.class));

    observed.getDataStream(env);

    verify(env).addSource(any(getFlinkKafkaConsumer()));
    verify(observed).getKafkaConsumer(
            eq(TOPIC),
            eq(PROPS),
            any(getDeserializationSchema()));
}
Example 7: KinesisDataFetcher
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
/**
 * Creates a Kinesis Data Fetcher.
 *
 * @param streams the streams to subscribe to
 * @param sourceContext context of the source function
 * @param runtimeContext this subtask's runtime context
 * @param configProps the consumer configuration properties
 * @param deserializationSchema deserialization schema
 */
public KinesisDataFetcher(List<String> streams,
                          SourceFunction.SourceContext<T> sourceContext,
                          RuntimeContext runtimeContext,
                          Properties configProps,
                          KinesisDeserializationSchema<T> deserializationSchema) {
    this(streams,
        sourceContext,
        sourceContext.getCheckpointLock(),
        runtimeContext,
        configProps,
        deserializationSchema,
        new AtomicReference<Throwable>(),
        new LinkedList<KinesisStreamShardState>(),
        createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
        KinesisProxy.create(configProps));
}
Example 8: fromCollection
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
/**
 * Creates a data stream from the given non-empty collection.
 *
 * <p>Note that this operation will result in a non-parallel data stream source,
 * i.e., a data stream source with parallelism one.
 *
 * @param data
 *        The collection of elements to create the data stream from
 * @param typeInfo
 *        The TypeInformation for the produced data stream
 * @param <OUT>
 *        The type of the returned data stream
 * @return The data stream representing the given collection
 */
public <OUT> DataStreamSource<OUT> fromCollection(Collection<OUT> data, TypeInformation<OUT> typeInfo) {
    Preconditions.checkNotNull(data, "Collection must not be null");

    // must not have null elements and mixed elements
    FromElementsFunction.checkCollection(data, typeInfo.getTypeClass());

    SourceFunction<OUT> function;
    try {
        function = new FromElementsFunction<>(typeInfo.createSerializer(getConfig()), data);
    }
    catch (IOException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
    return addSource(function, "Collection Source", typeInfo).setParallelism(1);
}
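A typical call site for this method (illustrative; assumes java.util.Arrays and org.apache.flink.api.common.typeinfo.BasicTypeInfo are imported):

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Produces a single-parallelism source that emits 1, 2, 3.
DataStreamSource<Integer> numbers =
        env.fromCollection(Arrays.asList(1, 2, 3), BasicTypeInfo.INT_TYPE_INFO);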
Example 9: doTestPropagationFromCheckpointConfig
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
public void doTestPropagationFromCheckpointConfig(boolean failTaskOnCheckpointErrors) throws Exception {
    StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    streamExecutionEnvironment.setParallelism(1);
    streamExecutionEnvironment.getCheckpointConfig().setCheckpointInterval(1000);
    streamExecutionEnvironment.getCheckpointConfig().setFailOnCheckpointingErrors(failTaskOnCheckpointErrors);

    streamExecutionEnvironment.addSource(new SourceFunction<Integer>() {
        @Override
        public void run(SourceContext<Integer> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    }).addSink(new DiscardingSink<>());

    StreamGraph streamGraph = streamExecutionEnvironment.getStreamGraph();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    SerializedValue<ExecutionConfig> serializedExecutionConfig = jobGraph.getSerializedExecutionConfig();
    ExecutionConfig executionConfig =
            serializedExecutionConfig.deserializeValue(Thread.currentThread().getContextClassLoader());

    Assert.assertEquals(failTaskOnCheckpointErrors, executionConfig.isFailTaskOnCheckpointError());
}
Example 10: checkUseFetcherWhenNoCheckpoint
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
/**
 * Tests that the consumer starts the fetcher directly when there is no restored checkpoint state.
 */
@Test
public void checkUseFetcherWhenNoCheckpoint() throws Exception {
    FlinkKafkaConsumerBase<String> consumer = getConsumer(null, new LinkedMap(), true);
    List<KafkaTopicPartition> partitionList = new ArrayList<>(1);
    partitionList.add(new KafkaTopicPartition("test", 0));
    consumer.setSubscribedPartitions(partitionList);

    OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
    TestingListState<Serializable> listState = new TestingListState<>();
    when(operatorStateStore.getSerializableListState(Matchers.any(String.class))).thenReturn(listState);

    StateInitializationContext initializationContext = mock(StateInitializationContext.class);
    when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore);

    // make the context signal that there is no restored state
    when(initializationContext.isRestored()).thenReturn(false);

    consumer.initializeState(initializationContext);
    consumer.run(mock(SourceFunction.SourceContext.class));
}
Example 11: testFetcherShouldNotBeRestoringFromFailureIfNotRestoringFromCheckpoint
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
@Test
@SuppressWarnings("unchecked")
public void testFetcherShouldNotBeRestoringFromFailureIfNotRestoringFromCheckpoint() throws Exception {
    KinesisDataFetcher mockedFetcher = Mockito.mock(KinesisDataFetcher.class);
    PowerMockito.whenNew(KinesisDataFetcher.class).withAnyArguments().thenReturn(mockedFetcher);

    // assume the given config is correct
    PowerMockito.mockStatic(KinesisConfigUtil.class);
    PowerMockito.doNothing().when(KinesisConfigUtil.class);

    TestableFlinkKinesisConsumer consumer = new TestableFlinkKinesisConsumer(
        "fakeStream", new Properties(), 10, 2);
    consumer.open(new Configuration());
    consumer.run(Mockito.mock(SourceFunction.SourceContext.class));

    Mockito.verify(mockedFetcher).setIsRestoringFromFailure(false);
}
Example 12: main
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
public static void main(String[] args) throws Exception {
    ParameterTool parameterTool = ParameterTool.fromArgs(args);

    if (parameterTool.getNumberOfParameters() < 2) {
        System.out.println("Missing parameters!");
        System.out.println("Usage: Kafka --topic <topic> --bootstrap.servers <kafka brokers>");
        return;
    }

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().disableSysoutLogging();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));

    // very simple data generator
    DataStream<String> messageStream = env.addSource(new SourceFunction<String>() {
        private static final long serialVersionUID = 6369260445318862378L;

        // volatile, because cancel() is invoked from a different thread than run()
        private volatile boolean running = true;

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            long i = 0;
            while (this.running) {
                ctx.collect("Element - " + i++);
                Thread.sleep(500);
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    });

    // write data into Kafka
    messageStream.addSink(new FlinkKafkaProducer08<>(parameterTool.getRequired("topic"), new SimpleStringSchema(), parameterTool.getProperties()));

    env.execute("Write into Kafka example");
}
Example 13: run
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
@Override
public void run(SourceFunction.SourceContext<TaxiRide> sourceContext) throws Exception {
    gzipStream = new GZIPInputStream(new FileInputStream(dataFilePath));
    reader = new BufferedReader(new InputStreamReader(gzipStream, "UTF-8"));

    generateUnorderedStream(sourceContext);

    this.reader.close();
    this.reader = null;
    this.gzipStream.close();
    this.gzipStream = null;
}
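Because the reader and gzipStream are fields rather than locals, cancellation can close them from another thread to break run() out of its loop. A matching cancel() for this source (assumed here; it follows the common pattern in Flink's training TaxiRide sources) might be:

@Override
public void cancel() {
    try {
        if (this.reader != null) {
            this.reader.close();
        }
        if (this.gzipStream != null) {
            this.gzipStream.close();
        }
    } catch (IOException ioe) {
        throw new RuntimeException("Could not cancel SourceFunction", ioe);
    } finally {
        this.reader = null;
        this.gzipStream = null;
    }
}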
Example 14: main
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
public static void main(String[] args) throws Exception {
    String propertiesFile = DEFAULT_PROPERTIES_FILE;
    if (args != null && args.length == 1 && args[0] != null) {
        propertiesFile = args[0];
    }

    WindowLogLevelCountProps props = new WindowLogLevelCountProps(propertiesFile);

    // Set up the execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Configure the SiteToSiteClient
    SiteToSiteClientConfig clientConfig = getSourceConfig(props);

    // Create our data stream with a NiFiSource
    SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
    DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource);

    int windowSize = props.getFlinkWindowMillis();
    LogLevelFlatMap logLevelFlatMap = new LogLevelFlatMap(props.getLogLevelAttribute());

    // Count the occurrences of each log level over a window
    DataStream<LogLevels> counts =
            streamSource.flatMap(logLevelFlatMap)
                    .timeWindowAll(Time.of(windowSize, TimeUnit.MILLISECONDS))
                    .apply(new LogLevelWindowCounter());

    // Add the sink to send the dictionary back to NiFi
    double rateThreshold = props.getFlinkRateThreshold();
    SiteToSiteClientConfig sinkConfig = getSinkConfig(props);
    NiFiDataPacketBuilder<LogLevels> builder = new DictionaryBuilder(windowSize, rateThreshold);
    counts.addSink(new NiFiSink<>(sinkConfig, builder));

    // execute program
    env.execute("WindowLogLevelCount");
}
Example 15: run
import org.apache.flink.streaming.api.functions.source.SourceFunction; // import the package/class this example depends on
@Override
public void run(SourceFunction.SourceContext<Object> ctx) throws Exception {
    LOG.info("Starting the Receiver actor {}", actorName);
    receiverActor = receiverActorSystem.actorOf(
        Props.create(classForActor, ctx, urlOfPublisher, autoAck), actorName);
    LOG.info("Started the Receiver actor {} successfully", actorName);

    receiverActorSystem.awaitTermination();
}