This article collects and summarizes typical usage examples of the Java class org.apache.samza.config.JobCoordinatorConfig. If you are unsure what JobCoordinatorConfig is for or how to use it, the curated code examples below should help.
The JobCoordinatorConfig class belongs to the org.apache.samza.config package. Twelve code examples of the class are shown below, sorted by popularity by default.
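Before the individual examples, here is a minimal sketch of the pattern most of them share: build a Config, wrap it in JobCoordinatorConfig, and read back the configured job coordinator factory. The key JOB_COORDINATOR_FACTORY and the getter getJobCoordinatorFactoryClassName() both appear in the examples below; the enclosing class, the main method, and the choice of ZkJobCoordinatorFactory as the value are illustrative assumptions.

import java.util.HashMap;
import java.util.Map;
import org.apache.samza.config.Config;
import org.apache.samza.config.JobCoordinatorConfig;
import org.apache.samza.config.MapConfig;

public class JobCoordinatorConfigSketch {
  public static void main(String[] args) {
    Map<String, String> map = new HashMap<>();
    // The same key the examples populate, here with the ZooKeeper-based factory used in Example 7.
    map.put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, "org.apache.samza.zk.ZkJobCoordinatorFactory");
    Config config = new MapConfig(map);
    JobCoordinatorConfig jcConfig = new JobCoordinatorConfig(config);
    // Read back the configured factory class name, as Examples 3 and 6 do before
    // instantiating the factory reflectively.
    System.out.println(jcConfig.getJobCoordinatorFactoryClassName());
  }
}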
Example 1: buildStreamApplicationConfigMap
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
private Map<String, String> buildStreamApplicationConfigMap(String systemName, String inputTopic,
    String appName, String appId) {
  Map<String, String> samzaContainerConfig = ImmutableMap.<String, String>builder()
      .put(TaskConfig.INPUT_STREAMS(), inputTopic)
      .put(JobConfig.JOB_DEFAULT_SYSTEM(), systemName)
      .put(TaskConfig.IGNORED_EXCEPTIONS(), "*")
      .put(ZkConfig.ZK_CONNECT, zkConnect())
      .put(JobConfig.SSP_GROUPER_FACTORY(), TEST_SSP_GROUPER_FACTORY)
      .put(TaskConfig.GROUPER_FACTORY(), TEST_TASK_GROUPER_FACTORY)
      .put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, TEST_JOB_COORDINATOR_FACTORY)
      .put(ApplicationConfig.APP_NAME, appName)
      .put(ApplicationConfig.APP_ID, appId)
      .put(String.format("systems.%s.samza.factory", systemName), TEST_SYSTEM_FACTORY)
      .put(JobConfig.JOB_NAME(), appName)
      .put(JobConfig.JOB_ID(), appId)
      .put(TaskConfigJava.TASK_SHUTDOWN_MS, TASK_SHUTDOWN_MS)
      .put(JobConfig.JOB_DEBOUNCE_TIME_MS(), JOB_DEBOUNCE_TIME_MS)
      .build();
  Map<String, String> applicationConfig = Maps.newHashMap(samzaContainerConfig);
  applicationConfig.putAll(StandaloneTestUtils.getKafkaSystemConfigs(
      systemName, bootstrapServers(), zkConnect(), null, StandaloneTestUtils.SerdeAlias.STRING, true));
  return applicationConfig;
}
Example 2: getBaseJobConfig
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
private Map<String, String> getBaseJobConfig() {
  Map<String, String> configs = new HashMap<>();
  configs.put("systems.test.samza.factory", ArraySystemFactory.class.getName());
  configs.put(JobConfig.JOB_NAME(), "test-table-job");
  configs.put(JobConfig.PROCESSOR_ID(), "1");
  configs.put(JobCoordinatorConfig.JOB_COORDINATION_UTILS_FACTORY, PassthroughCoordinationUtilsFactory.class.getName());
  configs.put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, PassthroughJobCoordinatorFactory.class.getName());
  configs.put(TaskConfig.GROUPER_FACTORY(), SingleContainerGrouperFactory.class.getName());
  // For intermediate streams
  configs.put("systems.kafka.samza.factory", "org.apache.samza.system.kafka.KafkaSystemFactory");
  configs.put("systems.kafka.producer.bootstrap.servers", bootstrapUrl());
  configs.put("systems.kafka.consumer.zookeeper.connect", zkConnect());
  configs.put("systems.kafka.samza.key.serde", "int");
  configs.put("systems.kafka.samza.msg.serde", "json");
  configs.put("systems.kafka.default.stream.replication.factor", "1");
  configs.put("job.default.system", "kafka");
  configs.put("serializers.registry.int.class", "org.apache.samza.serializers.IntegerSerdeFactory");
  configs.put("serializers.registry.json.class", PageViewJsonSerdeFactory.class.getName());
  return configs;
}
Example 3: getJobCoordinator
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
JobCoordinator getJobCoordinator() {
  return Util.<JobCoordinatorFactory>getObj(
          new JobCoordinatorConfig(config).getJobCoordinatorFactoryClassName())
      .getJobCoordinator(config);
}
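Example 6 below applies this same reflective lookup to build the JobCoordinator that is handed to a StreamProcessor, so whichever factory is configured under JobCoordinatorConfig.JOB_COORDINATOR_FACTORY ultimately determines how processors coordinate.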
Example 4: createStreams
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
/**
 * Create intermediate streams using {@link org.apache.samza.execution.StreamManager}.
 * If {@link CoordinationUtils} is provided, this function will first invoke leader election, and the leader
 * will create the streams. All the runner processes will wait on a latch that is released after the leader
 * finishes stream creation.
 * @param planId a unique identifier representing the plan, used for coordination purposes
 * @param intStreams list of intermediate {@link StreamSpec}s
 * @throws TimeoutException exception for latch timeout
 */
/* package private */ void createStreams(String planId, List<StreamSpec> intStreams) throws TimeoutException {
  if (intStreams.isEmpty()) {
    LOG.info("Set of intermediate streams is empty. Nothing to create.");
    return;
  }
  LOG.info("A single processor must create the intermediate streams. Processor {} will attempt to acquire the lock.", uid);
  // The coordination utils are scoped to stream creation to avoid long-idle connections;
  // see SAMZA-1385 for details.
  JobCoordinatorConfig jcConfig = new JobCoordinatorConfig(config);
  String coordinationId = new ApplicationConfig(config).getGlobalAppId() + APPLICATION_RUNNER_PATH_SUFFIX;
  CoordinationUtils coordinationUtils =
      jcConfig.getCoordinationUtilsFactory().getCoordinationUtils(coordinationId, uid, config);
  if (coordinationUtils == null) {
    LOG.warn("Processor {} failed to create coordination utils. Each processor will attempt to create the streams.", uid);
    // Every application process will try to create the streams, which
    // requires stream creation to be idempotent.
    getStreamManager().createStreams(intStreams);
    return;
  }
  DistributedLockWithState lockWithState = coordinationUtils.getLockWithState(planId);
  try {
    // Check whether this processor needs to go through leader election and stream creation.
    if (lockWithState.lockIfNotSet(1000, TimeUnit.MILLISECONDS)) {
      LOG.info("Lock acquired for stream creation by {}", uid);
      getStreamManager().createStreams(intStreams);
      lockWithState.unlockAndSet();
    } else {
      LOG.info("Processor {} did not obtain the lock for stream creation. The streams must have been created by another processor.", uid);
    }
  } catch (TimeoutException e) {
    // Note: String.format requires %s (SLF4J-style {} placeholders are not expanded here).
    String msg = String.format("Processor %s failed to get the lock for stream initialization", uid);
    throw new SamzaException(msg, e);
  } finally {
    coordinationUtils.close();
  }
}
Example 5: testStreamCreation
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
@Test
public void testStreamCreation() throws Exception {
  Map<String, String> config = new HashMap<>();
  LocalApplicationRunner runner = spy(new LocalApplicationRunner(new MapConfig(config)));
  StreamApplication app = mock(StreamApplication.class);
  doNothing().when(app).init(anyObject(), anyObject());
  StreamManager streamManager = mock(StreamManager.class);
  doReturn(streamManager).when(runner).getStreamManager();
  ExecutionPlan plan = mock(ExecutionPlan.class);
  when(plan.getIntermediateStreams()).thenReturn(
      Collections.singletonList(new StreamSpec("test-stream", "test-stream", "test-system")));
  when(plan.getPlanAsJson()).thenReturn("");
  doReturn(plan).when(runner).getExecutionPlan(any(), any());
  CoordinationUtilsFactory coordinationUtilsFactory = mock(CoordinationUtilsFactory.class);
  JobCoordinatorConfig mockJcConfig = mock(JobCoordinatorConfig.class);
  when(mockJcConfig.getCoordinationUtilsFactory()).thenReturn(coordinationUtilsFactory);
  PowerMockito.whenNew(JobCoordinatorConfig.class).withAnyArguments().thenReturn(mockJcConfig);
  try {
    runner.run(app);
  } catch (Throwable t) {
    assertNotNull(t); // no jobs exception
  }
  ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
  verify(streamManager).createStreams(captor.capture());
  List<StreamSpec> streamSpecs = captor.getValue();
  assertEquals(streamSpecs.size(), 1);
  assertEquals(streamSpecs.get(0).getId(), "test-stream");
}
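A caveat on the mocking above: PowerMockito.whenNew intercepts the constructor only when the test runs with the PowerMock runner and the class that actually calls new JobCoordinatorConfig(...) (here LocalApplicationRunner) is listed in @PrepareForTest. Those annotations sit on the enclosing test class and are not shown in this snippet; the same applies to Example 10 below.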
Example 6: createStreamProcessor
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
protected StreamProcessor createStreamProcessor(final String pId, Map<String, String> map,
    final CountDownLatch waitStart, final CountDownLatch waitStop) {
  map.put(ApplicationConfig.PROCESSOR_ID, pId);
  Config config = new MapConfig(map);
  JobCoordinator jobCoordinator =
      Util.<JobCoordinatorFactory>getObj(new JobCoordinatorConfig(config).getJobCoordinatorFactoryClassName())
          .getJobCoordinator(config);
  StreamProcessorLifecycleListener listener = new StreamProcessorLifecycleListener() {
    @Override
    public void onStart() {
      if (waitStart != null) {
        waitStart.countDown();
      }
      LOG.info("onStart is called for pid=" + pId);
    }

    @Override
    public void onShutdown() {
      if (waitStop != null) {
        waitStop.countDown();
      }
      LOG.info("onShutdown is called for pid=" + pId);
    }

    @Override
    public void onFailure(Throwable t) {
      LOG.info("onFailure is called for pid=" + pId);
    }
  };
  StreamProcessor processor =
      new StreamProcessor(config, new HashMap<>(), (StreamTaskFactory) TestStreamTask::new, listener, jobCoordinator);
  return processor;
}
Example 7: createConfigs
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
protected Map<String, String> createConfigs(String testSystem, String inputTopic, String outputTopic,
    int messageCount) {
  Map<String, String> configs = new HashMap<>();
  configs.putAll(StandaloneTestUtils
      .getStandaloneConfigs("test-job", "org.apache.samza.processor.TestZkStreamProcessor.TestStreamTask"));
  configs.putAll(StandaloneTestUtils
      .getKafkaSystemConfigs(testSystem, bootstrapServers(), zkConnect(), null, StandaloneTestUtils.SerdeAlias.STRING,
          true));
  configs.put("task.inputs", String.format("%s.%s", testSystem, inputTopic));
  configs.put("app.messageCount", String.valueOf(messageCount));
  configs.put("app.outputTopic", outputTopic);
  configs.put("app.outputSystem", testSystem);
  configs.put(ZkConfig.ZK_CONNECT, zkConnect());
  configs.put("job.systemstreampartition.grouper.factory",
      "org.apache.samza.container.grouper.stream.GroupByPartitionFactory");
  configs.put("task.name.grouper.factory", "org.apache.samza.container.grouper.task.GroupByContainerIdsFactory");
  configs.put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, "org.apache.samza.zk.ZkJobCoordinatorFactory");
  configs.put(TaskConfigJava.TASK_SHUTDOWN_MS, TASK_SHUTDOWN_MS);
  configs.put(JobConfig.JOB_DEBOUNCE_TIME_MS(), JOB_DEBOUNCE_TIME_MS);
  configs.put(ZkConfig.ZK_CONSENSUS_TIMEOUT_MS, BARRIER_TIMEOUT_MS);
  configs.put(ZkConfig.ZK_SESSION_TIMEOUT_MS, ZK_SESSION_TIMEOUT_MS);
  configs.put(ZkConfig.ZK_CONNECTION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS);
  return configs;
}
Example 8: getStandaloneConfigs
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
public static Map<String, String> getStandaloneConfigs(final String jobName, final String taskClass) {
  return new HashMap<String, String>() {
    {
      put(JOB_NAME, jobName);
      put(TASK_CLASS, taskClass);
      put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, PASSTHROUGH_JOB_COORDINATOR_FACTORY);
      put(SSP_GROUPER_FACTORY, STANDALONE_SSP_GROUPER_FACTORY);
      put(TASK_NAME_GROUPER_FACTORY, STANDALONE_TASK_NAME_GROUPER_FACTORY);
    }
  };
}
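Example 7 above shows the typical way these standalone defaults are consumed: the returned map is merged with the Kafka system configs from StandaloneTestUtils.getKafkaSystemConfigs before the processor is started.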
Example 9: fetchSamzaSqlConfig
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
public static Map<String, String> fetchSamzaSqlConfig() {
  HashMap<String, String> staticConfigs = new HashMap<>();
  staticConfigs.put(JobConfig.JOB_NAME(), "sql-job");
  staticConfigs.put(JobConfig.PROCESSOR_ID(), "1");
  staticConfigs.put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, PassthroughJobCoordinatorFactory.class.getName());
  staticConfigs.put(TaskConfig.GROUPER_FACTORY(), SingleContainerGrouperFactory.class.getName());
  staticConfigs.put(SamzaSqlApplicationConfig.CFG_SOURCE_RESOLVER, "config");
  String configSourceResolverDomain =
      String.format(SamzaSqlApplicationConfig.CFG_FMT_SOURCE_RESOLVER_DOMAIN, "config");
  staticConfigs.put(configSourceResolverDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      ConfigBasedSourceResolverFactory.class.getName());
  staticConfigs.put(SamzaSqlApplicationConfig.CFG_UDF_RESOLVER, "config");
  String configUdfResolverDomain = String.format(SamzaSqlApplicationConfig.CFG_FMT_UDF_RESOLVER_DOMAIN, "config");
  staticConfigs.put(configUdfResolverDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      ConfigBasedUdfResolver.class.getName());
  staticConfigs.put(configUdfResolverDomain + ConfigBasedUdfResolver.CFG_UDF_CLASSES,
      Joiner.on(",").join(RegexMatchUdf.class.getName(), FlattenUdf.class.getName()));
  staticConfigs.put("serializers.registry.string.class", StringSerdeFactory.class.getName());
  staticConfigs.put("serializers.registry.avro.class", AvroSerDeFactory.class.getName());
  staticConfigs.put(AvroSerDeFactory.CFG_AVRO_SCHEMA, ProfileChangeEvent.SCHEMA$.toString());
  String kafkaSystemConfigPrefix =
      String.format(ConfigBasedSourceResolverFactory.CFG_FMT_SAMZA_PREFIX, SAMZA_SYSTEM_KAFKA);
  String avroSamzaSqlConfigPrefix = configSourceResolverDomain + String.format("%s.", SAMZA_SYSTEM_KAFKA);
  staticConfigs.put(kafkaSystemConfigPrefix + "samza.factory", KafkaSystemFactory.class.getName());
  staticConfigs.put(kafkaSystemConfigPrefix + "samza.key.serde", "string");
  staticConfigs.put(kafkaSystemConfigPrefix + "samza.msg.serde", "avro");
  staticConfigs.put(kafkaSystemConfigPrefix + "consumer.zookeeper.connect", "localhost:2181");
  staticConfigs.put(kafkaSystemConfigPrefix + "producer.bootstrap.servers", "localhost:9092");
  staticConfigs.put(kafkaSystemConfigPrefix + "samza.offset.reset", "true");
  staticConfigs.put(kafkaSystemConfigPrefix + "samza.offset.default", "oldest");
  staticConfigs.put(avroSamzaSqlConfigPrefix + SqlSystemStreamConfig.CFG_SAMZA_REL_CONVERTER, "avro");
  staticConfigs.put(avroSamzaSqlConfigPrefix + SqlSystemStreamConfig.CFG_REL_SCHEMA_PROVIDER, "config");
  String logSystemConfigPrefix =
      String.format(ConfigBasedSourceResolverFactory.CFG_FMT_SAMZA_PREFIX, SAMZA_SYSTEM_LOG);
  String logSamzaSqlConfigPrefix = configSourceResolverDomain + String.format("%s.", SAMZA_SYSTEM_LOG);
  staticConfigs.put(logSystemConfigPrefix + "samza.factory", ConsoleLoggingSystemFactory.class.getName());
  staticConfigs.put(logSamzaSqlConfigPrefix + SqlSystemStreamConfig.CFG_SAMZA_REL_CONVERTER, "json");
  staticConfigs.put(logSamzaSqlConfigPrefix + SqlSystemStreamConfig.CFG_REL_SCHEMA_PROVIDER, "config");
  String avroSamzaToRelMsgConverterDomain =
      String.format(SamzaSqlApplicationConfig.CFG_FMT_SAMZA_REL_CONVERTER_DOMAIN, "avro");
  staticConfigs.put(avroSamzaToRelMsgConverterDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      AvroSchemaGenRelConverterFactory.class.getName());
  String jsonSamzaToRelMsgConverterDomain =
      String.format(SamzaSqlApplicationConfig.CFG_FMT_SAMZA_REL_CONVERTER_DOMAIN, "json");
  staticConfigs.put(jsonSamzaToRelMsgConverterDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      JsonRelConverterFactory.class.getName());
  String configAvroRelSchemaProviderDomain =
      String.format(SamzaSqlApplicationConfig.CFG_FMT_REL_SCHEMA_PROVIDER_DOMAIN, "config");
  staticConfigs.put(configAvroRelSchemaProviderDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      ConfigBasedAvroRelSchemaProviderFactory.class.getName());
  staticConfigs.put(
      configAvroRelSchemaProviderDomain + String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA,
          "kafka", "PageViewStream"), PageViewEvent.SCHEMA$.toString());
  staticConfigs.put(
      configAvroRelSchemaProviderDomain + String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA,
          "kafka", "ProfileChangeStream"), ProfileChangeEvent.SCHEMA$.toString());
  return staticConfigs;
}
Example 10: testStreamCreationWithCoordination
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
@Test
public void testStreamCreationWithCoordination() throws Exception {
  Map<String, String> config = new HashMap<>();
  LocalApplicationRunner localRunner = new LocalApplicationRunner(new MapConfig(config));
  LocalApplicationRunner runner = spy(localRunner);
  StreamApplication app = mock(StreamApplication.class);
  doNothing().when(app).init(anyObject(), anyObject());
  StreamManager streamManager = mock(StreamManager.class);
  doReturn(streamManager).when(runner).getStreamManager();
  ExecutionPlan plan = mock(ExecutionPlan.class);
  when(plan.getIntermediateStreams()).thenReturn(
      Collections.singletonList(new StreamSpec("test-stream", "test-stream", "test-system")));
  when(plan.getPlanAsJson()).thenReturn("");
  doReturn(plan).when(runner).getExecutionPlan(any(), any());
  CoordinationUtils coordinationUtils = mock(CoordinationUtils.class);
  CoordinationUtilsFactory coordinationUtilsFactory = mock(CoordinationUtilsFactory.class);
  JobCoordinatorConfig mockJcConfig = mock(JobCoordinatorConfig.class);
  when(mockJcConfig.getCoordinationUtilsFactory()).thenReturn(coordinationUtilsFactory);
  PowerMockito.whenNew(JobCoordinatorConfig.class).withAnyArguments().thenReturn(mockJcConfig);
  DistributedLockWithState lock = mock(DistributedLockWithState.class);
  when(lock.lockIfNotSet(anyLong(), anyObject())).thenReturn(true);
  when(coordinationUtils.getLockWithState(anyString())).thenReturn(lock);
  when(coordinationUtilsFactory.getCoordinationUtils(anyString(), anyString(), anyObject()))
      .thenReturn(coordinationUtils);
  try {
    runner.run(app);
  } catch (Throwable t) {
    assertNotNull(t); // no jobs exception
  }
  ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
  verify(streamManager).createStreams(captor.capture());
  List<StreamSpec> streamSpecs = captor.getValue();
  assertEquals(streamSpecs.size(), 1);
  assertEquals(streamSpecs.get(0).getId(), "test-stream");
}
Example 11: fetchStaticConfigsWithFactories
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
public static Map<String, String> fetchStaticConfigsWithFactories(int numberOfMessages) {
  HashMap<String, String> staticConfigs = new HashMap<>();
  staticConfigs.put(JobConfig.JOB_NAME(), "sql-job");
  staticConfigs.put(JobConfig.PROCESSOR_ID(), "1");
  staticConfigs.put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, PassthroughJobCoordinatorFactory.class.getName());
  staticConfigs.put(TaskConfig.GROUPER_FACTORY(), SingleContainerGrouperFactory.class.getName());
  staticConfigs.put(SamzaSqlApplicationConfig.CFG_SOURCE_RESOLVER, "config");
  String configSourceResolverDomain =
      String.format(SamzaSqlApplicationConfig.CFG_FMT_SOURCE_RESOLVER_DOMAIN, "config");
  staticConfigs.put(configSourceResolverDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      TestSourceResolverFactory.class.getName());
  staticConfigs.put(SamzaSqlApplicationConfig.CFG_UDF_RESOLVER, "config");
  String configUdfResolverDomain = String.format(SamzaSqlApplicationConfig.CFG_FMT_UDF_RESOLVER_DOMAIN, "config");
  staticConfigs.put(configUdfResolverDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      ConfigBasedUdfResolver.class.getName());
  staticConfigs.put(configUdfResolverDomain + ConfigBasedUdfResolver.CFG_UDF_CLASSES, Joiner.on(",")
      .join(MyTestUdf.class.getName(), RegexMatchUdf.class.getName(), FlattenUdf.class.getName(),
          MyTestArrayUdf.class.getName()));
  String avroSystemConfigPrefix =
      String.format(ConfigBasedSourceResolverFactory.CFG_FMT_SAMZA_PREFIX, SAMZA_SYSTEM_TEST_AVRO);
  String avroSamzaSqlConfigPrefix = configSourceResolverDomain + String.format("%s.", SAMZA_SYSTEM_TEST_AVRO);
  staticConfigs.put(avroSystemConfigPrefix + "samza.factory", TestAvroSystemFactory.class.getName());
  staticConfigs.put(avroSystemConfigPrefix + TestAvroSystemFactory.CFG_NUM_MESSAGES,
      String.valueOf(numberOfMessages));
  staticConfigs.put(avroSamzaSqlConfigPrefix + SqlSystemStreamConfig.CFG_SAMZA_REL_CONVERTER, "avro");
  staticConfigs.put(avroSamzaSqlConfigPrefix + SqlSystemStreamConfig.CFG_REL_SCHEMA_PROVIDER, "config");
  String avroSamzaToRelMsgConverterDomain =
      String.format(SamzaSqlApplicationConfig.CFG_FMT_SAMZA_REL_CONVERTER_DOMAIN, "avro");
  staticConfigs.put(avroSamzaToRelMsgConverterDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      AvroRelConverterFactory.class.getName());
  String configAvroRelSchemaProviderDomain =
      String.format(SamzaSqlApplicationConfig.CFG_FMT_REL_SCHEMA_PROVIDER_DOMAIN, "config");
  staticConfigs.put(configAvroRelSchemaProviderDomain + SamzaSqlApplicationConfig.CFG_FACTORY,
      ConfigBasedAvroRelSchemaProviderFactory.class.getName());
  staticConfigs.put(
      configAvroRelSchemaProviderDomain + String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA,
          "testavro", "SIMPLE1"), SimpleRecord.SCHEMA$.toString());
  staticConfigs.put(
      configAvroRelSchemaProviderDomain + String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA,
          "testavro", "outputTopic"), ComplexRecord.SCHEMA$.toString());
  staticConfigs.put(
      configAvroRelSchemaProviderDomain + String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA,
          "testavro", "COMPLEX1"), ComplexRecord.SCHEMA$.toString());
  return staticConfigs;
}
Example 12: testPipeline
import org.apache.samza.config.JobCoordinatorConfig; // import the required package/class
@Test
public void testPipeline() throws Exception {
  Random random = new Random();
  int count = 10;
  PageView[] pageviews = new PageView[count];
  for (int i = 0; i < count; i++) {
    String pagekey = PAGEKEYS[random.nextInt(PAGEKEYS.length - 1)];
    int memberId = random.nextInt(10);
    pageviews[i] = new PageView(pagekey, memberId);
  }
  int partitionCount = 4;
  Map<String, String> configs = new HashMap<>();
  configs.put("systems.test.samza.factory", ArraySystemFactory.class.getName());
  configs.put("streams.PageView.samza.system", "test");
  configs.put("streams.PageView.source", Base64Serializer.serialize(pageviews));
  configs.put("streams.PageView.partitionCount", String.valueOf(partitionCount));
  configs.put(JobConfig.JOB_NAME(), "test-eos-job");
  configs.put(JobConfig.PROCESSOR_ID(), "1");
  configs.put(JobCoordinatorConfig.JOB_COORDINATION_UTILS_FACTORY, PassthroughCoordinationUtilsFactory.class.getName());
  configs.put(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, PassthroughJobCoordinatorFactory.class.getName());
  configs.put(TaskConfig.GROUPER_FACTORY(), SingleContainerGrouperFactory.class.getName());
  configs.put("systems.kafka.samza.factory", "org.apache.samza.system.kafka.KafkaSystemFactory");
  configs.put("systems.kafka.producer.bootstrap.servers", bootstrapUrl());
  configs.put("systems.kafka.consumer.zookeeper.connect", zkConnect());
  configs.put("systems.kafka.samza.key.serde", "int");
  configs.put("systems.kafka.samza.msg.serde", "json");
  configs.put("systems.kafka.default.stream.replication.factor", "1");
  configs.put("job.default.system", "kafka");
  configs.put("serializers.registry.int.class", "org.apache.samza.serializers.IntegerSerdeFactory");
  configs.put("serializers.registry.json.class", PageViewJsonSerdeFactory.class.getName());

  final LocalApplicationRunner runner = new LocalApplicationRunner(new MapConfig(configs));
  List<PageView> received = new ArrayList<>();
  final StreamApplication app = (streamGraph, cfg) -> {
    streamGraph.<KV<String, PageView>>getInputStream("PageView")
        .map(Values.create())
        .partitionBy(pv -> pv.getMemberId(), pv -> pv, "p1")
        .sink((m, collector, coordinator) -> {
          received.add(m.getValue());
        });
  };
  runner.run(app);
  runner.waitForFinish();
  assertEquals(received.size(), count * partitionCount);
}