本文整理汇总了Java中akka.cluster.client.ClusterClient类的典型用法代码示例。如果您正苦于以下问题:Java ClusterClient类的具体用法?Java ClusterClient怎么用?Java ClusterClient使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ClusterClient类属于akka.cluster.client包,在下文中一共展示了ClusterClient类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: startWorkersWithExecutors
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Boots a worker ActorSystem for the given agent: builds the node config,
 * connects a ClusterClient to the configured contact points, and spawns one
 * Worker actor per configured slot. Returns the created ActorSystem.
 */
public static ActorSystem startWorkersWithExecutors(AgentConfig agent) {
    // Node config: role, port and host override the base "application" config.
    final Config config = ConfigFactory
            .parseString("akka.cluster.roles=[" + agent.getActor().getRole() + "]")
            .withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.port=" + agent.getActor().getPort()))
            .withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.hostname=" + HostUtils.lookupIp()))
            .withFallback(ConfigFactory.load("application"));
    final ActorSystem actorSystem = ActorSystem.create(Constants.PerformanceSystem, config);

    // Initial contact points for the cluster client, parsed from the agent config.
    final Set<ActorPath> contacts = agent.getContactPoint()
            .map(ActorPaths::fromString)
            .collect(Collectors.toCollection(HashSet::new));
    final ClusterClientSettings clientSettings =
            ClusterClientSettings.create(actorSystem).withInitialContacts(contacts);
    final ActorRef client = actorSystem.actorOf(ClusterClient.props(clientSettings), "clusterClient");

    // One worker actor per configured slot, named "<role>1".."<role>N".
    final String role = agent.getActor().getRole();
    for (int i = 1; i <= agent.getActor().getNumberOfActors(); i++) {
        actorSystem.actorOf(Worker.props(client, createWorkExecutor(agent), role), role + i);
    }
    return actorSystem;
}
示例2: startCommandClient
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Boots a command-client ActorSystem: builds the node config, connects a
 * ClusterClient to the configured contact points, and starts a single
 * CommandClientActor named after the client's role. Returns the ActorSystem.
 */
public static ActorSystem startCommandClient(ClientConfig clientConfig) {
    // Node config: role, port and host override the base "application" config.
    final Config config = ConfigFactory
            .parseString("akka.cluster.roles=[" + clientConfig.getRole() + "]")
            .withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.port=" + clientConfig.getPort()))
            .withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.hostname=" + HostUtils.lookupIp()))
            .withFallback(ConfigFactory.load("application"));
    final ActorSystem system = ActorSystem.create(Constants.PerformanceSystem, config);

    // Initial contact points for the cluster client, parsed from the client config.
    final Set<ActorPath> contacts = clientConfig.getContactPoint()
            .map(ActorPaths::fromString)
            .collect(Collectors.toCollection(HashSet::new));
    final ActorRef client = system.actorOf(
            ClusterClient.props(ClusterClientSettings.create(system).withInitialContacts(contacts)),
            "clusterClient");

    // The command client actor is named after its role.
    system.actorOf(CommandClientActor.props(client, clientConfig), clientConfig.getRole());
    return system;
}
示例3: Worker
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Worker actor constructor: starts a watched child executor actor and two
 * recurring scheduler tasks — one that (re-)registers this worker with the
 * master singleton via the cluster client, and one that sends itself a
 * keep-alive tick.
 *
 * @param clusterClient     ref to the ClusterClient used to reach the master node
 * @param workExecutorProps Props for the child actor that executes the actual work
 * @param registerInterval  how often to re-send RegisterWorker to the master
 * @param workerRole        role label stored for this worker
 */
public Worker(ActorRef clusterClient, Props workExecutorProps, FiniteDuration registerInterval, String workerRole) {
this.clusterClient = clusterClient;
this.workerRole = workerRole;
this.host = HostUtils.lookupIp();
// Watch the executor child so this worker is notified (Terminated) if it dies.
this.workExecutor = getContext().watch(getContext().actorOf(workExecutorProps, "exec"));
// Re-register with the master singleton every registerInterval, starting
// immediately. SendToAll targets "/user/master/singleton" on the contact
// points. NOTE(review): `workerId` is a field initialized elsewhere in this
// class — not visible in this snippet.
this.registerTask = getContext().system().scheduler().schedule
(
Duration.Zero(),
registerInterval,
clusterClient,
new ClusterClient.SendToAll("/user/master/singleton", new MasterWorkerProtocol.RegisterWorker(workerId)),
getContext().dispatcher(),
getSelf()
);
// Send ourselves KeepAliveTick at half the 60s work timeout.
FiniteDuration workTimeout = Duration.create(60, "seconds");
this.keepAliveTask = getContext().system().scheduler().schedule(workTimeout.div(2), workTimeout.div(2), getSelf(), KeepAliveTick, getContext().dispatcher(), getSelf());
}
示例4: cancelUniqueField
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Sends a unique-value Cancellation message for the given field, either to the
 * local client actor (same region) or via the ClusterClient for the target
 * region (remote).
 */
private void cancelUniqueField( ApplicationScope scope,
Id entityId, UUID version, Field field, String region ) throws UniqueValueException {

    final UniqueValueActor.Cancellation cancellation =
            new UniqueValueActor.Cancellation( scope, entityId, version, field );

    final boolean isLocalRegion = actorSystemManager.getCurrentRegion().equals( region );
    if ( isLocalRegion ) {
        // Same region: deliver directly through the local client actor.
        actorSystemManager.getClientActor().tell( cancellation, null );
        return;
    }

    // Remote region: wrap in ClusterClient.Send so the remote receptionist
    // forwards the message to that region's "/user/clientActor".
    actorSystemManager.getClusterClient( region )
            .tell( new ClusterClient.Send( "/user/clientActor", cancellation ), null );
}
示例5: startWorker
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Boots a worker node on the given remote port: creates the "WorkerSystem"
 * ActorSystem, a ClusterClient (contact points from configuration), and a
 * single Worker actor backed by a WorkExecutor child.
 */
public static void startWorker(int port) {
    // Override only the remote port on top of the "worker" base config.
    final Config config = ConfigFactory
            .parseString("akka.remote.netty.tcp.port=" + port)
            .withFallback(ConfigFactory.load("worker"));
    final ActorSystem system = ActorSystem.create("WorkerSystem", config);

    final ActorRef client = system.actorOf(
            ClusterClient.props(ClusterClientSettings.create(system)),
            "clusterClient");

    system.actorOf(Worker.props(client, Props.create(WorkExecutor.class)), "worker");
}
示例6: publishToAllRegions
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Publishes a message on the given topic to local subscribers (via the
 * DistributedPubSub mediator) and to every remote region (via each region's
 * ClusterClient).
 */
@Override
public void publishToAllRegions( String topic, Object message, ActorRef sender ) {
    // Local subscribers first, through the pub-sub mediator.
    mediator.tell( new DistributedPubSubMediator.Publish( topic, message ), sender );

    // Then fan out the same publish to every remote region's cluster client.
    final ClusterClient.Publish remotePublish = new ClusterClient.Publish( topic, message );
    clusterClientsByRegion.values().forEach( client -> client.tell( remotePublish, sender ) );
}
示例7: sendToMaster
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/** Forwards a message to the master singleton via the cluster client. */
private void sendToMaster(Object msg) {
    final ClusterClient.SendToAll envelope =
            new ClusterClient.SendToAll("/user/master/singleton", msg);
    clusterClient.tell(envelope, getSelf());
}
示例8: testWorkers
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * End-to-end distributed-worker test: forms a single-node cluster, starts the
 * master singleton, a cluster client, three normal workers plus one flaky
 * worker, then pushes 100 work items through the frontend and verifies that
 * results 2..100 all arrive exactly once on the results topic.
 */
@Test
public void testWorkers() throws Exception {
new JavaTestKit(system) {{
// Join a self-cluster and wait until this node is Up before starting actors.
TestProbe clusterProbe = new TestProbe(system);
Cluster.get(system).subscribe(clusterProbe.ref(), ClusterEvent.MemberUp.class);
clusterProbe.expectMsgClass(ClusterEvent.CurrentClusterState.class);
Address clusterAddress = Cluster.get(system).selfAddress();
Cluster.get(system).join(clusterAddress);
clusterProbe.expectMsgClass(ClusterEvent.MemberUp.class);
// Master runs as a cluster singleton restricted to the "backend" role.
system.actorOf(
ClusterSingletonManager.props(
Master.props(workTimeout),
PoisonPill.getInstance(),
ClusterSingletonManagerSettings.create(system).withRole("backend")
),
"master");
// Cluster client contacts this node's own receptionist.
Set<ActorPath> initialContacts = new HashSet<>();
initialContacts.add(ActorPaths.fromString(clusterAddress + "/system/receptionist"));
ActorRef clusterClient = system.actorOf(
ClusterClient.props(ClusterClientSettings.create(system).withInitialContacts(initialContacts)),
"clusterClient");
// Three reliable workers plus one flaky one (its executor fails sometimes).
for (int n = 1; n <= 3; n += 1) {
system.actorOf(Worker.props(clusterClient,
Props.create(WorkExecutor.class), registerInterval), "worker-" + n);
}
ActorRef flakyWorker = system.actorOf(Worker.props(clusterClient,
Props.create(FlakyWorkExecutor.class), registerInterval), "flaky-worker");
final ActorRef frontend = system.actorOf(Props.create(Frontend.class), "frontend");
// Subscribe a separate probe to the results topic.
final JavaTestKit results = new JavaTestKit(system);
DistributedPubSub.get(system).mediator().tell(
new DistributedPubSubMediator.Subscribe(Master.ResultsTopic, results.getRef()),
getRef());
expectMsgClass(DistributedPubSubMediator.SubscribeAck.class);
// might take a while for things to get connected
new AwaitAssert(duration("10 seconds")) {
protected void check() {
frontend.tell(new Master.Work("1", 1), getRef());
expectMsgEquals(Frontend.Ok.getInstance());
}
};
assertEquals(results.expectMsgClass(Master.WorkResult.class).workId, "1");
// Submit work items 2..100; each must be acknowledged by the frontend.
for (int n = 2; n <= 100; n += 1) {
frontend.tell(new Master.Work(Integer.toString(n), n), getRef());
expectMsgEquals(Frontend.Ok.getInstance());
}
results.new Within(duration("10 seconds")) {
public void run() {
Object[] messages = results.receiveN(99);
SortedSet<Integer> set = new TreeSet<Integer>();
for (Object m: messages) {
set.add(Integer.parseInt(((Master.WorkResult) m).workId));
}
// nothing lost, and no duplicates
Iterator<Integer> iterator = set.iterator();
for (int n = 2; n <= 100; n += 1) {
assertEquals(n, iterator.next().intValue());
}
}
};
}};
}
开发者ID:typesafehub,项目名称:activator-akka-distributed-workers-java,代码行数:79,代码来源:DistributedWorkerTest.java
示例9: sendMessageToRegion
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Sends a queue-write request to the destination region (local client actor
 * for the current region, ClusterClient for remote regions) and returns the
 * write status reported back, retrying up to the configured maximum.
 *
 * @throws QakkaRuntimeException when no successful response arrives within maxRetries
 */
QueueWriter.WriteStatus sendMessageToRegion(
    String queueName,
    String sourceRegion,
    String destRegion,
    UUID messageId,
    Long deliveryTime,
    Long expirationTime ) {

    Timer.Context timer = metricsService.getMetricRegistry().timer( MetricsService.SEND_TIME_SEND ).time();
    try {
        int maxRetries = qakkaFig.getMaxSendRetries();
        int retries = 0;

        QueueWriteRequest request = new QueueWriteRequest(
            queueName, sourceRegion, destRegion, messageId, deliveryTime, expirationTime );

        while (retries++ < maxRetries) {
            try {
                Timeout t = new Timeout( qakkaFig.getSendTimeoutSeconds(), TimeUnit.SECONDS );

                Future<Object> fut;
                if (actorSystemManager.getCurrentRegion().equals( destRegion )) {
                    logger.trace("{}: Sending queue {} message to local region {}", name, queueName, destRegion );
                    // send to current region via local clientActor
                    ActorRef clientActor = actorSystemManager.getClientActor();
                    fut = Patterns.ask( clientActor, request, t );
                } else {
                    logger.trace("{} Sending queue {} message to remote region {}", name, queueName, destRegion );
                    // send to remote region via cluster client for that region
                    ActorRef clusterClient = actorSystemManager.getClusterClient( destRegion );
                    fut = Patterns.ask(
                        clusterClient, new ClusterClient.Send( "/user/clientActor", request ), t );
                }

                // wait for response... (a timeout surfaces as an exception, caught below)
                final Object response = Await.result( fut, t.duration() );

                // instanceof is false for null, so no separate null check is needed
                if (response instanceof QueueWriteResponse) {
                    QueueWriteResponse qarm = (QueueWriteResponse) response;
                    if (!QueueWriter.WriteStatus.ERROR.equals( qarm.getSendStatus() )) {
                        if (retries > 1) {
                            logger.debug( "queueAdd TOTAL_SUCCESS after {} retries", retries );
                        }
                        return qarm.getSendStatus();
                    } else {
                        logger.debug( "ERROR STATUS adding to queue, retrying {}", retries );
                    }
                } else if (response != null) {
                    // BUG FIX: this branch previously logged "NULL RESPONSE" even
                    // though the response is non-null — it is just not the expected type.
                    logger.debug( "UNEXPECTED RESPONSE TYPE {} adding to queue, retrying {}",
                        response.getClass().getSimpleName(), retries );
                } else {
                    // BUG FIX: a null result previously logged "TIMEOUT"; real timeouts
                    // throw from Await.result and are handled in the catch block.
                    logger.debug( "NULL RESPONSE adding to queue, retrying {}", retries );
                }
            } catch (Exception e) {
                logger.debug( "ERROR adding to queue, retrying " + retries, e );
            }
        }
        throw new QakkaRuntimeException( "Error adding to queue after " + retries + " retries" );
    } finally {
        timer.stop();
    }
}
示例10: sendUniqueValueRequest
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Sends a unique-value request for the entity to the given region (local
 * client actor or remote ClusterClient), retrying up to the configured count.
 *
 * @throws RuntimeException     when no successful response arrives (maps to HTTP 503)
 * @throws UniqueValueException when the value is reported NOT_UNIQUE (maps to HTTP 409)
 */
private void sendUniqueValueRequest(
    Entity entity, String region, UniqueValueActor.Request request ) throws UniqueValueException {

    int maxRetries = uniqueValuesFig.getRequestRetryCount();
    int retries = 0;

    UniqueValueActor.Response response = null;
    while ( retries++ < maxRetries ) {
        try {
            Timeout t = new Timeout( uniqueValuesFig.getRequestTimeout(), TimeUnit.MILLISECONDS );

            Future<Object> fut;
            if ( actorSystemManager.getCurrentRegion().equals( region ) ) {
                // sending to current region, use local clientActor
                ActorRef clientActor = actorSystemManager.getClientActor();
                fut = Patterns.ask( clientActor, request, t );
            } else {
                // sending to remote region, send via cluster client for that region
                ActorRef clusterClient = actorSystemManager.getClusterClient( region );
                fut = Patterns.ask( clusterClient, new ClusterClient.Send("/user/clientActor", request), t );
            }

            // wait (up to timeout) for response
            response = (UniqueValueActor.Response) Await.result( fut, t.duration() );

            if ( response != null && (
                response.getStatus().equals( UniqueValueActor.Response.Status.IS_UNIQUE )
                    || response.getStatus().equals( UniqueValueActor.Response.Status.NOT_UNIQUE ))) {
                if ( retries > 1 ) {
                    logger.debug("IS_UNIQUE after retrying {} for entity {} rowkey {}",
                        retries, entity.getId().getUuid(), request.getConsistentHashKey());
                }
                break;

            } else if ( response != null ) {
                logger.warn("ERROR status retrying {} entity {} rowkey {}",
                    retries, entity.getId().getUuid(), request.getConsistentHashKey());
            } else {
                // BUG FIX: format string was missing the third {} placeholder, so the
                // rowkey argument was silently dropped from the log output.
                logger.warn("Timed-out retrying {} entity {} rowkey {}",
                    retries, entity.getId().getUuid(), request.getConsistentHashKey());
            }

        } catch ( Exception e ) {
            // BUG FIX: pass the exception as the trailing argument so SLF4J logs
            // the full stack trace, not just the exception class name.
            logger.error("{} caused retry {} for entity {} rowkey {}",
                e.getClass().getSimpleName(), retries, entity.getId().getUuid(),
                request.getConsistentHashKey(), e);
        }
    }

    if ( response == null || response.getStatus().equals( UniqueValueActor.Response.Status.ERROR )) {
        logger.debug("ERROR after retrying {} for entity {} rowkey {}",
            retries, entity.getId().getUuid(), request.getConsistentHashKey());
        // should result in an HTTP 503
        throw new RuntimeException( "Error verifying unique value after " + retries + " retries");
    }

    if ( response.getStatus().equals( UniqueValueActor.Response.Status.NOT_UNIQUE )) {
        // should result in an HTTP 409 (conflict)
        throw new UniqueValueException( "Error property not unique", request.getField() );
    }
}
示例11: createClientActors
import akka.cluster.client.ClusterClient; //导入依赖的package包/类
/**
 * Creates the per-region client plumbing: for the current region a local
 * ClientActor registered with the ClusterClientReceptionist, and for every
 * other region a ClusterClient whose contact points are that region's seed
 * nodes' receptionists.
 */
private void createClientActors( ActorSystem system ) {
    for ( String region : getSeedsByRegion().keySet() ) {
        if ( !currentRegion.equals( region ) ) {
            logger.info( "Creating clusterClient for region [{}]", region );
            // Contact points: each remote seed node's system receptionist path.
            Set<ActorPath> contacts = new HashSet<>(20);
            getSeedsByRegion().get( region ).forEach(
                seed -> contacts.add( ActorPaths.fromString( seed + "/system/receptionist" ) ) );
            ActorRef regionClient = system.actorOf(
                ClusterClient.props(
                    ClusterClientSettings.create( system ).withInitialContacts( contacts ) ),
                "client");
            clusterClientsByRegion.put( region, regionClient );
        } else {
            logger.info( "Creating clientActor for region [{}]", region );
            // Local region: expose the clientActor through the receptionist so
            // remote regions' ClusterClients can reach it.
            clientActor = system.actorOf(
                Props.create( ClientActor.class, routersByMessageType ), "clientActor" );
            ClusterClientReceptionist.get( system ).registerService( clientActor );
        }
    }
}