本文整理汇总了Java中backtype.storm.utils.Utils.newInstance方法的典型用法代码示例。如果您正苦于以下问题:Java Utils.newInstance方法的具体用法?Java Utils.newInstance怎么用?Java Utils.newInstance使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类backtype.storm.utils.Utils
的用法示例。
在下文中一共展示了Utils.newInstance方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: registerDisruptorQueue
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Creates the deserialize queue for this task and registers it under the
 * task id so that incoming tuples can be routed to it.
 *
 * @return the newly created disruptor queue
 */
public DisruptorQueue registerDisruptorQueue() {
    // Receive-buffer capacity; falls back to 256 slots when unset.
    int capacity = JStormUtils.parseInt(
            stormConf.get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE), 256);
    // The wait-strategy implementation class is configured by name.
    String strategyClass =
            (String) stormConf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY);
    WaitStrategy strategy = (WaitStrategy) Utils.newInstance(strategyClass);
    DisruptorQueue result = DisruptorQueue.mkInstance(
            "TaskDeserialize", ProducerType.SINGLE, capacity, strategy);
    deserializeQueues.put(taskid, result);
    return result;
}
示例2: startDispatchThread
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Starts the worker's receive-dispatch thread: builds the receive queue,
 * binds the server-side connection for this topology/port, and launches the
 * dispatcher that routes incoming messages to per-task queues.
 *
 * @return the running dispatch thread
 */
private AsyncLoopThread startDispatchThread() {
Map stormConf = workerData.getStormConf();
// Transfer-buffer capacity; falls back to 1024 slots when unset.
int queue_size = Utils.getInt(
stormConf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE), 1024);
// Wait-strategy implementation class is configured by name.
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) stormConf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
// MULTI producer: several remote connections feed this one queue.
DisruptorQueue recvQueue = DisruptorQueue.mkInstance("Dispatch", ProducerType.MULTI,
queue_size, waitStrategy);
// stop consumerStarted
//recvQueue.consumerStarted();
IContext context = workerData.getContext();
String topologyId = workerData.getTopologyId();
// Bind must happen before registering the queue so the connection exists.
IConnection recvConnection = context.bind(topologyId,
workerData.getPort());
recvConnection.registerQueue(recvQueue);
RunnableCallback recvDispather = new VirtualPortDispatch(workerData,
recvConnection, recvQueue);
// Highest priority: dispatch latency directly delays every task.
AsyncLoopThread vthread = new AsyncLoopThread(recvDispather, false,
Thread.MAX_PRIORITY, false);
return vthread;
}
示例3: TaskTransfer
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Sets up the per-task transfer pipeline: creates the serialize queue,
 * registers its metrics, and launches the serialization thread.
 *
 * @param taskName   name of the owning task, formatted "component:taskId"
 * @param serializer tuple serializer used by the transfer thread
 * @param taskStatus shared status flag used to stop the thread
 * @param workerData worker-wide shared state (conf, queues, transfer maps)
 */
public TaskTransfer(String taskName,
KryoTupleSerializer serializer, TaskStatus taskStatus,
WorkerData workerData) {
this.taskName = taskName;
this.serializer = serializer;
this.taskStatus = taskStatus;
this.storm_conf = workerData.getConf();
this.transferQueue = workerData.getTransferQueue();
this.innerTaskTransfer = workerData.getInnerTaskTransfer();
// NOTE(review): unlike the receive side, no default is supplied here, so
// a missing config would yield a null/invalid size — confirm intended.
int queue_size = Utils.getInt(storm_conf
.get(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE));
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) storm_conf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
this.serializeQueue = DisruptorQueue.mkInstance(taskName, ProducerType.MULTI,
queue_size, waitStrategy);
// Mark the consumer as started before the thread begins draining.
this.serializeQueue.consumerStarted();
// taskName is "component:taskId"; extract the numeric id for metrics.
String taskId = taskName.substring(taskName.indexOf(":") + 1);
Metrics.registerQueue(taskName, MetricDef.SERIALIZE_QUEUE, serializeQueue, taskId, Metrics.MetricType.TASK);
timer = Metrics.registerTimer(taskName, MetricDef.SERIALIZE_TIME, taskId, Metrics.MetricType.TASK);
serializeThread = new AsyncLoopThread(new TransferRunnable());
LOG.info("Successfully start TaskTransfer thread");
}
示例4: test_large_msg
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Sends one large message from client to server over a freshly bound
 * netty connection and verifies it arrives intact.
 */
@Test
public void test_large_msg() {
    // Fixed typo: "larget" -> "large" in the console banners.
    System.out.println("!!!!!!!!!!start large message test!!!!!!!!");
    String req_msg = setupLargMsg();
    System.out.println("!!!!Finish batch data, size:" + req_msg.length()
            + "!!!!");
    IConnection server = null;
    IConnection client = null;
    server = context.bind(null, port);
    // Wait-strategy implementation class is configured by name.
    WaitStrategy waitStrategy = (WaitStrategy) Utils
            .newInstance((String) storm_conf
                    .get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
    DisruptorQueue recvQueue = DisruptorQueue.mkInstance(
            "NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy);
    server.registerQueue(recvQueue);
    client = context.connect(null, "localhost", port);
    // Removed unused local List<TaskMessage>; the message is sent directly.
    TaskMessage message = new TaskMessage(task, req_msg.getBytes());
    LOG.info("Client send data");
    client.send(message);
    // recv(0) blocks until the message is available.
    TaskMessage recv = server.recv(0);
    Assert.assertEquals(req_msg, new String(recv.message()));
    client.close();
    server.close();
    System.out.println("!!!!!!!!!!End large message test!!!!!!!!");
}
示例5: NettyClientSync
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Builds a synchronous netty client: creates the batch and disruptor send
 * queues, registers sync metrics (unless connecting to itself), schedules a
 * periodic flush trigger, and starts the connection on a dedicated channel
 * factory.
 *
 * @param storm_conf  topology configuration
 * @param factory     shared channel factory passed to the superclass
 * @param scheduler   scheduler used for the periodic flush trigger
 * @param host        remote host
 * @param port        remote port
 * @param reconnector callback driving reconnect attempts
 */
@SuppressWarnings("rawtypes")
NettyClientSync(Map storm_conf, ChannelFactory factory, ScheduledExecutorService scheduler, String host, int port, ReconnectRunnable reconnector) {
    super(storm_conf, factory, scheduler, host, port, reconnector);
    batchQueue = new ConcurrentLinkedQueue<MessageBatch>();
    WaitStrategy waitStrategy = (WaitStrategy) Utils.newInstance((String) storm_conf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
    disruptorQueue = DisruptorQueue.mkInstance(name, ProducerType.MULTI, MAX_SEND_PENDING * 8, waitStrategy);
    disruptorQueue.consumerStarted();
    // Idiomatic boolean test (was "connectMyself == false").
    if (!connectMyself) {
        registerSyncMetrics();
    }
    Runnable trigger = new Runnable() {
        @Override
        public void run() {
            trigger();
        }
    };
    // Flush once per second after an initial 10s delay.
    scheduler.scheduleAtFixedRate(trigger, 10, 1, TimeUnit.SECONDS);
    /**
     * In sync mode, it can't directly use common factory, it will occur problem when client close and restart
     */
    ThreadFactory bossFactory = new NettyRenameThreadFactory(MetricDef.NETTY_CLI + JStormServerUtils.getName(host, port) + "-boss");
    bossExecutor = Executors.newCachedThreadPool(bossFactory);
    ThreadFactory workerFactory = new NettyRenameThreadFactory(MetricDef.NETTY_CLI + JStormServerUtils.getName(host, port) + "-worker");
    workerExecutor = Executors.newCachedThreadPool(workerFactory);
    clientChannelFactory = new NioClientSocketChannelFactory(bossExecutor, workerExecutor, 1);
    start();
    LOG.info(this.toString());
}
示例6: test_small_message
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Sends one small message from client to server and verifies it arrives
 * intact.
 */
@Test
public void test_small_message() {
    System.out.println("!!!!!!!!Start test_small_message !!!!!!!!!!!");
    String req_msg = "Aloha is the most Hawaiian word.";
    IConnection server = null;
    IConnection client = null;
    server = context.bind(null, port);
    // Wait-strategy implementation class is configured by name.
    WaitStrategy waitStrategy = (WaitStrategy) Utils
            .newInstance((String) storm_conf
                    .get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
    DisruptorQueue recvQueue = DisruptorQueue.mkInstance(
            "NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy);
    server.registerQueue(recvQueue);
    client = context.connect(null, "localhost", port);
    // Removed unused local List<TaskMessage>; the message is sent directly.
    TaskMessage message = new TaskMessage(task, req_msg.getBytes());
    client.send(message);
    // recv(0) blocks until the message is available.
    TaskMessage recv = server.recv(0);
    Assert.assertEquals(req_msg, new String(recv.message()));
    System.out.println("!!!!!!!!!!!!!!!!!!Test one time!!!!!!!!!!!!!!!!!");
    server.close();
    client.close();
    System.out.println("!!!!!!!!!!!!End test_small_message!!!!!!!!!!!!!");
}
示例7: JStormMetricCache
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Initializes the metrics cache backed by a pluggable store (RocksDB by
 * default). If the first init fails and a reset was not already requested,
 * the DB is reset and init is retried once; otherwise the failure is fatal.
 *
 * @param conf      cluster configuration (also mutated with RocksDB settings)
 * @param zkCluster zookeeper-backed cluster state handle
 */
public JStormMetricCache(Map conf, StormClusterState zkCluster) {
String dbCacheClass = getNimbusCacheClass(conf);
LOG.info("JStorm metrics cache will use {}", dbCacheClass);
boolean reset = ConfigExtension.getMetricCacheReset(conf);
try {
cache = (JStormCache) Utils.newInstance(dbCacheClass);
String dbDir = StormConfig.metricDbDir(conf);
conf.put(RocksDBCache.ROCKSDB_ROOT_DIR, dbDir);
conf.put(RocksDBCache.ROCKSDB_RESET, reset);
cache.init(conf);
} catch (Exception e) {
// Retry only when a reset wasn't already attempted and the cache
// instance itself was constructed successfully.
if (!reset && cache != null) {
LOG.error("Failed to init rocks db, will reset and try to re-init...");
conf.put(RocksDBCache.ROCKSDB_RESET, true);
try {
cache.init(conf);
} catch (Exception ex) {
// NOTE(review): a failed retry is only logged, leaving the cache
// possibly unusable — confirm this best-effort behavior is intended.
LOG.error("Error", ex);
}
} else {
LOG.error("Failed to create metrics cache!", e);
throw new RuntimeException(e);
}
}
this.zkCluster = zkCluster;
}
示例8: test_server_delay
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Sends a large message, then sleeps before receiving to verify the server
 * buffers the message until the consumer reads it.
 *
 * @throws InterruptedException if the sleep is interrupted
 */
@Test
public void test_server_delay() throws InterruptedException {
    System.out.println("!!!!!!!!!!Start delay message test!!!!!!!!");
    String req_msg = setupLargMsg();
    IConnection server = null;
    IConnection client = null;
    server = context.bind(null, port);
    // Wait-strategy implementation class is configured by name.
    WaitStrategy waitStrategy = (WaitStrategy) Utils
            .newInstance((String) storm_conf
                    .get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
    DisruptorQueue recvQueue = DisruptorQueue.mkInstance(
            "NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy);
    server.registerQueue(recvQueue);
    client = context.connect(null, "localhost", port);
    // Removed unused local List<TaskMessage>; the message is sent directly.
    TaskMessage message = new TaskMessage(task, req_msg.getBytes());
    LOG.info("Client send data");
    client.send(message);
    // Delay the receive so the server must hold the message.
    Thread.sleep(1000);
    TaskMessage recv = server.recv(0);
    Assert.assertEquals(req_msg, new String(recv.message()));
    server.close();
    client.close();
    System.out.println("!!!!!!!!!!End delay message test!!!!!!!!");
}
示例9: BaseExecutors
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Wires up a task executor: captures task/topology context, creates the
 * execute queue, registers it for intra-worker transfer, starts the
 * deserialize thread, registers metrics, and arms the rotating-map trigger
 * used for tuple timeout tracking.
 *
 * @param _transfer_fn      task transfer handle (unused here; kept for callers)
 * @param _storm_conf       topology configuration
 * @param deserializeQueue  queue feeding raw serialized tuples to this executor
 * @param innerTaskTransfer map of taskId to in-process transfer queues
 * @param topology_context  system topology context for this task
 * @param _user_context     user-visible topology context
 * @param _task_stats       rolling task statistics collector
 * @param taskStatus        shared status flag used to stop the executor
 * @param _report_error     callback for reporting task errors
 */
public BaseExecutors(TaskTransfer _transfer_fn, Map _storm_conf,
DisruptorQueue deserializeQueue,
Map<Integer, DisruptorQueue> innerTaskTransfer,
TopologyContext topology_context, TopologyContext _user_context,
CommonStatsRolling _task_stats, TaskStatus taskStatus,
ITaskReportErr _report_error) {
this.storm_conf = _storm_conf;
this.deserializeQueue = deserializeQueue;
this.userTopologyCtx = _user_context;
this.task_stats = _task_stats;
this.taskId = topology_context.getThisTaskId();
this.innerTaskTransfer = innerTaskTransfer;
this.component_id = topology_context.getThisComponentId();
this.idStr = JStormServerUtils.getName(component_id, taskId);
this.taskStatus = taskStatus;
this.report_error = _report_error;
this.deserializer = new KryoTupleDeserializer(storm_conf,
topology_context);// (KryoTupleDeserializer.
this.isDebugRecv = ConfigExtension.isTopologyDebugRecvTuple(storm_conf);
this.isDebug = JStormUtils.parseBoolean(
storm_conf.get(Config.TOPOLOGY_DEBUG), false);
// Tuple timeout; falls back to 30 seconds when unset.
message_timeout_secs = JStormUtils.parseInt(
storm_conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 30);
// Execute-queue capacity; falls back to 256 slots when unset.
int queue_size = Utils.getInt(
storm_conf.get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE),
256);
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) storm_conf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
this.exeQueue = DisruptorQueue.mkInstance(idStr, ProducerType.MULTI,
queue_size, waitStrategy);
// Mark the consumer started before registering/launching consumers.
this.exeQueue.consumerStarted();
this.registerInnerTransfer(exeQueue);
deserializeThread = new AsyncLoopThread(new DeserializeRunnable(
deserializeQueue, exeQueue));
deserializeTimer = Metrics.registerTimer(idStr, MetricDef.DESERIALIZE_TIME, String.valueOf(taskId), Metrics.MetricType.TASK);
Metrics.registerQueue(idStr, MetricDef.DESERIALIZE_QUEUE, deserializeQueue, String.valueOf(taskId), Metrics.MetricType.TASK);
Metrics.registerQueue(idStr, MetricDef.EXECUTE_QUEUE, exeQueue, String.valueOf(taskId), Metrics.MetricType.TASK);
// Periodically rotates the pending-tuple map to expire timed-out tuples.
RotatingMapTrigger rotatingMapTrigger = new RotatingMapTrigger(storm_conf, idStr + "_rotating", exeQueue);
rotatingMapTrigger.register();
}
示例10: test_server_reboot
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Verifies that a client survives a server restart: the client sends three
 * messages (with long pauses spanning a server shutdown), and a rebound
 * server on the same port still receives the later sends. Uses a lock with
 * two conditions (clientClose / contextClose) to order client close and
 * context termination.
 *
 * @throws InterruptedException if a sleep or await is interrupted
 */
@Test
public void test_server_reboot() throws InterruptedException {
System.out.println("!!!!!!!!!!Start server reboot test!!!!!!!!");
final String req_msg = setupLargMsg();
final IContext context = TransportFactory.makeContext(storm_conf);
IConnection server = null;
// Client thread: sends three times across the server restart window,
// then waits for the main thread's signal before closing.
new Thread(new Runnable() {
@Override
public void run() {
final IConnection client = context.connect(null, "localhost", port);
lock.lock();
List<TaskMessage> list = new ArrayList<TaskMessage>();
TaskMessage message = new TaskMessage(task, req_msg.getBytes());
list.add(message);
client.send(message);
System.out.println("Send first");
JStormUtils.sleepMs(10000);
System.out.println("Begin to Send second");
client.send(message);
System.out.println("Send second");
// Long enough for the server to be closed and rebound meanwhile.
JStormUtils.sleepMs(15000);
client.send(message);
System.out.println("Send third time");
try {
// Block until the main thread signals it is done receiving.
clientClose.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
client.close();
// Tell the main thread it is now safe to terminate the context.
contextClose.signal();
lock.unlock();
}
}).start();
server = context.bind(null, port);
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) storm_conf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
DisruptorQueue recvQueue = DisruptorQueue.mkInstance(
"NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy);
server.registerQueue(recvQueue);
TaskMessage recv = server.recv(0);
System.out.println("Receive first");
Assert.assertEquals(req_msg, new String(recv.message()));
// Simulate a server crash/restart on the same port.
server.close();
System.out.println("!!shutdow server and sleep 30s, please wait!!");
Thread.sleep(30000);
IConnection server2 = context.bind(null, port);
// Reuse the same receive queue for the rebound server.
server2.registerQueue(recvQueue);
System.out.println("!!!!!!!!!!!!!!!!!!!! restart server !!!!!!!!!!!");
TaskMessage recv2 = server2.recv(0);
Assert.assertEquals(req_msg, new String(recv2.message()));
lock.lock();
// Let the client close, then wait for its confirmation before term().
clientClose.signal();
server2.close();
contextClose.await();
context.term();
lock.unlock();
System.out.println("!!!!!!!!!!End server reboot test!!!!!!!!");
}
示例11: test_server_reboot
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Verifies that a client survives a server restart: three sends spanning a
 * server shutdown/rebind on the same port, with a lock plus two conditions
 * (clientClose / contextClose) ordering client close against context
 * termination.
 *
 * @throws InterruptedException if a sleep or await is interrupted
 */
@Test
public void test_server_reboot() throws InterruptedException {
System.out.println("!!!!!!!!!!Start server reboot test!!!!!!!!");
final String req_msg = setupLargMsg();
final IContext context = TransportFactory.makeContext(storm_conf);
IConnection server = null;
// Client thread: sends three times across the restart window, then waits
// for the main thread's signal before closing.
new Thread(new Runnable() {
@Override
public void run() {
final IConnection client = context.connect(null, "localhost", port);
lock.lock();
List<TaskMessage> list = new ArrayList<TaskMessage>();
TaskMessage message = new TaskMessage(task, req_msg.getBytes());
list.add(message);
client.send(message);
System.out.println("Send first");
JStormUtils.sleepMs(10000);
System.out.println("Begin to Send second");
client.send(message);
System.out.println("Send second");
// Long enough for the server to be closed and rebound meanwhile.
JStormUtils.sleepMs(15000);
client.send(message);
System.out.println("Send third time");
try {
// Block until the main thread signals it is done receiving.
clientClose.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
client.close();
// Tell the main thread it is now safe to terminate the context.
contextClose.signal();
lock.unlock();
}
}).start();
server = context.bind(null, port);
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) storm_conf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
DisruptorQueue recvQueue = DisruptorQueue.mkInstance(
"NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy);
server.registerQueue(recvQueue);
TaskMessage recv = server.recv(0);
System.out.println("Receive first");
Assert.assertEquals(req_msg, new String(recv.message()));
// Simulate a server crash/restart on the same port.
server.close();
System.out.println("!!shutdow server and sleep 30s, please wait!!");
Thread.sleep(30000);
IConnection server2 = context.bind(null, port);
// Reuse the same receive queue for the rebound server.
server2.registerQueue(recvQueue);
System.out.println("!!!!!!!!!!!!!!!!!!!! restart server !!!!!!!!!!!");
TaskMessage recv2 = server2.recv(0);
Assert.assertEquals(req_msg, new String(recv2.message()));
lock.lock();
// Let the client close, then wait for its confirmation before term().
clientClose.signal();
server2.close();
contextClose.await();
context.term();
lock.unlock();
System.out.println("!!!!!!!!!!End server reboot test!!!!!!!!");
}
示例12: NettyClientSync
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Builds a synchronous netty client: creates the batch and disruptor send
 * queues, registers worker-level gauges for both, schedules a periodic
 * flush trigger, and starts the connection on a dedicated channel factory.
 *
 * @param storm_conf  topology configuration
 * @param factory     shared channel factory passed to the superclass
 * @param scheduler   scheduler used for the periodic flush trigger
 * @param host        remote host
 * @param port        remote port
 * @param reconnector callback driving reconnect attempts
 */
@SuppressWarnings("rawtypes")
NettyClientSync(Map storm_conf, ChannelFactory factory,
ScheduledExecutorService scheduler, String host, int port,
ReconnectRunnable reconnector) {
super(storm_conf, factory, scheduler, host, port, reconnector);
batchQueue = new ConcurrentLinkedQueue<MessageBatch>();
// Gauge exposing the current batch backlog size.
Metrics.register(address, MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE,
new Gauge<Integer>() {
@Override
public Integer getValue() {
return batchQueue.size();
}
}, null, Metrics.MetricType.WORKER);
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) storm_conf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
disruptorQueue = DisruptorQueue.mkInstance(name, ProducerType.MULTI,
MAX_SEND_PENDING * 8, waitStrategy);
// Mark the consumer started before any producer enqueues.
disruptorQueue.consumerStarted();
Metrics.registerQueue(address, MetricDef.NETTY_CLI_SYNC_DISR_QUEUE, disruptorQueue,
null, Metrics.MetricType.WORKER);
Runnable trigger = new Runnable() {
@Override
public void run() {
trigger();
}
};
// Flush once per second (fixed delay) after an initial 10s delay.
scheduler.scheduleWithFixedDelay(trigger, 10, 1, TimeUnit.SECONDS);
/**
 * In sync mode, it can't directly use common factory,
 * it will occur problem when client close and restart
 */
ThreadFactory bossFactory = new NettyRenameThreadFactory(
PREFIX + JStormServerUtils.getName(host, port) + "-boss");
bossExecutor = Executors.newCachedThreadPool(bossFactory);
ThreadFactory workerFactory = new NettyRenameThreadFactory(
PREFIX + JStormServerUtils.getName(host, port) + "-worker");
workerExecutor = Executors.newCachedThreadPool(workerFactory);
clientChannelFactory = new NioClientSocketChannelFactory(
bossExecutor,
workerExecutor, 1);
start();
LOG.info(this.toString());
}
示例13: BaseExecutors
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Wires up a task executor: captures task/topology context, creates the
 * execute queue, registers it for intra-worker transfer, starts the
 * deserialize thread, and registers queue/timer metrics.
 *
 * @param _transfer_fn      task transfer handle (unused here; kept for callers)
 * @param _storm_conf       topology configuration
 * @param deserializeQueue  queue feeding raw serialized tuples to this executor
 * @param innerTaskTransfer map of taskId to in-process transfer queues
 * @param topology_context  system topology context for this task
 * @param _user_context     user-visible topology context
 * @param _task_stats       rolling task statistics collector
 * @param taskStatus        shared status flag used to stop the executor
 * @param _report_error     callback for reporting task errors
 */
public BaseExecutors(TaskTransfer _transfer_fn, Map _storm_conf,
DisruptorQueue deserializeQueue,
Map<Integer, DisruptorQueue> innerTaskTransfer,
TopologyContext topology_context, TopologyContext _user_context,
CommonStatsRolling _task_stats, TaskStatus taskStatus,
ITaskReportErr _report_error) {
this.storm_conf = _storm_conf;
this.deserializeQueue = deserializeQueue;
this.userTopologyCtx = _user_context;
this.task_stats = _task_stats;
this.taskId = topology_context.getThisTaskId();
this.innerTaskTransfer = innerTaskTransfer;
this.component_id = topology_context.getThisComponentId();
this.idStr = JStormServerUtils.getName(component_id, taskId);
this.taskStatus = taskStatus;
this.report_error = _report_error;
this.deserializer = new KryoTupleDeserializer(storm_conf,
topology_context);// (KryoTupleDeserializer.
this.isDebugRecv = ConfigExtension.isTopologyDebugRecvTuple(storm_conf);
this.isDebug = JStormUtils.parseBoolean(
storm_conf.get(Config.TOPOLOGY_DEBUG), false);
// Tuple timeout; falls back to 30 seconds when unset.
message_timeout_secs = JStormUtils.parseInt(
storm_conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 30);
// Execute-queue capacity; falls back to 256 slots when unset.
int queue_size = Utils.getInt(
storm_conf.get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE),
256);
WaitStrategy waitStrategy = (WaitStrategy) Utils
.newInstance((String) storm_conf
.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
this.exeQueue = DisruptorQueue.mkInstance(idStr, ProducerType.MULTI,
queue_size, waitStrategy);
// Mark the consumer started before registering/launching consumers.
this.exeQueue.consumerStarted();
this.registerInnerTransfer(exeQueue);
deserializeThread = new AsyncLoopThread(new DeserializeRunnable(
deserializeQueue, exeQueue));
deserializeTimer = Metrics.registerTimer(idStr, MetricDef.DESERIALIZE_TIME, String.valueOf(taskId), Metrics.MetricType.TASK);
Metrics.registerQueue(idStr, MetricDef.DESERIALIZE_QUEUE, deserializeQueue, String.valueOf(taskId), Metrics.MetricType.TASK);
Metrics.registerQueue(idStr, MetricDef.EXECUTE_QUEUE, exeQueue, String.valueOf(taskId), Metrics.MetricType.TASK);
}
示例14: test_first_client
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Verifies that a client which connects before the server is bound still
 * delivers its message once the server comes up. A lock with two conditions
 * (clientClose / contextClose) orders client close against context
 * termination.
 *
 * @throws InterruptedException if a sleep or await is interrupted
 */
@Test
public void test_first_client() throws InterruptedException {
    System.out.println("!!!!!!!!Start test_first_client !!!!!!!!!!!");
    final String req_msg = setupLargMsg();
    final IContext context = TransportFactory.makeContext(storm_conf);
    new Thread(new Runnable() {
        @Override
        public void run() {
            lock.lock();
            // Connect and send before the server has bound the port.
            IConnection client = context.connect(null, "localhost", port);
            // Removed unused local List<TaskMessage>; sent directly.
            TaskMessage message = new TaskMessage(task, req_msg.getBytes());
            client.send(message);
            System.out.println("!!Client has sent data");
            JStormUtils.sleepMs(1000);
            try {
                // Block until the main thread signals it is done receiving.
                clientClose.await();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            client.close();
            // Tell the main thread it is now safe to terminate the context.
            contextClose.signal();
            lock.unlock();
        }
    }).start();
    IConnection server = null;
    // Deliberately bind late so the client connects first.
    JStormUtils.sleepMs(1000);
    System.out.println("!!server begin start!!!!!");
    server = context.bind(null, port);
    WaitStrategy waitStrategy = (WaitStrategy) Utils
            .newInstance((String) storm_conf
                    .get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
    DisruptorQueue recvQueue = DisruptorQueue.mkInstance(
            "NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy);
    server.registerQueue(recvQueue);
    TaskMessage recv = server.recv(0);
    Assert.assertEquals(req_msg, new String(recv.message()));
    lock.lock();
    clientClose.signal();
    server.close();
    contextClose.await();
    context.term();
    lock.unlock();
    System.out.println("!!!!!!!!!!!!End test_first_client!!!!!!!!!!!!!");
}
示例15: test_multiple_client
import backtype.storm.utils.Utils; //导入方法依赖的package包/类
/**
 * Due to there is only one client to one server in one jvm
 * It can't do this test
 *
 * @throws InterruptedException
 */
public void test_multiple_client() throws InterruptedException {
    System.out.println("!!!!!!!!Start test_multiple_client !!!!!!!!!!!");
    final String req_msg = setupLargMsg();
    final int clientNum = 3;
    // Counts down as the server receives; clients linger until it hits 0.
    final AtomicLong received = new AtomicLong(clientNum);
    for (int i = 0; i < clientNum; i++ ) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                IConnection client = context.connect(null, "localhost", port);
                // Removed unused local List<TaskMessage>; sent directly.
                TaskMessage message = new TaskMessage(task, req_msg.getBytes());
                client.send(message);
                System.out.println("!!Client has sent data");
                // Keep the connection open until all messages are received.
                while(received.get() != 0) {
                    JStormUtils.sleepMs(1000);
                }
                client.close();
            }
        }).start();
    }
    IConnection server = null;
    // Let the clients connect first.
    JStormUtils.sleepMs(1000);
    System.out.println("!!server begin start!!!!!");
    server = context.bind(null, port);
    WaitStrategy waitStrategy = (WaitStrategy) Utils
            .newInstance((String) storm_conf
                    .get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
    DisruptorQueue recvQueue = DisruptorQueue.mkInstance(
            "NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy);
    server.registerQueue(recvQueue);
    for (int i = 0; i < clientNum; i++) {
        TaskMessage recv = server.recv(0);
        Assert.assertEquals(req_msg, new String(recv.message()));
        received.decrementAndGet();
    }
    server.close();
    System.out.println("!!!!!!!!!!!!End test_multiple_client!!!!!!!!!!!!!");
}