This article collects typical usage examples of the Java method backtype.storm.LocalDRPC.execute. If you are unsure what LocalDRPC.execute does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of the containing class, backtype.storm.LocalDRPC.
The following shows 9 code examples of the LocalDRPC.execute method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
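Before the examples, here is a minimal sketch of the LocalDRPC.execute lifecycle shared by the samples below. The topology name "demo", the DRPC function name "my-function", and the buildTopology(drpc) helper are hypothetical placeholders for illustration, not part of any specific example:

LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
// buildTopology(drpc) is a hypothetical helper that wires the DRPC stream into the topology
cluster.submitTopology("demo", new Config(), buildTopology(drpc));
// execute blocks until the DRPC request has been fully processed, then returns the result as a String
String result = drpc.execute("my-function", "some argument");
System.out.println(result);
// tear down: kill the topology, then shut down the cluster and the in-process DRPC server
cluster.killTopology("demo");
cluster.shutdown();
drpc.shutdown();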
Example 1: main
import backtype.storm.LocalDRPC; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setDebug(false);

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("internet-radio-play-stats", config, TopologyBuilder.buildLocal(drpc));

    // let the topology process the stream for a while before querying
    Utils.sleep(ONE_MINUTE);

    String result = drpc.execute("count-request-by-tag", "Classic Rock,Punk,Post Punk");
    System.out.println("RESULTS");
    System.out.println("==========================================================================");
    System.out.println(result);
    System.out.println("==========================================================================");

    cluster.killTopology("internet-radio-play-stats");
    cluster.shutdown();
    drpc.shutdown();
}
Example 2: main
import backtype.storm.LocalDRPC; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    // alternative state factories:
    // StateFactory stateFactory = MongoState.nonTransactional("mongodb://127.0.0.1/test.words", Word.class);
    // StateFactory stateFactory = MongoState.opaque("mongodb://127.0.0.1/test.words", Word.class);
    StateFactory stateFactory = MongoState.transactional("mongodb://127.0.0.1/test.words", Word.class);

    Config conf = new Config();
    conf.setMaxSpoutPending(20);

    if (args.length == 0) {
        // local mode: run the topology in-process and query it via LocalDRPC
        LocalDRPC drpc = new LocalDRPC();
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(drpc, stateFactory));

        for (int i = 0; i < 100; i++) {
            long start = System.nanoTime();
            String result = drpc.execute("words", "cat the dog jumped");
            long elapsedNanos = System.nanoTime() - start;
            System.out.println("DRPC RESULT: " + result + " took: " + elapsedNanos / 1000000 + " ms");
            Thread.sleep(100);
        }
        cluster.shutdown();
    } else {
        // cluster mode: submit to a real cluster (no LocalDRPC available there)
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, buildTopology(null, stateFactory));
    }
}
Example 3: macVendorTest
import backtype.storm.LocalDRPC; // import the package/class the method depends on
@Test
public void macVendorTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());
    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        // strip the DRPC result wrapper, keeping only the JSON object
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
Example 4: nonTimestampTest
import backtype.storm.LocalDRPC; // import the package/class the method depends on
@Test
public void nonTimestampTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());
    Scanner flows = new Scanner(fileFlow);

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        // a timestamp field must have been added to flows that lacked one
        Assert.assertTrue(stormFlow.contains("timestamp"));
    }
}
Example 5: analizeHttpUrlTest
import backtype.storm.LocalDRPC; // import the package/class the method depends on
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());
    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        // strip the DRPC result wrapper, keeping only the JSON object
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
Example 6: kickStart
import backtype.storm.LocalDRPC; // import the package/class the method depends on
public void kickStart() {
    localDRPC = new LocalDRPC();
    localCluster = new LocalCluster();

    Config conf = new Config();
    //conf.setDebug(true);
    conf.setMaxSpoutPending(20);
    localCluster.submitTopology("CounterTopo", conf, buildTopology());

    DateTime dateTime = new DateTime();
    System.out.println("Submit topology complete, waiting for start");
    Utils.sleep(6 * 1000);
    dateTime.minusSeconds(1); // note: Joda-Time DateTime is immutable, so this discarded result has no effect

    int groupById = 10;
    for (int i = 0; i < 20; i++) {
        System.out.println("Querying with key " + groupById);
        String rtnVals = localDRPC.execute("CountItemStream", String.valueOf(groupById));
        System.out.println("Returned str is " + rtnVals);
        try {
            // the DRPC result is a Base64-encoded serialized HyperLogLog sketch
            HyperLogLog hll = HyperLogLog.Builder.build(Base64.decodeBase64(rtnVals));
            System.out.println("unique items for string " + hll.cardinality());
        } catch (IOException e) {
            e.printStackTrace();
        }
        Utils.sleep(5 * 1000);
    }
    shutdown();
}
Example 7: geoIpTest
import backtype.storm.LocalDRPC; // import the package/class the method depends on
@Test
public void geoIpTest() throws FileNotFoundException {
    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/geoIpFlows.json").getPath());
    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    // point the GeoIP lookups at the test databases on the classpath
    GeoIpFunction.CITY_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/city.dat").getPath();
    GeoIpFunction.CITY_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/cityv6.dat").getPath();
    GeoIpFunction.ASN_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asn.dat").getPath();
    GeoIpFunction.ASN_V6_DB_PATH = Thread.currentThread().getContextClassLoader().getResource("db/asnv6.dat").getPath();

    List<String> fieldsFlow = new ArrayList<String>();
    fieldsFlow.add("flows");
    fieldsFlow.add("geoIPMap");

    LocalDRPC drpc = new LocalDRPC();
    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new GeoIpFunction(), new Fields("geoIPMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        // strip the DRPC result wrapper, keeping only the JSON object
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        //System.out.println(stormFlow);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
Example 8: main
import backtype.storm.LocalDRPC; // import the package/class the method depends on
public static void main(String[] args) {
    // first some global (topology) configuration
    StormCVConfig conf = new StormCVConfig();
    conf.put(StormCVConfig.STORMCV_OPENCV_LIB, "mac64_opencv_java248.dylib");
    conf.setNumWorkers(5); // number of workers in the topology
    conf.put(StormCVConfig.STORMCV_FRAME_ENCODING, Frame.JPG_IMAGE); // frames will be encoded as JPG throughout the topology (JPG is the default when not explicitly set)
    conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true); // true if Storm should time out messages
    conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 10); // maximum time given to the topology to fully process a message emitted by a spout (default = 30)
    conf.put(StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT, false); // indicates whether the spout must be fault tolerant; i.e. whether spouts replay tuples on fail
    conf.put(StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC, 30); // TTL (seconds) for all elements in all caches throughout the topology (avoids memory overload)
    conf.put(Config.NIMBUS_TASK_LAUNCH_SECS, 30);

    String userDir = System.getProperty("user.dir").replaceAll("\\\\", "/");
    List<String> prototypes = new ArrayList<String>();
    prototypes.add("file://" + userDir + "/resources/data");

    // create a linear DRPC builder called 'match'
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("match");

    // add a FeatureMatchRequestOp that receives DRPC requests
    builder.addBolt(new RequestBolt(new FeatureMatchRequestOp()), 1);

    // add a bolt that performs SIFT extraction (as used in other examples)
    builder.addBolt(new SingleInputBolt(
            new FeatureExtractionOp("sift", FeatureDetector.SIFT, DescriptorExtractor.SIFT).outputFrame(false)
    ), 1).shuffleGrouping();

    // add two bolts that match queries against the prototypes loaded during prepare.
    // The prototypes are divided over the available tasks, so each query has to be sent to all tasks (hence allGrouping).
    // The matcher only reports a match if at least 1 strong match has been found (can be set to 0).
    builder.addBolt(new SingleInputBolt(new PartialMatcher(prototypes, 0, 0.5f)), 2).allGrouping();

    // add a bolt that aggregates all the results it gets from the two matchers
    builder.addBolt(new BatchBolt(new FeatureMatchResultOp(true)), 1).fieldsGrouping(new Fields(CVParticleSerializer.REQUESTID));

    // create a local DRPC server and cluster, and deploy the DRPC topology on the cluster
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));

    // use all face images as queries (the same images the matcher loaded)
    File queryDir = new File(userDir + "/resources/data/");
    for (String img : queryDir.list()) {
        if (!img.endsWith(".jpg")) continue; // skip non-image files
        // execute the DRPC request with the image as argument; note that execute blocks
        String matchesJson = drpc.execute("match", "file://" + userDir + "/resources/data/" + img);
        System.out.println(img + " : " + matchesJson);
    }
    cluster.shutdown();
    drpc.shutdown();
}
Example 9: main
import backtype.storm.LocalDRPC; // import the package/class the method depends on
public static void main(String... args) throws AlreadyAliveException, InvalidTopologyException, TException,
        DRPCExecutionException {
    String topologyName = "trident-word-count";

    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 100,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"),
            new Values("how many apples can you eat"));
    spout.setCycle(true);

    LocalDRPC localDrpc = new LocalDRPC();
    TridentTopology tridentTopology = new TridentTopology();

    // stream 1: continuously count words and keep the counts in memory-backed state
    TridentState wordCounts = tridentTopology.newStream("spout1", spout)
            .parallelismHint(16)
            .each(new Fields("sentence"), new SplitFunction(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
            .parallelismHint(2);

    // stream 2: answer DRPC queries by looking up each word in the state and summing the counts
    Stream queryStream = tridentTopology
            .newDRPCStream("words", localDrpc)
            .each(new Fields("args"), new SplitFunction(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));

    StormTopology stormTopology = tridentTopology.build();

    Config topologyConf = new Config();
    topologyConf.setMaxSpoutPending(50);
    topologyConf.setDebug(false);
    topologyConf.setNumWorkers(2);

    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology(topologyName, topologyConf, stormTopology);
    Utils.sleep(5000);

    // against a real cluster the query would go through a DRPC client instead:
    // DRPCClient client = new DRPCClient("localhost", 3772);
    // String result = client.execute("words", "cat dog the man");
    // System.out.println(result);
    String result = localDrpc.execute("words", "cat dog the man");
    System.out.println(result);

    Utils.sleep(5000);
    localCluster.killTopology(topologyName);
    localCluster.shutdown();
}
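The commented-out lines in Example 9 hint at the remote counterpart: when the topology runs on a real cluster, the same query goes through backtype.storm.utils.DRPCClient against a DRPC server instead of the in-process LocalDRPC. A minimal sketch, assuming a DRPC server listening on localhost at the default port 3772:

// connects to a remote DRPC server; function name and argument match the LocalDRPC call above
DRPCClient client = new DRPCClient("localhost", 3772);
// execute blocks until the result is ready, just like LocalDRPC.execute
String result = client.execute("words", "cat dog the man");
System.out.println(result);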