本文整理汇总了Java中org.apache.spark.streaming.twitter.TwitterUtils类的典型用法代码示例。如果您正苦于以下问题:Java TwitterUtils类的具体用法?Java TwitterUtils怎么用?Java TwitterUtils使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
TwitterUtils类属于org.apache.spark.streaming.twitter包,在下文中一共展示了TwitterUtils类的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: run
import org.apache.spark.streaming.twitter.TwitterUtils; //导入依赖的package包/类
// Starts a Spark Streaming job that crawls Twitter for "#Car" statuses and
// prints each micro-batch. Spark settings come from the supplied
// CompositeConfiguration; Twitter OAuth credentials are picked up by
// twitter4j from twitter4J.properties on the classpath.
private void run(CompositeConfiguration conf) {
    // Assemble the Spark configuration from externalized settings.
    SparkConf sparkSettings = new SparkConf()
            .setAppName("TwitterSparkCrawler")
            .setMaster(conf.getString("spark.master"))
            .set("spark.serializer", conf.getString("spark.serializer"));
    long batchSeconds = conf.getLong("stream.duration");
    JavaStreamingContext streamingContext =
            new JavaStreamingContext(sparkSettings, Durations.seconds(batchSeconds));

    // Twitter4J: credentials are resolved from twitter4J.properties.
    Configuration twitterConfiguration = ConfigurationContext.getInstance();
    Authorization authorization = AuthorizationFactory.getInstance(twitterConfiguration);

    // Open the filtered tweet stream and print every batch.
    TwitterUtils.createStream(streamingContext, authorization, new String[] { "#Car" }).print();

    // Run until the streaming context is terminated externally.
    streamingContext.start();
    streamingContext.awaitTermination();
}
示例2: getTwitterStream
import org.apache.spark.streaming.twitter.TwitterUtils; //导入依赖的package包/类
/**
 * Opens a receiver-based DStream of Twitter statuses on the given streaming
 * context, authenticated with the supplied OAuth credentials.
 *
 * @param conf              the streaming context to attach the receiver to
 * @param consumerKey       OAuth consumer key
 * @param consumerSecret    OAuth consumer secret
 * @param accessToken       OAuth access token
 * @param accessTokenSecret OAuth access token secret
 * @return a DStream emitting {@code Status} objects from the public stream
 */
protected static JavaReceiverInputDStream<Status> getTwitterStream(JavaStreamingContext spark, String consumerKey, String consumerSecret,
        String accessToken, String accessTokenSecret) {
    // Enable OAuth.
    ConfigurationBuilder cb = new ConfigurationBuilder();
    cb.setDebugEnabled(true)
            .setOAuthConsumerKey(consumerKey).setOAuthConsumerSecret(consumerSecret)
            .setOAuthAccessToken(accessToken).setOAuthAccessTokenSecret(accessTokenSecret);
    // Build the Authorization directly from the configuration instead of
    // instantiating a full Twitter client (TwitterFactory/Twitter) only to
    // immediately discard it after calling getAuthorization().
    return TwitterUtils.createStream(spark, new twitter4j.auth.OAuthAuthorization(cb.build()));
}
示例3: main
import org.apache.spark.streaming.twitter.TwitterUtils; //导入依赖的package包/类
/**
 * Entry point: scores the live Twitter stream for sentiment.
 *
 * Pipeline: raw tweets -> (id, text) pairs -> text cleaning -> stemming ->
 * positive/negative scoring -> join -> scored tuples written as text files.
 * The Spark master is taken from {@code args[0]} when present, otherwise
 * "local[2]" is used. (Removed: a long-dead commented-out Kafka ingestion
 * path and disabled result handlers.)
 *
 * @param args optional; args[0] is the Spark master URL
 */
public static void main(String[] args)
{
    BasicConfigurator.configure();
    SparkConf conf = new SparkConf().setAppName("Twitter Sentiment Analysis");
    if (args.length > 0)
        conf.setMaster(args[0]);
    else
        conf.setMaster("local[2]");
    // 2-second micro-batches.
    JavaStreamingContext ssc = new JavaStreamingContext(conf, new Duration(2000));
    // Map each status to an (id, text) pair; the filter function may yield null
    // for tweets that should be dropped, hence the non-null filter below.
    JavaPairDStream<Long, String> tweets = TwitterUtils.createStream(ssc).mapToPair(
            new TwitterFilterFunction());
    JavaPairDStream<Long, String> filtered = tweets.filter(
        tweet -> tweet != null
    );
    // Clean the text, then stem it before scoring.
    JavaDStream<Tuple2<Long, String>> tweetsFiltered = filtered.map(
            new TextFilterFunction());
    tweetsFiltered = tweetsFiltered.map(
            new StemmingFunction());
    // Score each tweet independently for positive and negative sentiment,
    // then join on the (id, text) key to pair the two scores.
    JavaPairDStream<Tuple2<Long, String>, Float> positiveTweets =
            tweetsFiltered.mapToPair(new PositiveScoreFunction());
    JavaPairDStream<Tuple2<Long, String>, Float> negativeTweets =
            tweetsFiltered.mapToPair(new NegativeScoreFunction());
    JavaPairDStream<Tuple2<Long, String>, Tuple2<Float, Float>> joined =
            positiveTweets.join(negativeTweets);
    // Flatten to (id, text, positiveScore, negativeScore).
    JavaDStream<Tuple4<Long, String, Float, Float>> scoredTweets =
            joined.map(tweet -> new Tuple4<Long, String, Float, Float>(
                    tweet._1()._1(),
                    tweet._1()._2(),
                    tweet._2()._1(),
                    tweet._2()._2()));
    // Attach the final sentiment label and persist each batch as text files.
    JavaDStream<Tuple5<Long, String, Float, Float, String>> result =
            scoredTweets.map(new ScoreTweetsFunction());
    // NOTE(review): output path is hard-coded to a developer's home directory;
    // consider making it configurable.
    result.dstream().saveAsTextFiles("file:///home/mayconbordin/spark/sentiment", "txt");
    ssc.start();
    ssc.awaitTermination();
}
示例4: convert
import org.apache.spark.streaming.twitter.TwitterUtils; //导入依赖的package包/类
// Converts a Pig POLoad operator into a Spark Streaming DStream of Tuples.
// Two paths: (1) a plain HDFS file stream read via PigInputFormat, or
// (2) a live Twitter stream, selected when the load URL contains "/_twitter".
@Override
public JavaDStream<Tuple> convert(List<JavaDStream<Tuple>> predecessorRdds, POLoad poLoad) throws IOException {
// if (predecessors.size()!=0) {
// throw new RuntimeException("Should not have predecessors for Load. Got : "+predecessors);
// }
// Wire the loader into the Hadoop configuration of the underlying SparkContext.
configureLoader(physicalPlan, poLoad, sparkContext.ssc().sc().hadoopConfiguration(),this.pigContext);
// Scan the physical plan's operators for the first hdfs:// load URL and
// check whether it points at the "/_twitter" marker path.
Iterator<PhysicalOperator> top = physicalPlan.iterator();
boolean isTwitter = false;
while(top.hasNext()){
String load = top.next().toString();
if(load.contains("hdfs://")){
String[] splitted = load.split("hdfs://");
String url = "hdfs://" + splitted[1];
if(url.contains("/_twitter")){
isTwitter = true;
}
// Only the first hdfs:// operator found is inspected.
break;
}
}
if(!isTwitter){
// HDFS path: open a file stream keyed by Text with Tuple values, using
// Pig's input format; manifests are needed by the Scala DStream API.
DStream<Tuple2<Text, Tuple>> hadoopRDD= sparkContext.ssc().fileStream(poLoad.getLFile().getFileName(),
SparkUtil.getManifest(Text.class),
SparkUtil.getManifest(Tuple.class),
SparkUtil.getManifest(PigInputFormat.class));
//hadoopRDD.print();
/*
JavaDStream<String> mhadoopRDD = sparkContext.textFileStream(poLoad.getLFile().getFileName());
stringTupleFunction tf = new stringTupleFunction();
JavaDStream<Tuple> lulz = mhadoopRDD.map(tf);
//lulz.print();
return lulz;
*/
// Drop the Text key, keeping only the Tuple values.
JavaDStream<Tuple> hdfsTuple = new JavaDStream<Tuple>(hadoopRDD.map(TO_VALUE_FUNCTION,SparkUtil.getManifest(Tuple.class)),SparkUtil.getManifest(Tuple.class));
hdfsTuple.print();
return hdfsTuple;
}else{
System.out.println("=====Tweeets-Tweets=======");
// SECURITY NOTE(review): OAuth credentials are hard-coded into the source
// (partially masked here). They should be loaded from configuration or the
// environment, never committed to source control. These calls also
// unconditionally overwrite any externally supplied twitter4j properties.
System.setProperty("twitter4j.oauth.consumerKey","mGkece93BmDILkPXXXXX");
System.setProperty("twitter4j.oauth.consumerSecret","K9RhnuOdZJlxDgxKJXXXXXXXXXXXXXXXXXXXXX");
System.setProperty("twitter4j.oauth.accessToken","2493987XXXXXXXXXXXXXXXXXXXXXXXXXFPRs0Ho7");
System.setProperty("twitter4j.oauth.accessTokenSecret","XXXXXXXXXXXXXXXXXXXXikQ0KxfqByVrtzs3jYP");
//sparkContext.checkpoint("/home/akhld/mobi/temp/pig/twitter/");
//JavaDStream<Status> dtweets= sparkContext.twitterStream();
// Open the live Twitter stream (credentials read from the properties above).
JavaDStream<Status> dtweets = TwitterUtils.createStream(sparkContext);
System.out.println("=====Tweeets-Tweets=======");
// Map each Status into a Pig Tuple via the project-defined tweetFunction.
tweetFunction fnc = new tweetFunction();
DStream<Tuple> dstatuses = dtweets.dstream().map(fnc,SparkUtil.getManifest(Tuple.class));
dstatuses.print();
// Re-wrap the Scala DStream into the Java-facing API expected by callers.
JavaDStream<Tuple> tweetTuple = new JavaDStream<Tuple>(dstatuses, SparkUtil.getManifest(Tuple.class));
return tweetTuple;
}
}