This article collects typical usage examples of the Java class backtype.storm.generated.AuthorizationException. If you are wondering what AuthorizationException is for, or how and where to use it, the curated examples below should help.
The AuthorizationException class belongs to the backtype.storm.generated package. 15 code examples are shown, ordered by popularity.
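Before the individual examples, here is a minimal sketch of the most common place this exception surfaces: submitting a topology to a secured cluster. The topology wiring is elided, and get_msg() is assumed to be the Thrift-generated getter on the exception.

import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.AuthorizationException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.topology.TopologyBuilder;

public class SubmitSketch {
    public static void main(String[] args) {
        Config config = new Config();
        TopologyBuilder builder = new TopologyBuilder();
        // ... wire spouts and bolts here ...
        try {
            StormSubmitter.submitTopology("my-topology", config, builder.createTopology());
        } catch (AuthorizationException e) {
            // Nimbus rejected the caller, e.g. an ACL or Kerberos check failed
            System.err.println("Not authorized to submit: " + e.get_msg());
        } catch (AlreadyAliveException | InvalidTopologyException e) {
            e.printStackTrace();
        }
    }
}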
Example 1: main
import backtype.storm.generated.AuthorizationException; // import the required package/class
public static void main(String[] args) {
    Config config = new Config();
    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);
    LOG.info("Topology name is {}", TOPOLOGY_NAME);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));
    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);
        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
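Example 1 submits only when a topology name is passed as args[0]; with no arguments the method silently does nothing. A common local-mode fallback, sketched here as an assumption rather than part of the original code, would extend the if with an else branch (LocalCluster is used the same way in examples 10 and 11):

if (args != null && args.length > 0) {
    // ... submit to the cluster as above ...
} else {
    // run in-process for development and testing
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, config, topologyBuilder.createTopology());
}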
Example 2: fail
import backtype.storm.generated.AuthorizationException; // import the required package/class
@Override
public void fail(Object msgId) {
    DRPCMessageId did = (DRPCMessageId) msgId;
    DistributedRPCInvocations.Iface client;
    if (_local_drpc_id == null) {
        client = _clients.get(did.index);
    } else {
        client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
    }
    try {
        client.failRequest(did.id);
    } catch (AuthorizationException aze) {
        LOG.error("Not authorized to failRequest from DRPC server", aze);
    } catch (TException e) {
        LOG.error("Failed to fail request", e);
    }
}
Example 3: buildAndSubmit
import backtype.storm.generated.AuthorizationException; // import the required package/class
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
    Config config = new Config();
    config.setDebug(DEBUG);
    config.setNumWorkers(numWorkers);
    config.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // This value (30 secs by default) must be larger than
    // retryDelayMaxMs (60 secs by default) in KafkaSpout.
    config.setMessageTimeoutSecs(600);
    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder, config);
    configureESBolts(builder, config);
    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("LogAnalyzerV1", config, builder.createTopology());
}
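The timeout comment in examples 3 and 4 encodes a real constraint: the topology message timeout must exceed the Kafka spout's maximum retry delay, or tuples can time out while still waiting to be replayed. A hedged sketch of the two settings side by side, assuming the storm-kafka 0.10 SpoutConfig fields (the quorum, topic, zkRoot, and id values are illustrative):

ZkHosts zkHosts = new ZkHosts("zk1:2181"); // illustrative ZooKeeper quorum
SpoutConfig spoutConfig = new SpoutConfig(zkHosts, "ap-log", "/kafka-spout", "log-analyzer");
spoutConfig.retryDelayMaxMs = 60 * 1000;   // maximum backoff between replays (60s)
config.setMessageTimeoutSecs(600);         // must stay comfortably above retryDelayMaxMs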
Example 4: buildAndSubmit
import backtype.storm.generated.AuthorizationException; // import the required package/class
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    final int numWorkers = Integer.valueOf(topologyConfig.getProperty("num.workers"));
    Config config = new Config();
    config.setDebug(DEBUG);
    config.setNumWorkers(numWorkers);
    config.setMaxSpoutPending(1000000);
    // https://github.com/apache/storm/tree/v0.10.0/external/storm-kafka
    // This value (30 secs by default) must be larger than
    // retryDelayMaxMs (60 secs by default) in KafkaSpout.
    config.setMessageTimeoutSecs(600);
    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder, config);
    configureESBolts(builder, config);
    // configureHBaseBolts(builder, config);
    // conf.put(Config.NIMBUS_HOST, "hdp01.localdomain");
    // System.setProperty("storm.jar", "/root/workspace//LearnStorm/target/LearnStorm-0.0.1-SNAPSHOT.jar");
    // System.setProperty("hadoop.home.dir", "/tmp");
    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("ApLogAnalyzerV1", config, builder.createTopology());
}
Example 5: assertStoreHasExactly
import backtype.storm.generated.AuthorizationException; // import the required package/class
public static void assertStoreHasExactly(BlobStore store, Subject who, String... keys)
        throws IOException, KeyNotFoundException, AuthorizationException {
    Set<String> expected = new HashSet<String>(Arrays.asList(keys));
    Set<String> found = new HashSet<String>();
    Iterator<String> c = store.listKeys();
    while (c.hasNext()) {
        String keyName = c.next();
        found.add(keyName);
    }
    Set<String> extra = new HashSet<String>(found);
    extra.removeAll(expected);
    assertTrue("Found extra keys in the blob store " + extra, extra.isEmpty());
    Set<String> missing = new HashSet<String>(expected);
    missing.removeAll(found);
    assertTrue("Found keys missing from the blob store " + missing, missing.isEmpty());
}
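A hedged usage sketch for the helper above, reusing the two-argument createBlob(...) form from example 14 (initLocalFs() is assumed to come from the same test class; the Subject argument is passed as null, which the helper never dereferences):

LocalFsBlobStore store = initLocalFs();
AtomicOutputStream out = store.createBlob("key1", new SettableBlobMeta());
out.write(1);
out.close();
assertStoreHasExactly(store, null, "key1"); // passes: exactly one key, nothing extra or missing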
Example 6: complete
import backtype.storm.generated.AuthorizationException; // import the required package/class
@Override
public void complete(ReturnResultsState state, TridentCollector collector) {
    // only one of the multireducers will receive the tuples
    if (state.returnInfo != null) {
        String result = JSONValue.toJSONString(state.results);
        Map retMap = (Map) JSONValue.parse(state.returnInfo);
        final String host = (String) retMap.get("host");
        final int port = Utils.getInt(retMap.get("port"));
        String id = (String) retMap.get("id");
        DistributedRPCInvocations.Iface client;
        if (local) {
            client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(host);
        } else {
            List server = new ArrayList() {
                {
                    add(host);
                    add(port);
                }
            };
            if (!_clients.containsKey(server)) {
                try {
                    _clients.put(server, new DRPCInvocationsClient(conf, host, port));
                } catch (TTransportException ex) {
                    throw new RuntimeException(ex);
                }
            }
            client = _clients.get(server);
        }
        try {
            client.result(id, result);
        } catch (AuthorizationException aze) {
            collector.reportError(aze);
        } catch (TException e) {
            collector.reportError(e);
        }
    }
}
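A side note on example 6: the double-brace ArrayList exists only to build a (host, port) composite key for the _clients cache. An equivalent and arguably clearer form, relying on the same List equals/hashCode contract, would be:

List<Object> server = Arrays.asList(host, port); // same map-key semantics, no anonymous subclass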
Example 7: fetchRequest
import backtype.storm.generated.AuthorizationException; // import the required package/class
public DRPCRequest fetchRequest(String func) throws TException, AuthorizationException {
    DistributedRPCInvocations.Client c = client.get();
    try {
        if (c == null) {
            throw new TException("Client is not connected...");
        }
        return c.fetchRequest(func);
    } catch (AuthorizationException aze) {
        throw aze;
    } catch (TException e) {
        client.compareAndSet(c, null);
        throw e;
    }
}
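Examples 7 and 8 are the building blocks of a DRPC worker loop. A hedged sketch of how such a loop might use them, written as if inside the same class; the "reach" function name is illustrative, and the empty-request-id check mirrors how Storm's own DRPCSpout detects an idle poll:

while (true) {
    try {
        DRPCRequest req = fetchRequest("reach"); // may return an empty request
        if (req.get_request_id().length() > 0) {
            // process req.get_func_args() and report the result,
            // or call failRequest(req.get_request_id()) on error
        }
    } catch (AuthorizationException aze) {
        LOG.error("Not authorized to fetch DRPC requests", aze);
        break; // permanent failure: retrying will not help
    } catch (TException e) {
        // transient transport error; the cached client was reset, retry later
    }
}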
Example 8: failRequest
import backtype.storm.generated.AuthorizationException; // import the required package/class
public void failRequest(String id) throws TException {
    DistributedRPCInvocations.Client c = client.get();
    try {
        if (c == null) {
            throw new TException("Client is not connected...");
        }
        c.failRequest(id);
    } catch (AuthorizationException aze) {
        throw aze;
    } catch (TException e) {
        client.compareAndSet(c, null);
        throw e;
    }
}
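Note the asymmetry shared by examples 7, 8, and 12: an AuthorizationException is rethrown untouched because it is a permanent failure that reconnecting cannot fix, while any other TException is treated as a transient transport problem, so compareAndSet(c, null) atomically drops the cached client and forces the next caller to reconnect.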
Example 9: buildAndSubmit
import backtype.storm.generated.AuthorizationException; // import the required package/class
private void buildAndSubmit() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    Config config = new Config();
    config.setDebug(DEBUG);
    config.setNumWorkers(1);
    TopologyBuilder builder = new TopologyBuilder();
    configureRandomLogSpout(builder, config);
    configureKafkaBolt(builder, config);
    // LocalCluster cluster = new LocalCluster();
    StormSubmitter.submitTopology("ApLogGeneratorV1", config, builder.createTopology());
}
Example 10: main
import backtype.storm.generated.AuthorizationException; // import the required package/class
/**
*
* This example is very dangerous to the consistency of your bank accounts. Guess why, or read the
* tutorial.
*
* @throws AlreadyAliveException
* @throws InvalidTopologyException
* @throws AuthorizationException
*/
public static void main(String... args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    // starting to build topology
    TopologyBuilder builder = new TopologyBuilder();
    // Kafka as a spout
    builder.setSpout(IDs.kafkaSpout, new KafkaSpoutBuilder(Conf.zookeeper, Conf.inputTopic).build());
    // bolt to println data
    builder.setBolt(IDs.printlnBolt, new PrintlnBolt()).shuffleGrouping(IDs.kafkaSpout);
    // bolt to perform transactions and simulate bank accounts
    builder.setBolt(IDs.userAccountBolt, new BankAccountBolt()).shuffleGrouping(IDs.kafkaSpout);
    // Kafka as a bolt -- sending messages to the output topic
    KafkaBolt<Object, Object> bolt = new KafkaBolt<>().withTopicSelector(new DefaultTopicSelector(Conf.outputTopic))
            .withTupleToKafkaMapper(new TransactionTupleToKafkaMapper());
    builder.setBolt(IDs.kafkaBolt, bolt).shuffleGrouping(IDs.userAccountBolt);
    // submit topology to local cluster
    new LocalCluster().submitTopology(IDs.kafkaAccountsTopology, topologyConfig(), builder.createTopology());
    // wait a while, then simulate random transaction stream to Kafka
    Sleep.seconds(5);
    KafkaProduceExample.start(2000);
}
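Examples 10 and 11 declare AuthorizationException even though they submit to a LocalCluster, which does not normally enforce authorization; the throws clause matters once the same code is pointed at a real cluster. A sketch of the cluster-mode variant of the submission line, under that assumption:

// cluster mode: this is where AuthorizationException can actually be thrown
StormSubmitter.submitTopology(IDs.kafkaAccountsTopology, topologyConfig(), builder.createTopology());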
Example 11: main
import backtype.storm.generated.AuthorizationException; // import the required package/class
public static void main(String... args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    // starting to build topology
    TridentTopology topology = new TridentTopology();
    // Kafka as an opaque trident spout
    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpoutBuilder(Conf.zookeeper, Conf.inputTopic).build();
    Stream stream = topology.newStream(kafkaSpout, spout);
    // mapping transaction messages to pairs: (person, amount)
    Stream atomicTransactions = stream.each(strF, Functions.mapToPersonAmount, personAmountF);
    // bolt to println data
    atomicTransactions.each(personAmountF, Functions.printlnFunction, emptyF);
    // aggregating transactions and mapping to Kafka messages
    Stream transactionsGrouped = atomicTransactions.groupBy(personF)
            .persistentAggregate(new MemoryMapState.Factory(), amountF, new Sum(), sumF).newValuesStream()
            .each(personSumF, Functions.mapToKafkaMessage, keyMessageF);
    // Kafka as a bolt -- producing to outputTopic
    TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory() //
            .withKafkaTopicSelector(new DefaultTopicSelector(Conf.outputTopic)) //
            .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>(key, message));
    transactionsGrouped.partitionPersist(stateFactory, keyMessageF, new TridentKafkaUpdater(), emptyF);
    // submitting topology to local cluster
    new LocalCluster().submitTopology(kafkaAccountsTopology, topologyConfig(), topology.build());
    // waiting a while, then running Kafka producer
    Sleep.seconds(5);
    KafkaProduceExample.start(20);
}
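For orientation, the Trident pipeline in example 11 reads: Kafka opaque spout → map each message to a (person, amount) pair → print each pair → group by person and keep a running Sum in a MemoryMapState → map every new (person, sum) value to a Kafka key/message pair → partitionPersist the pairs back to Kafka through TridentKafkaStateFactory.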
Example 12: fetchRequest
import backtype.storm.generated.AuthorizationException; // import the required package/class
public DRPCRequest fetchRequest(String func) throws TException {
    DistributedRPCInvocations.Client c = client.get();
    try {
        if (c == null) {
            throw new TException("Client is not connected...");
        }
        return c.fetchRequest(func);
    } catch (AuthorizationException aze) {
        throw aze;
    } catch (TException e) {
        client.compareAndSet(c, null);
        throw e;
    }
}
Example 13: readInt
import backtype.storm.generated.AuthorizationException; // import the required package/class
public static int readInt(BlobStore store, Subject who, String key)
        throws IOException, KeyNotFoundException, AuthorizationException {
    InputStream in = store.getBlob(key);
    try {
        return in.read();
    } finally {
        in.close();
    }
}
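A hedged sketch pairing readInt(...) with the write pattern from example 14 (initLocalFs() is assumed to come from the same test class; note that in.read() returns a single byte, so only values 0-255 round-trip):

LocalFsBlobStore store = initLocalFs();
AtomicOutputStream out = store.createBlob("counter", new SettableBlobMeta());
out.write(42);
out.close();
assertEquals(42, readInt(store, null, "counter"));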
Example 14: testGetFileLength
import backtype.storm.generated.AuthorizationException; // import the required package/class
@Test
public void testGetFileLength()
        throws AuthorizationException, KeyNotFoundException, KeyAlreadyExistsException, IOException {
    LocalFsBlobStore store = initLocalFs();
    AtomicOutputStream out = store.createBlob("test", new SettableBlobMeta());
    out.write(1);
    out.close();
    assertEquals(1, store.getBlob("test").getFileLength());
}
Example 15: execute
import backtype.storm.generated.AuthorizationException; // import the required package/class
public String execute(String func, String args) throws TException, DRPCExecutionException, AuthorizationException {
    return client.execute(func, args);
}