This article collects typical usage examples of the Java class org.apache.flume.Context. If you are wondering what Context is for and how to use it, the curated examples below may help.
The Context class belongs to the org.apache.flume package. Fifteen code examples are shown below, ordered by popularity.
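Before diving in, here is a minimal, self-contained sketch of the Context API calls that recur throughout these examples (put, getString, getInteger, getLong, getSubProperties). The keys and values are illustrative placeholders chosen for this sketch, not Flume defaults:

import java.util.Map;
import org.apache.flume.Context;

public class ContextSketch {
  public static void main(String[] args) {
    Context context = new Context();
    // values are stored as raw strings; the typed getters parse them on the way out
    context.put("hostname", "localhost");
    context.put("port", "4141");
    context.put("hdfs.callTimeout", "10000");
    String host = context.getString("hostname");               // null if absent
    Integer port = context.getInteger("port", 4141);           // getter with a default
    Long timeout = context.getLong("hdfs.callTimeout", 10000L);
    // getSubProperties collects every key under a prefix, with the prefix stripped
    Map<String, String> hdfsProps = context.getSubProperties("hdfs.");
    System.out.println(host + ":" + port + " " + timeout + " " + hdfsProps);
  }
}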
Example 1: testSlowness
import org.apache.flume.Context; // import the required package/class
@Test(expected = EventDeliveryException.class)
public void testSlowness() throws Throwable {
  ch = new SlowMemoryChannel(2000);
  Configurables.configure(ch, new Context());
  configureSource();
  props.put("log4j.appender.out2.Timeout", "1000");
  props.put("log4j.appender.out2.layout", "org.apache.log4j.PatternLayout");
  props.put("log4j.appender.out2.layout.ConversionPattern", "%-5p [%t]: %m%n");
  PropertyConfigurator.configure(props);
  Logger logger = LogManager.getLogger(TestLog4jAppender.class);
  Thread.currentThread().setName("Log4jAppenderTest");
  int level = 10000;
  String msg = "This is log message number " + String.valueOf(1);
  try {
    logger.log(Level.toLevel(level), msg);
  } catch (FlumeException ex) {
    throw ex.getCause();
  }
}
Example 2: configure
import org.apache.flume.Context; // import the required package/class
@Override
public void configure(Context context) throws ConfigurationException {
  super.configure(context);
  sinks = Arrays.asList(context.getString(
      BasicConfigurationConstants.CONFIG_SINKS).split("\\s+"));
  Map<String, String> params = context.getSubProperties(
      BasicConfigurationConstants.CONFIG_SINK_PROCESSOR_PREFIX);
  processorContext = new Context();
  processorContext.putAll(params);
  SinkProcessorType spType = getKnownSinkProcessor(processorContext.getString(
      BasicConfigurationConstants.CONFIG_TYPE));
  if (spType != null) {
    processorConf =
        (SinkProcessorConfiguration) ComponentConfigurationFactory.create(
            this.getComponentName() + "-processor",
            spType.toString(),
            ComponentType.SINK_PROCESSOR);
    if (processorConf != null) {
      processorConf.setSinks(new HashSet<String>(sinks));
      processorConf.configure(processorContext);
    }
  }
  setConfigured();
}
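Note the pattern here: getSubProperties strips the sink-processor prefix from the matching keys, and putAll copies the resulting map into a fresh Context, so the sink processor is configured from its own namespaced view of the agent configuration rather than the full key space.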
Example 3: testPreserve
import org.apache.flume.Context; // import the required package/class
/**
 * Ensure host is NOT overwritten when preserveExisting=true.
 */
@Test
public void testPreserve() throws Exception {
  Context ctx = new Context();
  ctx.put("preserveExisting", "true");
  Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
      InterceptorType.HOST.toString());
  builder.configure(ctx);
  Interceptor interceptor = builder.build();
  final String ORIGINAL_HOST = "originalhost";
  Event eventBeforeIntercept = EventBuilder.withBody("test event", Charsets.UTF_8);
  eventBeforeIntercept.getHeaders().put(Constants.HOST, ORIGINAL_HOST);
  Assert.assertEquals(ORIGINAL_HOST,
      eventBeforeIntercept.getHeaders().get(Constants.HOST));
  String expectedHost = ORIGINAL_HOST;
  Event eventAfterIntercept = interceptor.intercept(eventBeforeIntercept);
  String actualHost = eventAfterIntercept.getHeaders().get(Constants.HOST);
  Assert.assertNotNull(actualHost);
  Assert.assertEquals(expectedHost, actualHost);
}
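For contrast, here is a minimal sketch (not taken from the original test class) of the opposite setting: with preserveExisting=false, the host interceptor is expected to replace an existing host header with the agent's own address.

Context ctx = new Context();
ctx.put("preserveExisting", "false"); // assumed default: an existing header gets overwritten
Interceptor.Builder builder = InterceptorBuilderFactory.newInstance(
    InterceptorType.HOST.toString());
builder.configure(ctx);
Interceptor interceptor = builder.build();
Event event = EventBuilder.withBody("test event", Charsets.UTF_8);
event.getHeaders().put(Constants.HOST, "originalhost");
event = interceptor.intercept(event);
// the header should now carry the local address instead of "originalhost"
Assert.assertFalse("originalhost".equals(event.getHeaders().get(Constants.HOST)));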
Example 4: setUp
import org.apache.flume.Context; // import the required package/class
@Before
public void setUp() {
  logger.info("Running setup");
  channel = new MemoryChannel();
  source = new NetcatSource();
  Context context = new Context();
  Configurables.configure(channel, context);
  List<Channel> channels = Lists.newArrayList(channel);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);
  source.setChannelProcessor(new ChannelProcessor(rcs));
}
Example 5: testEmptyChannel
import org.apache.flume.Context; // import the required package/class
@Test
public void testEmptyChannel() throws UnsupportedEncodingException, EventDeliveryException {
  Sink kafkaSink = new KafkaSink();
  Context context = prepareDefaultContext();
  Configurables.configure(kafkaSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  kafkaSink.setChannel(memoryChannel);
  kafkaSink.start();
  Sink.Status status = kafkaSink.process();
  if (status != Sink.Status.BACKOFF) {
    // an empty channel should make the sink back off rather than deliver anything
    fail("Error Occurred");
  }
  assertNull(testUtil.getNextMessageFromConsumer(DEFAULT_TOPIC));
}
Example 6: setUp
import org.apache.flume.Context; // import the required package/class
public void setUp(String compressionType, int compressionLevel) {
  if (sink != null) {
    throw new RuntimeException("double setup");
  }
  sink = new AvroSink();
  channel = new MemoryChannel();
  Context context = new Context();
  context.put("hostname", hostname);
  context.put("port", String.valueOf(port));
  context.put("batch-size", String.valueOf(2));
  context.put("connect-timeout", String.valueOf(2000L));
  context.put("request-timeout", String.valueOf(3000L));
  if (compressionType.equals("deflate")) {
    context.put("compression-type", compressionType);
    context.put("compression-level", Integer.toString(compressionLevel));
  }
  sink.setChannel(channel);
  Configurables.configure(sink, context);
  Configurables.configure(channel, context);
}
Example 7: testPutFilenameHeader
import org.apache.flume.Context; // import the required package/class
@Test
public void testPutFilenameHeader() throws IOException {
  File f1 = new File(tmpDir, "file1");
  Files.write("f1\n", f1, Charsets.UTF_8);
  Context context = new Context();
  context.put(POSITION_FILE, posFilePath);
  context.put(FILE_GROUPS, "fg");
  context.put(FILE_GROUPS_PREFIX + "fg", tmpDir.getAbsolutePath() + "/file.*");
  context.put(FILENAME_HEADER, "true");
  context.put(FILENAME_HEADER_KEY, "path");
  Configurables.configure(source, context);
  source.start();
  source.process();
  Transaction txn = channel.getTransaction();
  txn.begin();
  Event e = channel.take();
  txn.commit();
  txn.close();
  assertNotNull(e.getHeaders().get("path"));
  assertEquals(f1.getAbsolutePath(), e.getHeaders().get("path"));
}
Example 8: configure
import org.apache.flume.Context; // import the required package/class
@Override
public void configure(Context context) {
  configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
  if (configuredMinReplicas != null) {
    Preconditions.checkArgument(configuredMinReplicas >= 0,
        "hdfs.minBlockReplicas must be greater than or equal to 0");
  }
  numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;
  if (numberOfCloseRetries > 1) {
    try {
      timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000L);
    } catch (NumberFormatException e) {
      logger.warn("hdfs.callTimeout can not be parsed to a long: "
          + context.getLong("hdfs.callTimeout"));
    }
    timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000);
  }
}
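To make the arithmetic concrete: with hdfs.closeTries=5 and the default hdfs.callTimeout of 10000 ms, numberOfCloseRetries is 4 and the retry interval becomes max(10000 / 4, 1000) = 2500 ms. The Math.max floor keeps the interval from ever dropping below one second, no matter how many retries are configured.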
Example 9: init
import org.apache.flume.Context; // import the required package/class
private void init(String keepFields) {
  source = new SyslogUDPSource();
  channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  List<Channel> channels = new ArrayList<Channel>();
  channels.add(channel);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);
  source.setChannelProcessor(new ChannelProcessor(rcs));
  Context context = new Context();
  context.put("host", InetAddress.getLoopbackAddress().getHostAddress());
  context.put("port", String.valueOf(TEST_SYSLOG_PORT));
  context.put("keepFields", keepFields);
  source.configure(context);
}
Example 10: testKafkaProperties
import org.apache.flume.Context; // import the required package/class
@Test
public void testKafkaProperties() {
  Context context = new Context();
  context.put(TOPICS, "test1, test2");
  context.put(KAFKA_CONSUMER_PREFIX + ConsumerConfig.GROUP_ID_CONFIG,
      "override.default.group.id");
  context.put(KAFKA_CONSUMER_PREFIX + "fake.property", "kafka.property.value");
  context.put(BOOTSTRAP_SERVERS, "real-bootstrap-servers-list");
  context.put(KAFKA_CONSUMER_PREFIX + "bootstrap.servers", "bad-bootstrap-servers-list");
  KafkaSource source = new KafkaSource();
  source.doConfigure(context);
  Properties kafkaProps = source.getConsumerProps();
  // check that we have defaults set
  assertEquals(String.valueOf(DEFAULT_AUTO_COMMIT),
      kafkaProps.getProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG));
  // check that kafka properties override the default and get the correct name
  assertEquals("override.default.group.id",
      kafkaProps.getProperty(ConsumerConfig.GROUP_ID_CONFIG));
  // check that any kafka property gets in
  assertEquals("kafka.property.value",
      kafkaProps.getProperty("fake.property"));
  // check that the documented property overrides defaults
  assertEquals("real-bootstrap-servers-list",
      kafkaProps.getProperty("bootstrap.servers"));
}
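The assertions pin down the precedence order: hard-coded defaults (DEFAULT_AUTO_COMMIT) apply first, kafka.consumer.*-prefixed properties override them (the prefix is stripped, so group.id lands under its real Kafka name), and a documented key such as BOOTSTRAP_SERVERS wins even over a prefixed bootstrap.servers entry.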
Example 11: testIfDetectMimeTypeRouteToSouthPole
import org.apache.flume.Context; // import the required package/class
/** morphline says route to southpole if it's an avro file, otherwise route to northpole */
@Test
public void testIfDetectMimeTypeRouteToSouthPole() throws Exception {
  Context context = new Context();
  context.put(MorphlineHandlerImpl.MORPHLINE_FILE_PARAM,
      RESOURCES_DIR + "/test-morphlines/ifDetectMimeType.conf");
  context.put(MorphlineHandlerImpl.MORPHLINE_VARIABLE_PARAM + ".MY.MIME_TYPE", "avro/binary");
  Event input = EventBuilder.withBody(Files.toByteArray(
      new File(RESOURCES_DIR + "/test-documents/sample-statuses-20120906-141433.avro")));
  Event actual = build(context).intercept(input);
  Map<String, String> expected = new HashMap<String, String>();
  expected.put(Fields.ATTACHMENT_MIME_TYPE, "avro/binary");
  expected.put("flume.selector.header", "goToSouthPole");
  Event expectedEvent = EventBuilder.withBody(input.getBody(), expected);
  assertEqualsEvent(expectedEvent, actual);
}
Example 12: doTestForbiddenMethods
import org.apache.flume.Context; // import the required package/class
public void doTestForbiddenMethods(int port, String method) throws Exception {
  MonitorService srv = new HTTPMetricsServer();
  Context context = new Context();
  if (port > 1024) {
    context.put(HTTPMetricsServer.CONFIG_PORT, String.valueOf(port));
  } else {
    port = HTTPMetricsServer.DEFAULT_PORT;
  }
  srv.configure(context);
  srv.start();
  Thread.sleep(1000); // give the metrics server a moment to come up
  URL url = new URL("http://0.0.0.0:" + String.valueOf(port) + "/metrics");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod(method);
  Assert.assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
  srv.stop();
}
Example 13: createAvroFile
import org.apache.flume.Context; // import the required package/class
public void createAvroFile(File file, String codec) throws FileNotFoundException, IOException {
  if (file.exists()) {
    FileUtils.forceDelete(file);
  }
  // serialize a few events using the reflection-based avro serializer
  OutputStream out = new FileOutputStream(file);
  Context ctx = new Context();
  if (codec != null) {
    ctx.put("compressionCodec", codec);
  }
  EventSerializer.Builder builder = new FlumeEventAvroEventSerializer.Builder();
  EventSerializer serializer = builder.build(ctx, out);
  serializer.afterCreate();
  serializer.write(EventBuilder.withBody("yo man!", Charsets.UTF_8));
  serializer.write(EventBuilder.withBody("2nd event!", Charsets.UTF_8));
  serializer.write(EventBuilder.withBody("last one!", Charsets.UTF_8));
  serializer.flush();
  serializer.beforeClose();
  out.flush();
  out.close();
}
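The call sequence afterCreate() → write() → flush() → beforeClose() follows the EventSerializer lifecycle, and the compression codec is only applied when compressionCodec is present in the Context, so passing codec as null produces an uncompressed Avro file.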
Example 14: setUp
import org.apache.flume.Context; // import the required package/class
@Before
public void setUp() {
  source = spy(new AbstractPollableSource() {
    @Override
    protected Status doProcess() throws EventDeliveryException {
      return Status.BACKOFF;
    }

    @Override
    protected void doConfigure(Context context) throws FlumeException {
      throw new FlumeException("dummy");
    }

    @Override
    protected void doStart() throws FlumeException {
    }

    @Override
    protected void doStop() throws FlumeException {
    }
  });
}
Example 15: setUp
import org.apache.flume.Context; // import the required package/class
/**
 * We set up the Netcat source and Flume memory channel on localhost.
 *
 * @throws UnknownHostException
 */
@Before
public void setUp() throws UnknownHostException {
  localhost = InetAddress.getByName("127.0.0.1");
  source = new NetcatSource();
  channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  List<Channel> channels = new ArrayList<Channel>();
  channels.add(channel);
  ChannelSelector rcs = new ReplicatingChannelSelector();
  rcs.setChannels(channels);
  source.setChannelProcessor(new ChannelProcessor(rcs));
}