This article collects typical usage examples of the Java method org.apache.flume.Transaction.close. If you have been wondering what exactly Transaction.close does, how to use it, or where to find working examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.flume.Transaction.
Below are 15 code examples of Transaction.close, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
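Before the examples, here is a minimal sketch of the canonical Flume transaction lifecycle that most of the samples below follow: begin, put or take, commit, roll back on failure, and always close in a finally block. This is a sketch, not a definitive implementation; the channel and event variables are assumed to be supplied by the surrounding code.

import org.apache.flume.Channel;
import org.apache.flume.Event;
import org.apache.flume.Transaction;

Transaction txn = channel.getTransaction();
txn.begin();
try {
  channel.put(event);  // or channel.take() on the consuming side
  txn.commit();
} catch (Throwable t) {
  txn.rollback();      // discard the uncommitted puts/takes
  throw new RuntimeException("Transaction failed", t);
} finally {
  txn.close();         // always release the transaction, committed or not
}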
Example 1: testPutForceCheckpointCommitReplay
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testPutForceCheckpointCommitReplay() throws Exception {
  Map<String, String> overrides = Maps.newHashMap();
  overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(2));
  overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY, String.valueOf(2));
  overrides.put(FileChannelConfiguration.CHECKPOINT_INTERVAL, "10000");
  FileChannel channel = createFileChannel(overrides);
  channel.start();
  // force a checkpoint by committing a transaction
  Transaction tx = channel.getTransaction();
  Set<String> in = putWithoutCommit(channel, tx, "putWithoutCommit", 1);
  forceCheckpoint(channel);
  tx.commit();
  tx.close();
  channel.stop();
  channel = createFileChannel(overrides);
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Set<String> out = takeEvents(channel, 1);
  compareInputAndOut(in, out);
  channel.stop();
}
Example 2: testRollbackAfterNoPutTake
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testRollbackAfterNoPutTake() throws Exception {
  channel.start();
  Assert.assertTrue(channel.isOpen());
  Transaction transaction;
  transaction = channel.getTransaction();
  transaction.begin();
  transaction.rollback();
  transaction.close();
  // ensure we can reopen the log with no error
  channel.stop();
  channel = createFileChannel();
  channel.start();
  Assert.assertTrue(channel.isOpen());
  transaction = channel.getTransaction();
  transaction.begin();
  Assert.assertNull(channel.take());
  transaction.commit();
  transaction.close();
}
Example 3: testCapacityOverload
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test(expected = ChannelException.class)
public void testCapacityOverload() {
  Context context = new Context();
  Map<String, String> parms = new HashMap<String, String>();
  parms.put("capacity", "5");
  parms.put("transactionCapacity", "3");
  context.putAll(parms);
  Configurables.configure(channel, context);
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  transaction.commit();
  transaction.close();
  transaction = channel.getTransaction();
  transaction.begin();
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  channel.put(EventBuilder.withBody("test".getBytes()));
  // this should kill it
  transaction.commit();
  Assert.fail();
}
Example 4: testRollbackSimulatedCrash
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testRollbackSimulatedCrash() throws Exception {
  channel.start();
  Assert.assertTrue(channel.isOpen());
  int numEvents = 50;
  Set<String> in = putEvents(channel, "rollback", 1, numEvents);
  Transaction transaction;
  // put an item we will roll back
  transaction = channel.getTransaction();
  transaction.begin();
  channel.put(EventBuilder.withBody("rolled back".getBytes(Charsets.UTF_8)));
  transaction.rollback();
  transaction.close();
  // simulate a crash
  channel.stop();
  channel = createFileChannel();
  channel.start();
  Assert.assertTrue(channel.isOpen());
  // we should not get the rolled-back item
  Set<String> out = takeEvents(channel, 1, numEvents);
  compareInputAndOut(in, out);
}
Example 5: doTestFixInvalidEvents
import org.apache.flume.Transaction; // import the package/class this method depends on
public void doTestFixInvalidEvents(boolean withCheckpoint, String eventHandler) throws Exception {
  FileChannelIntegrityTool tool = new FileChannelIntegrityTool();
  tool.run(new String[] {"-l", dataDir.toString(), "-e", eventHandler, "-DvalidatorValue=0"});
  FileChannel channel = new FileChannel();
  channel.setName("channel");
  if (withCheckpoint) {
    File[] cpFiles = origCheckpointDir.listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
        if (name.contains("lock") || name.contains("queueset")) {
          return false;
        }
        return true;
      }
    });
    for (File cpFile : cpFiles) {
      Serialization.copyFile(cpFile, new File(checkpointDir, cpFile.getName()));
    }
  } else {
    FileUtils.deleteDirectory(checkpointDir);
    Assert.assertTrue(checkpointDir.mkdirs());
  }
  ctx.put(FileChannelConfiguration.CHECKPOINT_DIR, checkpointDir.toString());
  ctx.put(FileChannelConfiguration.DATA_DIRS, dataDir.toString());
  channel.configure(ctx);
  channel.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  int i = 0;
  while (channel.take() != null) {
    i++;
  }
  tx.commit();
  tx.close();
  channel.stop();
  Assert.assertTrue(invalidEvent != 0);
  Assert.assertEquals(25 - invalidEvent, i);
}
Example 6: testOneEvent
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testOneEvent() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = EventBuilder.withBody(Bytes.toBytes(valBase));
  channel.put(e);
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  sink.stop();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 1);
  byte[] out = results[0];
  Assert.assertArrayEquals(e.getBody(), out);
  out = results[1];
  Assert.assertArrayEquals(Longs.toByteArray(1), out);
}
Example 7: commitAndCloseTransaction
import org.apache.flume.Transaction; // import the package/class this method depends on
private void commitAndCloseTransaction(Transaction txn) {
  try {
    txn.commit();
  } catch (Throwable t) {
    logger.error("Transaction commit failed, rolling back", t);
    txn.rollback();
  } finally {
    txn.close();
  }
}
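Note the design of this helper: commit is attempted first, any Throwable falls back to rollback, and close() sits in the finally block so the transaction is released even if the rollback itself throws. A hypothetical call site (channel, event, and the helper's owning class are assumed here) might look like:

Transaction txn = channel.getTransaction();
txn.begin();
channel.put(event);
commitAndCloseTransaction(txn); // commits, or rolls back on failure, then closes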
Example 8: testSingleWriterUseHeaders
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testSingleWriterUseHeaders() throws Exception {
  String[] colNames = {COL1, COL2};
  String PART1_NAME = "country";
  String PART2_NAME = "hour";
  String[] partNames = {PART1_NAME, PART2_NAME};
  List<String> partitionVals = null;
  String PART1_VALUE = "%{" + PART1_NAME + "}";
  String PART2_VALUE = "%y-%m-%d-%k";
  partitionVals = new ArrayList<String>(2);
  partitionVals.add(PART1_VALUE);
  partitionVals.add(PART2_VALUE);

  String tblName = "hourlydata";
  TestUtil.dropDB(conf, dbName2);
  String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
  dbLocation = dbLocation.replaceAll("\\\\", "/"); // for Windows paths
  TestUtil.createDbAndTable(driver, dbName2, tblName, partitionVals, colNames,
      colTypes, partNames, dbLocation);

  int totalRecords = 4;
  int batchSize = 2;
  int batchCount = totalRecords / batchSize;

  Context context = new Context();
  context.put("hive.metastore", metaStoreURI);
  context.put("hive.database", dbName2);
  context.put("hive.table", tblName);
  context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
  context.put("autoCreatePartitions", "true");
  context.put("useLocalTimeStamp", "false");
  context.put("batchSize", "" + batchSize);
  context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
  context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
  context.put("heartBeatInterval", "0");

  Channel channel = startSink(sink, context);
  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();

  // push events in two batches, two per batch; each batch is a different hour
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int j = 1; j <= totalRecords; j++) {
    Event event = new SimpleEvent();
    String body = j + ",blah,This is a log message,other stuff";
    event.setBody(body.getBytes());
    eventDate.clear();
    eventDate.set(2014, 03, 03, j % batchCount, 1); // yy mm dd hh mm
    event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
    event.getHeaders().put(PART1_NAME, "Asia");
    bodies.add(body);
    channel.put(event);
  }
  txn.commit();
  txn.close();

  // execute the sink to process the events
  checkRecordCountInTable(0, dbName2, tblName);
  for (int i = 0; i < batchCount; i++) {
    sink.process();
  }
  checkRecordCountInTable(totalRecords, dbName2, tblName);
  sink.stop();

  // verify counters
  SinkCounter counter = sink.getCounter();
  Assert.assertEquals(2, counter.getConnectionCreatedCount());
  Assert.assertEquals(2, counter.getConnectionClosedCount());
  Assert.assertEquals(2, counter.getBatchCompleteCount());
  Assert.assertEquals(0, counter.getBatchEmptyCount());
  Assert.assertEquals(0, counter.getConnectionFailedCount());
  Assert.assertEquals(4, counter.getEventDrainAttemptCount());
  Assert.assertEquals(4, counter.getEventDrainSuccessCount());
}
Example 9: testRollback7
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testRollback7() throws Exception {
  final Transaction transaction = channel.getTransaction();
  transaction.begin();
  testExceptions(new Runnable() {
    @Override
    public void run() {
      transaction.commit();
    }
  });
  transaction.rollback();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      transaction.rollback();
    }
  });
  transaction.close();
  testIllegalState(new Runnable() {
    @Override
    public void run() {
      transaction.rollback();
    }
  });
}
Example 10: testIRCSinkMissingSplitLineProperty
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testIRCSinkMissingSplitLineProperty() {
  Sink ircSink = new IRCSink();
  ircSink.setName("IRC Sink - " + UUID.randomUUID().toString());
  Context context = new Context();
  context.put("hostname", "localhost");
  context.put("port", String.valueOf(ircServerPort));
  context.put("nick", "flume");
  context.put("password", "flume");
  context.put("user", "flume");
  context.put("name", "flume-dev");
  context.put("chan", "flume");
  context.put("splitchars", "false");
  Configurables.configure(ircSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  ircSink.setChannel(memoryChannel);
  ircSink.start();
  Transaction txn = memoryChannel.getTransaction();
  txn.begin();
  Event event = EventBuilder.withBody("Dummy Event".getBytes());
  memoryChannel.put(event);
  txn.commit();
  txn.close();
  try {
    Sink.Status status = ircSink.process();
    if (status == Sink.Status.BACKOFF) {
      fail("Error occurred");
    }
  } catch (EventDeliveryException eDelExcp) {
    // noop
  }
}
Example 11: testOneEventWithDefaults
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testOneEventWithDefaults() throws Exception {
  Map<String, String> ctxMap = new HashMap<String, String>();
  ctxMap.put("table", tableName);
  ctxMap.put("columnFamily", columnFamily);
  ctxMap.put("serializer", "org.apache.flume.sink.hbase.SimpleAsyncHbaseEventSerializer");
  ctxMap.put("keep-alive", "0");
  ctxMap.put("timeout", "10000");
  Context tmpctx = new Context();
  tmpctx.putAll(ctxMap);

  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, tmpctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, tmpctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = EventBuilder.withBody(Bytes.toBytes(valBase));
  channel.put(e);
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  sink.stop();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 1);
  byte[] out = results[0];
  Assert.assertArrayEquals(e.getBody(), out);
  out = results[1];
  Assert.assertArrayEquals(Longs.toByteArray(1), out);
}
Example 12: createDataFiles
import org.apache.flume.Transaction; // import the package/class this method depends on
private static void createDataFiles() throws Exception {
  final byte[] eventData = new byte[2000];
  for (int i = 0; i < 2000; i++) {
    eventData[i] = 1;
  }
  WriteOrderOracle.setSeed(System.currentTimeMillis());
  event = EventBuilder.withBody(eventData);
  baseDir = Files.createTempDir();
  if (baseDir.exists()) {
    FileUtils.deleteDirectory(baseDir);
  }
  baseDir = Files.createTempDir();
  origCheckpointDir = new File(baseDir, "chkpt");
  Assert.assertTrue(origCheckpointDir.mkdirs() || origCheckpointDir.isDirectory());
  origDataDir = new File(baseDir, "data");
  Assert.assertTrue(origDataDir.mkdirs() || origDataDir.isDirectory());
  FileChannel channel = new FileChannel();
  channel.setName("channel");
  ctx = new Context();
  ctx.put(FileChannelConfiguration.CAPACITY, "1000");
  ctx.put(FileChannelConfiguration.CHECKPOINT_DIR, origCheckpointDir.toString());
  ctx.put(FileChannelConfiguration.DATA_DIRS, origDataDir.toString());
  ctx.put(FileChannelConfiguration.MAX_FILE_SIZE, "10000");
  ctx.put(FileChannelConfiguration.TRANSACTION_CAPACITY, "100");
  channel.configure(ctx);
  channel.start();
  for (int j = 0; j < 5; j++) {
    Transaction tx = channel.getTransaction();
    tx.begin();
    for (int i = 0; i < 5; i++) {
      if (i % 3 == 0) {
        event.getBody()[0] = 0;
        invalidEvent++;
      } else {
        event.getBody()[0] = 1;
      }
      channel.put(event);
    }
    tx.commit();
    tx.close();
  }
  Log log = field("log").ofType(Log.class).in(channel).get();
  Assert.assertTrue("writeCheckpoint returned false",
      method("writeCheckpoint").withReturnType(Boolean.class)
          .withParameterTypes(Boolean.class)
          .in(log)
          .invoke(true));
  channel.stop();
}
Example 13: process
import org.apache.flume.Transaction; // import the package/class this method depends on
@Override
public Status process() throws EventDeliveryException {
  Status status = Status.READY;
  Channel channel = getChannel();
  Transaction transaction = channel.getTransaction();
  if (resetConnectionFlag.get()) {
    resetConnection();
    // If the time to reset is long and the timeout is short,
    // this may cancel the next reset request.
    // This should, however, not be an issue.
    resetConnectionFlag.set(false);
  }
  try {
    transaction.begin();
    verifyConnection();
    List<Event> batch = Lists.newLinkedList();
    for (int i = 0; i < client.getBatchSize(); i++) {
      Event event = channel.take();
      if (event == null) {
        break;
      }
      batch.add(event);
    }
    int size = batch.size();
    int batchSize = client.getBatchSize();
    if (size == 0) {
      sinkCounter.incrementBatchEmptyCount();
      status = Status.BACKOFF;
    } else {
      if (size < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }
      sinkCounter.addToEventDrainAttemptCount(size);
      client.appendBatch(batch);
    }
    transaction.commit();
    sinkCounter.addToEventDrainSuccessCount(size);
  } catch (Throwable t) {
    transaction.rollback();
    if (t instanceof Error) {
      throw (Error) t;
    } else if (t instanceof ChannelException) {
      logger.error("Rpc Sink " + getName() + ": Unable to get event from" +
          " channel " + channel.getName() + ". Exception follows.", t);
      status = Status.BACKOFF;
    } else {
      destroyConnection();
      throw new EventDeliveryException("Failed to send events", t);
    }
  } finally {
    transaction.close();
  }
  return status;
}
Example 14: testSingleWriterSimpleUnPartitionedTable
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testSingleWriterSimpleUnPartitionedTable() throws Exception {
  TestUtil.dropDB(conf, dbName2);
  String dbLocation = dbFolder.newFolder(dbName2).getCanonicalPath() + ".db";
  dbLocation = dbLocation.replaceAll("\\\\", "/"); // for Windows paths
  TestUtil.createDbAndTable(driver, dbName2, tblName2, null, colNames2, colTypes2,
      null, dbLocation);
  try {
    int totalRecords = 4;
    int batchSize = 2;
    int batchCount = totalRecords / batchSize;

    Context context = new Context();
    context.put("hive.metastore", metaStoreURI);
    context.put("hive.database", dbName2);
    context.put("hive.table", tblName2);
    context.put("autoCreatePartitions", "false");
    context.put("batchSize", "" + batchSize);
    context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
    context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
    context.put("heartBeatInterval", "0");

    Channel channel = startSink(sink, context);
    List<String> bodies = Lists.newArrayList();

    // push the events; the sink drains them in two batches
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int j = 1; j <= totalRecords; j++) {
      Event event = new SimpleEvent();
      String body = j + ",blah,This is a log message,other stuff";
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
    }
    txn.commit();
    txn.close();

    checkRecordCountInTable(0, dbName2, tblName2);
    for (int i = 0; i < batchCount; i++) {
      sink.process();
    }
    // check before & after stopping the sink
    checkRecordCountInTable(totalRecords, dbName2, tblName2);
    sink.stop();
    checkRecordCountInTable(totalRecords, dbName2, tblName2);
  } finally {
    TestUtil.dropDB(conf, dbName2);
  }
}
Example 15: testSingleWriterSimplePartitionedTable
import org.apache.flume.Transaction; // import the package/class this method depends on
@Test
public void testSingleWriterSimplePartitionedTable()
    throws EventDeliveryException, IOException, CommandNeedRetryException {
  int totalRecords = 4;
  int batchSize = 2;
  int batchCount = totalRecords / batchSize;

  Context context = new Context();
  context.put("hive.metastore", metaStoreURI);
  context.put("hive.database", dbName);
  context.put("hive.table", tblName);
  context.put("hive.partition", PART1_VALUE + "," + PART2_VALUE);
  context.put("autoCreatePartitions", "false");
  context.put("batchSize", "" + batchSize);
  context.put("serializer", HiveDelimitedTextSerializer.ALIAS);
  context.put("serializer.fieldnames", COL1 + ",," + COL2 + ",");
  context.put("heartBeatInterval", "0");

  Channel channel = startSink(sink, context);
  List<String> bodies = Lists.newArrayList();

  // push the events; the sink drains them in two batches
  Transaction txn = channel.getTransaction();
  txn.begin();
  for (int j = 1; j <= totalRecords; j++) {
    Event event = new SimpleEvent();
    String body = j + ",blah,This is a log message,other stuff";
    event.setBody(body.getBytes());
    bodies.add(body);
    channel.put(event);
  }
  txn.commit();
  txn.close();

  // execute the sink to process the events
  checkRecordCountInTable(0, dbName, tblName);
  for (int i = 0; i < batchCount; i++) {
    sink.process();
  }
  sink.stop();
  checkRecordCountInTable(totalRecords, dbName, tblName);
}