本文整理汇总了Java中org.apache.accumulo.core.client.BatchWriterConfig类的典型用法代码示例。如果您正苦于以下问题:Java BatchWriterConfig类的具体用法?Java BatchWriterConfig怎么用?Java BatchWriterConfig使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
BatchWriterConfig类属于org.apache.accumulo.core.client包,在下文中一共展示了BatchWriterConfig类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: SignedBatchWriter
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Creates a signed batch writer.
 *
 * @param connector
 *          The connector for the Accumulo instance.
 * @param tableName
 *          Name of the table to write to.
 * @param batchConfig
 *          Configuration for a {@link BatchWriter}.
 * @param signatureConfig
 *          Configuration for the signatures.
 * @param keys
 *          Container with the keys to use for signatures.
 * @throws TableNotFoundException if {@code tableName} (or the configured signature
 *           table) does not exist.
 */
public SignedBatchWriter(Connector connector, String tableName, BatchWriterConfig batchConfig, SignatureConfig signatureConfig, SignatureKeyContainer keys)
    throws TableNotFoundException {
  checkArgument(connector != null, "connector is null");
  checkArgument(tableName != null, "tableName is null");
  // Consistency fix: batchConfig was the only argument not validated; fail fast
  // here instead of inside createBatchWriter. NOTE(review): confirm no caller
  // deliberately passes a null config.
  checkArgument(batchConfig != null, "batchConfig is null");
  checkArgument(signatureConfig != null, "signatureConfig is null");
  checkArgument(keys != null, "keys is null");

  this.tableWriter = connector.createBatchWriter(tableName, batchConfig);
  this.signer = new EntrySigner(signatureConfig, keys);
  this.signatureConfig = signatureConfig;

  // When signatures go to a separate table, open a second writer for it;
  // otherwise signatures ride along in the data table and no extra writer is needed.
  if (signatureConfig.destination == SignatureConfig.Destination.SEPARATE_TABLE) {
    this.signatureTableWriter = connector.createBatchWriter(signatureConfig.destinationTable, batchConfig);
  } else {
    this.signatureTableWriter = null;
  }
}
示例2: createEntries
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Writes a single traced mutation to the configured table.
 * <p>
 * The whole write is wrapped in a trace span; a timeline annotation marks the
 * flush so it is visible in the Monitor. Note that unless the BatchWriter is
 * flushed the asynchronous write would not be captured by the span.
 *
 * @param opts client options supplying the connector and table name
 */
private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
  TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
  try {
    System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
    BatchWriter batchWriter = opts.getConnector().createBatchWriter(opts.getTableName(), new BatchWriterConfig());
    try {
      Mutation m = new Mutation("row");
      m.put("cf", "cq", "value");
      batchWriter.addMutation(m);
      // Timeline annotations on Spans are viewable in the Monitor.
      scope.getSpan().addTimelineAnnotation("Initiating Flush");
      batchWriter.flush();
    } finally {
      // Leak fix: previously an exception from addMutation/flush skipped both
      // close() calls. Close order (writer, then scope) matches the original.
      batchWriter.close();
    }
  } finally {
    scope.close();
  }
}
示例3: test
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Loads the fixture entries into a fresh table, then verifies that the
 * TestMapper run succeeds and records exactly one assertion-error slot.
 */
@Test
public void test() throws Exception {
  conn.tableOperations().create(tableName);

  final BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
  for (final Entry<Key,Value> entry : data) {
    final Key key = entry.getKey();
    final Mutation mutation = new Mutation(key.getRow());
    mutation.put(key.getColumnFamily(), key.getColumnQualifier(),
        new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), entry.getValue());
    writer.addMutation(mutation);
  }
  writer.close();

  assertEquals(0, CIFTester.main(tableName, CIFTester.TestMapper.class.getName()));
  assertEquals(1, assertionErrors.get(tableName).size());
}
示例4: testErrorOnNextWithoutClose
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Loads the fixture entries, then runs the TestNoClose variant and checks that
 * it fails (exit code 1) and that the expected IOException marker entries were
 * recorded alongside the dummy entry.
 */
@Test
public void testErrorOnNextWithoutClose() throws Exception {
  conn.tableOperations().create(tableName);

  final BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
  for (final Entry<Key,Value> entry : data) {
    final Key key = entry.getKey();
    final Mutation mutation = new Mutation(key.getRow());
    mutation.put(key.getColumnFamily(), key.getColumnQualifier(),
        new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), entry.getValue());
    writer.addMutation(mutation);
  }
  writer.close();

  assertEquals(1, CIFTester.main(tableName, CIFTester.TestNoClose.class.getName()));
  assertEquals(1, assertionErrors.get(tableName).size());
  // this should actually exist, in addition to the dummy entry
  assertEquals(2, assertionErrors.get(tableName + "_map_ioexception").size());
}
示例5: setupInstance
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Builds a small directory-tree fixture before each test:
 * "/" has 1 dir; "/local" has 2 dirs and 1 file; "/local/user1" has 2 files.
 */
@Before
public void setupInstance() throws Exception {
  tableName = getUniqueNames(1)[0];
  conn = getConnector();
  conn.tableOperations().create(tableName);

  final ColumnVisibility visibility = new ColumnVisibility();
  final BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
  // Directories first, then files (two versions of /local/file with different timestamps).
  writer.addMutation(Ingest.buildMutation(visibility, "/local", true, false, true, 272, 12345, null));
  writer.addMutation(Ingest.buildMutation(visibility, "/local/user1", true, false, true, 272, 12345, null));
  writer.addMutation(Ingest.buildMutation(visibility, "/local/user2", true, false, true, 272, 12345, null));
  writer.addMutation(Ingest.buildMutation(visibility, "/local/file", false, false, false, 1024, 12345, null));
  writer.addMutation(Ingest.buildMutation(visibility, "/local/file", false, false, false, 1024, 23456, null));
  writer.addMutation(Ingest.buildMutation(visibility, "/local/user1/file1", false, false, false, 2024, 12345, null));
  writer.addMutation(Ingest.buildMutation(visibility, "/local/user1/file2", false, false, false, 1028, 23456, null));
  writer.close();
}
示例6: ExportTask
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Connects to the given ZooKeeper-backed Accumulo instance and opens a batch
 * writer on {@code table}, creating the table first if it does not exist yet.
 * The create-then-retry shape (rather than an exists() pre-check) tolerates a
 * concurrent creator racing us.
 */
ExportTask(String instanceName, String zookeepers, String user, String password, String table)
throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
ZooKeeperInstance zki = new ZooKeeperInstance(
new ClientConfiguration().withInstance(instanceName).withZkHosts(zookeepers));
// TODO need to close batch writer
Connector conn = zki.getConnector(user, new PasswordToken(password));
try {
// First attempt: assume the table already exists.
bw = conn.createBatchWriter(table, new BatchWriterConfig());
} catch (TableNotFoundException tnfe) {
try {
conn.tableOperations().create(table);
} catch (TableExistsException e) {
// nothing to do
}
// Retry after ensuring the table exists; a TableNotFoundException here is
// propagated to the caller per the constructor's throws clause.
bw = conn.createBatchWriter(table, new BatchWriterConfig());
}
}
示例7: testBatchWriterConfigIsSetToValuesWithParameters
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Verifies that BatchWriterConfig values supplied through the graph
 * configuration map are propagated into the created {@link BatchWriterConfig}.
 */
@Test
public void testBatchWriterConfigIsSetToValuesWithParameters() {
  int numThreads = 2;
  long timeout = 3;
  long maxMemory = 5;
  long maxLatency = 7;

  // Idiom fix: the original used a raw Map plus MapUtils.putAll, which compiles
  // with unchecked warnings. A typed map with direct puts is equivalent.
  Map<String, String> configMap = new HashMap<>();
  configMap.put(AccumuloGraphConfiguration.BATCHWRITER_MAX_LATENCY, String.valueOf(maxLatency));
  configMap.put(AccumuloGraphConfiguration.BATCHWRITER_MAX_MEMORY, String.valueOf(maxMemory));
  configMap.put(AccumuloGraphConfiguration.BATCHWRITER_MAX_WRITE_THREADS, String.valueOf(numThreads));
  configMap.put(AccumuloGraphConfiguration.BATCHWRITER_TIMEOUT, String.valueOf(timeout));

  AccumuloGraphConfiguration accumuloGraphConfiguration = new AccumuloGraphConfiguration(configMap);
  BatchWriterConfig batchWriterConfig = accumuloGraphConfiguration.createBatchWriterConfig();

  assertThat(batchWriterConfig.getMaxLatency(TimeUnit.MILLISECONDS), is(maxLatency));
  assertThat(batchWriterConfig.getTimeout(TimeUnit.MILLISECONDS), is(timeout));
  assertThat(batchWriterConfig.getMaxMemory(), is(maxMemory));
  assertThat(batchWriterConfig.getMaxWriteThreads(), is(numThreads));
}
示例8: init
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Per-test setup: creates a mock Accumulo instance and connector, configures a
 * BatchWriterConfig, removes any leftover Rya tables from previous runs, and
 * prepares the RDF configuration.
 */
@Before
public void init() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
  mock = new MockInstance("accumulo");
  // Charset fix: be explicit instead of relying on the platform default.
  PasswordToken pToken = new PasswordToken("pass".getBytes(StandardCharsets.UTF_8));
  conn = mock.getConnector("user", pToken);

  config = new BatchWriterConfig();
  config.setMaxMemory(1000);
  config.setMaxLatency(1000, TimeUnit.SECONDS);
  config.setMaxWriteThreads(10);

  // Deduplicated: drop stale tables so each test starts from a clean slate.
  for (String table : new String[] {"rya_prospects", "rya_selectivity"}) {
    if (conn.tableOperations().exists(table)) {
      conn.tableOperations().delete(table);
    }
  }

  arc = new AccumuloRdfConfiguration();
  arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
  arc.setMaxRangesForScanner(300);
}
开发者ID:apache,项目名称:incubator-rya,代码行数:25,代码来源:RdfCloudTripleStoreSelectivityEvaluationStatisticsTest.java
示例9: init
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Per-test setup: mock Accumulo instance + connector, batch-writer settings,
 * removal of stale Rya tables, RDF configuration, and the stats DAO under test.
 */
@Before
public void init() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
  mock = new MockInstance("accumulo");
  conn = mock.getConnector("user", new PasswordToken("pass".getBytes()));

  config = new BatchWriterConfig();
  config.setMaxMemory(1000);
  config.setMaxLatency(1000, TimeUnit.SECONDS);
  config.setMaxWriteThreads(10);

  // Start clean: remove tables possibly left behind by an earlier run.
  if (conn.tableOperations().exists("rya_prospects")) {
    conn.tableOperations().delete("rya_prospects");
  }
  if (conn.tableOperations().exists("rya_selectivity")) {
    conn.tableOperations().delete("rya_selectivity");
  }

  arc = new AccumuloRdfConfiguration();
  arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
  arc.setMaxRangesForScanner(300);
  res = new ProspectorServiceEvalStatsDAO(conn, arc);
}
示例10: setStoreLocation
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Configures the job's Accumulo output: connector credentials, default table,
 * ZooKeeper instance, and batch-writer options. Skips everything if the output
 * format was already configured for this job.
 */
@Override
public void setStoreLocation(final String location, final Job job) throws IOException {
  conf = job.getConfiguration();
  setLocationFromUri(location, job);

  final String configuredKey = AccumuloOutputFormat.class.getSimpleName() + ".configured";
  if (conf.getBoolean(configuredKey, false)) {
    return; // output format already set up for this job
  }

  try {
    AccumuloOutputFormat.setConnectorInfo(job, user, new PasswordToken(userP.getBytes(StandardCharsets.UTF_8)));
  } catch (final AccumuloSecurityException e) {
    throw new RuntimeException(e);
  }
  AccumuloOutputFormat.setDefaultTableName(job, table);
  AccumuloOutputFormat.setZooKeeperInstance(job, inst, zookeepers);

  final BatchWriterConfig writerConfig = new BatchWriterConfig();
  writerConfig.setMaxLatency(10, TimeUnit.SECONDS);
  writerConfig.setMaxMemory(10 * 1000 * 1000);
  writerConfig.setMaxWriteThreads(10);
  AccumuloOutputFormat.setBatchWriterOptions(job, writerConfig);
}
示例11: getFreeTextIndexer
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Builds a free-text indexer wired to an Accumulo connector and a
 * multi-table batch writer, or returns {@code null} when free-text indexing
 * is disabled in the configuration.
 *
 * @param conf source of the ENABLE_FREETEXT flag and connection settings
 * @throws IOException if a connector cannot be obtained
 */
private static FreeTextIndexer getFreeTextIndexer(final Configuration conf) throws IOException {
  if (!conf.getBoolean(ENABLE_FREETEXT, true)) {
    return null;
  }

  final AccumuloFreeTextIndexer indexer = new AccumuloFreeTextIndexer();
  indexer.setConf(conf);

  final Connector connector;
  try {
    connector = ConfigUtils.getConnector(conf);
  } catch (AccumuloException | AccumuloSecurityException e) {
    throw new IOException("Error when attempting to create a connection for writing the freeText index.", e);
  }

  indexer.setConnector(connector);
  indexer.setMultiTableBatchWriter(connector.createMultiTableBatchWriter(new BatchWriterConfig()));
  indexer.init();
  return indexer;
}
示例12: getTemporalIndexer
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Builds a temporal indexer wired to an Accumulo connector and a multi-table
 * batch writer, or returns {@code null} when temporal indexing is disabled.
 *
 * @param conf source of the ENABLE_TEMPORAL flag and connection settings
 * @throws IOException if a connector cannot be obtained
 */
private static TemporalIndexer getTemporalIndexer(final Configuration conf) throws IOException {
  if (!conf.getBoolean(ENABLE_TEMPORAL, true)) {
    return null;
  }

  final AccumuloTemporalIndexer indexer = new AccumuloTemporalIndexer();
  indexer.setConf(conf);

  final Connector accumuloConnector;
  try {
    accumuloConnector = ConfigUtils.getConnector(conf);
  } catch (AccumuloException | AccumuloSecurityException e) {
    throw new IOException("Error when attempting to create a connection for writing the temporal index.", e);
  }

  indexer.setConnector(accumuloConnector);
  indexer.setMultiTableBatchWriter(accumuloConnector.createMultiTableBatchWriter(new BatchWriterConfig()));
  indexer.init();
  return indexer;
}
示例13: setUp
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.set(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX, "triplestore_");
conf.setBoolean(ConfigUtils.USE_MOCK_INSTANCE, true);
// The temporal predicates are from http://linkedevents.org/ontology
// and http://motools.sourceforge.net/event/event.html
conf.setStrings(ConfigUtils.TEMPORAL_PREDICATES_LIST, ""
+ URI_PROPERTY_AT_TIME + ","
+ URI_PROPERTY_CIRCA + ","
+ URI_PROPERTY_EVENT_TIME);
tIndexer = new AccumuloTemporalIndexer();
tIndexer.setConf(conf);
Connector connector = ConfigUtils.getConnector(conf);
MultiTableBatchWriter mt_bw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
tIndexer.setConnector(connector);
tIndexer.setMultiTableBatchWriter(mt_bw);
tIndexer.init();
}
示例14: init
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Per-test setup: mock Accumulo instance + connector, batch-writer settings,
 * removal of stale Rya tables, RDF configuration, and the stats DAO under test.
 */
@Before
public void init() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
  mock = new MockInstance("accumulo");
  PasswordToken pToken = new PasswordToken("pass".getBytes());
  conn = mock.getConnector("user", pToken);

  config = new BatchWriterConfig();
  config.setMaxMemory(1000);
  config.setMaxLatency(1000, TimeUnit.SECONDS);
  config.setMaxWriteThreads(10);

  if (conn.tableOperations().exists("rya_prospects")) {
    conn.tableOperations().delete("rya_prospects");
  }
  if (conn.tableOperations().exists("rya_selectivity")) {
    conn.tableOperations().delete("rya_selectivity");
  }

  arc = new AccumuloRdfConfiguration();
  arc.setTableLayoutStrategy(new TablePrefixLayoutStrategy());
  arc.setMaxRangesForScanner(300);
  // Ordering fix: construct the DAO only after arc is fully configured.
  // Previously it was created before setTableLayoutStrategy/setMaxRangesForScanner,
  // unlike the sibling init() in this file; if the DAO snapshots configuration at
  // construction time it would have seen an unconfigured arc.
  res = new ProspectorServiceEvalStatsDAO(conn, arc);
}
示例15: createBatchWriter
import org.apache.accumulo.core.client.BatchWriterConfig; //导入依赖的package包/类
/**
 * Creates a {@link org.apache.accumulo.core.client.BatchWriter} for the
 * specified table
 * <p>
 *
 * @param store the accumulo store
 * @param tableName the table name
 * @return A new BatchWriter with the settings defined in the
 *         gaffer.accumulostore properties
 * @throws StoreException if the table could not be found or other table issues
 */
private static BatchWriter createBatchWriter(final AccumuloStore store, final String tableName)
    throws StoreException {
  final BatchWriterConfig batchConfig = new BatchWriterConfig();
  batchConfig.setMaxMemory(store.getProperties().getMaxBufferSizeForBatchWriterInBytes());
  batchConfig.setMaxLatency(store.getProperties().getMaxTimeOutForBatchWriterInMilliseconds(),
      TimeUnit.MILLISECONDS);
  batchConfig.setMaxWriteThreads(store.getProperties().getNumThreadsForBatchWriter());
  try {
    return store.getConnection().createBatchWriter(tableName, batchConfig);
  } catch (final TableNotFoundException e) {
    // Message fix: report the table this method actually tried to write to
    // (tableName, not store.getTableName()) and add the missing space before it.
    throw new StoreException("Table not set up! Use table gaffer.accumulostore.utils to create the table "
        + tableName, e);
  }
}