This article collects typical usage examples of the Java method org.apache.accumulo.core.client.Connector.createBatchWriter. If you are wondering what Connector.createBatchWriter does, how to use it, or want concrete examples of it in action, the curated code samples below should help. You can also explore further usage examples of its enclosing class, org.apache.accumulo.core.client.Connector.
The following presents 15 code examples of Connector.createBatchWriter, ordered by popularity by default.
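Before turning to the collected examples, here is a minimal sketch of the pattern they all share: obtain a Connector, call createBatchWriter with a table name and a BatchWriterConfig, add mutations, and close the writer to flush. The instance name, ZooKeeper hosts, credentials, and table name below are placeholders, not values taken from any of the examples.

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Mutation;

public class CreateBatchWriterSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details -- substitute your own instance, hosts, and credentials.
        Connector conn = new ZooKeeperInstance("instance", "zkhost:2181")
                .getConnector("user", new PasswordToken("secret"));
        BatchWriterConfig config = new BatchWriterConfig();
        config.setMaxMemory(10 * 1024 * 1024); // buffer up to 10 MB of mutations before flushing
        BatchWriter writer = conn.createBatchWriter("mytable", config); // throws TableNotFoundException if the table is absent
        try {
            Mutation m = new Mutation("row1");
            m.put("cf", "cq", "value");
            writer.addMutation(m);
        } finally {
            writer.close(); // flushes buffered mutations; may throw MutationsRejectedException
        }
    }
}

Most of the examples below use this BatchWriterConfig overload; Examples 8 and 9 instead call the older createBatchWriter(String, long, long, int) overload, which was deprecated in favor of the BatchWriterConfig variant.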
Example 1: SignedBatchWriter
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
/**
* Create a signed batch writer.
*
* @param connector
* The connector for the Accumulo instance.
* @param tableName
* Name of the table to write to.
* @param batchConfig
* Configuration for a {@link BatchWriter}.
* @param signatureConfig
* Configuration for the signatures.
* @param keys
* Container with the keys to use for signatures.
*/
public SignedBatchWriter(Connector connector, String tableName, BatchWriterConfig batchConfig, SignatureConfig signatureConfig, SignatureKeyContainer keys)
throws TableNotFoundException {
checkArgument(connector != null, "connector is null");
checkArgument(tableName != null, "tableName is null");
checkArgument(signatureConfig != null, "signatureConfig is null");
checkArgument(keys != null, "keys is null");
this.tableWriter = connector.createBatchWriter(tableName, batchConfig);
this.signer = new EntrySigner(signatureConfig, keys);
this.signatureConfig = signatureConfig;
if (signatureConfig.destination == SignatureConfig.Destination.SEPARATE_TABLE) {
this.signatureTableWriter = connector.createBatchWriter(signatureConfig.destinationTable, batchConfig);
} else {
this.signatureTableWriter = null;
}
}
Example 2: main
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
Opts opts = new Opts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
Connector conn = opts.getConnector();
if (!conn.tableOperations().exists(opts.getTableName())) {
conn.tableOperations().create(opts.getTableName());
conn.tableOperations().attachIterator(opts.getTableName(), new IteratorSetting(1, ChunkCombiner.class));
}
BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
for (String filename : opts.files) {
fdi.insertFileData(filename, bw);
}
bw.close();
//TODO
//opts.stopTracing();
}
Example 3: main
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
/**
* Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting at a specified number.
* The column families will be "foo" and column qualifiers will be "1". The values will be random byte arrays of a specified size.
*/
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
Opts opts = new Opts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);
Connector connector = opts.getConnector();
BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
long end = opts.start + opts.num;
for (long i = opts.start; i < end; i++) {
Mutation m = RandomBatchWriter.createMutation(i, opts.valueSize, opts.vis);
bw.addMutation(m);
}
bw.close();
}
Example 4: main
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
Opts opts = new Opts();
ScannerOpts scanOpts = new ScannerOpts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
opts.parseArgs(Reverse.class.getName(), args, scanOpts, bwOpts);
Connector conn = opts.getConnector();
Scanner scanner = conn.createScanner(opts.shardTable, opts.auths);
scanner.setBatchSize(scanOpts.scanBatchSize);
BatchWriter bw = conn.createBatchWriter(opts.doc2TermTable, bwOpts.getBatchWriterConfig());
for (Entry<Key,Value> entry : scanner) {
Key key = entry.getKey();
Mutation m = new Mutation(key.getColumnQualifier());
m.put(key.getColumnFamily(), new Text(), new Value(new byte[0]));
bw.addMutation(m);
}
bw.close();
}
Example 5: ExportTask
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
ExportTask(String instanceName, String zookeepers, String user, String password, String table)
throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
ZooKeeperInstance zki = new ZooKeeperInstance(
new ClientConfiguration().withInstance(instanceName).withZkHosts(zookeepers));
// TODO need to close batch writer
Connector conn = zki.getConnector(user, new PasswordToken(password));
try {
bw = conn.createBatchWriter(table, new BatchWriterConfig());
} catch (TableNotFoundException tnfe) {
try {
conn.tableOperations().create(table);
} catch (TableExistsException e) {
// nothing to do
}
bw = conn.createBatchWriter(table, new BatchWriterConfig());
}
}
Example 6: main
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
Opts opts = new Opts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
opts.parseArgs(Ingest.class.getName(), args, bwOpts);
Connector conn = opts.getConnector();
if (!conn.tableOperations().exists(opts.nameTable))
conn.tableOperations().create(opts.nameTable);
if (!conn.tableOperations().exists(opts.indexTable))
conn.tableOperations().create(opts.indexTable);
if (!conn.tableOperations().exists(opts.dataTable)) {
conn.tableOperations().create(opts.dataTable);
conn.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
}
BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
BatchWriter indexBW = conn.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
BatchWriter dataBW = conn.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
for (String dir : opts.directories) {
recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
// fill in parent directory info
int slashIndex = -1;
while ((slashIndex = dir.lastIndexOf("/")) > 0) {
dir = dir.substring(0, slashIndex);
ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
}
}
ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
dirBW.close();
indexBW.close();
dataBW.close();
}
Example 7: main
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
Opts opts = new Opts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
opts.parseArgs(InterferenceTest.class.getName(), args, bwOpts);
if (opts.iterations < 1)
opts.iterations = Long.MAX_VALUE;
Connector conn = opts.getConnector();
if (!conn.tableOperations().exists(opts.getTableName()))
conn.tableOperations().create(opts.getTableName());
Thread writer = new Thread(new Writer(conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()), opts.iterations));
writer.start();
Reader r;
if (opts.isolated)
r = new Reader(new IsolatedScanner(conn.createScanner(opts.getTableName(), opts.auths)));
else
r = new Reader(conn.createScanner(opts.getTableName(), opts.auths));
Thread reader = new Thread(r);
reader.start();
writer.join();
r.stopNow();
reader.join();
System.out.println("finished");
}
Example 8: createDefaultBatchWriter
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
public static BatchWriter createDefaultBatchWriter(final String tablename, final Configuration conf)
throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
final Long DEFAULT_MAX_MEMORY = getWriterMaxMemory(conf);
final Long DEFAULT_MAX_LATENCY = getWriterMaxLatency(conf);
final Integer DEFAULT_MAX_WRITE_THREADS = getWriterMaxWriteThreads(conf);
final Connector connector = ConfigUtils.getConnector(conf);
return connector.createBatchWriter(tablename, DEFAULT_MAX_MEMORY, DEFAULT_MAX_LATENCY, DEFAULT_MAX_WRITE_THREADS);
}
Example 9: writeMutations
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
public static void writeMutations(final Connector connector, final String tableName, final Collection<Mutation> mutations) throws TableNotFoundException, MutationsRejectedException {
final BatchWriter bw = connector.createBatchWriter(tableName, 10000L, 10000L, 4);
for(final Mutation mutation : mutations) {
bw.addMutation(mutation);
}
bw.flush();
bw.close();
}
Example 10: testRFileReaderRDDCanBeCreatedAndIsNonEmpty
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
@Test
public void testRFileReaderRDDCanBeCreatedAndIsNonEmpty() throws IOException,
InterruptedException, AccumuloSecurityException, AccumuloException, TableNotFoundException,
TableExistsException, StoreException {
// Given
final String table = "table1";
final MiniAccumuloCluster cluster = MiniAccumuloClusterProvider.getMiniAccumuloCluster();
// Add some data
final Connector connector = cluster.getConnector(MiniAccumuloClusterProvider.USER,
MiniAccumuloClusterProvider.PASSWORD);
connector.tableOperations().create(table);
final BatchWriter bw = connector.createBatchWriter(table, new BatchWriterConfig());
final Mutation m1 = new Mutation("row");
m1.put("CF", "CQ", "value");
bw.addMutation(m1);
final Mutation m2 = new Mutation("row2");
m2.put("CF", "CQ", "not");
bw.addMutation(m2);
bw.close();
// Compact to ensure an RFile is created, sleep to give it a little time to do it
connector.tableOperations().compact(table, new CompactionConfig());
Thread.sleep(1000L);
// When
final SparkSession sparkSession = SparkSessionProvider.getSparkSession();
final Configuration conf = new Configuration();
InputConfigurator.fetchColumns(AccumuloInputFormat.class, conf,
Sets.newHashSet(new Pair<>(new Text("CF"), new Text("CQ"))));
final RFileReaderRDD rdd = new RFileReaderRDD(sparkSession.sparkContext(),
cluster.getInstanceName(), cluster.getZooKeepers(), MiniAccumuloClusterProvider.USER,
MiniAccumuloClusterProvider.PASSWORD, table, new HashSet<>(),
serialiseConfiguration(conf));
final long count = rdd.count();
// Then
assertEquals(2L, count);
}
Example 11: doScan
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
private void doScan(WebSocketClient client) throws Exception {
long now = System.currentTimeMillis();
String tableName = "qonduit.scanTest";
Connector con = mac.getConnector(MAC_ROOT_USER, MAC_ROOT_PASSWORD);
con.namespaceOperations().create("qonduit");
con.tableOperations().create(tableName);
BatchWriterConfig bwc = new BatchWriterConfig();
bwc.setMaxLatency(2, TimeUnit.SECONDS);
BatchWriter writer = con.createBatchWriter(tableName, bwc);
ColumnVisibility cv = new ColumnVisibility();
for (int i = 0; i < 10; i++) {
Mutation m = new Mutation("m" + i);
m.put("cf" + i, "cq" + i, cv, now + i, Integer.toString(i));
writer.addMutation(m);
}
writer.flush();
writer.close();
sleepUninterruptibly(2, TimeUnit.SECONDS);
List<byte[]> responses = new ArrayList<>();
String id = UUID.randomUUID().toString();
ScanRequest request = new ScanRequest();
request.setRequestId(id);
request.setTableName(tableName);
request.setResultBatchSize(5);
doIt(client, request, responses, 3);
Assert.assertEquals(11, responses.size());
for (byte[] b : responses) {
KVPair kv = JsonSerializer.getObjectMapper().readValue(b, KVPair.class);
Value val = kv.getValue();
if (null != val) {
int num = Integer.parseInt(new String(val.getValue()));
Key key = kv.getKey().toKey();
Assert.assertEquals("m" + num, key.getRow().toString());
Assert.assertEquals("cf" + num, key.getColumnFamily().toString());
Assert.assertEquals("cq" + num, key.getColumnQualifier().toString());
Assert.assertEquals(now + num, key.getTimestamp());
Assert.assertEquals(id, kv.getRequestId());
} else {
Assert.assertTrue(kv.isEndOfResults());
}
}
}
Example 12: setConf
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
@Override
@SuppressWarnings("unchecked")
public void setConf(Configuration config) {
this.conf = config;
// Get the implementation of MutationTransformer to use.
// By default, we call toString() on every non-null field.
Class<? extends MutationTransformer> xformerClass =
(Class<? extends MutationTransformer>)
this.conf.getClass(AccumuloConstants.TRANSFORMER_CLASS_KEY,
ToStringMutationTransformer.class);
this.mutationTransformer = (MutationTransformer)
ReflectionUtils.newInstance(xformerClass, this.conf);
if (null == mutationTransformer) {
throw new RuntimeException("Could not instantiate MutationTransformer.");
}
String colFam = conf.get(AccumuloConstants.COL_FAMILY_KEY, null);
if (null == colFam) {
throw new RuntimeException("Accumulo column family not set.");
}
this.mutationTransformer.setColumnFamily(colFam);
String rowKey = conf.get(AccumuloConstants.ROW_KEY_COLUMN_KEY, null);
if (null == rowKey) {
throw new RuntimeException("Row key column not set.");
}
this.mutationTransformer.setRowKeyColumn(rowKey);
String vis = conf.get(AccumuloConstants.VISIBILITY_KEY, null);
this.mutationTransformer.setVisibility(vis);
this.tableName = conf.get(AccumuloConstants.TABLE_NAME_KEY, null);
String zookeeper = conf.get(AccumuloConstants.ZOOKEEPERS);
String instance = conf.get(AccumuloConstants.ACCUMULO_INSTANCE);
Instance inst = new ZooKeeperInstance(instance, zookeeper);
String username = conf.get(AccumuloConstants.ACCUMULO_USER_NAME);
String pw = conf.get(AccumuloConstants.ACCUMULO_PASSWORD);
if (null == pw) {
pw = "";
}
byte[] password = pw.getBytes();
BatchWriterConfig bwc = new BatchWriterConfig();
long bs = conf.getLong(AccumuloConstants.BATCH_SIZE,
AccumuloConstants.DEFAULT_BATCH_SIZE);
bwc.setMaxMemory(bs);
long la = conf.getLong(AccumuloConstants.MAX_LATENCY,
AccumuloConstants.DEFAULT_LATENCY);
bwc.setMaxLatency(la, TimeUnit.MILLISECONDS);
try {
Connector conn = inst.getConnector(username, new PasswordToken(password));
this.table = conn.createBatchWriter(tableName, bwc);
} catch (AccumuloException ex) {
throw new RuntimeException("Error accessing Accumulo", ex);
} catch (AccumuloSecurityException aex){
throw new RuntimeException("Security exception accessing Accumulo", aex);
} catch(TableNotFoundException tex){
throw new RuntimeException("Accumulo table " + tableName
+ " not found", tex);
}
}
Example 13: main
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
/**
* Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
*/
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
Opts opts = new Opts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long in a way that doesn't trigger FindBugs
System.err.println(String.format("You must specify a min and a max that allow for at least num possible values. "
+ "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.", opts.num, opts.min, opts.max,
(opts.max - opts.min)));
System.exit(1);
}
Random r;
if (opts.seed == null)
r = new Random();
else {
r = new Random(opts.seed);
}
Connector connector = opts.getConnector();
BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
// reuse the ColumnVisibility object to improve performance
ColumnVisibility cv = opts.visiblity;
// Generate num unique row ids in the given range
HashSet<Long> rowids = new HashSet<>(opts.num);
while (rowids.size() < opts.num) {
rowids.add((abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
}
for (long rowid : rowids) {
Mutation m = createMutation(rowid, opts.size, cv);
bw.addMutation(m);
}
try {
bw.close();
} catch (MutationsRejectedException e) {
if (e.getSecurityErrorCodes().size() > 0) {
HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<>();
for (Entry<TabletId,Set<SecurityErrorCode>> ke : e.getSecurityErrorCodes().entrySet()) {
String tableId = ke.getKey().getTableId().toString();
Set<SecurityErrorCode> secCodes = tables.get(tableId);
if (secCodes == null) {
secCodes = new HashSet<>();
tables.put(tableId, secCodes);
}
secCodes.addAll(ke.getValue());
}
System.err.println("ERROR : Not authorized to write to tables : " + tables);
}
if (e.getConstraintViolationSummaries().size() > 0) {
System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
}
System.exit(1);
}
}
Example 14: testRFileReaderRDDAppliesIteratorCorrectly
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
@Test
public void testRFileReaderRDDAppliesIteratorCorrectly() throws IOException,
InterruptedException, AccumuloSecurityException, AccumuloException, TableNotFoundException,
TableExistsException, StoreException {
// Given
final String table = "table2";
final MiniAccumuloCluster cluster = MiniAccumuloClusterProvider.getMiniAccumuloCluster();
// Add some data
final Connector connector = cluster.getConnector(MiniAccumuloClusterProvider.USER,
MiniAccumuloClusterProvider.PASSWORD);
connector.tableOperations().create(table);
final BatchWriter bw = connector.createBatchWriter(table, new BatchWriterConfig());
final Mutation m1 = new Mutation("row");
m1.put("CF", "CQ", "value");
bw.addMutation(m1);
final Mutation m2 = new Mutation("row2");
m2.put("CF", "CQ", "not");
bw.addMutation(m2);
bw.close();
// Compact to ensure an RFile is created, sleep to give it a little time to do it
connector.tableOperations().compact(table, new CompactionConfig());
Thread.sleep(1000L);
// Create an iterator and an option to grep for "val"
final Map<String, String> options = new HashMap<>();
options.put("term", "val");
final Configuration conf = new Configuration();
final Job job = Job.getInstance(conf);
AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "NAME", GrepIterator.class.getName(), options));
InputConfigurator.fetchColumns(AccumuloInputFormat.class, job.getConfiguration(),
Sets.newHashSet(new Pair<>(new Text("CF"), new Text("CQ"))));
// When
final SparkSession sparkSession = SparkSessionProvider.getSparkSession();
final RFileReaderRDD rdd = new RFileReaderRDD(sparkSession.sparkContext(),
cluster.getInstanceName(), cluster.getZooKeepers(), MiniAccumuloClusterProvider.USER,
MiniAccumuloClusterProvider.PASSWORD, table, new HashSet<>(),
serialiseConfiguration(job.getConfiguration()));
final long count = rdd.count();
// Then
assertEquals(1L, count);
}
Example 15: EncryptedBatchWriter
import org.apache.accumulo.core.client.Connector; // import the package/class this method depends on
/**
* Create an encrypted batch writer.
*
* @param connector
* The connector for the Accumulo instance.
* @param tableName
* Name of the table to write to.
* @param batchConfig
* Configuration for a {@link BatchWriter}.
* @param cryptoConfig
* Configuration for the encryption.
* @param keys
* Container with the keys to use for encryption.
* @throws TableNotFoundException
* Thrown if the table name is not found in the Accumulo instance.
*/
public EncryptedBatchWriter(Connector connector, String tableName, BatchWriterConfig batchConfig, EncryptionConfig cryptoConfig, EncryptionKeyContainer keys)
throws TableNotFoundException {
checkArgument(connector != null, "connector is null");
checkArgument(tableName != null, "tableName is null");
checkArgument(cryptoConfig != null, "cryptoConfig is null");
checkArgument(keys != null, "keys is null");
this.writer = connector.createBatchWriter(tableName, batchConfig);
this.encryptor = new EntryEncryptor(cryptoConfig, keys);
this.supportsDelete = this.encryptor.canBeDeleteServerSide();
}