本文整理汇总了Java中org.apache.accumulo.core.client.BatchWriter.addMutation方法的典型用法代码示例。如果您正苦于以下问题:Java BatchWriter.addMutation方法的具体用法?Java BatchWriter.addMutation怎么用?Java BatchWriter.addMutation使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.accumulo.core.client.BatchWriter
的用法示例。
在下文中一共展示了BatchWriter.addMutation方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: processSinglePathWithByteBuffer
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Parses one CSV file from the given path and writes one mutation per parsed line.
 *
 * @param writer destination for the mutations built from each CSV line
 * @param mapping column mapping used by {@code parseLine} to build mutations
 * @param p file to read
 * @param parser CSV parser to drive over the file contents
 * @throws IOException if the file cannot be opened or read
 * @throws MutationsRejectedException if the writer rejects a mutation
 */
private void processSinglePathWithByteBuffer(BatchWriter writer, FileMapping mapping, Path p, CsvParser parser) throws IOException, MutationsRejectedException {
    final FileSystem fs = p.getFileSystem(conf);
    FSDataInputStream dis = fs.open(p, INPUT_BUFFER_SIZE);
    // try-with-resources closes the reader (and the wrapped stream) even if
    // beginParsing/parseNext throws. The original try/finally null-checked a
    // reader that could never be null at that point.
    try (InputStreamReader reader = new InputStreamReader(dis, UTF_8)) {
        parser.beginParsing(reader);
        String[] line;
        while ((line = parser.parseNext()) != null) {
            writer.addMutation(parseLine(mapping, line));
        }
    }
}
示例2: writeRandomEntries
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Write random entries.
 * <p>
 * Closes the writer after entries are written, even when a write fails,
 * so buffered mutations are never silently abandoned.
 *
 * @param writer
 *          Writer to write entries to.
 * @throws MutationsRejectedException if the writer rejects a mutation or fails on close
 */
void writeRandomEntries(BatchWriter writer) throws MutationsRejectedException {
    try {
        for (int i = 0; i < rowCount; i++) {
            byte[] row = getRandomBytes(keyFieldSize, true);
            for (int j = 0; j < columnCount; j++) {
                byte[] colF = getRandomBytes(keyFieldSize, true);
                byte[] colQ = getRandomBytes(keyFieldSize, true);
                byte[] value = getRandomBytes(valueFieldSize, false);
                Mutation mutation = new Mutation(row);
                mutation.put(colF, colQ, VISIBILITY, value);
                writer.addMutation(mutation);
            }
        }
    } finally {
        // Original closed the writer only on success, leaking it on failure.
        writer.close();
    }
}
示例3: main
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Writes 10,000 sequential rows of 5 cells each to the configured table,
 * creating the table first if it does not exist.
 */
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, MutationsRejectedException, TableExistsException,
    TableNotFoundException {
    ClientOnRequiredTable opts = new ClientOnRequiredTable();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(InsertWithBatchWriter.class.getName(), args, bwOpts);

    Connector connector = opts.getConnector();
    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
    // Create the target table on first use.
    if (!connector.tableOperations().exists(opts.getTableName())) {
        connector.tableOperations().create(opts.getTableName());
    }
    BatchWriter bw = mtbw.getBatchWriter(opts.getTableName());

    Text family = new Text("colfam");
    System.out.println("writing ...");
    // 10,000 rows x 5 qualifiers, reporting progress every 100 rows.
    for (int row = 0; row < 10000; row++) {
        Mutation mutation = new Mutation(new Text(String.format("row_%d", row)));
        for (int qual = 0; qual < 5; qual++) {
            Value cell = new Value((String.format("value_%d_%d", row, qual)).getBytes());
            mutation.put(family, new Text(String.format("colqual_%d", qual)), cell);
        }
        bw.addMutation(mutation);
        if (row % 100 == 0) {
            System.out.println(row);
        }
    }
    mtbw.close();
}
示例4: createEntries
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Writes a single traced entry to the configured table.
 * <p>
 * Trace the write operation. Note, unless you flush the BatchWriter, you will not capture
 * the write operation as it occurs asynchronously. You can optionally create additional Spans
 * within a given Trace as seen below around the flush.
 *
 * @param opts connection/table options used to obtain the writer
 */
private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
    try {
        System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
        BatchWriter batchWriter = opts.getConnector().createBatchWriter(opts.getTableName(), new BatchWriterConfig());
        try {
            Mutation m = new Mutation("row");
            m.put("cf", "cq", "value");
            batchWriter.addMutation(m);
            // You can add timeline annotations to Spans which will be able to be viewed in the Monitor
            scope.getSpan().addTimelineAnnotation("Initiating Flush");
            batchWriter.flush();
        } finally {
            // Original leaked the writer if addMutation/flush threw.
            batchWriter.close();
        }
    } finally {
        // Original leaked the trace scope on any failure after startSpan.
        scope.close();
    }
}
示例5: main
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting at a specified number.
 * The column families will be "foo" and column qualifiers will be "1". The values will be random byte arrays of a specified size.
 */
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);

    BatchWriter writer = opts.getConnector().createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
    // Emit one mutation per sequential row id in [start, start + num).
    for (long row = opts.start; row < opts.start + opts.num; row++) {
        writer.addMutation(RandomBatchWriter.createMutation(row, opts.valueSize, opts.vis));
    }
    writer.close();
}
示例6: main
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Inverts the shard table into the doc2Term table: each (row=term, cf, cq=doc)
 * entry becomes a (row=doc, cf) entry with an empty qualifier and value.
 */
public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(Reverse.class.getName(), args, scanOpts, bwOpts);

    Connector conn = opts.getConnector();
    Scanner scanner = conn.createScanner(opts.shardTable, opts.auths);
    scanner.setBatchSize(scanOpts.scanBatchSize);
    BatchWriter bw = conn.createBatchWriter(opts.doc2TermTable, bwOpts.getBatchWriterConfig());
    try {
        for (Entry<Key,Value> entry : scanner) {
            Key key = entry.getKey();
            // Row and column qualifier swap roles to reverse the index.
            Mutation m = new Mutation(key.getColumnQualifier());
            m.put(key.getColumnFamily(), new Text(), new Value(new byte[0]));
            bw.addMutation(m);
        }
    } finally {
        // Original leaked the writer if scanning threw mid-iteration.
        bw.close();
    }
}
示例7: test
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
@Test
public void test() throws Exception {
    conn.tableOperations().create(tableName);
    // Load the fixture entries into the freshly created table.
    BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key,Value> entry : data) {
        Key key = entry.getKey();
        Mutation mutation = new Mutation(key.getRow());
        mutation.put(key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), entry.getValue());
        writer.addMutation(mutation);
    }
    writer.close();

    // The mapper run should exit cleanly and record exactly one assertion error entry.
    assertEquals(0, CIFTester.main(tableName, CIFTester.TestMapper.class.getName()));
    assertEquals(1, assertionErrors.get(tableName).size());
}
示例8: testErrorOnNextWithoutClose
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
@Test
public void testErrorOnNextWithoutClose() throws Exception {
    conn.tableOperations().create(tableName);
    // Load the fixture entries into the freshly created table.
    BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key,Value> entry : data) {
        Key key = entry.getKey();
        Mutation mutation = new Mutation(key.getRow());
        mutation.put(key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibility()), key.getTimestamp(), entry.getValue());
        writer.addMutation(mutation);
    }
    writer.close();

    // A mapper that never closes its iterator should fail with exit code 1.
    assertEquals(1, CIFTester.main(tableName, CIFTester.TestNoClose.class.getName()));
    assertEquals(1, assertionErrors.get(tableName).size());
    // this should actually exist, in addition to the dummy entry
    assertEquals(2, assertionErrors.get(tableName + "_map_ioexception").size());
}
示例9: setupInstance
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
@Before
public void setupInstance() throws Exception {
    tableName = getUniqueNames(1)[0];
    conn = getConnector();
    conn.tableOperations().create(tableName);
    ColumnVisibility vis = new ColumnVisibility();
    // Fixture directory layout:
    //   /            has 1 dir
    //   /local       has 2 dirs and 1 file
    //   /local/user1 has 2 files
    Mutation[] fixture = {
        Ingest.buildMutation(vis, "/local", true, false, true, 272, 12345, null),
        Ingest.buildMutation(vis, "/local/user1", true, false, true, 272, 12345, null),
        Ingest.buildMutation(vis, "/local/user2", true, false, true, 272, 12345, null),
        Ingest.buildMutation(vis, "/local/file", false, false, false, 1024, 12345, null),
        Ingest.buildMutation(vis, "/local/file", false, false, false, 1024, 23456, null),
        Ingest.buildMutation(vis, "/local/user1/file1", false, false, false, 2024, 12345, null),
        Ingest.buildMutation(vis, "/local/user1/file2", false, false, false, 1028, 23456, null),
    };
    BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (Mutation m : fixture) {
        writer.addMutation(m);
    }
    writer.close();
}
示例10: testSimpleOutput
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
public void testSimpleOutput() throws Exception {
    // Seed the table with a single cell: row / cf:cq -> empty value.
    BatchWriter writer = connector.createBatchWriter(table, 10l, 10l, 2);
    Mutation seed = new Mutation("row");
    seed.put("cf", "cq", new Value(new byte[0]));
    writer.addMutation(seed);
    writer.flush();
    writer.close();

    String location = "accumulo://" + table + "?instance=" + instance + "&user=" + user + "&password=" + pwd + "&range=a|z&mock=true";
    AccumuloStorage storage = createAccumuloStorage(location);

    // Drain the storage; every tuple must carry 6 fields, and exactly one tuple is expected.
    int tupleCount = 0;
    for (Tuple tuple = storage.getNext(); tuple != null; tuple = storage.getNext()) {
        assertEquals(6, tuple.size());
        tupleCount++;
    }
    assertEquals(1, tupleCount);
}
示例11: testColumns
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
public void testColumns() throws Exception {
    // Seed one row with four cells across three families.
    BatchWriter writer = connector.createBatchWriter(table, 10l, 10l, 2);
    Mutation seed = new Mutation("a");
    seed.put("cf1", "cq", new Value(new byte[0]));
    seed.put("cf2", "cq", new Value(new byte[0]));
    seed.put("cf3", "cq1", new Value(new byte[0]));
    seed.put("cf3", "cq2", new Value(new byte[0]));
    writer.addMutation(seed);
    writer.flush();
    writer.close();

    // Column filter selects all of cf1 plus only cf3:cq1 -> 2 tuples expected.
    String location = "accumulo://" + table + "?instance=" + instance + "&user=" + user + "&password=" + pwd + "&range=a|c&columns=cf1,cf3|cq1&mock=true";
    AccumuloStorage storage = createAccumuloStorage(location);

    int tupleCount = 0;
    for (Tuple tuple = storage.getNext(); tuple != null; tuple = storage.getNext()) {
        assertEquals(6, tuple.size());
        tupleCount++;
    }
    assertEquals(2, tupleCount);
}
示例12: testWholeRowRange
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
public void testWholeRowRange() throws Exception {
    // Seed one row with four cells across three families.
    BatchWriter writer = connector.createBatchWriter(table, 10l, 10l, 2);
    Mutation seed = new Mutation("a");
    seed.put("cf1", "cq", new Value(new byte[0]));
    seed.put("cf2", "cq", new Value(new byte[0]));
    seed.put("cf3", "cq1", new Value(new byte[0]));
    seed.put("cf3", "cq2", new Value(new byte[0]));
    writer.addMutation(seed);
    writer.flush();
    writer.close();

    // A single-row range ("a") should surface every cell of that row -> 4 tuples.
    String location = "accumulo://" + table + "?instance=" + instance + "&user=" + user + "&password=" + pwd + "&range=a&mock=true";
    AccumuloStorage storage = createAccumuloStorage(location);

    int tupleCount = 0;
    for (Tuple tuple = storage.getNext(); tuple != null; tuple = storage.getNext()) {
        assertEquals(6, tuple.size());
        tupleCount++;
    }
    assertEquals(4, tupleCount);
}
示例13: writeMutation
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Writes one mutation per non-rowid column of an INSERT, deriving the row id
 * from the column/value lists.
 *
 * @param writer destination for the per-column mutations
 * @param columns insert column references, parallel to {@code values}
 * @param values insert value expressions, parallel to {@code columns}
 * @throws MutationsRejectedException if the writer rejects a mutation
 */
private void writeMutation(BatchWriter writer, List<ColumnReference> columns, List<Expression> values) throws MutationsRejectedException {
    byte[] rowId = getRowId(columns, values);
    for (int i = 0; i < columns.size(); i++) {
        Column column = columns.get(i).getMetadataObject();
        // The rowid pseudo-column is not stored as a cell of its own.
        if (SQLStringVisitor.getRecordName(column).equalsIgnoreCase(AccumuloMetadataProcessor.ROWID)) {
            continue;
        }
        Object raw = values.get(i);
        // Literal expressions carry their payload inside; unwrap before writing.
        Object cellValue = (raw instanceof Literal) ? ((Literal) raw).getValue() : raw;
        writer.addMutation(buildMutation(rowId, column, cellValue));
    }
}
示例14: createTable
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
@Before
public void createTable() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
    connector.tableOperations().create(table);
    BatchWriter writer = connector.createBatchWriter(table, new BatchWriterConfig());
    // One row per letter a..z, each with the same five family/qualifier cells.
    for (char letter = 'a'; letter <= 'z'; letter++) {
        Mutation mutation = new Mutation(letter + "_row");
        mutation.put("fam1", "qual1", "val1:1");
        mutation.put("fam1", "qual2", "val1:2");
        mutation.put("fam2", "qual1", "val2:1");
        mutation.put("fam2", "qual2", "val2:2");
        mutation.put("fam2", "qual3", "val2:3");
        // Number of updates in a fully populated row; same every iteration.
        noAuthCount = mutation.getUpdates().size();
        writer.addMutation(mutation);
    }
    writer.close();
}
示例15: writeSchema
import org.apache.accumulo.core.client.BatchWriter; //导入方法依赖的package包/类
/**
 * Records, in the schema table, that each header column appears in the given file.
 *
 * @param fileName source file the columns came from (stored as column qualifier)
 * @param header column names parsed from the file header
 * @throws RuntimeException if the schema table no longer exists
 */
protected void writeSchema(Text fileName, String[] header) throws AccumuloException, AccumuloSecurityException {
    final BatchWriter writer;
    try {
        writer = mtbw.getBatchWriter(schemaTableName);
    } catch (TableNotFoundException e) {
        log.error("Schema table ({}) was deleted", schemaTableName, e);
        throw new RuntimeException(e);
    }

    // track existence of column in schema
    final Value emptyValue = new Value(new byte[0]);
    for (String columnName : header) {
        Mutation columnMutation = new Mutation(columnName);
        columnMutation.put(SCHEMA_COLUMN, fileName, emptyValue);
        writer.addMutation(columnMutation);
    }
}