This article collects typical usage examples of the Java method org.apache.accumulo.core.client.Connector.tableOperations. If you are unsure what Connector.tableOperations is for, or how to use it, the curated examples below may help. You can also explore further usage of the enclosing class, org.apache.accumulo.core.client.Connector.
The following presents 14 code examples of the Connector.tableOperations method, sorted by popularity by default.
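Before the examples, here is a minimal sketch of how a Connector is usually obtained and where tableOperations() fits in. The instance name, ZooKeeper hosts, credentials, and table name are illustrative placeholders, not values taken from the examples below.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class TableOperationsSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection settings -- replace with your cluster's values.
        Instance instance = new ZooKeeperInstance("myInstance", "zk1:2181");
        Connector connector = instance.getConnector("root", new PasswordToken("secret"));

        // tableOperations() is the entry point for administrative table actions.
        TableOperations tableOps = connector.tableOperations();
        if (!tableOps.exists("example_table")) {
            tableOps.create("example_table");
        }
    }
}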
Example 1: exists_ryaDetailsTable
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
@Test
public void exists_ryaDetailsTable() throws AccumuloException, AccumuloSecurityException, RyaClientException, TableExistsException {
    final Connector connector = getConnector();
    final TableOperations tableOps = connector.tableOperations();

    // Create the Rya instance's Rya details table.
    final String instanceName = "test_instance_";
    final String ryaDetailsTable = instanceName + AccumuloRyaInstanceDetailsRepository.INSTANCE_DETAILS_TABLE_NAME;
    tableOps.create(ryaDetailsTable);

    // Verify the command reports the instance exists.
    final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
            getUsername(),
            getPassword().toCharArray(),
            getInstanceName(),
            getZookeepers());

    final AccumuloInstanceExists instanceExists = new AccumuloInstanceExists(connectionDetails, getConnector());
    assertTrue( instanceExists.exists(instanceName) );
}
Example 2: exists_dataTables
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
@Test
public void exists_dataTables() throws AccumuloException, AccumuloSecurityException, RyaClientException, TableExistsException {
    final Connector connector = getConnector();
    final TableOperations tableOps = connector.tableOperations();

    // Create the Rya instance's data tables.
    final String instanceName = "test_instance_";
    final String spoTableName = instanceName + RdfCloudTripleStoreConstants.TBL_SPO_SUFFIX;
    final String ospTableName = instanceName + RdfCloudTripleStoreConstants.TBL_OSP_SUFFIX;
    final String poTableName = instanceName + RdfCloudTripleStoreConstants.TBL_PO_SUFFIX;
    tableOps.create(spoTableName);
    tableOps.create(ospTableName);
    tableOps.create(poTableName);

    // Verify the command reports the instance exists.
    final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
            getUsername(),
            getPassword().toCharArray(),
            getInstanceName(),
            getZookeepers());

    final AccumuloInstanceExists instanceExists = new AccumuloInstanceExists(connectionDetails, getConnector());
    assertTrue( instanceExists.exists(instanceName) );
}
Example 3: ProspectorService
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
/**
 * Constructs an instance of {@link ProspectorService}.
 *
 * @param connector - The Accumulo connector used to communicate with the table. (not null)
 * @param tableName - The name of the Accumulo table that will be queried for Prospect results. (not null)
 * @throws AccumuloException A problem occurred while creating the table.
 * @throws AccumuloSecurityException A problem occurred while creating the table.
 */
public ProspectorService(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException {
    this.connector = requireNonNull(connector);
    this.tableName = requireNonNull(tableName);
    this.plans = ProspectorUtils.planMap(manager.getPlans());

    // Create the table if it doesn't already exist.
    try {
        final TableOperations tos = connector.tableOperations();
        if(!tos.exists(tableName)) {
            tos.create(tableName);
        }
    } catch(TableExistsException e) {
        // Do nothing. Something else created the table while we were trying to.
    }
}
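A brief usage sketch for this constructor may help; the connection settings and the table name "rya_prospects" are illustrative assumptions, not values mandated by the example.

// Hypothetical usage of the ProspectorService constructor above.
Instance instance = new ZooKeeperInstance("myInstance", "zk1:2181");            // assumed connection settings
Connector connector = instance.getConnector("root", new PasswordToken("secret"));
ProspectorService service = new ProspectorService(connector, "rya_prospects");  // assumed table name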
Example 4: createTable
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
/**
 * Creates a table and gives all users read, write, and bulk-import access to that table.
 *
 * @param tableName
 *            name of the table to create.
 */
public static synchronized void createTable(String tableName) throws AccumuloException, AccumuloSecurityException, TableExistsException {
    Connector connector = getConnector();
    TableOperations tableOps = connector.tableOperations();
    tableOps.create(tableName);

    SecurityOperations secOps = connector.securityOperations();
    for (User user : User.getUsers().values()) {
        secOps.grantTablePermission(user.id, tableName, TablePermission.READ);
        secOps.grantTablePermission(user.id, tableName, TablePermission.WRITE);
        secOps.grantTablePermission(user.id, tableName, TablePermission.BULK_IMPORT);
    }
}
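A possible call site for this helper, shown as a sketch: the table name "sensor_readings" is hypothetical, and the catch illustrates that create(...) throws TableExistsException when the table already exists.

// Hypothetical call site; "sensor_readings" is an illustrative table name.
public static void setUpTables() throws AccumuloException, AccumuloSecurityException {
    try {
        createTable("sensor_readings");
    } catch (TableExistsException e) {
        // The table already existed; treat this as success for idempotent setup.
    }
}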
Example 5: getSplits
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext jobContext) throws IOException {
    // Read the params from AccumuloInputFormat.
    Configuration conf = jobContext.getConfiguration();
    Instance instance = MRUtils.AccumuloProps.getInstance(jobContext);
    String user = MRUtils.AccumuloProps.getUsername(jobContext);
    AuthenticationToken password = MRUtils.AccumuloProps.getPassword(jobContext);
    String table = MRUtils.AccumuloProps.getTablename(jobContext);
    ArgumentChecker.notNull(instance);
    ArgumentChecker.notNull(table);

    // Find the files necessary.
    try {
        Connector connector = instance.getConnector(user, password);
        TableOperations tos = connector.tableOperations();
        String tableId = tos.tableIdMap().get(table);

        Scanner scanner = connector.createScanner("accumulo.metadata", Authorizations.EMPTY); // TODO: auths?
        scanner.setRange(new Range(new Text(tableId + "\u0000"), new Text(tableId + "\uFFFD")));
        scanner.fetchColumnFamily(new Text("file"));

        List<String> files = new ArrayList<String>();
        List<InputSplit> fileSplits = new ArrayList<InputSplit>();
        for (Map.Entry<Key, Value> entry : scanner) {
            String file = entry.getKey().getColumnQualifier().toString();
            Path path = new Path(file);
            FileSystem fs = path.getFileSystem(conf);
            FileStatus fileStatus = fs.getFileStatus(path);
            long len = fileStatus.getLen();
            BlockLocation[] fileBlockLocations = fs.getFileBlockLocations(fileStatus, 0, len);
            files.add(file);
            fileSplits.add(new FileSplit(path, 0, len, fileBlockLocations[0].getHosts()));
        }
        System.out.println(files);
        return fileSplits;
    } catch (Exception e) {
        throw new IOException(e);
    }
}
Example 6: importFilesToChildTable
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
/**
 * Imports the files that hold the table data into the child instance.
 *
 * @param childTableName the name of the child table to import.
 * @throws Exception
 */
public void importFilesToChildTable(final String childTableName) throws Exception {
    final Configuration childConfig = MergeToolMapper.getChildConfig(conf);
    final AccumuloRdfConfiguration childAccumuloRdfConfiguration = new AccumuloRdfConfiguration(childConfig);
    childAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector childConnector = AccumuloRyaUtils.setupConnector(childAccumuloRdfConfiguration);
    final TableOperations childTableOperations = childConnector.tableOperations();

    final Path localWorkDir = getPath(localCopyFileImportDir, childTableName);
    final Path hdfsBaseWorkDir = getPath(baseImportDir, childTableName);

    final FileSystem fs = FileSystem.get(conf);
    if (fs.exists(hdfsBaseWorkDir)) {
        fs.delete(hdfsBaseWorkDir, true);
    }

    log.info("Importing from the local directory: " + localWorkDir);
    log.info("Importing to the HDFS directory: " + hdfsBaseWorkDir);
    copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir);

    final Path files = getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = getPath(hdfsBaseWorkDir.toString(), "failures");

    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files.
    final FsShell hdfs = new FsShell(conf);
    if (!fs.isDirectory(hdfsBaseWorkDir)) {
        throw new IllegalArgumentException("Configured working directory is not a valid directory: " + hdfsBaseWorkDir.toString());
    }
    hdfs.run(new String[] {"-chmod", "777", hdfsBaseWorkDir.toString()});

    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

    childTableOperations.importDirectory(childTableName, files.toString(), failures.toString(), false);
}
Example 7: importChildFilesToTempParentTable
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
/**
 * Imports the child files that hold the table data into the parent instance as a temporary table.
 *
 * @param childTableName the name of the child table to import into a temporary parent table.
 * @throws Exception
 */
public void importChildFilesToTempParentTable(final String childTableName) throws Exception {
    // Create a temporary table in the parent instance to import the child files to.
    // Then run the merge process on the parent table and temp child table.
    final String tempChildTable = childTableName + TEMP_SUFFIX;
    createTempTableIfNeeded(tempChildTable);

    final AccumuloRdfConfiguration parentAccumuloRdfConfiguration = new AccumuloRdfConfiguration(conf);
    parentAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector parentConnector = AccumuloRyaUtils.setupConnector(parentAccumuloRdfConfiguration);
    final TableOperations parentTableOperations = parentConnector.tableOperations();

    final Path localWorkDir = CopyTool.getPath(localMergeFileImportDir, childTableName);
    final Path hdfsBaseWorkDir = CopyTool.getPath(baseImportDir, childTableName);
    CopyTool.copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir, conf);

    final Path files = CopyTool.getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = CopyTool.getPath(hdfsBaseWorkDir.toString(), "failures");
    final FileSystem fs = FileSystem.get(conf);

    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files.
    fs.setPermission(hdfsBaseWorkDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

    parentTableOperations.importDirectory(tempChildTable, files.toString(), failures.toString(), false);

    AccumuloRyaUtils.printTablePretty(tempChildTable, conf);
}
Example 8: deleteCoreRyaTables
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
public static void deleteCoreRyaTables(final Connector accCon, final String prefix)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    final TableOperations ops = accCon.tableOperations();
    if (ops.exists(prefix + "spo")) {
        ops.delete(prefix + "spo");
    }
    if (ops.exists(prefix + "po")) {
        ops.delete(prefix + "po");
    }
    if (ops.exists(prefix + "osp")) {
        ops.delete(prefix + "osp");
    }
}
Example 9: deleteIndexTables
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
public static void deleteIndexTables(final Connector accCon, final int tableNum,
        final String prefix) throws AccumuloException, AccumuloSecurityException,
        TableNotFoundException {
    final TableOperations ops = accCon.tableOperations();
    final String tablename = prefix + "INDEX_";
    for (int i = 1; i < tableNum + 1; i++) {
        if (ops.exists(tablename + i)) {
            ops.delete(tablename + i);
        }
    }
}
Example 10: run
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
@Override
public int run(String[] args) throws Exception {
    Job job = new Job(getConf(), "Ingest Wikipedia");
    Configuration conf = job.getConfiguration();
    conf.set("mapred.map.tasks.speculative.execution", "false");

    String tablename = WikipediaConfiguration.getTableName(conf);

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProperty(ClientProperty.INSTANCE_NAME, WikipediaConfiguration.getInstanceName(conf));
    clientConfig.setProperty(ClientProperty.INSTANCE_ZK_HOST, WikipediaConfiguration.getZookeepers(conf));
    String user = WikipediaConfiguration.getUser(conf);
    byte[] password = WikipediaConfiguration.getPassword(conf);
    Connector connector = WikipediaConfiguration.getConnector(conf);

    TableOperations tops = connector.tableOperations();
    createTables(tops, tablename, true);

    configureJob(job);

    List<Path> inputPaths = new ArrayList<Path>();
    SortedSet<String> languages = new TreeSet<String>();
    FileSystem fs = FileSystem.get(conf);
    Path parent = new Path(conf.get("wikipedia.input"));
    listFiles(parent, fs, inputPaths, languages);

    System.out.println("Input files in " + parent + ":" + inputPaths.size());
    Path[] inputPathsArray = new Path[inputPaths.size()];
    inputPaths.toArray(inputPathsArray);

    System.out.println("Languages:" + languages.size());

    FileInputFormat.setInputPaths(job, inputPathsArray);

    job.setMapperClass(WikipediaMapper.class);
    job.setNumReduceTasks(0);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.setConnectorInfo(job, user, new PasswordToken(password));
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);

    return job.waitForCompletion(true) ? 0 : 1;
}
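Since this run(String[]) method overrides Tool.run, a driver along these lines would typically launch it. A minimal sketch; the class name WikipediaIngester is an assumption standing in for the actual Tool implementation.

// Hypothetical driver; WikipediaIngester stands in for the enclosing Tool implementation.
public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new Configuration(), new WikipediaIngester(), args));
}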
Example 11: runIngestJob
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
private int runIngestJob() throws Exception
{
    Job ingestJob = new Job(getConf(), "Ingest Partitioned Wikipedia");
    Configuration ingestConf = ingestJob.getConfiguration();
    ingestConf.set("mapred.map.tasks.speculative.execution", "false");

    configureIngestJob(ingestJob);

    String tablename = WikipediaConfiguration.getTableName(ingestConf);
    Connector connector = WikipediaConfiguration.getConnector(ingestConf);
    TableOperations tops = connector.tableOperations();
    createTables(tops, tablename);

    ingestJob.setMapperClass(WikipediaPartitionedMapper.class);
    ingestJob.setNumReduceTasks(0);

    // Set up the input format.
    ingestJob.setInputFormatClass(SequenceFileInputFormat.class);
    SequenceFileInputFormat.setInputPaths(ingestJob, WikipediaConfiguration.getPartitionedArticlesPath(ingestConf));
    // TODO make split size configurable
    SequenceFileInputFormat.setMinInputSplitSize(ingestJob, WikipediaConfiguration.getMinInputSplitSize(ingestConf));

    // Set up the output format.
    ingestJob.setMapOutputKeyClass(Text.class);
    ingestJob.setMapOutputValueClass(Mutation.class);

    if(WikipediaConfiguration.bulkIngest(ingestConf))
    {
        ingestJob.setOutputFormatClass(SortingRFileOutputFormat.class);
        SortingRFileOutputFormat.setMaxBufferSize(ingestConf, WikipediaConfiguration.bulkIngestBufferSize(ingestConf));
        String bulkIngestDir = WikipediaConfiguration.bulkIngestDir(ingestConf);
        if(bulkIngestDir == null)
        {
            log.error("Bulk ingest dir not set");
            return 1;
        }
        SortingRFileOutputFormat.setPathName(ingestConf, WikipediaConfiguration.bulkIngestDir(ingestConf));
    } else {
        ingestJob.setOutputFormatClass(AccumuloOutputFormat.class);
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProperty(ClientProperty.INSTANCE_NAME, WikipediaConfiguration.getInstanceName(ingestConf));
        clientConfig.setProperty(ClientProperty.INSTANCE_ZK_HOST, WikipediaConfiguration.getZookeepers(ingestConf));
        String user = WikipediaConfiguration.getUser(ingestConf);
        byte[] password = WikipediaConfiguration.getPassword(ingestConf);
        AccumuloOutputFormat.setConnectorInfo(ingestJob, user, new PasswordToken(password));
        AccumuloOutputFormat.setZooKeeperInstance(ingestJob, clientConfig);
    }

    return ingestJob.waitForCompletion(true) ? 0 : 1;
}
Example 12: getTableNames
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
/**
 * Get the Accumulo table names that are used by an instance of Rya.
 *
 * @param ryaInstanceName - The name of the Rya instance. (not null)
 * @param conn - A connector to the host Accumulo instance. (not null)
 * @return The Accumulo table names that are used by the Rya instance.
 * @throws NotInitializedException The instance's Rya Details have not been initialized.
 * @throws RyaDetailsRepositoryException General problem with the Rya Details repository.
 * @throws PCJStorageException General problem with the PCJ storage.
 */
public List<String> getTableNames(final String ryaInstanceName, final Connector conn) throws NotInitializedException, RyaDetailsRepositoryException, PCJStorageException {
    // Build the list of tables that may be present within the Rya instance.
    final List<String> tables = new ArrayList<>();

    // Core Rya tables.
    final TableLayoutStrategy coreTableNames = new TablePrefixLayoutStrategy(ryaInstanceName);
    tables.add( coreTableNames.getSpo() );
    tables.add( coreTableNames.getPo() );
    tables.add( coreTableNames.getOsp() );
    tables.add( coreTableNames.getEval() );
    tables.add( coreTableNames.getNs() );
    tables.add( coreTableNames.getProspects() );
    tables.add( coreTableNames.getSelectivity() );

    // Rya Details table.
    tables.add( AccumuloRyaInstanceDetailsRepository.makeTableName(ryaInstanceName) );

    // Secondary Indexer Tables.
    final RyaDetailsRepository detailsRepo = new AccumuloRyaInstanceDetailsRepository(conn, ryaInstanceName);
    final RyaDetails details = detailsRepo.getRyaInstanceDetails();
    if(details.getEntityCentricIndexDetails().isEnabled()) {
        tables.add( EntityCentricIndex.makeTableName(ryaInstanceName) );
    }
    if(details.getFreeTextIndexDetails().isEnabled()) {
        tables.addAll( AccumuloFreeTextIndexer.makeTableNames(ryaInstanceName) );
    }
    if(details.getTemporalIndexDetails().isEnabled()) {
        tables.add( AccumuloTemporalIndexer.makeTableName(ryaInstanceName) );
    }
    /**
     * if(details.getGeoIndexDetails().isEnabled()) {
     *     tables.add( GeoMesaGeoIndexer.makeTableName(ryaInstanceName) );
     * }
     */
    if(details.getPCJIndexDetails().isEnabled()) {
        try(final PrecomputedJoinStorage pcjStorage = new AccumuloPcjStorage(conn, ryaInstanceName)) {
            final List<String> pcjIds = pcjStorage.listPcjs();
            final PcjTableNameFactory tableNameFactory = new PcjTableNameFactory();
            for(final String pcjId : pcjIds) {
                tables.add( tableNameFactory.makeTableName(ryaInstanceName, pcjId) );
            }
        }
    }

    // Verify they actually exist. If any don't, remove them from the list.
    final TableOperations tableOps = conn.tableOperations();
    final Iterator<String> tablesIt = tables.iterator();
    while(tablesIt.hasNext()) {
        final String table = tablesIt.next();
        if(!tableOps.exists(table)) {
            tablesIt.remove();
        }
    }

    return tables;
}
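A sketch of a possible invocation, assuming an existing Connector named conn and a Rya instance prefix of "rya_" (both illustrative):

// Hypothetical invocation of getTableNames(...).
final List<String> existingTables = getTableNames("rya_", conn);
for (final String table : existingTables) {
    System.out.println(table);
}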
Example 13: deleteTable
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
/**
 * Delete the indicated table.
 *
 * @param tableName
 *            Name of the table to delete.
 */
public static synchronized void deleteTable(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Connector connector = getConnector();
    TableOperations tableOps = connector.tableOperations();
    tableOps.delete(tableName);
}
Example 14: clearTable
import org.apache.accumulo.core.client.Connector; // import the package/class the method depends on
/**
 * Clear the indicated table.
 *
 * @param tableName
 *            Name of the table to clear.
 */
public static synchronized void clearTable(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Connector connector = getConnector();
    TableOperations tableOps = connector.tableOperations();
    tableOps.deleteRows(tableName, null, null);
}