本文整理汇总了Java中org.apache.accumulo.minicluster.MiniAccumuloCluster.getConnector方法的典型用法代码示例。如果您正苦于以下问题:Java MiniAccumuloCluster.getConnector方法的具体用法?Java MiniAccumuloCluster.getConnector怎么用?Java MiniAccumuloCluster.getConnector使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.accumulo.minicluster.MiniAccumuloCluster的用法示例。
在下文中一共展示了MiniAccumuloCluster.getConnector方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testRFileReaderRDDCanBeCreatedAndIsNonEmpty
import org.apache.accumulo.minicluster.MiniAccumuloCluster; //导入方法依赖的package包/类
@Test
public void testRFileReaderRDDCanBeCreatedAndIsNonEmpty() throws IOException,
        InterruptedException, AccumuloSecurityException, AccumuloException, TableNotFoundException,
        TableExistsException, StoreException {
    // Given: a mini Accumulo cluster with a freshly created table holding two rows.
    final String table = "table1";
    final MiniAccumuloCluster cluster = MiniAccumuloClusterProvider.getMiniAccumuloCluster();
    final Connector connector = cluster.getConnector(MiniAccumuloClusterProvider.USER,
            MiniAccumuloClusterProvider.PASSWORD);
    connector.tableOperations().create(table);
    // Close the writer in a finally block so mutations are flushed (and resources
    // released) even if a write fails part-way through.
    final BatchWriter bw = connector.createBatchWriter(table, new BatchWriterConfig());
    try {
        final Mutation m1 = new Mutation("row");
        m1.put("CF", "CQ", "value");
        bw.addMutation(m1);
        final Mutation m2 = new Mutation("row2");
        m2.put("CF", "CQ", "not");
        bw.addMutation(m2);
    } finally {
        bw.close();
    }
    // Compact to ensure an RFile is created, sleep to give it a little time to do it.
    // NOTE(review): the fixed 1s sleep makes this test timing-sensitive on slow hosts.
    connector.tableOperations().compact(table, new CompactionConfig());
    Thread.sleep(1000L);
    // When: an RFileReaderRDD is built over the table's RFiles, fetching only CF:CQ.
    final SparkSession sparkSession = SparkSessionProvider.getSparkSession();
    final Configuration conf = new Configuration();
    InputConfigurator.fetchColumns(AccumuloInputFormat.class, conf,
            Sets.newHashSet(new Pair<>(new Text("CF"), new Text("CQ"))));
    final RFileReaderRDD rdd = new RFileReaderRDD(sparkSession.sparkContext(),
            cluster.getInstanceName(), cluster.getZooKeepers(), MiniAccumuloClusterProvider.USER,
            MiniAccumuloClusterProvider.PASSWORD, table, new HashSet<>(),
            serialiseConfiguration(conf));
    final long count = rdd.count();
    // Then: both rows written above are visible through the RDD.
    assertEquals(2L, count);
}
示例2: setup
import org.apache.accumulo.minicluster.MiniAccumuloCluster; //导入方法依赖的package包/类
@BeforeClass
public static void setup() throws Exception {
    // Squash loud logs from the ZooKeeper client.
    Logger.getLogger(ClientCnxn.class).setLevel(Level.ERROR);
    // Set up the Mini Accumulo Cluster. Use the JDK's createTempDirectory rather
    // than Guava's deprecated Files.createTempDir() (the Guava method is
    // deprecated because it is prone to temp-dir races; the NIO variant is not).
    final File miniDataDir = java.nio.file.Files.createTempDirectory("mini-accumulo").toFile();
    final MiniAccumuloConfig cfg = new MiniAccumuloConfig(miniDataDir, ACCUMULO_PASSWORD);
    cluster = new MiniAccumuloCluster(cfg);
    cluster.start();
    // Create a Rya Client connected to the Mini Accumulo Cluster.
    final AccumuloConnectionDetails connDetails = new AccumuloConnectionDetails(
            ACCUMULO_USER,
            ACCUMULO_PASSWORD.toCharArray(),
            cluster.getInstanceName(),
            cluster.getZooKeepers());
    final Connector connector = cluster.getConnector(ACCUMULO_USER, ACCUMULO_PASSWORD);
    final RyaClient ryaClient = AccumuloRyaClientFactory.build(connDetails, connector);
    // Install an instance of Rya on the mini cluster.
    installRya(ryaClient);
    // Build a Sail object backed by the Rya store on the mini cluster.
    // PCJs are enabled with Accumulo storage but no incremental updater.
    final AccumuloRdfConfiguration ryaConf = new AccumuloRdfConfiguration();
    ryaConf.setTablePrefix(RYA_INSTANCE_NAME);
    ryaConf.set(ConfigUtils.CLOUDBASE_USER, ACCUMULO_USER);
    ryaConf.set(ConfigUtils.CLOUDBASE_PASSWORD, ACCUMULO_PASSWORD);
    ryaConf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, cluster.getZooKeepers());
    ryaConf.set(ConfigUtils.CLOUDBASE_INSTANCE, cluster.getInstanceName());
    ryaConf.set(ConfigUtils.USE_PCJ, "true");
    ryaConf.set(ConfigUtils.PCJ_STORAGE_TYPE, PrecomputedJoinStorageType.ACCUMULO.toString());
    ryaConf.set(ConfigUtils.PCJ_UPDATER_TYPE, PrecomputedJoinUpdaterType.NO_UPDATE.toString());
    sail = RyaSailFactory.getInstance(ryaConf);
    // Load some data into the cluster that will match the query we're testing against.
    loadTestStatements();
    // Add a PCJ to the application that summarizes the query.
    createTestPCJ(ryaClient);
}
示例3: testRFileReaderRDDAppliesIteratorCorrectly
import org.apache.accumulo.minicluster.MiniAccumuloCluster; //导入方法依赖的package包/类
@Test
public void testRFileReaderRDDAppliesIteratorCorrectly() throws IOException,
        InterruptedException, AccumuloSecurityException, AccumuloException, TableNotFoundException,
        TableExistsException, StoreException {
    // Given: a table with two rows, only one of whose values contains "val".
    final String table = "table2";
    final MiniAccumuloCluster cluster = MiniAccumuloClusterProvider.getMiniAccumuloCluster();
    final Connector connector = cluster.getConnector(MiniAccumuloClusterProvider.USER,
            MiniAccumuloClusterProvider.PASSWORD);
    connector.tableOperations().create(table);
    // Close the writer in a finally block so mutations are flushed (and resources
    // released) even if a write fails part-way through.
    final BatchWriter bw = connector.createBatchWriter(table, new BatchWriterConfig());
    try {
        final Mutation m1 = new Mutation("row");
        m1.put("CF", "CQ", "value");
        bw.addMutation(m1);
        final Mutation m2 = new Mutation("row2");
        m2.put("CF", "CQ", "not");
        bw.addMutation(m2);
    } finally {
        bw.close();
    }
    // Compact to ensure an RFile is created, sleep to give it a little time to do it.
    // NOTE(review): the fixed 1s sleep makes this test timing-sensitive on slow hosts.
    connector.tableOperations().compact(table, new CompactionConfig());
    Thread.sleep(1000L);
    // Configure a GrepIterator that keeps only entries containing "val".
    final Map<String, String> options = new HashMap<>();
    options.put("term", "val");
    final Configuration conf = new Configuration();
    final Job job = Job.getInstance(conf);
    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "NAME", GrepIterator.class.getName(), options));
    InputConfigurator.fetchColumns(AccumuloInputFormat.class, job.getConfiguration(),
            Sets.newHashSet(new Pair<>(new Text("CF"), new Text("CQ"))));
    // When: the RDD reads the RFiles with the iterator applied.
    final SparkSession sparkSession = SparkSessionProvider.getSparkSession();
    final RFileReaderRDD rdd = new RFileReaderRDD(sparkSession.sparkContext(),
            cluster.getInstanceName(), cluster.getZooKeepers(), MiniAccumuloClusterProvider.USER,
            MiniAccumuloClusterProvider.PASSWORD, table, new HashSet<>(),
            serialiseConfiguration(job.getConfiguration()));
    final long count = rdd.count();
    // Then: only the row whose value contains "val" survives the grep.
    assertEquals(1L, count);
}
示例4: start
import org.apache.accumulo.minicluster.MiniAccumuloCluster; //导入方法依赖的package包/类
/**
 * Starts the WebIndex development server: spins up a MiniAccumuloCluster,
 * initializes the Fluo application and its Accumulo index/export tables,
 * starts the web server, then loads the pages from {@code dataPath} and
 * waits for the Fluo observers to finish processing them.
 *
 * @throws Exception if the cluster, Fluo, or web server fails to start,
 *         or the data file cannot be read.
 */
public void start() throws Exception {
    log.info("Starting WebIndex development server...");
    log.info("Starting MiniAccumuloCluster at {}", baseDir);
    MiniAccumuloConfig cfg = new MiniAccumuloConfig(baseDir.toFile(), "secret");
    cluster = new MiniAccumuloCluster(cfg);
    cluster.start();
    FluoConfiguration config = new FluoConfiguration();
    AccumuloExportITBase.configureFromMAC(config, cluster);
    config.setApplicationName("webindex");
    config.setAccumuloTable("webindex");
    configureMetrics(config);
    String exportTable = "webindex_search";
    log.info("Initializing Accumulo & Fluo");
    IndexEnv env = new IndexEnv(config, exportTable, "/tmp", TEST_SPLITS, TEST_SPLITS);
    env.initAccumuloIndexTable();
    // NOTE(review): the same config is passed as both arguments — confirm
    // configureApplication really expects it twice.
    env.configureApplication(config, config);
    FluoFactory.newAdmin(config).initialize(
            new FluoAdmin.InitializationOptions().setClearTable(true).setClearZookeeper(true));
    env.setFluoTableSplits();
    log.info("Starting web server");
    client = new IndexClient(exportTable, cluster.getConnector("root", "secret"));
    webServer.start(client, webPort, templatePath);
    log.info("Loading data from {}", dataPath);
    Gson gson = new Gson();
    miniFluo = FluoFactory.newMiniFluo(config);
    running.set(true);
    // Renamed from "client" to avoid shadowing the IndexClient field assigned above.
    try (FluoClient fluoClient =
            FluoFactory.newClient(configureMetrics(miniFluo.getClientConfiguration()))) {
        try (LoaderExecutor le = fluoClient.newLoaderExecutor()) {
            Files
                    .lines(dataPath)
                    .map(json -> Page.fromJson(gson, json))
                    .forEach(
                            page -> {
                                log.debug("Loading page {} with {} links", page.getUrl(), page.getOutboundLinks()
                                        .size());
                                le.execute(PageLoader.updatePage(page));
                            });
        }
        log.info("Finished loading data. Waiting for observers to finish...");
        miniFluo.waitForObservers();
        log.info("Observers finished");
    }
}