本文整理汇总了Java中org.apache.accumulo.core.conf.AccumuloConfiguration类的典型用法代码示例。如果您正苦于以下问题:Java AccumuloConfiguration类的具体用法?Java AccumuloConfiguration怎么用?Java AccumuloConfiguration使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
AccumuloConfiguration类属于org.apache.accumulo.core.conf包,在下文中一共展示了AccumuloConfiguration类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: writeFile
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Writes a single-entry Accumulo RFile containing the first Entity from
 * {@code getElementsForIngestAggregationChecking()}, converted with the
 * element converter matching the supplied key package.
 *
 * @param keyPackage the key package selecting which element converter to use
 * @param schema     the schema the converter is built from
 * @param file       the path of the RFile to create
 * @throws IllegalArgumentException if the key package is not recognised
 * @throws IOException              if the file cannot be written
 */
private void writeFile(final KeyPackage keyPackage, final Schema schema, final String file)
throws IllegalArgumentException, IOException {
final Configuration conf = new Configuration();
final CachableBlockFile.Writer blockFileWriter = new CachableBlockFile.Writer(
FileSystem.get(conf),
new Path(file),
Compression.COMPRESSION_NONE,
null,
conf,
AccumuloConfiguration.getDefaultConfiguration());
final AccumuloElementConverter converter;
switch (keyPackage) {
case BYTE_ENTITY:
converter = new ByteEntityAccumuloElementConverter(schema);
break;
case CLASSIC:
converter = new ClassicAccumuloElementConverter(schema);
break;
default:
throw new IllegalArgumentException("Unknown keypackage");
}
// Fetch the element once and reuse it — the original code called the
// getter a second time just to build the key from the same element.
final Entity entity = (Entity) getElementsForIngestAggregationChecking().get(0);
final Key key = converter.getKeyFromEntity(entity);
final Value value = converter.getValueFromProperties(entity.getGroup(), entity.getProperties());
final RFile.Writer writer = new RFile.Writer(blockFileWriter, 1000);
try {
writer.startDefaultLocalityGroup();
writer.append(key, value);
} finally {
// Always release the underlying block file writer, even if appending fails.
writer.close();
}
}
示例2: write
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Persists the pyramid metadata to HDFS as a single-entry Accumulo RFile
 * named "meta.rf" in the work directory, replacing any existing file.
 *
 * @param metadata the metadata object to serialise and write
 * @throws IOException if the metadata cannot be serialised or the file
 *                     cannot be written
 */
@Override
public void write(MrsPyramidMetadata metadata) throws IOException
{
Path path = new Path(workDir, "meta.rf");
FileSystem fs = HadoopFileUtils.getFileSystem(path);
if (fs.exists(path))
{
// Replace any previous metadata file rather than failing on it.
fs.delete(path, false);
}
log.debug("Saving metadata to " + path.toString());

// Serialise the metadata and keep the raw bytes. The original code
// round-tripped the bytes through a String using the platform default
// charset, which could corrupt non-ASCII content.
ByteArrayOutputStream baos = new ByteArrayOutputStream();
metadata.save(baos);
byte[] metadataBytes = baos.toByteArray();

FileSKVWriter metaWrite = FileOperations.getInstance()
.openWriter(path.toString(), fs, fs.getConf(), AccumuloConfiguration.getDefaultConfiguration());
try
{
metaWrite.startDefaultLocalityGroup();
Key metKey = new Key(MrGeoAccumuloConstants.MRGEO_ACC_METADATA,
MrGeoAccumuloConstants.MRGEO_ACC_METADATA,
MrGeoAccumuloConstants.MRGEO_ACC_CQALL);
Value metValue = new Value(metadataBytes);
metaWrite.append(metKey, metValue);
}
finally
{
// Ensure the writer is closed even if appending fails.
metaWrite.close();
}
}
示例3: DefaultIteratorEnvironment
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Creates an iterator environment backed by the supplied configuration,
 * which is later returned by {@code getConfig()}.
 *
 * @param conf the Accumulo configuration to expose to iterators
 */
public DefaultIteratorEnvironment(AccumuloConfiguration conf) {
this.conf = conf;
}
示例4: getConfig
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Returns the Accumulo configuration this environment holds.
 *
 * @return the backing configuration
 */
@Override
public AccumuloConfiguration getConfig() {
return conf;
}
示例5: DefaultIteratorEnvironment
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Creates an iterator environment backed by the default Accumulo
 * configuration.
 */
public DefaultIteratorEnvironment() {
this.conf = AccumuloConfiguration.getDefaultConfiguration();
}
示例6: getConfig
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Returns the Accumulo configuration this environment holds.
 *
 * @return the backing configuration
 */
@Override
public AccumuloConfiguration getConfig() {
return conf;
}
示例7: init
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Initialises the iterator over this partition's tablet: opens an RFile
 * reader for each of the tablet's files, merges them, applies visibility
 * filtering when authorisations were supplied, applies the configured
 * iterator stack in priority order, and finally seeks to the tablet's
 * row range.
 *
 * @throws IOException if an RFile cannot be opened or the seek fails
 */
private void init() throws IOException {
final AccumuloTablet accumuloTablet = (AccumuloTablet) partition;
LOGGER.info("Initialising RFileReaderIterator for files {}", StringUtils.join(accumuloTablet.getFiles(), ','));
// NOTE(review): uses the site/default configuration rather than a
// table-specific one — presumably sufficient for raw RFile reads; confirm.
final AccumuloConfiguration accumuloConfiguration = SiteConfiguration.getInstance(DefaultConfiguration.getInstance());
// Column families the job was configured to fetch.
final Set<ByteSequence> requiredColumnFamilies = InputConfigurator
.getFetchedColumns(AccumuloInputFormat.class, configuration)
.stream()
.map(Pair::getFirst)
.map(c -> new ArrayByteSequence(c.toString()))
.collect(Collectors.toSet());
LOGGER.info("RFileReaderIterator will read column families of {}", StringUtils.join(requiredColumnFamilies, ','));
// Open one RFile reader per file in the tablet.
final List<SortedKeyValueIterator<Key, Value>> iterators = new ArrayList<>();
for (final String filename : accumuloTablet.getFiles()) {
final Path path = new Path(filename);
final FileSystem fs = path.getFileSystem(configuration);
final RFile.Reader rFileReader = new RFile.Reader(
new CachableBlockFile.Reader(fs, path, configuration, null, null, accumuloConfiguration));
iterators.add(rFileReader);
}
mergedIterator = new MultiIterator(iterators, true);
// Apply visibility filtering iterator when authorisations were supplied.
if (null != auths) {
final Authorizations authorizations = new Authorizations(auths.toArray(new String[auths.size()]));
final VisibilityFilter visibilityFilter = new VisibilityFilter(mergedIterator, authorizations, new byte[]{});
final IteratorSetting visibilityIteratorSetting = new IteratorSetting(1, "auth", VisibilityFilter.class);
visibilityFilter.init(mergedIterator, visibilityIteratorSetting.getOptions(), null);
iteratorAfterIterators = visibilityFilter;
LOGGER.info("Set authorizations to {}", authorizations);
} else {
iteratorAfterIterators = mergedIterator;
}
// Apply the configured iterator stack, lowest priority first.
final List<IteratorSetting> iteratorSettings = getIteratorSettings();
iteratorSettings.sort(Comparator.comparingInt(IteratorSetting::getPriority));
for (final IteratorSetting is : iteratorSettings) {
iteratorAfterIterators = applyIterator(iteratorAfterIterators, is);
}
// Release resources when the task finishes (taskContext is presumably
// Spark's TaskContext — confirm against the enclosing class).
taskContext.addTaskCompletionListener(context -> close());
// Seek to the tablet's range: start row inclusive, end row exclusive.
final Range range = new Range(accumuloTablet.getStartRow(), true, accumuloTablet.getEndRow(), false);
iteratorAfterIterators.seek(range, requiredColumnFamilies, true);
LOGGER.info("Initialised iterator");
}
示例8: applyIterator
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Wraps the given source iterator in a new instance of the iterator class
 * named by the setting, initialised with a minimal (mostly no-op)
 * {@code IteratorEnvironment} suitable for reading RFiles outside a
 * tablet server.
 *
 * @param source the iterator to wrap
 * @param is     the setting naming the iterator class and its options
 * @return the initialised wrapping iterator
 * @throws RuntimeException if the iterator class cannot be loaded,
 *                          instantiated or initialised; the underlying
 *                          exception is preserved as the cause
 */
private SortedKeyValueIterator<Key, Value> applyIterator(final SortedKeyValueIterator<Key, Value> source,
final IteratorSetting is) {
try {
// getDeclaredConstructor().newInstance() replaces the deprecated
// Class.newInstance(), which silently propagates checked exceptions.
final SortedKeyValueIterator<Key, Value> result = Class.forName(is.getIteratorClass())
.asSubclass(SortedKeyValueIterator.class)
.getDeclaredConstructor()
.newInstance();
result.init(source, is.getOptions(), new IteratorEnvironment() {
@Override
public SortedKeyValueIterator<Key, Value> reserveMapFileReader(final String mapFileName) throws IOException {
return null;
}
@Override
public AccumuloConfiguration getConfig() {
return null;
}
@Override
public IteratorUtil.IteratorScope getIteratorScope() {
return IteratorUtil.IteratorScope.majc;
}
@Override
public boolean isFullMajorCompaction() {
return false;
}
@Override
public void registerSideChannel(final SortedKeyValueIterator<Key, Value> iter) {
}
@Override
public Authorizations getAuthorizations() {
return null;
}
@Override
public IteratorEnvironment cloneWithSamplingEnabled() {
return null;
}
@Override
public boolean isSamplingEnabled() {
return false;
}
@Override
public SamplerConfiguration getSamplerConfiguration() {
return null;
}
});
return result;
} catch (final IOException | ReflectiveOperationException e) {
// Preserve the original exception as the cause — the previous code
// dropped it, losing the real failure's stack trace.
throw new RuntimeException("Exception creating iterator of class " + is.getIteratorClass(), e);
}
}
示例9: getAccumuloConfiguration
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Returns the Accumulo configuration, delegating to the wrapped
 * {@code config} object's {@code getAccumuloConfig()}.
 *
 * @return the Accumulo configuration
 * @throws Exception if the configuration cannot be obtained
 */
public AccumuloConfiguration getAccumuloConfiguration() throws Exception {
return config.getAccumuloConfig();
}
示例10: getAccumuloConfig
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Returns a cached copy of the connected instance's system configuration,
 * fetching and snapshotting it on the first call. Synchronised so the
 * lazy initialisation is performed at most once.
 *
 * @return the cached configuration copy
 * @throws AccumuloException         if the system configuration cannot be read
 * @throws AccumuloSecurityException if the caller is not authorised
 */
public synchronized AccumuloConfiguration getAccumuloConfig() throws AccumuloException, AccumuloSecurityException {
if (ac == null) {
// First call: snapshot the live system configuration into a copy.
ac = new ConfigurationCopy(getConnector().instanceOperations().getSystemConfiguration());
}
return ac;
}
示例11: getConfiguration
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Not supported by this implementation.
 *
 * @deprecated this operation is unsupported; always throws the exception
 *             produced by {@code ExceptionFactory.unsupported()}
 * @return never returns normally
 */
@Deprecated
public AccumuloConfiguration getConfiguration() {
throw ExceptionFactory.unsupported();
}
示例12: setConfiguration
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Not supported by this implementation.
 *
 * @deprecated this operation is unsupported; always throws the exception
 *             produced by {@code ExceptionFactory.unsupported()}
 * @param conf ignored
 */
@Deprecated
public void setConfiguration(AccumuloConfiguration conf) {
throw ExceptionFactory.unsupported();
}
示例13: getPartitions
import org.apache.accumulo.core.conf.AccumuloConfiguration; //导入依赖的package包/类
/**
 * Splits the query into one {@code PartitionQuery} per Accumulo tablet
 * that overlaps the query's key range. Tablets are located via a
 * {@code TabletLocator}; each tablet yields a key sub-range clipped to
 * the query, plus the hostname of its tablet server so Hadoop can
 * schedule work locally.
 *
 * @param query the query whose key range is partitioned
 * @return one partition query per overlapping tablet
 * @throws IOException wrapping any table or Accumulo lookup failure
 */
@Override
public List<PartitionQuery<K,T>> getPartitions(Query<K,T> query) throws IOException {
try {
TabletLocator tl;
if (conn instanceof MockConnector)
tl = new MockTabletLocator();
else
tl = TabletLocator.getLocator(new ClientContext(conn.getInstance(), credentials, AccumuloConfiguration.getTableConfiguration(conn, Tables.getTableId(conn.getInstance(), mapping.tableName))), new Text(Tables.getTableId(conn.getInstance(), mapping.tableName)));
Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<>();
tl.invalidateCache();
// Retry until the locator bins the whole range; binRanges returns the
// ranges it could not place — presumably due to tablets moving. The
// table id is deliberately re-resolved on every iteration so a deleted
// or offline table is detected promptly.
while (tl.binRanges(new ClientContext(conn.getInstance(), credentials, AccumuloConfiguration.getTableConfiguration(conn, Tables.getTableId(conn.getInstance(), mapping.tableName))), Collections.singletonList(createRange(query)), binnedRanges).size() > 0) {
// TODO log?
if (!Tables.exists(conn.getInstance(), Tables.getTableId(conn.getInstance(), mapping.tableName)))
throw new TableDeletedException(Tables.getTableId(conn.getInstance(), mapping.tableName));
else if (Tables.getTableState(conn.getInstance(), Tables.getTableId(conn.getInstance(), mapping.tableName)) == TableState.OFFLINE)
throw new TableOfflineException(conn.getInstance(), Tables.getTableId(conn.getInstance(), mapping.tableName));
UtilWaitThread.sleep(100);
tl.invalidateCache();
}
List<PartitionQuery<K,T>> ret = new ArrayList<>();
// Overall query bounds as row keys; null means unbounded on that side.
Text startRow = null;
Text endRow = null;
if (query.getStartKey() != null)
startRow = new Text(toBytes(query.getStartKey()));
if (query.getEndKey() != null)
endRow = new Text(toBytes(query.getEndKey()));
//hadoop expects hostnames, accumulo keeps track of IPs... so need to convert
HashMap<String,String> hostNameCache = new HashMap<>();
for (Entry<String,Map<KeyExtent,List<Range>>> entry : binnedRanges.entrySet()) {
// Map key is "host:port"; cache reverse DNS lookups since servers repeat.
String ip = entry.getKey().split(":", 2)[0];
String location = hostNameCache.get(ip);
if (location == null) {
InetAddress inetAddress = InetAddress.getByName(ip);
location = inetAddress.getHostName();
hostNameCache.put(ip, location);
}
Map<KeyExtent,List<Range>> tablets = entry.getValue();
for (KeyExtent ke : tablets.keySet()) {
// Clip the query's start to this tablet: if the query start lies
// outside the tablet, start just after the previous tablet's end row.
K startKey = null;
if (startRow == null || !ke.contains(startRow)) {
if (ke.getPrevEndRow() != null) {
startKey = followingKey(encoder, getKeyClass(), getBytes(ke.getPrevEndRow()));
}
} else {
startKey = fromBytes(getKeyClass(), getBytes(startRow));
}
// Clip the query's end to this tablet: if the query end lies outside,
// use the last possible key at the tablet's end row.
K endKey = null;
if (endRow == null || !ke.contains(endRow)) {
if (ke.getEndRow() != null)
endKey = lastPossibleKey(encoder, getKeyClass(), getBytes(ke.getEndRow()));
} else {
endKey = fromBytes(getKeyClass(), getBytes(endRow));
}
PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query, startKey, endKey, location);
pqi.setConf(getConf());
ret.add(pqi);
}
}
return ret;
} catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
throw new IOException(e);
}
}