This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.Compression.Algorithm. If you are wondering what the Algorithm class does, how to use it, or want to see it in context, the selected code examples below should help.
The Algorithm class belongs to the org.apache.hadoop.hbase.io.hfile.Compression package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: HFileSortedOplogWriter
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
public HFileSortedOplogWriter(int keys) throws IOException {
  try {
    int hfileBlockSize = Integer.getInteger(
        HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));
    Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
        HoplogConfig.COMPRESSION_DEFAULT));
    // ByteComparator bc = new ByteComparator();
    writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fsProvider.getFS(), path)
        .withBlockSize(hfileBlockSize)
        // .withComparator(bc)
        .withCompression(compress)
        .create();
    bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
        writer);
    logger.fine("Created hoplog writer with compression " + compress);
  } catch (IOException e) {
    logger.fine("IO Error while creating writer");
    throw e;
  }
}
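For context, the writer above resolves its codec from a system property via Algorithm.valueOf(). Below is a minimal standalone sketch of that lookup; the property key "example.compression" is made up for illustration and is not part of the Hoplog code.
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;

public class CompressionLookup {
  public static void main(String[] args) {
    // "example.compression" is a hypothetical property key; default to NONE when unset.
    String configured = System.getProperty("example.compression", "NONE");
    // valueOf() expects the enum constant name, e.g. NONE, GZ, LZO, SNAPPY.
    Algorithm compress = Algorithm.valueOf(configured);
    System.out.println("Resolved compression codec: " + compress.getName());
  }
}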
Example 2: FSReaderV2
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
public FSReaderV2(FSDataInputStream istream, FSDataInputStream istreamNoFsChecksum,
    Algorithm compressAlgo, long fileSize, int minorVersion, HFileSystem hfs, Path path)
    throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, minorVersion, hfs, path);
  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }
  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Example 3: generateColumnDescriptors
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
  List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (StoreFile.BloomType bloomType: StoreFile.BloomType.values()) {
        String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
        HColumnDescriptor htd = new HColumnDescriptor(name);
        htd.setCompressionType(compressionType);
        htd.setDataBlockEncoding(encodingType);
        htd.setBloomFilterType(bloomType);
        htds.add(htd);
        familyId++;
      }
    }
  }
  return htds;
}
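A hedged usage sketch: assuming generateColumnDescriptors() is in scope, the generated descriptors would typically be attached to a table definition like this; the table name "load_test" and family prefix "cf" are illustrative only.
HTableDescriptor tableDesc = new HTableDescriptor("load_test");
for (HColumnDescriptor family : generateColumnDescriptors("cf")) {
  tableDesc.addFamily(family);
}
// tableDesc now carries one family per compression/encoding/bloom combination.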
Example 4: testCreateFamilyCompressionMap
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * Test for {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}.
 * Tests that the compression map is correctly deserialized from the configuration.
 *
 * @throws IOException
 */
@Test
public void testCreateFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamilies(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);
    // read back the family-specific compression settings from the configuration
    Map<byte[], String> retrievedFamilyToCompressionMap =
        HFileOutputFormat.createFamilyCompressionMap(conf);
    // test that we have a value for every column family and that it matches the mock values used
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:" + entry.getKey(),
          entry.getValue().getName(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Example 5: getMockColumnFamilies
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. Column family names have special characters
 */
private Map<String, Compression.Algorithm> getMockColumnFamilies(int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression =
      new HashMap<String, Compression.Algorithm>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("Family1!@#!@#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
Example 6: FSReaderV2
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
public FSReaderV2(FSDataInputStream istream,
    FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
    long fileSize, int minorVersion, HFileSystem hfs, Path path)
    throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize,
      minorVersion, hfs, path);
  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }
  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Example 7: createTable
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * Attempts to create the table used by this tool with the fixed configuration details.
 *
 * @param admin The configured administration client used to perform this operation
 */
private void createTable(final HBaseAdmin admin) {
  final String tableName = appConfig.getToolTable();
  try {
    if (!admin.tableExists(tableName)) {
      HTableDescriptor tableDesc = new HTableDescriptor(tableName.getBytes(Charsets.UTF_8));
      HColumnDescriptor colDesc = new HColumnDescriptor(ConfigConstants.COLUMN_FAMILY);
      colDesc.setBlockCacheEnabled(true).setBlocksize(65536)
          .setBloomFilterType(BloomType.ROW)
          .setCompressionType(Algorithm.SNAPPY)
          .setDataBlockEncoding(DataBlockEncoding.PREFIX)
          .setMaxVersions(1);
      tableDesc.addFamily(colDesc);
      admin.createTable(tableDesc);
      log.info("Created table: " + tableName);
    } else {
      log.debug("Table already exists, creation skipped");
    }
  } catch (IOException e) {
    log.error("Error occurred during table creation", e);
  }
}
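A possible call site inside the same tool class, assuming the configuration comes from HBaseConfiguration.create(); exception handling is trimmed to what the sketch needs.
Configuration conf = HBaseConfiguration.create();  // standard client configuration lookup
HBaseAdmin admin = new HBaseAdmin(conf);           // may throw if the master is not running
try {
  createTable(admin);  // delegates compression/encoding choices to the helper above
} finally {
  admin.close();
}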
Example 8: createWriter
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
@Override
public Writer createWriter(FileSystem fs, Path path,
    FSDataOutputStream ostream, int blockSize,
    Algorithm compressAlgo, HFileDataBlockEncoder dataBlockEncoder,
    KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  // version 1 does not implement checksums
  return new HFileWriterV1(conf, cacheConf, fs, path, ostream, blockSize,
      compressAlgo, dataBlockEncoder, comparator);
}
Example 9: HFileWriterV1
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Example 10: Writer
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * @param compressionAlgorithm compression algorithm to use
 * @param dataBlockEncoder data block encoding algorithm to use
 * @param checksumType type of checksum
 * @param bytesPerChecksum bytes per checksum
 */
public Writer(Compression.Algorithm compressionAlgorithm,
    HFileDataBlockEncoder dataBlockEncoder, boolean includesMemstoreTS, int minorVersion,
    ChecksumType checksumType, int bytesPerChecksum) {
  this.minorVersion = minorVersion;
  compressAlgo = compressionAlgorithm == null ? NONE : compressionAlgorithm;
  this.dataBlockEncoder =
      dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  baosInMemory = new ByteArrayOutputStream();
  if (compressAlgo != NONE) {
    compressor = compressionAlgorithm.getCompressor();
    compressedByteStream = new ByteArrayOutputStream();
    try {
      compressionStream =
          compressionAlgorithm.createPlainCompressionStream(compressedByteStream, compressor);
    } catch (IOException e) {
      throw new RuntimeException("Could not create compression stream for algorithm "
          + compressionAlgorithm, e);
    }
  }
  if (minorVersion > MINOR_VERSION_NO_CHECKSUM && bytesPerChecksum < HEADER_SIZE_WITH_CHECKSUMS) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. Minimum is "
        + HEADER_SIZE_WITH_CHECKSUMS + " but the configured value is " + bytesPerChecksum);
  }
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i)
    prevOffsetByType[i] = -1;
  this.includesMemstoreTS = includesMemstoreTS;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}
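To see the compression path the Writer sets up in isolation, the Algorithm methods it relies on (getCompressor, createPlainCompressionStream, returnCompressor) can be exercised directly. This is a minimal sketch with an arbitrary payload, not HBase code itself.
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.io.compress.Compressor;

public class PlainCompressionSketch {
  public static void main(String[] args) throws Exception {
    Algorithm algo = Algorithm.GZ;  // GZ falls back to the built-in gzip when no native codec is loaded
    Compressor compressor = algo.getCompressor();  // may be null; the stream factory copes with that
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    OutputStream out = algo.createPlainCompressionStream(compressed, compressor);
    out.write("example block payload".getBytes());
    out.close();  // finishes the compression stream
    if (compressor != null) {
      algo.returnCompressor(compressor);  // hand the pooled compressor back, as HBase does
    }
    System.out.println("Compressed size: " + compressed.size() + " bytes");
  }
}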
Example 11: AbstractFSReader
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
public AbstractFSReader(FSDataInputStream istream, FSDataInputStream istreamNoFsChecksum,
    Algorithm compressAlgo, long fileSize, int minorVersion, HFileSystem hfs, Path path)
    throws IOException {
  this.istream = istream;
  this.compressAlgo = compressAlgo;
  this.fileSize = fileSize;
  this.minorVersion = minorVersion;
  this.hfs = hfs;
  this.path = path;
  this.hdrSize = headerSize(minorVersion);
  this.istreamNoFsChecksum = istreamNoFsChecksum;
}
Example 12: bloomAndCompressionCombinations
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * Create all combinations of Bloom filters and compression algorithms for
 * testing.
 */
private static List<Object[]> bloomAndCompressionCombinations() {
  List<Object[]> configurations = new ArrayList<Object[]>();
  for (Compression.Algorithm comprAlgo :
       HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    for (StoreFile.BloomType bloomType : StoreFile.BloomType.values()) {
      configurations.add(new Object[] { comprAlgo, bloomType });
    }
  }
  return Collections.unmodifiableList(configurations);
}
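Combinations like these are normally consumed by a JUnit Parameterized runner. Assuming the enclosing test class is annotated with @RunWith(Parameterized.class), the hookup would look roughly like this; field or constructor injection of comprAlgo and bloomType is not shown.
// Hypothetical hookup in the same test class.
@Parameters
public static Collection<Object[]> parameters() {
  return bloomAndCompressionCombinations();
}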
Example 13: createPreSplitLoadTestTable
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    byte[] tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding) throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
  hcd.setDataBlockEncoding(dataBlockEncoding);
  hcd.setCompressionType(compression);
  return createPreSplitLoadTestTable(conf, desc, hcd);
}
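An illustrative invocation of the overload above; the table and family names are made up, and Bytes.toBytes() is the standard HBase byte-conversion helper.
int regionCount = createPreSplitLoadTestTable(conf,
    Bytes.toBytes("load_test"), Bytes.toBytes("cf"),
    Algorithm.SNAPPY, DataBlockEncoding.PREFIX);
System.out.println("Table pre-split into " + regionCount + " regions");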
Example 14: getSupportedCompressionAlgorithms
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Compression.Algorithm[0]);
}
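A quick way to see what the current classpath and native libraries actually support, using the helper above:
for (Compression.Algorithm algo : getSupportedCompressionAlgorithms()) {
  // Only algorithms whose compressor could be instantiated are returned.
  System.out.println("Supported compression codec: " + algo.getName());
}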
Example 15: setupMockColumnFamilies
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; // import the dependent package/class
private void setupMockColumnFamilies(HTable table,
    Map<String, Compression.Algorithm> familyToCompression) throws IOException {
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setCompressionType(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}