This article collects typical usage examples of the Java class net.jpountz.xxhash.XXHashFactory. If you have been wondering what the XXHashFactory class is for and how to use it, the curated code examples below should help.
The XXHashFactory class belongs to the net.jpountz.xxhash package. 15 code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
Example 1: receive
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
void receive(FileReference reference, String filename, byte[] content) {
    log.log(LogLevel.INFO, "Preparing receive call for " + reference.value() + " and file " + filename);
    XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
    Request fileBlob = new Request("filedistribution.receiveFile");
    log.log(LogLevel.INFO, "Calling " + fileBlob.methodName() + " with target " + target);
    fileBlob.parameters().add(new StringValue(reference.value()));
    fileBlob.parameters().add(new StringValue(filename));
    fileBlob.parameters().add(new DataValue(content));
    fileBlob.parameters().add(new Int64Value(hasher.hash(ByteBuffer.wrap(content), 0)));
    fileBlob.parameters().add(new Int32Value(0));
    fileBlob.parameters().add(new StringValue("OK"));
    log.log(LogLevel.INFO, "Doing invokeSync");
    target.invokeSync(fileBlob, 5);
    log.log(LogLevel.INFO, "Done with invokeSync");
}
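For context, the Int64Value checksum added above is simply the 64-bit xxHash of the whole payload with seed 0. Here is a minimal, self-contained sketch of that hashing step (the class name and sample content are illustrative, not from the original project):

import java.nio.ByteBuffer;
import net.jpountz.xxhash.XXHash64;
import net.jpountz.xxhash.XXHashFactory;

public class ContentHashSketch {
    public static void main(String[] args) {
        byte[] content = "some file content".getBytes();
        // Same call pattern as the example: fastest available implementation,
        // hashing the whole wrapped buffer with seed 0.
        XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
        long checksum = hasher.hash(ByteBuffer.wrap(content), 0);
        System.out.printf("xxhash64 = %016x%n", checksum);
    }
}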
Example 2: KafkaLZ4BlockOutputStream
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize Default: 4. The block size used during compression. 4=64KB, 5=256KB, 6=1MB, 7=4MB. Any other
 *            value throws an exception.
 * @param blockChecksum Default: false. When true, an XXHash32 checksum is computed and appended to the stream for
 *            every block of data.
 * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum
 *            compatible with older Kafka clients.
 * @throws IOException if an I/O error occurs while writing the frame header
 */
public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum, boolean useBrokenFlagDescriptorChecksum) throws IOException {
    super(out);
    compressor = LZ4Factory.fastestInstance().fastCompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum;
    bd = new BD(blockSize);
    flg = new FLG(blockChecksum);
    bufferOffset = 0;
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
    finished = false;
    writeHeader();
}
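The block size codes in the javadoc follow the LZ4 frame format, where the maximum block size is 1 << (2 * code + 8) bytes. A small illustrative sketch of that mapping (this helper is not part of the example's BD class):

public class BlockSizeSketch {
    // LZ4 frame format: block maximum size = 1 << (2 * code + 8)
    static int blockMaximumSize(int code) {
        if (code < 4 || code > 7) {
            throw new IllegalArgumentException("Unsupported block size code: " + code);
        }
        return 1 << (2 * code + 8);
    }

    public static void main(String[] args) {
        for (int code = 4; code <= 7; code++) {
            // Prints 65536, 262144, 1048576, 4194304 for codes 4..7
            System.out.println(code + " -> " + blockMaximumSize(code) + " bytes");
        }
    }
}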
Example 3: smallXXPerformanceTest
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
@Test
public void smallXXPerformanceTest() {
    final XXHashFactory xxHash = XXHashFactory.safeInstance();
    final byte[] values = new byte[size];
    for ( int i = 0; i < values.length; i++ ) {
        values[i] = (byte) i;
    }
    for ( int i = 1; i < iterations; i++ ) {
        xxHash.hash64().hash( values, 0, values.length, i );
    }
}
Example 4: smallXXUnsafePerformanceTest
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
@Test
public void smallXXUnsafePerformanceTest() {
    final XXHashFactory xxHash = XXHashFactory.unsafeInstance();
    final byte[] values = new byte[size];
    for ( int i = 0; i < values.length; i++ ) {
        values[i] = (byte) i;
    }
    for ( int i = 1; i < iterations; i++ ) {
        xxHash.hash64().hash( values, 0, values.length, i );
    }
}
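Both performance tests reference size and iterations fields defined elsewhere in their test class. The following self-contained sketch reproduces the same measurement with assumed values for those fields and adds a crude timing comparison between the safe (pure Java) and unsafe (sun.misc.Unsafe-based) implementations:

import net.jpountz.xxhash.XXHash64;
import net.jpountz.xxhash.XXHashFactory;

public class XXHashBenchSketch {
    static final int SIZE = 64;              // assumed small-input size
    static final int ITERATIONS = 1_000_000; // assumed iteration count

    static long bench(XXHash64 hash64, byte[] values) {
        long start = System.nanoTime();
        for (int i = 1; i < ITERATIONS; i++) {
            hash64.hash(values, 0, values.length, i); // seed varies per iteration
        }
        return System.nanoTime() - start;
    }

    public static void main(String[] args) {
        byte[] values = new byte[SIZE];
        for (int i = 0; i < values.length; i++) {
            values[i] = (byte) i;
        }
        System.out.println("safe:   " + bench(XXHashFactory.safeInstance().hash64(), values) + " ns");
        System.out.println("unsafe: " + bench(XXHashFactory.unsafeInstance().hash64(), values) + " ns");
    }
}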
Example 5: KafkaLZ4BlockInputStream
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
 *
 * @param in The stream to decompress
 * @param ignoreFlagDescriptorChecksum for compatibility with old Kafka clients, ignore an incorrect HC byte
 * @throws IOException if an I/O error occurs while reading the frame header
 */
public KafkaLZ4BlockInputStream(InputStream in, boolean ignoreFlagDescriptorChecksum) throws IOException {
    super(in);
    decompressor = LZ4Factory.fastestInstance().safeDecompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.ignoreFlagDescriptorChecksum = ignoreFlagDescriptorChecksum;
    readHeader();
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[maxBlockSize];
    bufferOffset = 0;
    bufferSize = 0;
    finished = false;
}
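Assuming the two Kafka stream classes from examples 2 and 5 are on the classpath, a round trip might look like the following sketch (the buffer size and the single read() call are simplifications):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

public class KafkaLz4RoundTripSketch {
    public static void main(String[] args) throws Exception {
        byte[] original = "hello lz4 frame".getBytes("UTF-8");

        // Compress: block size code 4 (64KB), no per-block checksum,
        // standard (non-broken) frame descriptor checksum.
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (KafkaLZ4BlockOutputStream out = new KafkaLZ4BlockOutputStream(sink, 4, false, false)) {
            out.write(original);
        }

        // Decompress, tolerating the legacy HC byte written by old Kafka clients.
        byte[] roundTripped = new byte[original.length];
        try (KafkaLZ4BlockInputStream in =
                 new KafkaLZ4BlockInputStream(new ByteArrayInputStream(sink.toByteArray()), true)) {
            int n = in.read(roundTripped);
            System.out.println("read " + n + " bytes: " + new String(roundTripped, 0, n, "UTF-8"));
        }
    }
}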
Example 6: ColumnReverseDictionaryInfo
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
/**
 * Check and initialize xxHash32 if enabled.
 */
public ColumnReverseDictionaryInfo() {
    boolean useXXHash = Boolean.valueOf(CarbonProperties.getInstance()
        .getProperty(CarbonCommonConstants.ENABLE_XXHASH,
            CarbonCommonConstants.ENABLE_XXHASH_DEFAULT));
    if (useXXHash) {
        xxHash32 = XXHashFactory.fastestInstance().hash32();
    }
}
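In this project (Apache CarbonData) the 32-bit hash serves as a cheap lookup key for reverse dictionary resolution. The sketch below illustrates the idea only; the map layout and method names are hypothetical, not CarbonData's actual API:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import net.jpountz.xxhash.XXHash32;
import net.jpountz.xxhash.XXHashFactory;

public class ReverseLookupSketch {
    private final XXHash32 xxHash32 = XXHashFactory.fastestInstance().hash32();
    // Hash of the value -> surrogate keys whose values share that hash.
    private final Map<Integer, List<Integer>> hashToSurrogates = new HashMap<>();

    void put(int surrogateKey, String value) {
        byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
        int h = xxHash32.hash(bytes, 0, bytes.length, 0);
        hashToSurrogates.computeIfAbsent(h, k -> new ArrayList<>()).add(surrogateKey);
    }

    public static void main(String[] args) {
        ReverseLookupSketch sketch = new ReverseLookupSketch();
        sketch.put(1, "alpha");
        sketch.put(2, "beta");
        System.out.println(sketch.hashToSurrogates);
    }
}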
Example 7: register
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
public static final void register(final Map<String, Hasher> hashers) {
    hashers.put(Jp64Hasher.XXH64_JNI,
        new Jp64Hasher(XXHashFactory.nativeInstance().hash64()));
    hashers.put(Jp64Hasher.XXH64_UNSAFE,
        new Jp64Hasher(XXHashFactory.unsafeInstance().hash64()));
    hashers.put(Jp64Hasher.XXH64_SAFE,
        new Jp64Hasher(XXHashFactory.safeInstance().hash64()));
}
Example 8: register
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
public static final void register(final Map<String, Hasher> hashers) {
    hashers.put(Jp32Hasher.XXH32_JNI,
        new Jp32Hasher(XXHashFactory.nativeInstance().hash32()));
    hashers.put(Jp32Hasher.XXH32_UNSAFE,
        new Jp32Hasher(XXHashFactory.unsafeInstance().hash32()));
    hashers.put(Jp32Hasher.XXH32_SAFE,
        new Jp32Hasher(XXHashFactory.safeInstance().hash32()));
}
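Together, the two register methods cover all three lz4-java implementations: nativeInstance() binds to the JNI library and can fail if it cannot be loaded, unsafeInstance() relies on sun.misc.Unsafe, and safeInstance() is pure Java. A defensive selection sketch (the fallback logic is illustrative):

import net.jpountz.xxhash.XXHash64;
import net.jpountz.xxhash.XXHashFactory;

public class FactoryFallbackSketch {
    static XXHash64 pickHash64() {
        try {
            // Fastest, but requires the bundled native library to load.
            return XXHashFactory.nativeInstance().hash64();
        } catch (Throwable t) {
            // Pure-Java implementations always work.
            return XXHashFactory.fastestJavaInstance().hash64();
        }
    }

    public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4};
        System.out.printf("%016x%n", pickHash64().hash(data, 0, data.length, 0));
    }
}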
Example 9: LZ4FrameOutputStream
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize Default: 4. The block size used during compression; codes 4=64KB, 5=256KB, 6=1MB, 7=4MB. Any
 *            other value throws an exception.
 * @param knownSize The size of the uncompressed data. A value less than zero means unknown.
 * @param bits A set of features to use
 * @throws IOException if an I/O error occurs while writing the frame header
 */
public LZ4FrameOutputStream(OutputStream out, BLOCKSIZE blockSize, long knownSize, FLG.Bits... bits) throws IOException {
    super(out);
    compressor = getCompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    frameInfo = new FrameInfo(new FLG(FLG.DEFAULT_VERSION, bits), new BD(blockSize));
    maxBlockSize = frameInfo.getBD().getBlockMaximumSize();
    buffer = ByteBuffer.allocate(maxBlockSize).order(ByteOrder.LITTLE_ENDIAN);
    compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
    if (frameInfo.getFLG().isEnabled(FLG.Bits.CONTENT_SIZE) && knownSize < 0) {
        throw new IllegalArgumentException("Known size must be greater than or equal to zero in order to use the known size feature");
    }
    this.knownSize = knownSize;
    writeHeader();
}
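A usage sketch for this constructor, assuming the nested BLOCKSIZE enum and FLG.Bits flags have the same shape as in lz4-java's later LZ4FrameOutputStream API:

import java.io.ByteArrayOutputStream;

public class FrameWriteSketch {
    public static void main(String[] args) throws Exception {
        byte[] data = "payload".getBytes("UTF-8");
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        // Declare the uncompressed size up front via the CONTENT_SIZE frame flag.
        try (LZ4FrameOutputStream out = new LZ4FrameOutputStream(
                sink, LZ4FrameOutputStream.BLOCKSIZE.SIZE_64KB, data.length,
                LZ4FrameOutputStream.FLG.Bits.CONTENT_SIZE)) {
            out.write(data);
        }
        System.out.println("frame bytes: " + sink.size());
    }
}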
Example 10: LZ4FrameInputStream
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
 *
 * @param in The stream to decompress
 * @throws IOException if an I/O error occurs while reading the frame header
 */
public LZ4FrameInputStream(InputStream in) throws IOException {
    super(in);
    decompressor = LZ4Factory.fastestInstance().safeDecompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    nextFrameInfo();
}
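Pairing this input stream with the output stream from example 9 gives a frame-level round trip; the sketch below makes the same assumptions about the nested types:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

public class FrameRoundTripSketch {
    public static void main(String[] args) throws Exception {
        byte[] original = "hello frame".getBytes("UTF-8");
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (LZ4FrameOutputStream out = new LZ4FrameOutputStream(
                sink, LZ4FrameOutputStream.BLOCKSIZE.SIZE_64KB, original.length,
                LZ4FrameOutputStream.FLG.Bits.CONTENT_SIZE)) {
            out.write(original);
        }
        try (LZ4FrameInputStream in =
                 new LZ4FrameInputStream(new ByteArrayInputStream(sink.toByteArray()))) {
            byte[] buf = new byte[original.length];
            int n = in.read(buf);
            System.out.println(new String(buf, 0, n, "UTF-8"));
        }
    }
}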
Example 11: KafkaLZ4BlockOutputStream
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize Default: 4. The block size used during compression. 4=64KB, 5=256KB, 6=1MB, 7=4MB. Any other
 *            value throws an exception.
 * @param blockChecksum Default: false. When true, an XXHash32 checksum is computed and appended to the stream for
 *            every block of data.
 * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum
 *            compatible with older Kafka clients.
 * @throws IOException if an I/O error occurs while writing the frame header
 */
public KafkaLZ4BlockOutputStream( OutputStream out, int blockSize, boolean blockChecksum, boolean useBrokenFlagDescriptorChecksum ) throws IOException {
    super( out );
    compressor = LZ4Factory.fastestInstance().highCompressor( 17 );
    checksum = XXHashFactory.fastestInstance().hash32();
    this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum;
    bd = new BD( blockSize );
    flg = new FLG( blockChecksum );
    bufferOffset = 0;
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[compressor.maxCompressedLength( maxBlockSize )];
    finished = false;
    writeHeader();
}
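Unlike example 2, this variant trades speed for compression ratio by using highCompressor(17), the LZ4HC compressor at its maximum level, instead of fastCompressor(). A quick comparison sketch on highly compressible input:

import java.util.Arrays;
import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;

public class CompressorLevelSketch {
    public static void main(String[] args) {
        byte[] data = new byte[4096];
        Arrays.fill(data, (byte) 'a'); // highly compressible input

        LZ4Compressor fast = LZ4Factory.fastestInstance().fastCompressor();
        LZ4Compressor high = LZ4Factory.fastestInstance().highCompressor(17);

        System.out.println("fast: " + fast.compress(data).length + " bytes");
        System.out.println("high: " + high.compress(data).length + " bytes");
    }
}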
Example 12: KafkaLZ4BlockInputStream
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
 *
 * @param in The stream to decompress
 * @param ignoreFlagDescriptorChecksum for compatibility with old Kafka clients, ignore an incorrect HC byte
 * @throws IOException if an I/O error occurs while reading the frame header
 */
public KafkaLZ4BlockInputStream( InputStream in, boolean ignoreFlagDescriptorChecksum ) throws IOException {
    super( in );
    decompressor = LZ4Factory.fastestInstance().safeDecompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.ignoreFlagDescriptorChecksum = ignoreFlagDescriptorChecksum;
    readHeader();
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[maxBlockSize];
    bufferOffset = 0;
    bufferSize = 0;
    finished = false;
}
Example 13: XXHashKeyHashing
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
public XXHashKeyHashing(boolean allowNative) {
    if (allowNative) {
        factory = XXHashFactory.fastestInstance();
    } else {
        factory = XXHashFactory.fastestJavaInstance();
    }
}
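fastestInstance() prefers the JNI-backed implementation when the native library loads and silently falls back to Java otherwise, while fastestJavaInstance() never touches JNI, which is why it backs the allowNative=false branch. A small sketch showing both factories in use:

import net.jpountz.xxhash.XXHashFactory;

public class FactoryChoiceSketch {
    public static void main(String[] args) {
        // May be JNI-backed if the native library loads; otherwise this
        // automatically falls back to the fastest pure-Java implementation.
        XXHashFactory fastest = XXHashFactory.fastestInstance();
        // Never JNI: picks the unsafe or safe Java implementation.
        XXHashFactory javaOnly = XXHashFactory.fastestJavaInstance();

        byte[] key = {10, 20, 30};
        System.out.println(fastest.hash32().hash(key, 0, key.length, 0));
        System.out.println(javaOnly.hash32().hash(key, 0, key.length, 0));
    }
}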
Example 14: SixtPartitioner
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
public SixtPartitioner() {
    XXHashFactory factory = XXHashFactory.fastestInstance();
    xxHasher = factory.hash32();
}
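The partition() method of this Kafka partitioner is not shown, so the following is only a plausible sketch of how the xxHasher field might be used to map a key onto a partition; the seed and sign-masking strategy are assumptions:

import java.nio.charset.StandardCharsets;
import net.jpountz.xxhash.XXHash32;
import net.jpountz.xxhash.XXHashFactory;

public class PartitionSketch {
    private static final int SEED = 0; // assumed seed
    private final XXHash32 xxHasher = XXHashFactory.fastestInstance().hash32();

    int partition(String key, int numPartitions) {
        byte[] bytes = key.getBytes(StandardCharsets.UTF_8);
        int hash = xxHasher.hash(bytes, 0, bytes.length, SEED);
        // Mask off the sign bit so the modulo result is non-negative.
        return (hash & 0x7fffffff) % numPartitions;
    }

    public static void main(String[] args) {
        System.out.println(new PartitionSketch().partition("order-42", 12));
    }
}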
Example 15: testCompression
import net.jpountz.xxhash.XXHashFactory; // import the required package/class
@Test
public void testCompression() throws Exception {
    byte[] compressed = compressedBytes();
    // Check magic bytes stored as little-endian
    int offset = 0;
    assertEquals(0x04, compressed[offset++]);
    assertEquals(0x22, compressed[offset++]);
    assertEquals(0x4D, compressed[offset++]);
    assertEquals(0x18, compressed[offset++]);
    // Check flg descriptor
    byte flg = compressed[offset++];
    // 2-bit version must be 01
    int version = (flg >>> 6) & 3;
    assertEquals(1, version);
    // Reserved bits should always be 0
    int reserved = flg & 3;
    assertEquals(0, reserved);
    // Check block descriptor
    byte bd = compressed[offset++];
    // Block max-size
    int blockMaxSize = (bd >>> 4) & 7;
    // Only supported values are 4 (64KB), 5 (256KB), 6 (1MB), 7 (4MB)
    assertTrue(blockMaxSize >= 4);
    assertTrue(blockMaxSize <= 7);
    // Multiple reserved bit ranges in the block descriptor
    reserved = bd & 15;
    assertEquals(0, reserved);
    reserved = (bd >>> 7) & 1;
    assertEquals(0, reserved);
    // If the flg descriptor sets the content size flag,
    // there are 8 additional bytes before the checksum
    boolean contentSize = ((flg >>> 3) & 1) != 0;
    if (contentSize) {
        offset += 8;
    }
    // The checksum covers the frame descriptor: flg, bd, and the optional content size,
    // so the initial offset should be 4 (skipping the magic bytes)
    int off = 4;
    int len = offset - 4;
    // The initial implementation incorrectly applied the checksum to the full header,
    // including the magic bytes
    if (this.useBrokenFlagDescriptorChecksum) {
        off = 0;
        len = offset;
    }
    int hash = XXHashFactory.fastestInstance().hash32().hash(compressed, off, len, 0);
    byte hc = compressed[offset++];
    assertEquals((byte) ((hash >> 8) & 0xFF), hc);
    // Check the EndMark: a data block with size 0, expressed as a 32-bit value
    if (this.close) {
        offset = compressed.length - 4;
        assertEquals(0, compressed[offset++]);
        assertEquals(0, compressed[offset++]);
        assertEquals(0, compressed[offset++]);
        assertEquals(0, compressed[offset++]);
    }
}