当前位置: 首页>>代码示例>>Java>>正文


Java XXHashFactory类代码示例

本文整理汇总了Java中net.jpountz.xxhash.XXHashFactory的典型用法代码示例。如果您正苦于以下问题:Java XXHashFactory类的具体用法?Java XXHashFactory怎么用?Java XXHashFactory使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


XXHashFactory类属于net.jpountz.xxhash包,在下文中一共展示了XXHashFactory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: receive

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
void receive(FileReference reference, String filename, byte[] content) {

            log.log(LogLevel.INFO, "Preparing receive call for " + reference.value() + " and file " + filename);

            // Build the RPC request that carries the file payload to the target.
            Request request = new Request("filedistribution.receiveFile");

            log.log(LogLevel.INFO, "Calling " + request.methodName() + " with target " + target);

            // Hash the content so the receiver can verify payload integrity.
            XXHash64 xxHash64 = XXHashFactory.fastestInstance().hash64();

            // Parameter order is part of the RPC contract: reference, name,
            // payload, xxhash64 of payload (seed 0), error code, error message.
            request.parameters().add(new StringValue(reference.value()));
            request.parameters().add(new StringValue(filename));
            request.parameters().add(new DataValue(content));
            request.parameters().add(new Int64Value(xxHash64.hash(ByteBuffer.wrap(content), 0)));
            request.parameters().add(new Int32Value(0));
            request.parameters().add(new StringValue("OK"));
            log.log(LogLevel.INFO, "Doing invokeSync");
            target.invokeSync(request, 5);
            log.log(LogLevel.INFO, "Done with invokeSync");
        }
 
开发者ID:vespa-engine,项目名称:vespa,代码行数:20,代码来源:RpcTester.java

示例2: KafkaLZ4BlockOutputStream

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
 *            values will generate an exception
 * @param blockChecksum Default: false. When true, a XXHash32 checksum is computed and appended to the stream for
 *            every block of data
 * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum
 *            compatible with older kafka clients.
 * @throws IOException if writing the frame header to the underlying stream fails
 */
public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum, boolean useBrokenFlagDescriptorChecksum) throws IOException {
    super(out);
    // fastestInstance() picks the best available LZ4/xxhash backend at runtime.
    compressor = LZ4Factory.fastestInstance().fastCompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum;
    // BD must be built before maxBlockSize is derived from it below;
    // per the javadoc, invalid blockSize values raise an exception here.
    bd = new BD(blockSize);
    flg = new FLG(blockChecksum);
    bufferOffset = 0;
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    // Sized for the worst-case compressed length of a full block.
    compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
    finished = false;
    // Emit the LZ4 frame header before any data blocks are written.
    writeHeader();
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:27,代码来源:KafkaLZ4BlockOutputStream.java

示例3: smallXXPerformanceTest

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
@Test
public void smallXXPerformanceTest() {

	// Safe (pure-Java, bounds-checked) xxHash implementation.
	final XXHashFactory factory = XXHashFactory.safeInstance();

	// Deterministic payload: bytes 0, 1, 2, ... (wrapping at 256).
	final byte[] data = new byte[size];
	for ( int index = 0; index < data.length; index++ ) {
		data[index] = (byte) index;
	}

	// Hash the payload once per iteration, varying the seed each time.
	for ( int seed = 1; seed < iterations; seed++ ) {
		factory.hash64().hash( data, 0, data.length, seed );
	}
}
 
开发者ID:cinnom,项目名称:nano-cuckoo,代码行数:15,代码来源:HashPerformanceIT.java

示例4: smallXXUnsafePerformanceTest

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
@Test
public void smallXXUnsafePerformanceTest() {

	// Unsafe-based (sun.misc.Unsafe) pure-Java xxHash implementation.
	final XXHashFactory factory = XXHashFactory.unsafeInstance();

	// Deterministic payload: bytes 0, 1, 2, ... (wrapping at 256).
	final byte[] data = new byte[size];
	for ( int index = 0; index < data.length; index++ ) {
		data[index] = (byte) index;
	}

	// Hash the payload once per iteration, varying the seed each time.
	for ( int seed = 1; seed < iterations; seed++ ) {
		factory.hash64().hash( data, 0, data.length, seed );
	}
}
 
开发者ID:cinnom,项目名称:nano-cuckoo,代码行数:15,代码来源:HashPerformanceIT.java

示例5: KafkaLZ4BlockInputStream

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
 *
 * @param in The stream to decompress
 * @param ignoreFlagDescriptorChecksum for compatibility with old kafka clients, ignore incorrect HC byte
 * @throws IOException if the frame header cannot be read or is invalid
 */
public KafkaLZ4BlockInputStream(InputStream in, boolean ignoreFlagDescriptorChecksum) throws IOException {
    super(in);
    // Safe decompressor validates output bounds against malformed input.
    decompressor = LZ4Factory.fastestInstance().safeDecompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.ignoreFlagDescriptorChecksum = ignoreFlagDescriptorChecksum;
    // Must run before the lines below: bd is read immediately after, so
    // readHeader() evidently initializes the block descriptor.
    readHeader();
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[maxBlockSize];
    bufferOffset = 0;
    bufferSize = 0;
    finished = false;
}
 
开发者ID:txazo,项目名称:kafka,代码行数:21,代码来源:KafkaLZ4BlockInputStream.java

示例6: ColumnReverseDictionaryInfo

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Check the {@code ENABLE_XXHASH} carbon property and initialize the
 * xxHash32 instance when it is enabled; otherwise xxHash32 stays unset.
 */
public ColumnReverseDictionaryInfo() {
  // Boolean.parseBoolean returns the primitive directly, avoiding the
  // boxing + auto-unboxing done by Boolean.valueOf; semantics are identical
  // (null or any non-"true" string yields false).
  boolean useXXHash = Boolean.parseBoolean(CarbonProperties.getInstance()
      .getProperty(CarbonCommonConstants.ENABLE_XXHASH,
          CarbonCommonConstants.ENABLE_XXHASH_DEFAULT));
  if (useXXHash) {
    // fastestInstance() selects the best available xxhash backend at runtime.
    xxHash32 = XXHashFactory.fastestInstance().hash32();
  }
}
 
开发者ID:carbondata,项目名称:carbondata,代码行数:12,代码来源:ColumnReverseDictionaryInfo.java

示例7: register

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
public static final void register(final Map<String, Hasher> hashers) {
  // Register each available XXH64 backend under its own lookup key.
  // Factory lookups happen in the same order as before, so any failure
  // (e.g. missing JNI library) surfaces at the same point.
  final XXHashFactory jniFactory = XXHashFactory.nativeInstance();
  hashers.put(Jp64Hasher.XXH64_JNI, new Jp64Hasher(jniFactory.hash64()));
  final XXHashFactory unsafeFactory = XXHashFactory.unsafeInstance();
  hashers.put(Jp64Hasher.XXH64_UNSAFE, new Jp64Hasher(unsafeFactory.hash64()));
  final XXHashFactory safeFactory = XXHashFactory.safeInstance();
  hashers.put(Jp64Hasher.XXH64_SAFE, new Jp64Hasher(safeFactory.hash64()));
}
 
开发者ID:benalexau,项目名称:hash-bench,代码行数:9,代码来源:Jp64Hasher.java

示例8: register

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
public static final void register(final Map<String, Hasher> hashers) {
  // Register each available XXH32 backend under its own lookup key.
  // Factory lookups happen in the same order as before, so any failure
  // (e.g. missing JNI library) surfaces at the same point.
  final XXHashFactory jniFactory = XXHashFactory.nativeInstance();
  hashers.put(Jp32Hasher.XXH32_JNI, new Jp32Hasher(jniFactory.hash32()));
  final XXHashFactory unsafeFactory = XXHashFactory.unsafeInstance();
  hashers.put(Jp32Hasher.XXH32_UNSAFE, new Jp32Hasher(unsafeFactory.hash32()));
  final XXHashFactory safeFactory = XXHashFactory.safeInstance();
  hashers.put(Jp32Hasher.XXH32_SAFE, new Jp32Hasher(safeFactory.hash32()));
}
 
开发者ID:benalexau,项目名称:hash-bench,代码行数:9,代码来源:Jp32Hasher.java

示例9: LZ4FrameOutputStream

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
 *            values will generate an exception
 * @param knownSize The size of the uncompressed data. A value less than zero means unknown.
 * @param bits A set of features to use
 * @throws IOException if writing the frame header fails
 * @throws IllegalArgumentException if the CONTENT_SIZE feature is enabled but {@code knownSize} is negative
 */
public LZ4FrameOutputStream(OutputStream out, BLOCKSIZE blockSize, long knownSize, FLG.Bits... bits) throws IOException {
  super(out);
  compressor = getCompressor();
  // xxhash32 is the checksum used by the LZ4 frame format.
  checksum = XXHashFactory.fastestInstance().hash32();
  frameInfo = new FrameInfo(new FLG(FLG.DEFAULT_VERSION, bits), new BD(blockSize));
  maxBlockSize = frameInfo.getBD().getBlockMaximumSize();
  buffer = ByteBuffer.allocate(maxBlockSize).order(ByteOrder.LITTLE_ENDIAN);
  compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
  // The guard rejects only negative sizes (zero is a valid known size), so
  // the message now says "greater than or equal to zero" instead of the
  // previous, contradictory "greater than zero".
  if (frameInfo.getFLG().isEnabled(FLG.Bits.CONTENT_SIZE) && knownSize < 0) {
    throw new IllegalArgumentException("Known size must be greater than or equal to zero in order to use the known size feature");
  }
  this.knownSize = knownSize;
  // Emit the frame header before any data blocks are written.
  writeHeader();
}
 
开发者ID:htools,项目名称:htools,代码行数:25,代码来源:LZ4FrameOutputStream.java

示例10: LZ4FrameInputStream

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
 *
 * @param in The stream to decompress
 * @throws IOException if the first frame header cannot be read or is invalid
 */
public LZ4FrameInputStream(InputStream in) throws IOException {
  super(in);
  // Safe decompressor validates output bounds against malformed input.
  decompressor = LZ4Factory.fastestInstance().safeDecompressor();
  // xxhash32 checksum, as used by the LZ4 frame format.
  checksum = XXHashFactory.fastestInstance().hash32();
  // Read the first frame header so the stream is positioned at data blocks.
  nextFrameInfo();
}
 
开发者ID:htools,项目名称:htools,代码行数:13,代码来源:LZ4FrameInputStream.java

示例11: KafkaLZ4BlockOutputStream

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out                             The output stream to compress
 * @param blockSize                       Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
 *                                        values will generate an exception
 * @param blockChecksum                   Default: false. When true, a XXHash32 checksum is computed and appended to the stream for
 *                                        every block of data
 * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum
 *                                        compatible with older kafka clients.
 * @throws IOException if writing the frame header to the underlying stream fails
 */
public KafkaLZ4BlockOutputStream( OutputStream out, int blockSize, boolean blockChecksum, boolean useBrokenFlagDescriptorChecksum ) throws IOException {
    super( out );
    // NOTE(review): uses highCompressor(17) (slow, high-ratio codec) rather
    // than the fastCompressor() seen in the reference Kafka variant —
    // presumably intentional for this fork; confirm the level-17 choice.
    compressor = LZ4Factory.fastestInstance().highCompressor( 17 );
    checksum = XXHashFactory.fastestInstance().hash32();
    this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum;
    // BD must be built before maxBlockSize is derived from it below;
    // per the javadoc, invalid blockSize values raise an exception here.
    bd = new BD( blockSize );
    flg = new FLG( blockChecksum );
    bufferOffset = 0;
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    // Sized for the worst-case compressed length of a full block.
    compressedBuffer = new byte[compressor.maxCompressedLength( maxBlockSize )];
    finished = false;
    // Emit the LZ4 frame header before any data blocks are written.
    writeHeader();
}
 
开发者ID:oaplatform,项目名称:oap,代码行数:27,代码来源:KafkaLZ4BlockOutputStream.java

示例12: KafkaLZ4BlockInputStream

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
 *
 * @param in                           The stream to decompress
 * @param ignoreFlagDescriptorChecksum for compatibility with old kafka clients, ignore incorrect HC byte
 * @throws IOException if the frame header cannot be read or is invalid
 */
public KafkaLZ4BlockInputStream( InputStream in, boolean ignoreFlagDescriptorChecksum ) throws IOException {
    super( in );
    // Safe decompressor validates output bounds against malformed input.
    decompressor = LZ4Factory.fastestInstance().safeDecompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.ignoreFlagDescriptorChecksum = ignoreFlagDescriptorChecksum;
    // Must run before the lines below: bd is read immediately after, so
    // readHeader() evidently initializes the block descriptor.
    readHeader();
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[maxBlockSize];
    bufferOffset = 0;
    bufferSize = 0;
    finished = false;
}
 
开发者ID:oaplatform,项目名称:oap,代码行数:21,代码来源:KafkaLZ4BlockInputStream.java

示例13: XXHashKeyHashing

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
public XXHashKeyHashing(boolean allowNative) {
    // When native code is allowed, take the fastest implementation overall;
    // otherwise restrict the choice to the fastest pure-Java implementation.
    factory = allowNative
            ? XXHashFactory.fastestInstance()
            : XXHashFactory.fastestJavaInstance();
}
 
开发者ID:tootedom,项目名称:tomcat-memcached-response-filter,代码行数:8,代码来源:XXHashKeyHashing.java

示例14: SixtPartitioner

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
public SixtPartitioner() {
    // Fastest available xxHash32 implementation, used for partition hashing.
    xxHasher = XXHashFactory.fastestInstance().hash32();
}
 
开发者ID:Sixt,项目名称:ja-micro,代码行数:5,代码来源:SixtPartitioner.java

示例15: testCompression

import net.jpountz.xxhash.XXHashFactory; //导入依赖的package包/类
/**
 * Verifies the layout of the LZ4 frame header emitted by the compressor:
 * magic bytes, FLG/BD descriptor fields, the header-checksum (HC) byte, and
 * the trailing EndMark when the stream has been closed.
 */
@Test
public void testCompression() throws Exception {
    byte[] compressed = compressedBytes();

    // Check magic bytes stored as little-endian
    int offset = 0;
    assertEquals(0x04, compressed[offset++]);
    assertEquals(0x22, compressed[offset++]);
    assertEquals(0x4D, compressed[offset++]);
    assertEquals(0x18, compressed[offset++]);

    // Check flg descriptor
    byte flg = compressed[offset++];

    // 2-bit version must be 01
    int version = (flg >>> 6) & 3;
    assertEquals(1, version);

    // Reserved bits should always be 0
    int reserved = flg & 3;
    assertEquals(0, reserved);

    // Check block descriptor
    byte bd = compressed[offset++];

    // Block max-size
    int blockMaxSize = (bd >>> 4) & 7;
    // Only supported values are 4 (64KB), 5 (256KB), 6 (1MB), 7 (4MB)
    assertTrue(blockMaxSize >= 4);
    assertTrue(blockMaxSize <= 7);

    // Multiple reserved bit ranges in block descriptor
    reserved = bd & 15;
    assertEquals(0, reserved);
    reserved = (bd >>> 7) & 1;
    assertEquals(0, reserved);

    // If flg descriptor sets content size flag
    // there are 8 additional bytes before checksum
    boolean contentSize = ((flg >>> 3) & 1) != 0;
    if (contentSize)
        offset += 8;

    // Checksum applies to frame descriptor: flg, bd, and optional contentsize
    // so initial offset should be 4 (for magic bytes)
    int off = 4;
    int len = offset - 4;

    // Initial implementation of checksum incorrectly applied to full header
    // including magic bytes
    if (this.useBrokenFlagDescriptorChecksum) {
        off = 0;
        len = offset;
    }

    // Recompute the descriptor checksum with seed 0, mirroring the writer.
    int hash = XXHashFactory.fastestInstance().hash32().hash(compressed, off, len, 0);

    // The stored HC byte is the second-lowest byte of the 32-bit hash.
    byte hc = compressed[offset++];
    assertEquals((byte) ((hash >> 8) & 0xFF), hc);

    // Check EndMark, data block with size `0` expressed as a 32-bits value
    if (this.close) {
        offset = compressed.length - 4;
        assertEquals(0, compressed[offset++]);
        assertEquals(0, compressed[offset++]);
        assertEquals(0, compressed[offset++]);
        assertEquals(0, compressed[offset++]);
    }
}
 
开发者ID:YMCoding,项目名称:kafka-0.11.0.0-src-with-comment,代码行数:70,代码来源:KafkaLZ4Test.java


注:本文中的net.jpountz.xxhash.XXHashFactory类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。