This article collects typical usage examples of the Golang function MaxEncodedLen from the github.com/golang/snappy package. If you have been wondering what MaxEncodedLen does and how to call it: it returns the maximum possible length of a snappy block given its uncompressed length, and is typically used to size a destination buffer before calling snappy.Encode. The curated examples below may help.
Nine code examples of MaxEncodedLen are shown, sorted by popularity by default.
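Before the examples, it helps to see the pattern they all share. MaxEncodedLen(n) returns the maximum possible size of the snappy encoding of an n-byte input, so a destination buffer of that size is always large enough for snappy.Encode to compress into without reallocating. A minimal, self-contained sketch (the sample data and names are illustrative):

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello, hello, hello, snappy!")
	// A buffer of MaxEncodedLen(len(src)) bytes can hold any possible
	// encoding of src, even incompressible input.
	dst := make([]byte, snappy.MaxEncodedLen(len(src)))
	// Encode returns the prefix of dst that was actually used, so
	// len(compressed) is the real compressed size.
	compressed := snappy.Encode(dst, src)
	fmt.Printf("%d bytes in, %d bytes out\n", len(src), len(compressed))
}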
Example 1: NewPartition
// NewPartition creates a new Partition on which to store the WAL.
func NewPartition(id uint8, path string, segmentSize int64, sizeThreshold uint64, readySeriesSize int,
	flushColdInterval time.Duration, index IndexWriter, statMap *expvar.Map) (*Partition, error) {
	p := &Partition{
		id:                id,
		path:              path,
		maxSegmentSize:    segmentSize,
		sizeThreshold:     sizeThreshold,
		lastWriteTime:     time.Now(),
		cache:             make(map[string]*cacheEntry),
		readySeriesSize:   readySeriesSize,
		index:             index,
		flushColdInterval: flushColdInterval,
		statMap:           statMap,
	}

	p.os.OpenCompactionFile = os.OpenFile
	p.os.OpenSegmentFile = os.OpenFile
	p.os.Rename = os.Rename

	p.buf = make([]byte, partitionBufLen)
	p.snappybuf = make([]byte, snappy.MaxEncodedLen(partitionBufLen))

	return p, nil
}
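The constructor sizes p.snappybuf once with MaxEncodedLen(partitionBufLen) so that any later flush can compress a full partition buffer without allocating. A hedged sketch of how a flush might reuse that buffer (flushBuf is illustrative, not a method of the original Partition type):

// flushBuf compresses the first n bytes of p.buf into the preallocated
// p.snappybuf. Because snappybuf was sized with MaxEncodedLen(partitionBufLen),
// Encode can never run out of room. (Illustrative sketch only.)
func (p *Partition) flushBuf(n int) []byte {
	return snappy.Encode(p.snappybuf, p.buf[:n])
}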
Example 2: Fuzz
func Fuzz(data []byte) int {
	n, err := snappy.DecodedLen(data)
	if err != nil || n > 1e6 {
		return 0
	}
	if n < 0 {
		panic("negative decoded len")
	}
	dec, err := snappy.Decode(nil, data)
	if err != nil {
		if dec != nil {
			panic("dec is not nil")
		}
		return 0
	}
	if len(dec) != n {
		println(len(dec), n)
		panic("bad decoded len")
	}
	// Re-encoding the decoded data must never exceed the MaxEncodedLen bound.
	n = snappy.MaxEncodedLen(len(dec))
	enc := snappy.Encode(nil, dec)
	if len(enc) > n {
		panic("bad encoded len")
	}
	dec1, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec1) {
		panic("not equal")
	}
	return 1
}
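The fuzz target checks two invariants of MaxEncodedLen: re-encoding decoded data never exceeds the bound, and an encode/decode round trip is lossless. The same invariants can be checked deterministically in an ordinary test; this sketch assumes only the bytes, testing, and snappy packages:

// TestMaxEncodedLenBound is an illustrative, deterministic version of the
// invariants the fuzz target above checks on random inputs.
func TestMaxEncodedLenBound(t *testing.T) {
	for _, src := range [][]byte{
		nil,
		[]byte("a"),
		bytes.Repeat([]byte("abcd"), 1024), // highly compressible
	} {
		enc := snappy.Encode(nil, src)
		if max := snappy.MaxEncodedLen(len(src)); len(enc) > max {
			t.Fatalf("encoded %d bytes, bound was %d", len(enc), max)
		}
		dec, err := snappy.Decode(nil, enc)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(src, dec) {
			t.Fatal("round trip mismatch")
		}
	}
}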
Example 3: writeBlock
func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
	// Compress the buffer if necessary.
	var b []byte
	if compression == opt.SnappyCompression {
		// Allocate scratch enough for compression and block trailer.
		if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
			w.compressionScratch = make([]byte, n)
		}
		compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
		n := len(compressed)
		b = compressed[:n+blockTrailerLen]
		b[n] = blockTypeSnappyCompression
	} else {
		tmp := buf.Alloc(blockTrailerLen)
		tmp[0] = blockTypeNoCompression
		b = buf.Bytes()
	}

	// Calculate the checksum.
	n := len(b) - 4
	checksum := util.NewCRC(b[:n]).Value()
	binary.LittleEndian.PutUint32(b[n:], checksum)

	// Write the buffer to the file.
	_, err = w.writer.Write(b)
	if err != nil {
		return
	}
	bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
	w.offset += uint64(len(b))
	return
}
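The trailer written above is one compression-type byte followed by a four-byte little-endian CRC over everything before it; the arithmetic only lines up if blockTrailerLen is 5. A hedged sketch of the matching read-side check (verifyBlock is illustrative and not part of the original package):

// verifyBlock validates the layout writeBlock produces:
// payload | 1-byte compression type | 4-byte little-endian CRC,
// where the CRC covers the payload and the type byte.
func verifyBlock(b []byte) (payload []byte, compType byte, err error) {
	if len(b) < blockTrailerLen {
		return nil, 0, errors.New("block too short")
	}
	n := len(b) - 4
	want := binary.LittleEndian.Uint32(b[n:])
	if got := util.NewCRC(b[:n]).Value(); got != want {
		return nil, 0, errors.New("checksum mismatch")
	}
	return b[:n-1], b[n-1], nil
}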
Example 4: NewPartition
func NewPartition(id uint8, path string, segmentSize int64, sizeThreshold uint64, readySeriesSize int, flushColdInterval time.Duration, index IndexWriter) (*Partition, error) {
	// Configure expvar monitoring. It's OK to do this even if the service fails to open and
	// should be done before any data could arrive for the service.
	key := strings.Join([]string{"partition", strconv.Itoa(int(id)), path}, ":")
	tags := map[string]string{"partition": path, "id": strconv.Itoa(int(id))}

	p := &Partition{
		id:                id,
		path:              path,
		maxSegmentSize:    segmentSize,
		sizeThreshold:     sizeThreshold,
		lastWriteTime:     time.Now(),
		cache:             make(map[string]*cacheEntry),
		readySeriesSize:   readySeriesSize,
		index:             index,
		flushColdInterval: flushColdInterval,
		statMap:           influxdb.NewStatistics(key, "partition", tags),
	}

	p.os.OpenCompactionFile = os.OpenFile
	p.os.OpenSegmentFile = os.OpenFile
	p.os.Rename = os.Rename

	p.buf = make([]byte, partitionBufLen)
	p.snappybuf = make([]byte, snappy.MaxEncodedLen(partitionBufLen))

	return p, nil
}
Example 5: block_compressor
func block_compressor(in DuplexPipe, block_size, conc_level int) DuplexPipe {
	// This is the output of the generator
	out := DuplexPipe{make(chan Block, conc_level), make(chan Block, conc_level)}

	// Block ready to hold reading
	comp_len := snappy.MaxEncodedLen(block_size)
	for i := 0; i < conc_level; i++ {
		out.Upstream <- Block{make([]byte, comp_len), 0, 0}
	}

	go func() {
		done := make(chan bool, conc_level)
		for block := range in.Downstream {
			comp_buf := <-out.Upstream
			done <- false
			// Pass block and comp_buf as arguments so each goroutine
			// works on its own copies instead of racing on the shared
			// loop variables.
			go func(block, comp_buf Block) {
				fmt.Println("Compressing block", block.N, "ID", block.BlockID)
				if block.N == block_size {
					// We are allocating comp_chunk extra here to know length
					// ! Fork snappy to return len(comp_buf.Buf) instead of
					// the actual slice of comp_buf.Buf
					comp_chunk := snappy.Encode(comp_buf.Buf, block.Buf)
					// this misses the point of having reusable slices... :-(
					comp_buf.N = len(comp_chunk)
					comp_buf.BlockID = block.BlockID
				} else {
					comp_buf.N = block.N
					comp_buf.BlockID = block.BlockID
					copy(comp_buf.Buf[:comp_buf.N], block.Buf)
				}
				in.Upstream <- block
				out.Downstream <- comp_buf
				<-done
			}(block, comp_buf)
		}
		// Wait for them to finish
		for i := 0; i < conc_level; i++ {
			done <- true
		}
		close(out.Downstream)
	}()

	return out
}
Example 6: encrypt
func (s *hopCipher) encrypt(msg []byte) []byte {
	// Compress before encrypting; ciphertext does not compress.
	cmsg := make([]byte, snappy.MaxEncodedLen(len(msg)))
	cmsg = snappy.Encode(cmsg, msg)
	pmsg := PKCS5Padding(cmsg, cipherBlockSize)

	buf := make([]byte, len(pmsg)+cipherBlockSize)
	// The random IV is prepended to the ciphertext.
	iv := buf[:cipherBlockSize]
	rand.Read(iv)

	encrypter := _cipher.NewCBCEncrypter(s.block, iv)
	encrypter.CryptBlocks(buf[cipherBlockSize:], pmsg)

	return buf
}
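encrypt compresses with snappy before padding and CBC-encrypting, and prepends a random IV to the ciphertext. For context, here is a hedged sketch of the inverse; PKCS5UnPadding is assumed to be the counterpart of the PKCS5Padding helper used above and is not shown in the original:

// decrypt reverses encrypt: split off the IV, CBC-decrypt, strip the
// PKCS#5 padding, then snappy-decompress. (Illustrative sketch; the
// original type's actual decrypt method may differ.)
func (s *hopCipher) decrypt(buf []byte) ([]byte, error) {
	if len(buf) < 2*cipherBlockSize || len(buf)%cipherBlockSize != 0 {
		return nil, errors.New("invalid ciphertext length")
	}
	iv := buf[:cipherBlockSize]
	pmsg := make([]byte, len(buf)-cipherBlockSize)
	decrypter := _cipher.NewCBCDecrypter(s.block, iv)
	decrypter.CryptBlocks(pmsg, buf[cipherBlockSize:])
	cmsg := PKCS5UnPadding(pmsg) // assumed inverse of PKCS5Padding
	return snappy.Decode(nil, cmsg)
}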
Example 7: writeToLog
func (l *WAL) writeToLog(entry WALEntry) (int, error) {
	// limit how many concurrent encodings can be in flight. Since we can only
	// write one at a time to disk, a slow disk can cause the allocations below
	// to increase quickly. If we're backed up, wait until others have completed.
	l.limiter.Take()
	defer l.limiter.Release()

	// encode and compress the entry while we're not locked
	bytes := getBuf(walEncodeBufSize)
	defer putBuf(bytes)

	b, err := entry.Encode(bytes)
	if err != nil {
		return -1, err
	}

	encBuf := getBuf(snappy.MaxEncodedLen(len(b)))
	defer putBuf(encBuf)

	compressed := snappy.Encode(encBuf, b)

	l.mu.Lock()
	defer l.mu.Unlock()

	// Make sure the log has not been closed
	select {
	case <-l.closing:
		return -1, ErrWALClosed
	default:
	}

	// roll the segment file if needed
	if err := l.rollSegment(); err != nil {
		return -1, fmt.Errorf("error rolling WAL segment: %v", err)
	}

	// write and sync
	if err := l.currentSegmentWriter.Write(entry.Type(), compressed); err != nil {
		return -1, fmt.Errorf("error writing WAL entry: %v", err)
	}

	// Update stats for current segment size
	atomic.StoreInt64(&l.stats.CurrentBytes, int64(l.currentSegmentWriter.size))

	l.lastWriteTime = time.Now()

	return l.currentSegmentID, l.currentSegmentWriter.sync()
}
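l.limiter.Take() and Release() act as a counting semaphore that caps how many encodings run concurrently. A minimal sketch of such a limiter, assuming a buffered-channel implementation (the project's real limiter type may differ):

// limiter is a counting semaphore: Take blocks once n operations are in
// flight; Release frees a slot. (Illustrative sketch only.)
type limiter chan struct{}

func newLimiter(n int) limiter { return make(limiter, n) }
func (l limiter) Take()        { l <- struct{}{} }
func (l limiter) Release()     { <-l }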
Example 8: writeToLog
func (l *WAL) writeToLog(entry WALEntry) (int, error) {
	// encode and compress the entry while we're not locked
	bytes := getBuf(walEncodeBufSize)
	defer putBuf(bytes)

	b, err := entry.Encode(bytes)
	if err != nil {
		return -1, err
	}

	encBuf := getBuf(snappy.MaxEncodedLen(len(b)))
	defer putBuf(encBuf)

	compressed := snappy.Encode(encBuf, b)

	l.mu.Lock()
	defer l.mu.Unlock()

	// Make sure the log has not been closed
	select {
	case <-l.closing:
		return -1, ErrWALClosed
	default:
	}

	// roll the segment file if needed
	if err := l.rollSegment(); err != nil {
		return -1, fmt.Errorf("error rolling WAL segment: %v", err)
	}

	// write and sync
	if err := l.currentSegmentWriter.Write(entry.Type(), compressed); err != nil {
		return -1, fmt.Errorf("error writing WAL entry: %v", err)
	}

	// Update stats for current segment size
	curSize := new(expvar.Int)
	curSize.Set(int64(l.currentSegmentWriter.size))
	l.statMap.Set(statWALCurrentBytes, curSize)

	l.lastWriteTime = time.Now()

	return l.currentSegmentID, l.currentSegmentWriter.sync()
}
Example 9: getMaxCompressedSize
func (snappyMessageCompressor) getMaxCompressedSize(srcLen int) int {
	return snappy.MaxEncodedLen(srcLen)
}
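A compressor that exposes this bound lets the caller size a scratch buffer before encoding a message. A hedged usage sketch (compressMessage is illustrative):

// compressMessage sizes its scratch buffer from the worst-case bound and
// compresses into it; Encode returns the correctly sized prefix.
func compressMessage(msg []byte) []byte {
	var c snappyMessageCompressor
	scratch := make([]byte, c.getMaxCompressedSize(len(msg)))
	return snappy.Encode(scratch, msg)
}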