This article collects typical usage examples of the Golang function github.com/syndtr/goleveldb/leveldb/filter.NewBloomFilter. If you are wondering what NewBloomFilter does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
A total of 15 code examples of the NewBloomFilter function are shown below, sorted by popularity by default.
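Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share: pass a bloom filter (typically 10 bits per key) via opt.Options when opening a LevelDB database so that point lookups can skip sstables that cannot contain the key. The path db/example.ldb and the key/value pair used here are placeholders for this sketch only, not taken from any of the examples below.

package main

import (
    "log"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/filter"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
    // A 10 bits-per-key bloom filter gives roughly a 1% false-positive
    // rate and lets Get() skip sstables that cannot contain the key.
    o := &opt.Options{
        Filter: filter.NewBloomFilter(10),
    }

    // "db/example.ldb" is a placeholder path for this sketch.
    db, err := leveldb.OpenFile("db/example.ldb", o)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    if err := db.Put([]byte("key"), []byte("value"), nil); err != nil {
        log.Fatal(err)
    }
    val, err := db.Get([]byte("key"), nil)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("key = %s", val)
}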
Example 1: Start
func Start() {
    dbfile := settings.DBFILE
    var err error

    // try to open an existing database
    db, err = sublevel.Open(dbfile, &opt.Options{
        Filter:         filter.NewBloomFilter(10),
        ErrorIfMissing: true,
    })
    if err != nil {
        // database is missing, create it and do initial setup
        db, err = sublevel.Open(dbfile, &opt.Options{
            Filter:       filter.NewBloomFilter(10),
            ErrorIfExist: true,
        })

        // admin party
        SetRulesAt("", map[string]interface{}{
            "_read":  "*",
            "_write": "*",
            "_admin": "*",
        })
    }
    if err != nil {
        log.WithFields(log.Fields{
            "error":  err,
            "DBFILE": settings.DBFILE,
        }).Fatal("couldn't open database file.")
    }
}
Example 2: NewLDBDatabase
// NewLDBDatabase returns a LevelDB wrapped object.
func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
    // Calculate the cache and file descriptor allowance for this particular database
    cache = int(float64(cache) * cacheRatio[filepath.Base(file)])
    if cache < 16 {
        cache = 16
    }
    handles = int(float64(handles) * handleRatio[filepath.Base(file)])
    if handles < 16 {
        handles = 16
    }
    glog.V(logger.Info).Infof("Alloted %dMB cache and %d file handles to %s", cache, handles, file)

    // Open the db and recover any potential corruptions
    db, err := leveldb.OpenFile(file, &opt.Options{
        OpenFilesCacheCapacity: handles,
        BlockCacheCapacity:     cache / 2 * opt.MiB,
        WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
        Filter:                 filter.NewBloomFilter(10),
    })
    if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
        db, err = leveldb.RecoverFile(file, nil)
    }
    // (Re)check for errors and abort if opening of the db failed
    if err != nil {
        return nil, err
    }
    return &LDBDatabase{
        fn: file,
        db: db,
    }, nil
}
Example 3: Open
func Open(cfg skv.Config) (*DB, error) {
    var (
        db  = &DB{}
        err error
    )

    cfg.ReFix()

    os.MkdirAll(cfg.DataDir, 0750)

    db.ldb, err = leveldb.OpenFile(cfg.DataDir, &opt.Options{
        WriteBuffer:            cfg.WriteBuffer * opt.MiB,
        BlockCacheCapacity:     cfg.BlockCacheCapacity * opt.MiB,
        OpenFilesCacheCapacity: cfg.OpenFilesCacheCapacity,
        CompactionTableSize:    cfg.CompactionTableSize * opt.MiB,
        Compression:            opt.SnappyCompression,
        Filter:                 filter.NewBloomFilter(10),
    })

    if err == nil {
        db.ttl_worker()
    }

    return db, err
}
Example 4: newOptions
func newOptions(cfg *config.LevelDBConfig) *opt.Options {
    opts := &opt.Options{}
    opts.ErrorIfMissing = false
    opts.BlockCacheCapacity = cfg.CacheSize

    // we must use a bloom filter
    opts.Filter = filter.NewBloomFilter(defaultFilterBits)

    if !cfg.Compression {
        opts.Compression = opt.NoCompression
    } else {
        opts.Compression = opt.SnappyCompression
    }

    opts.BlockSize = cfg.BlockSize
    opts.WriteBuffer = cfg.WriteBufferSize
    opts.OpenFilesCacheCapacity = cfg.MaxOpenFiles

    // default values for now; add config support later
    opts.CompactionTableSize = 32 * 1024 * 1024
    opts.WriteL0SlowdownTrigger = 16
    opts.WriteL0PauseTrigger = 64

    return opts
}
Example 5: NewLevelStore
func NewLevelStore(path string, sync bool) *LevelStore {
    opts := &opt.Options{
        Filter:         filter.NewBloomFilter(10),
        ErrorIfMissing: false,
    }

    db, err := leveldb.OpenFile(path, opts)
    if err != nil {
        panic(fmt.Sprintf("queued.LevelStore: Unable to open db: %v", err))
    }

    id := 0
    iter := db.NewIterator(nil, nil)
    iter.Last()
    if iter.Valid() {
        id, err = strconv.Atoi(string(iter.Key()))
        if err != nil {
            panic(fmt.Sprintf("queued.LevelStore: Error loading db: %v", err))
        }
    }

    return &LevelStore{
        id:   id,
        path: path,
        sync: sync,
        db:   db,
    }
}
Example 6: newKeyValueFromJSONConfig
// newKeyValueFromJSONConfig returns a KeyValue implementation on top of a
// github.com/syndtr/goleveldb/leveldb file.
func newKeyValueFromJSONConfig(cfg jsonconfig.Obj) (sorted.KeyValue, error) {
    file := cfg.RequiredString("file")
    if err := cfg.Validate(); err != nil {
        return nil, err
    }
    strictness := opt.DefaultStrict
    if env.IsDev() {
        // Be more strict in dev mode.
        strictness = opt.StrictAll
    }
    opts := &opt.Options{
        // The default is 10,
        // 8 means 2.126% or 1/47th disk check rate,
        // 10 means 0.812% error rate (1/2^(bits/1.44)) or 1/123th disk check rate,
        // 12 means 0.31% or 1/322th disk check rate.
        // TODO(tgulacsi): decide which number is the best here. Till that go with the default.
        Filter: filter.NewBloomFilter(10),
        Strict: strictness,
    }
    db, err := leveldb.OpenFile(file, opts)
    if err != nil {
        return nil, err
    }
    is := &kvis{
        db:       db,
        path:     file,
        opts:     opts,
        readOpts: &opt.ReadOptions{Strict: strictness},
        // On machine crash we want to reindex anyway, and
        // fsyncs may impose great performance penalty.
        writeOpts: &opt.WriteOptions{Sync: false},
    }
    return is, nil
}
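The false-positive figures quoted in the comment above follow the approximation 1/2^(bitsPerKey/1.44) that the comment itself cites. The short standalone sketch below (the helper falsePositiveRate is hypothetical, not part of the example) reproduces those numbers for 8, 10, and 12 bits per key.

package main

import (
    "fmt"
    "math"
)

// falsePositiveRate approximates the bloom filter error rate quoted in the
// comment above: 1 / 2^(bitsPerKey/1.44).
func falsePositiveRate(bitsPerKey float64) float64 {
    return 1 / math.Pow(2, bitsPerKey/1.44)
}

func main() {
    for _, bits := range []float64{8, 10, 12} {
        // Prints roughly 2.126%, 0.812%, and 0.310% respectively.
        fmt.Printf("%v bits/key -> %.3f%% false positives\n",
            bits, 100*falsePositiveRate(bits))
    }
}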
Example 7: BenchmarkLevelGetPut
func BenchmarkLevelGetPut(b *testing.B) {
    o := &opt.Options{
        Filter: filter.NewBloomFilter(10),
    }
    db2, err := leveldb.OpenFile("./leveldb_test.db2", o)
    if err != nil {
        panic("Unable to open database")
    }
    kv := Wrap(db2, NewConfig())
    defer kv.Close()

    numRoutines := runtime.NumCPU()
    finished := make(chan bool, numRoutines)
    for j := 0; j < numRoutines; j++ {
        go func(n int) {
            key := []byte("keyaaaa")
            key[2] = 'a' + byte(n)
            for i := 0; i < b.N/numRoutines; i++ {
                kv.Put(key, []byte("value"), nil)
                kv.Get(key, nil)
                key[0]++
                if key[0] == '0' {
                    key[1]++
                }
            }
            finished <- true
        }(j)
    }
    for j := 0; j < numRoutines; j++ {
        <-finished
    }
}
Example 8: leveldbOpener
func leveldbOpener(url *config.URL) (driver.Driver, error) {
    value := url.Value
    if !filepath.IsAbs(value) {
        value = pathutil.Relative(value)
    }
    opts := &opt.Options{}
    if url.Fragment["nocompress"] != "" {
        opts.Compression = opt.NoCompression
    }
    if url.Fragment["nocreate"] != "" {
        opts.ErrorIfMissing = true
    }
    filesDir := filepath.Join(value, "files")
    files, err := leveldb.OpenFile(filesDir, opts)
    if err != nil {
        return nil, err
    }
    copts := *opts
    copts.Filter = filter.NewBloomFilter(8 * sha1.Size)
    chunksDir := filepath.Join(value, "chunks")
    chunks, err := leveldb.OpenFile(chunksDir, &copts)
    if err != nil {
        return nil, err
    }
    return &leveldbDriver{
        files:  files,
        chunks: chunks,
        dir:    value,
    }, nil
}
Example 9: internalOpen
func internalOpen(path string) (*leveldb.DB, error) {
    o := &opt.Options{
        Filter: filter.NewBloomFilter(10),
    }
    return leveldb.OpenFile(path, o)
}
Example 10: TestCorruptDB_RecoverTable
func TestCorruptDB_RecoverTable(t *testing.T) {
    h := newDbCorruptHarnessWopt(t, &opt.Options{
        WriteBuffer:         112 * opt.KiB,
        CompactionTableSize: 90 * opt.KiB,
        Filter:              filter.NewBloomFilter(10),
    })

    h.build(1000)
    h.compactMem()
    h.compactRangeAt(0, "", "")
    h.compactRangeAt(1, "", "")
    seq := h.db.seq
    h.closeDB()
    h.corrupt(storage.TypeTable, 0, 1000, 1)
    h.corrupt(storage.TypeTable, 3, 10000, 1)
    // Corrupted filter shouldn't affect recovery.
    h.corrupt(storage.TypeTable, 3, 113888, 10)
    h.corrupt(storage.TypeTable, -1, 20000, 1)

    h.recover()
    if h.db.seq != seq {
        t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
    }
    h.check(985, 985)

    h.close()
}
Example 11: TestDb_BloomFilter
func TestDb_BloomFilter(t *testing.T) {
    h := newDbHarnessWopt(t, &opt.Options{
        BlockCache: opt.NoCache,
        Filter:     filter.NewBloomFilter(10),
    })
    defer h.close()

    key := func(i int) string {
        return fmt.Sprintf("key%06d", i)
    }

    const (
        n              = 10000
        indexOverheat  = 19898
        filterOverheat = 19799
    )

    // Populate multiple layers
    for i := 0; i < n; i++ {
        h.put(key(i), key(i))
    }
    h.compactMem()
    h.compactRange("a", "z")
    for i := 0; i < n; i += 100 {
        h.put(key(i), key(i))
    }
    h.compactMem()

    // Prevent auto compactions triggered by seeks
    h.stor.DelaySync(storage.TypeTable)

    // Lookup present keys. Should rarely read from small sstable.
    h.stor.SetReadCounter(storage.TypeTable)
    for i := 0; i < n; i++ {
        h.getVal(key(i), key(i))
    }
    cnt := int(h.stor.ReadCounter())
    t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
    if min, max := n+indexOverheat+filterOverheat, n+indexOverheat+filterOverheat+2*n/100; cnt < min || cnt > max {
        t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
    }

    // Lookup missing keys. Should rarely read from either sstable.
    h.stor.ResetReadCounter()
    for i := 0; i < n; i++ {
        h.get(key(i)+".missing", false)
    }
    cnt = int(h.stor.ReadCounter())
    t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
    if max := 3*n/100 + indexOverheat + filterOverheat; cnt > max {
        t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
    }

    h.stor.ReleaseSync(storage.TypeTable)
}
Example 12: Open
// Open a DB by fileName.
func Open(fileName string) (*DB, error) {
    opts := &leveldbOpt.Options{
        Filter: leveldbFilter.NewBloomFilter(LevelDBBloomFilterBitsPerKey),
    }
    db, err := leveldb.OpenFile(fileName, opts)
    if err != nil {
        return nil, err
    }
    return &DB{db}, nil
}
Example 13: SetBloomFilterBitsPerKey
// SetBloomFilterBitsPerKey sets the bits per key for a bloom filter. This filter
// will reduce the number of unnecessary disk reads needed for Get() calls
// by a large factor.
func (opts *goKeyValueOptions) SetBloomFilterBitsPerKey(bitsPerKey int) {
    if bitsPerKey != opts.bloomBitsPerKey {
        if opts.filter != nil {
            // NOTE -- No destructor for bloom filter in goleveldb?
        }
        opts.filter = filter.NewBloomFilter(bitsPerKey)
        opts.Options.Filter = opts.filter
        opts.bloomBitsPerKey = bitsPerKey
    }
}
Example 14: OpenDB
// OpenDB opens (creates) a LevelDB database
func OpenDB(path string) (Storage, error) {
    o := &opt.Options{
        Filter: filter.NewBloomFilter(10),
    }
    db, err := leveldb.OpenFile(path, o)
    if err != nil {
        return nil, err
    }
    return &levelDB{db: db}, nil
}
Example 15: TestDb_BloomFilter
func TestDb_BloomFilter(t *testing.T) {
    h := newDbHarnessWopt(t, &opt.Options{
        Flag:       opt.OFCreateIfMissing,
        BlockCache: cache.EmptyCache{},
        Filter:     filter.NewBloomFilter(10),
    })

    key := func(i int) string {
        return fmt.Sprintf("key%06d", i)
    }

    n := 10000

    // Populate multiple layers
    for i := 0; i < n; i++ {
        h.put(key(i), key(i))
    }
    h.compactMem()
    h.compactRange("a", "z")
    for i := 0; i < n; i += 100 {
        h.put(key(i), key(i))
    }
    h.compactMem()

    // Prevent auto compactions triggered by seeks
    h.desc.DelaySync(desc.TypeTable)

    // Lookup present keys. Should rarely read from small sstable.
    h.desc.SetReadAtCounter(desc.TypeTable)
    for i := 0; i < n; i++ {
        h.getVal(key(i), key(i))
    }
    cnt := int(h.desc.ReadAtCounter())
    t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
    if min, max := n, n+2*n/100; cnt < min || cnt > max {
        t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
    }

    // Lookup missing keys. Should rarely read from either sstable.
    h.desc.ResetReadAtCounter()
    for i := 0; i < n; i++ {
        h.get(key(i)+".missing", false)
    }
    cnt = int(h.desc.ReadAtCounter())
    t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
    if max := 3 * n / 100; cnt > max {
        t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
    }

    h.desc.ReleaseSync(desc.TypeTable)
    h.close()
}