本文整理汇总了Golang中github.com/syndtr/goleveldb/leveldb.Batch.Len方法的典型用法代码示例。如果您正苦于以下问题:Golang Batch.Len方法的具体用法?Golang Batch.Len怎么用?Golang Batch.Len使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/syndtr/goleveldb/leveldb.Batch
的用法示例。
在下文中一共展示了Batch.Len方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Update
// Update block map state, removing any deleted or invalid files.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	idxBuf := make([]byte, 4)
	var key []byte
	for _, f := range files {
		// Flush and reuse the batch periodically so it stays bounded in memory.
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		switch {
		case f.IsDirectory():
			// Directories carry no blocks; nothing to index.
		case f.IsDeleted() || f.IsInvalid():
			// Remove every block entry belonging to a deleted or invalid file.
			for _, blk := range f.Blocks {
				key = m.blockKeyInto(key, blk.Hash, f.Name)
				batch.Delete(key)
			}
		default:
			// Store each block's index (big-endian uint32), keyed by block
			// hash and file name.
			for i, blk := range f.Blocks {
				binary.BigEndian.PutUint32(idxBuf, uint32(i))
				key = m.blockKeyInto(key, blk.Hash, f.Name)
				batch.Put(key, idxBuf)
			}
		}
	}
	return m.db.Write(batch, nil)
}
示例2: childFileIds
// childFileIds returns a map containing IDs of all Files that have parent
// refs to the given file. The returned map keys are IDs, and the map values
// indicate if the child is a directory.
//
// Stale child refs (entries whose child file no longer resolves) are
// deleted from the database as a side effect.
func (d *DriveDB) childFileIds(fileId string) (map[string]bool, error) {
	ids := make(map[string]bool)
	d.iters.Add(1)
	batch := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKeyPrefix(fileId)), nil)
	for iter.Next() {
		// Key encodes parent ID followed by child ID; strip the parent
		// prefix (plus one separator byte) to recover the child ID.
		pidcid := deKey(string(iter.Key()))
		cid := pidcid[len(fileId)+1:]
		if gdriveFile, err := d.FileById(cid); err != nil {
			// Bug fix: the failed lookup was FileById(cid), so the unknown
			// ID is the child's, not the parent's. Log both for context.
			log.Printf("unknown child fileId %v of %v: %v", cid, fileId, err)
			// Drop the dangling child ref from the index.
			batch.Delete(iter.Key())
		} else {
			ids[cid] = gdriveFile.MimeType == driveFolderMimeType
		}
	}
	iter.Release()
	d.iters.Done()
	if batch.Len() > 0 {
		err := d.db.Write(batch, nil)
		if err != nil {
			// Best effort: failing to prune stale refs is logged, not fatal.
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
示例3: ChildFileIds
// ChildFileIds returns the IDs of all Files that have parent refs to the given file.
func (d *DriveDB) ChildFileIds(fileId string) ([]string, error) {
	var ids []string
	d.iters.Add(1)
	stale := new(leveldb.Batch)
	iter := d.db.NewIterator(util.BytesPrefix(childKey(fileId)), nil)
	for iter.Next() {
		// Strip the parent-ID prefix (plus one separator byte) from the
		// decoded key to obtain the child ID.
		cid := deKey(string(iter.Key()))[len(fileId)+1:]
		if found, err := d.db.Has(fileKey(cid), nil); err == nil && found {
			ids = append(ids, cid)
		} else {
			// The child record is missing (or the lookup failed); queue the
			// dangling ref for removal.
			stale.Delete(iter.Key())
		}
	}
	iter.Release()
	d.iters.Done()
	if stale.Len() > 0 {
		if err := d.db.Write(stale, nil); err != nil {
			// Pruning is best effort; a failure is only logged.
			log.Printf("error writing to db: %v", err)
		}
	}
	return ids, iter.Error()
}
示例4: putBatch
// putBatch writes the batch to the underlying database while holding a slot
// in the concurrent-write limiter, then bumps the store's put counters.
func (l *internalLevelDBStore) putBatch(b *leveldb.Batch, numBytes int) {
	// Block until a write slot is free.
	l.concurrentWriteLimit <- struct{}{}
	d.Chk.NoError(l.db.Write(b, nil))
	// Track entry and byte counts for stats.
	l.putCount += int64(b.Len())
	l.putBytes += int64(numBytes)
	// Release the write slot.
	<-l.concurrentWriteLimit
}
示例5: Delete
// Delete all states for a metric name.
// This operation is currently only used for cleaning.
//
// Fixes: the iterator was never released (leaking its underlying snapshot)
// and iteration errors were silently ignored.
func (db *DB) Delete(name string) error {
	// Name must be the key prefix.
	iter := db.db.NewIterator(util.BytesPrefix([]byte(name)), nil)
	defer iter.Release()
	batch := new(leveldb.Batch)
	for iter.Next() {
		batch.Delete(iter.Key())
	}
	// Surface iteration failures instead of dropping them.
	if err := iter.Error(); err != nil {
		return err
	}
	if batch.Len() > 0 {
		return db.db.Write(batch, nil)
	}
	return nil
}
示例6: Discard
// Discard block map state, removing the given files
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	var key []byte
	for _, f := range files {
		// Flush and reuse the batch once it grows past the size limit, so
		// memory usage stays bounded.
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		// Queue a delete for every block entry of this file.
		for _, blk := range f.Blocks {
			key = m.blockKeyInto(key, blk.Hash, f.Name)
			batch.Delete(key)
		}
	}
	return m.db.Write(batch, nil)
}
示例7: Drop
// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
	// Iterate everything under this map's key prefix (prefix + folder part).
	prefix := m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]
	iter := m.db.NewIterator(util.BytesPrefix(prefix), nil)
	defer iter.Release()
	batch := new(leveldb.Batch)
	for iter.Next() {
		// Flush periodically so the batch stays bounded in memory.
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		batch.Delete(iter.Key())
	}
	if err := iter.Error(); err != nil {
		return err
	}
	return m.db.Write(batch, nil)
}
示例8: Reset
// Reset removes all entries in this namespace.
func (n *NamespacedKV) Reset() {
	iter := n.db.NewIterator(util.BytesPrefix(n.prefix), nil)
	defer iter.Release()
	batch := new(leveldb.Batch)
	// flush writes the pending deletes and recycles the batch; a write
	// failure here is fatal, matching the surrounding code's convention.
	flush := func() {
		if err := n.db.Write(batch, nil); err != nil {
			panic(err)
		}
		batch.Reset()
	}
	for iter.Next() {
		batch.Delete(iter.Key())
		// Keep the batch from growing without bound.
		if batch.Len() > batchFlushSize {
			flush()
		}
	}
	// Write out whatever remains.
	if batch.Len() > 0 {
		flush()
	}
}
示例9: ldbUpdate
// ldbUpdate applies the given FileInfos for one folder/device to the
// database, inserting new records and refreshing changed ones, and keeps
// the global version bookkeeping in sync. It returns the highest local
// version number produced by ldbInsert during this call (0 if nothing
// was inserted).
func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) int64 {
// NOTE(review): a forced GC before the batch/snapshot work — presumably to
// reduce memory pressure; rationale not evident from this block.
runtime.GC()
batch := new(leveldb.Batch)
if debugDB {
l.Debugf("new batch %p", batch)
}
// All reads below go through a consistent snapshot of the db.
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
}
if debugDB {
l.Debugf("created snapshot %p", snap)
}
// Release the snapshot when done, regardless of how we exit.
defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap)
}
snap.Release()
}()
var maxLocalVer int64
// fk is a reusable key buffer; deviceKeyInto writes into its capacity.
var fk []byte
for _, f := range fs {
name := []byte(f.Name)
fk = deviceKeyInto(fk[:cap(fk)], folder, device, name)
if debugDB {
l.Debugf("snap.Get %p %x", snap, fk)
}
// Look up the existing record for this file, if any.
bs, err := snap.Get(fk, nil)
if err == leveldb.ErrNotFound {
// New file: insert it and track the highest local version seen.
if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
maxLocalVer = lv
}
// Invalid files are excluded from the global version list.
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, folder, device, name)
} else {
ldbUpdateGlobal(snap, batch, folder, device, f)
}
continue
}
// Decode the existing (truncated) record to compare against the update.
var ef FileInfoTruncated
err = ef.UnmarshalXDR(bs)
if err != nil {
panic(err)
}
// Flags might change without the version being bumped when we set the
// invalid flag on an existing file.
if !ef.Version.Equal(f.Version) || ef.Flags != f.Flags {
if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
maxLocalVer = lv
}
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, folder, device, name)
} else {
ldbUpdateGlobal(snap, batch, folder, device, f)
}
}
// Write out and reuse the batch every few records, to avoid the batch
// growing too large and thus allocating unnecessarily much memory.
if batch.Len() > batchFlushSize {
if debugDB {
l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil)
if err != nil {
panic(err)
}
batch.Reset()
}
}
if debugDB {
l.Debugf("db.Write %p", batch)
}
// Flush whatever remains in the batch.
err = db.Write(batch, nil)
if err != nil {
panic(err)
}
return maxLocalVer
}
示例10: ldbGenericReplace
//.........这里部分代码省略.........
moreFs := fsi < len(fs)
if !moreDb && !moreFs {
break
}
if moreFs {
newName = []byte(fs[fsi].Name)
}
if moreDb {
oldName = deviceKeyName(dbi.Key())
}
cmp := bytes.Compare(newName, oldName)
if debugDB {
l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
}
switch {
case moreFs && (!moreDb || cmp == -1):
if debugDB {
l.Debugln("generic replace; missing - insert")
}
// Database is missing this file. Insert it.
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
}
fsi++
case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions. We might get an
// update with the same version and different flags if a device has
// marked a file as invalid, so handle that too.
if debugDB {
l.Debugln("generic replace; exists - compare")
}
var ef FileInfoTruncated
ef.UnmarshalXDR(dbi.Value())
if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Flags != ef.Flags {
if debugDB {
l.Debugln("generic replace; differs - insert")
}
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
}
} else if debugDB {
l.Debugln("generic replace; equal - ignore")
}
fsi++
moreDb = dbi.Next()
case moreDb && (!moreFs || cmp == 1):
if debugDB {
l.Debugln("generic replace; exists - remove")
}
if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
maxLocalVer = lv
}
moreDb = dbi.Next()
}
// Write out and reuse the batch every few records, to avoid the batch
// growing too large and thus allocating unnecessarily much memory.
if batch.Len() > batchFlushSize {
if debugDB {
l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil)
if err != nil {
panic(err)
}
batch.Reset()
}
}
if debugDB {
l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil)
if err != nil {
panic(err)
}
return maxLocalVer
}
示例11: main
func main() {
flag.Parse()
if enableBufferPool {
bpool = util.NewBufferPool(opt.DefaultBlockSize + 128)
}
log.Printf("Test DB stored at %q", dbPath)
if httpProf != "" {
log.Printf("HTTP pprof listening at %q", httpProf)
runtime.SetBlockProfileRate(1)
go func() {
if err := http.ListenAndServe(httpProf, nil); err != nil {
log.Fatalf("HTTPPROF: %v", err)
}
}()
}
runtime.GOMAXPROCS(runtime.NumCPU())
os.RemoveAll(dbPath)
stor, err := storage.OpenFile(dbPath, false)
if err != nil {
log.Fatal(err)
}
tstor := &testingStorage{stor}
defer tstor.Close()
fatalf := func(err error, format string, v ...interface{}) {
atomic.StoreUint32(&fail, 1)
atomic.StoreUint32(&done, 1)
log.Printf("FATAL: "+format, v...)
if err != nil && errors.IsCorrupted(err) {
cerr := err.(*errors.ErrCorrupted)
if !cerr.Fd.Zero() && cerr.Fd.Type == storage.TypeTable {
log.Print("FATAL: corruption detected, scanning...")
if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) {
log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd)
}
}
}
runtime.Goexit()
}
if openFilesCacheCapacity == 0 {
openFilesCacheCapacity = -1
}
o := &opt.Options{
OpenFilesCacheCapacity: openFilesCacheCapacity,
DisableBufferPool: !enableBufferPool,
DisableBlockCache: !enableBlockCache,
ErrorIfExist: true,
Compression: opt.NoCompression,
}
if enableCompression {
o.Compression = opt.DefaultCompression
}
db, err := leveldb.Open(tstor, o)
if err != nil {
log.Fatal(err)
}
defer db.Close()
var (
mu = &sync.Mutex{}
gGetStat = &latencyStats{}
gIterStat = &latencyStats{}
gWriteStat = &latencyStats{}
gTrasactionStat = &latencyStats{}
startTime = time.Now()
writeReq = make(chan *leveldb.Batch)
writeAck = make(chan error)
writeAckAck = make(chan struct{})
)
go func() {
for b := range writeReq {
var err error
if mrand.Float64() < transactionProb {
log.Print("> Write using transaction")
gTrasactionStat.start()
var tr *leveldb.Transaction
if tr, err = db.OpenTransaction(); err == nil {
if err = tr.Write(b, nil); err == nil {
if err = tr.Commit(); err == nil {
gTrasactionStat.record(b.Len())
}
} else {
tr.Discard()
}
}
} else {
gWriteStat.start()
if err = db.Write(b, nil); err == nil {
gWriteStat.record(b.Len())
}
}
//.........这里部分代码省略.........
示例12: run
func run(d *osmpbf.Decoder, db *leveldb.DB, config Settings) {
batch := new(leveldb.Batch)
var nc, wc, rc uint64
for {
if v, err := d.Decode(); err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
} else {
switch v := v.(type) {
case *osmpbf.Node:
// inc count
nc++
// ----------------
// write to leveldb
// ----------------
// write immediately
// cacheStore(db, v)
// write in batches
cacheQueue(batch, v)
if batch.Len() > config.BatchSize {
cacheFlush(db, batch)
}
// ----------------
// handle conditions
// ----------------
if len(config.Ids) != 0 {
i := sort.SearchStrings(config.Ids, strconv.FormatInt(v.ID, 10))
if i < len(config.Ids) && config.Ids[i] == strconv.FormatInt(v.ID, 10) {
onNode(v)
}
} else {
if !hasTags(v.Tags) {
break
}
v.Tags = trimTags(v.Tags)
if containsValidTags(v.Tags, config.Tags) {
onNode(v)
}
}
case *osmpbf.Way:
// ----------------
// write to leveldb
// ----------------
// flush outstanding batches
if batch.Len() > 1 {
cacheFlush(db, batch)
}
// inc count
wc++
if len(config.Ids) != 0 {
i := sort.SearchStrings(config.Ids, strconv.FormatInt(v.ID, 10))
if i < len(config.Ids) && config.Ids[i] == strconv.FormatInt(v.ID, 10) {
// lookup from leveldb
latlons, err := cacheLookup(db, v)
// skip ways which fail to denormalize
if err != nil {
break
}
// compute centroid
var centroid = computeCentroid(latlons)
onWay(v, latlons, centroid)
}
} else {
if !hasTags(v.Tags) {
break
}
v.Tags = trimTags(v.Tags)
if containsValidTags(v.Tags, config.Tags) {
// lookup from leveldb
latlons, err := cacheLookup(db, v)
// skip ways which fail to denormalize
if err != nil {
break
}
// compute centroid
var centroid = computeCentroid(latlons)
//.........这里部分代码省略.........