本文整理汇总了Golang中github.com/syndtr/goleveldb/leveldb.Batch.Reset方法的典型用法代码示例。如果您正苦于以下问题:Golang Batch.Reset方法的具体用法?Golang Batch.Reset怎么用?Golang Batch.Reset使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/syndtr/goleveldb/leveldb.Batch的用法示例。
在下文中一共展示了Batch.Reset方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Update
// Update refreshes block map state for the given files. Entries for
// deleted or invalid files are removed, directories are skipped (they
// carry no blocks), and all remaining files have their block keys
// (re)written with the big-endian block index as the value.
func (m *BlockMap) Update(files []protocol.FileInfo) error {
	batch := new(leveldb.Batch)
	idxBuf := make([]byte, 4)
	var key []byte
	for _, file := range files {
		// Commit and recycle the batch periodically so it stays bounded.
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		switch {
		case file.IsDirectory():
			// Nothing to record for directories.
		case file.IsDeleted() || file.IsInvalid():
			for _, block := range file.Blocks {
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Delete(key)
			}
		default:
			for idx, block := range file.Blocks {
				binary.BigEndian.PutUint32(idxBuf, uint32(idx))
				key = m.blockKeyInto(key, block.Hash, file.Name)
				batch.Put(key, idxBuf)
			}
		}
	}
	return m.db.Write(batch, nil)
}
示例2: DeleteDataWithPrefix
// DeleteDataWithPrefix deletes all service data such as service metadata,
// items and payloads whose keys start with prefix. It flushes the cache
// first, holds the save lock for the duration, and returns the number of
// deleted keys.
func (ds *LevelDBStorage) DeleteDataWithPrefix(prefix string) int {
	ds.FlushCache()
	ds.saveLock.Lock()
	defer ds.saveLock.Unlock()

	// Commit the batch in chunks so it does not grow without bound.
	const flushEvery = 1000
	pending := 0
	total := 0
	iter := ds.IterData(prefix)
	wb := new(leveldb.Batch)
	for iter.Valid() {
		// BUG FIX: the original code skipped every 1001st key — when the
		// counter hit the limit it flushed the batch and advanced the
		// iterator WITHOUT batching a Delete for the current key. Delete
		// unconditionally, then flush when the chunk is full.
		wb.Delete(iter.GetKey())
		total++
		pending++
		if pending >= flushEvery {
			pending = 0
			ds.db.Write(wb, nil)
			wb.Reset()
		}
		iter.Next()
	}
	// Write whatever remains in the final partial chunk.
	ds.db.Write(wb, nil)
	return total
}
示例3: cacheFlush
// cacheFlush commits the accumulated batch to the database and empties it
// so the caller can keep reusing the same batch value. A write error is
// fatal and terminates the process.
func cacheFlush(db *leveldb.DB, batch *leveldb.Batch) {
	if err := db.Write(batch, nil); err != nil {
		log.Fatal(err)
	}
	batch.Reset()
}
示例4: processChange
// processChange applies a ChangeList to the database: each changed file is
// either removed (deleted/trashed/hidden) or upserted, the change
// checkpoint is advanced, and each item is committed in its own batch.
// Broadcasts on d.synced once the last known change id has been reached.
func (d *DriveDB) processChange(c *gdrive.ChangeList) error {
	if c == nil {
		return nil
	}
	// If we read zero items, there's no work to do, and we're probably synced.
	if len(c.Items) == 0 {
		if d.lastChangeId() >= c.LargestChangeId {
			d.synced.Broadcast()
		}
		return nil
	}
	log.Printf("processing %v/%v, %v changes", d.lastChangeId(), c.LargestChangeId, len(c.Items))
	batch := new(leveldb.Batch)
	for _, i := range c.Items {
		if i.File == nil {
			debug.Printf(" %s: deleted", i.FileId)
		} else {
			debug.Printf(" %s: %q size:%v version:%v labels:%#v", i.FileId, i.File.Title, i.File.FileSize, i.File.Version, i.File.Labels)
		}
		// The batch is reused across iterations; each item commits separately.
		batch.Reset()
		// Update leveldb.
		inode, _ := d.InodeForFileId(i.FileId)
		d.lruCache.Remove(inode)
		// TODO: don't delete trashed/hidden files? ".trash" folder?
		// BUG FIX: check i.File == nil before dereferencing Labels. The
		// original panicked on an item with a nil File and Deleted unset;
		// a missing File is treated as a removal, matching the log above.
		if i.Deleted || i.File == nil || i.File.Labels.Trashed || i.File.Labels.Hidden {
			d.RemoveFileById(i.FileId, batch)
		} else {
			d.UpdateFile(batch, i.File)
		}
		// Update the checkpoint, which now encompasses one additional change.
		d.setLastChangeId(i.Id)
		err := d.writeCheckpoint(batch)
		if err != nil {
			return err
		}
		// Commit
		err = d.db.Write(batch, nil)
		if err != nil {
			return err
		}
	}
	// Signal we're synced, if we are.
	if d.lastChangeId() >= c.LargestChangeId {
		d.synced.Broadcast()
	}
	return nil
}
示例5: Discard
// Discard removes the given files' block entries from the block map.
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
	var (
		batch = new(leveldb.Batch)
		key   []byte
	)
	for _, file := range files {
		// Flush and reuse the batch once it grows past the limit.
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		for _, b := range file.Blocks {
			key = m.blockKeyInto(key, b.Hash, file.Name)
			batch.Delete(key)
		}
	}
	return m.db.Write(batch, nil)
}
示例6: Drop
// Drop removes every entry belonging to this block map from the database.
func (m *BlockMap) Drop() error {
	prefix := m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]
	it := m.db.NewIterator(util.BytesPrefix(prefix), nil)
	defer it.Release()
	batch := new(leveldb.Batch)
	for it.Next() {
		// Flush periodically to keep the batch's memory use bounded.
		if batch.Len() > maxBatchSize {
			if err := m.db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		batch.Delete(it.Key())
	}
	if err := it.Error(); err != nil {
		return err
	}
	return m.db.Write(batch, nil)
}
示例7: Reset
// Reset removes all entries in this namespace.
func (n *NamespacedKV) Reset() {
	iter := n.db.NewIterator(util.BytesPrefix(n.prefix), nil)
	defer iter.Release()
	batch := new(leveldb.Batch)
	for iter.Next() {
		batch.Delete(iter.Key())
		if batch.Len() <= batchFlushSize {
			continue
		}
		// Flush and reuse the batch so it never grows unbounded.
		if err := n.db.Write(batch, nil); err != nil {
			panic(err)
		}
		batch.Reset()
	}
	// Commit whatever deletions remain after the loop.
	if batch.Len() > 0 {
		if err := n.db.Write(batch, nil); err != nil {
			panic(err)
		}
	}
}
示例8: ApplySnapshot
// ApplySnapshot applies the snapshot of the computation's persisted state:
// it first deletes every key under the computation's prefix, then replays
// the serialized batch contained in the snapshot.
func (p *LevelDBPersister) ApplySnapshot(compID StreamID, snapshot []byte) error {
	batch := new(leveldb.Batch)
	log.Println("[persister] Applying snapshot") //, snapshot)
	// Clear all existing data for this computation.
	iter := p.db.NewIterator(util.BytesPrefix([]byte(compID)), nil)
	defer iter.Release()
	for iter.Next() {
		batch.Delete(iter.Key())
	}
	if err := p.db.Write(batch, nil); err != nil {
		return err
	}
	// Reuse the batch to replay the snapshot's serialized operations.
	batch.Reset()
	if err := batch.Load(snapshot); err != nil {
		return err
	}
	return p.db.Write(batch, nil)
}
示例9: ldbUpdate
// ldbUpdate upserts file records for (folder, device) into the database,
// keeping the per-device records and the global index in step, and returns
// the highest local version produced by ldbInsert (0 if nothing changed).
func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) int64 {
	// NOTE(review): an explicit GC before the snapshot/batch allocations —
	// presumably to reduce memory pressure on large updates; confirm it is
	// still needed.
	runtime.GC()
	batch := new(leveldb.Batch)
	if debugDB {
		l.Debugf("new batch %p", batch)
	}
	// Reads go through a consistent snapshot while writes accumulate in
	// the batch; the snapshot is released when the function returns.
	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	if debugDB {
		l.Debugf("created snapshot %p", snap)
	}
	defer func() {
		if debugDB {
			l.Debugf("close snapshot %p", snap)
		}
		snap.Release()
	}()
	var maxLocalVer int64
	var fk []byte // reusable key buffer for deviceKeyInto
	for _, f := range fs {
		name := []byte(f.Name)
		fk = deviceKeyInto(fk[:cap(fk)], folder, device, name)
		if debugDB {
			l.Debugf("snap.Get %p %x", snap, fk)
		}
		bs, err := snap.Get(fk, nil)
		if err == leveldb.ErrNotFound {
			// New record: insert it and update the global index (invalid
			// files are removed from the global index instead).
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, f)
			}
			continue
		}
		// NOTE(review): Get errors other than ErrNotFound are not handled;
		// bs may then be invalid and UnmarshalXDR below panics — confirm
		// that panicking is the intended failure mode here.
		var ef FileInfoTruncated
		err = ef.UnmarshalXDR(bs)
		if err != nil {
			panic(err)
		}
		// Flags might change without the version being bumped when we set the
		// invalid flag on an existing file.
		if !ef.Version.Equal(f.Version) || ef.Flags != f.Flags {
			if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
				maxLocalVer = lv
			}
			if f.IsInvalid() {
				ldbRemoveFromGlobal(snap, batch, folder, device, name)
			} else {
				ldbUpdateGlobal(snap, batch, folder, device, f)
			}
		}
		// Write out and reuse the batch every few records, to avoid the batch
		// growing too large and thus allocating unnecessarily much memory.
		if batch.Len() > batchFlushSize {
			if debugDB {
				l.Debugf("db.Write %p", batch)
			}
			err = db.Write(batch, nil)
			if err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}
	// Final commit of whatever is left in the batch.
	if debugDB {
		l.Debugf("db.Write %p", batch)
	}
	err = db.Write(batch, nil)
	if err != nil {
		panic(err)
	}
	return maxLocalVer
}
示例10: ldbGenericReplace
//.........这里部分代码省略.........
moreFs := fsi < len(fs)
if !moreDb && !moreFs {
break
}
if moreFs {
newName = []byte(fs[fsi].Name)
}
if moreDb {
oldName = deviceKeyName(dbi.Key())
}
cmp := bytes.Compare(newName, oldName)
if debugDB {
l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
}
switch {
case moreFs && (!moreDb || cmp == -1):
if debugDB {
l.Debugln("generic replace; missing - insert")
}
// Database is missing this file. Insert it.
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
}
fsi++
case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions. We might get an
// update with the same version and different flags if a device has
// marked a file as invalid, so handle that too.
if debugDB {
l.Debugln("generic replace; exists - compare")
}
var ef FileInfoTruncated
ef.UnmarshalXDR(dbi.Value())
if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Flags != ef.Flags {
if debugDB {
l.Debugln("generic replace; differs - insert")
}
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, folder, device, newName)
} else {
ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
}
} else if debugDB {
l.Debugln("generic replace; equal - ignore")
}
fsi++
moreDb = dbi.Next()
case moreDb && (!moreFs || cmp == 1):
if debugDB {
l.Debugln("generic replace; exists - remove")
}
if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
maxLocalVer = lv
}
moreDb = dbi.Next()
}
// Write out and reuse the batch every few records, to avoid the batch
// growing too large and thus allocating unnecessarily much memory.
if batch.Len() > batchFlushSize {
if debugDB {
l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil)
if err != nil {
panic(err)
}
batch.Reset()
}
}
if debugDB {
l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil)
if err != nil {
panic(err)
}
return maxLocalVer
}
示例11: main
//.........这里部分代码省略.........
blockpool, _ := db.GetProperty("leveldb.blockpool")
log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q",
cachedblock, openedtables, alivesnaps, aliveiters, blockpool)
log.Print("------------------------")
}
}()
for ns, numKey := range numKeys {
func(ns, numKey int) {
log.Printf("[%02d] STARTING: numKey=%d", ns, numKey)
keys := make([][]byte, numKey)
for i := range keys {
keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen)
}
wg.Add(1)
go func() {
var wi uint32
defer func() {
log.Printf("[%02d] WRITER DONE #%d", ns, wi)
wg.Done()
}()
var (
b = new(leveldb.Batch)
k2, v2 []byte
nReader int32
)
for atomic.LoadUint32(&done) == 0 {
log.Printf("[%02d] WRITER #%d", ns, wi)
b.Reset()
for _, k1 := range keys {
k2 = randomData(k2, byte(ns), 2, wi, keyLen)
v2 = randomData(v2, byte(ns), 3, wi, valueLen)
b.Put(k2, v2)
b.Put(k1, k2)
}
writeReq <- b
if err := <-writeAck; err != nil {
writeAckAck <- struct{}{}
fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err)
}
snap, err := db.GetSnapshot()
if err != nil {
writeAckAck <- struct{}{}
fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err)
}
writeAckAck <- struct{}{}
wg.Add(1)
atomic.AddInt32(&nReader, 1)
go func(snapwi uint32, snap *leveldb.Snapshot) {
var (
ri int
iterStat = &latencyStats{}
getStat = &latencyStats{}
)
defer func() {
mu.Lock()
gGetStat.add(getStat)
gIterStat.add(iterStat)