This article collects typical usage examples of the Golang function NewWriteBatch from github.com/jmhodges/levigo. If you are wondering what NewWriteBatch does and how to call it, the curated code samples below may help.
Fifteen code examples of NewWriteBatch are shown below, sorted by popularity by default.
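Before diving into the examples, here is a minimal sketch of the pattern they all share: create a batch, stage Put/Delete operations, commit the batch atomically, and close the C-allocated objects when done. The database handle, keys, and values are placeholders; only the levigo calls themselves come from the library.

func minimalBatchWrite(db *levigo.DB) error {
	wb := levigo.NewWriteBatch()
	defer wb.Close() // WriteBatch wraps a C allocation and must be closed
	wb.Put([]byte("key-a"), []byte("value-a"))
	wb.Delete([]byte("stale-key"))
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	return db.Write(wo, wb) // all staged operations are applied atomically
}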
Example 1: WriteSeriesData
func (self *LevelDbDatastore) WriteSeriesData(database string, series *protocol.Series) error {
	wb := levigo.NewWriteBatch()
	defer wb.Close()
	for fieldIndex, field := range series.Fields {
		temp := field
		id, _, err := self.getIdForDbSeriesColumn(&database, series.Name, &temp)
		if err != nil {
			return err
		}
		for _, point := range series.Points {
			timestampBuffer := bytes.NewBuffer(make([]byte, 0, 8))
			sequenceNumberBuffer := bytes.NewBuffer(make([]byte, 0, 8))
			binary.Write(timestampBuffer, binary.BigEndian, self.convertTimestampToUint(point.GetTimestampInMicroseconds()))
			binary.Write(sequenceNumberBuffer, binary.BigEndian, uint64(*point.SequenceNumber))
			pointKey := append(append(id, timestampBuffer.Bytes()...), sequenceNumberBuffer.Bytes()...)
			// TODO: we should remove the column value if timestamp and sequence number
			// were provided
			if point.Values[fieldIndex] == nil {
				continue
			}
			data, err2 := proto.Marshal(point.Values[fieldIndex])
			if err2 != nil {
				return err2
			}
			wb.Put(pointKey, data)
		}
	}
	return self.db.Write(self.writeOptions, wb)
}
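The point key built in Example 1 has a fixed layout: the 8-byte column id, followed by the big-endian timestamp and the big-endian sequence number, 24 bytes in total, so keys for one column sort chronologically. The helper below restates just that encoding step with placeholder inputs; the name encodePointKey is illustrative and not part of the original code, and it relies only on the standard bytes and encoding/binary packages.

func encodePointKey(id []byte, timestamp, sequence uint64) []byte {
	buf := bytes.NewBuffer(make([]byte, 0, 24))
	buf.Write(id) // 8-byte column id
	binary.Write(buf, binary.BigEndian, timestamp)
	binary.Write(buf, binary.BigEndian, sequence)
	return buf.Bytes()
}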
Example 2: newLevelDBBatch
func newLevelDBBatch(store *LevelDBStore) *LevelDBBatch {
	rv := LevelDBBatch{
		store: store,
		batch: levigo.NewWriteBatch(),
	}
	return &rv
}
Example 3: DeleteSpan
// Delete a span from the shard. Note that leveldb may retain the data until
// compaction(s) remove it.
func (shd *shard) DeleteSpan(span *common.Span) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	primaryKey :=
		append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
	batch.Delete(primaryKey)
	for parentIdx := range span.Parents {
		key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
			span.Parents[parentIdx].Val()...), span.Id.Val()...)
		batch.Delete(key)
	}
	beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
		u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
	batch.Delete(beginTimeKey)
	endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
		u64toSlice(s2u64(span.End))...), span.Id.Val()...)
	batch.Delete(endTimeKey)
	durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
		u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
	batch.Delete(durationKey)
	err := shd.ldb.Write(shd.store.writeOpts, batch)
	if err != nil {
		return err
	}
	return nil
}
Example 4: writeSpan
func (shd *shard) writeSpan(ispan *IncomingSpan) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	span := ispan.Span
	primaryKey :=
		append([]byte{SPAN_ID_INDEX_PREFIX}, span.Id.Val()...)
	batch.Put(primaryKey, ispan.SpanDataBytes)
	// Add this to the parent index.
	for parentIdx := range span.Parents {
		key := append(append([]byte{PARENT_ID_INDEX_PREFIX},
			span.Parents[parentIdx].Val()...), span.Id.Val()...)
		batch.Put(key, EMPTY_BYTE_BUF)
	}
	// Add to the other secondary indices.
	beginTimeKey := append(append([]byte{BEGIN_TIME_INDEX_PREFIX},
		u64toSlice(s2u64(span.Begin))...), span.Id.Val()...)
	batch.Put(beginTimeKey, EMPTY_BYTE_BUF)
	endTimeKey := append(append([]byte{END_TIME_INDEX_PREFIX},
		u64toSlice(s2u64(span.End))...), span.Id.Val()...)
	batch.Put(endTimeKey, EMPTY_BYTE_BUF)
	durationKey := append(append([]byte{DURATION_INDEX_PREFIX},
		u64toSlice(s2u64(span.Duration()))...), span.Id.Val()...)
	batch.Put(durationKey, EMPTY_BYTE_BUF)
	err := shd.ldb.Write(shd.store.writeOpts, batch)
	if err != nil {
		shd.store.lg.Errorf("Error writing span %s to leveldb at %s: %s\n",
			span.String(), shd.path, err.Error())
		return err
	}
	return nil
}
Example 5: NewWriteBatch
func (this *DB) NewWriteBatch() *WriteBatch {
	batch := levigo.NewWriteBatch()
	var hook *Hook
	if len(this.pre) != 0 || len(this.post) != 0 {
		hook = &Hook{db: this, batch: batch}
	}
	return &WriteBatch{db: this, batch: batch, hook: hook}
}
Example 6: NewBatch
func (w *Writer) NewBatch() store.KVBatch {
	rv := Batch{
		w:     w,
		merge: store.NewEmulatedMerge(w.store.mo),
		batch: levigo.NewWriteBatch(),
	}
	return &rv
}
Example 7: StartBatch
// StartBatch starts a new batch write, reusing the existing batch if one is already allocated.
func (lvdb *LVDB) StartBatch() {
	if lvdb._writeBatch == nil {
		lvdb._writeBatch = levigo.NewWriteBatch()
	} else {
		lvdb._writeBatch.Clear()
	}
	lvdb.isBatch = true
}
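Example 7 only opens or resets the batch; the commit path of this LVDB type is not shown on this page. A hypothetical companion method might look like the sketch below, assuming the struct also holds the open *levigo.DB and a *levigo.WriteOptions; the names CommitBatch, db, and writeOptions are illustrative and not taken from the original code.

// CommitBatch is a hypothetical counterpart to StartBatch: it writes the
// accumulated operations and resets the batching state.
func (lvdb *LVDB) CommitBatch() error {
	if lvdb._writeBatch == nil {
		return nil // StartBatch was never called
	}
	err := lvdb.db.Write(lvdb.writeOptions, lvdb._writeBatch) // db and writeOptions are assumed fields
	lvdb._writeBatch.Clear()
	lvdb.isBatch = false
	return err
}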
Example 8: Write
func (self *LevelDbShard) Write(database string, series []*protocol.Series) error {
	wb := levigo.NewWriteBatch()
	defer wb.Close()
	for _, s := range series {
		if len(s.Points) == 0 {
			return errors.New("Unable to write no data. Series was nil or had no points.")
		}
		count := 0
		for fieldIndex, field := range s.Fields {
			temp := field
			id, err := self.createIdForDbSeriesColumn(&database, s.Name, &temp)
			if err != nil {
				return err
			}
			keyBuffer := bytes.NewBuffer(make([]byte, 0, 24))
			dataBuffer := proto.NewBuffer(nil)
			for _, point := range s.Points {
				keyBuffer.Reset()
				dataBuffer.Reset()
				keyBuffer.Write(id)
				timestamp := self.convertTimestampToUint(point.GetTimestampInMicroseconds())
				// pass the uint64 by reference so binary.Write() doesn't create a new buffer
				// see the source code for intDataSize() in binary.go
				binary.Write(keyBuffer, binary.BigEndian, &timestamp)
				binary.Write(keyBuffer, binary.BigEndian, point.SequenceNumber)
				pointKey := keyBuffer.Bytes()
				if point.Values[fieldIndex].GetIsNull() {
					wb.Delete(pointKey)
					goto check
				}
				err = dataBuffer.Marshal(point.Values[fieldIndex])
				if err != nil {
					return err
				}
				wb.Put(pointKey, dataBuffer.Bytes())
			check:
				count++
				if count >= self.writeBatchSize {
					err = self.db.Write(self.writeOptions, wb)
					if err != nil {
						return err
					}
					count = 0
					wb.Clear()
				}
			}
		}
	}
	return self.db.Write(self.writeOptions, wb)
}
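Example 8 flushes the batch every writeBatchSize operations so a single WriteBatch never grows without bound, then reuses it via Clear(). The same flush-every-N pattern, isolated from the InfluxDB specifics, might look like this sketch; the kv map and chunkSize parameter are placeholders, and only the levigo calls come from the library.

// writeInChunks stages Puts and flushes the batch every chunkSize entries.
func writeInChunks(db *levigo.DB, wo *levigo.WriteOptions, kv map[string][]byte, chunkSize int) error {
	wb := levigo.NewWriteBatch()
	defer wb.Close()
	count := 0
	for k, v := range kv {
		wb.Put([]byte(k), v)
		count++
		if count >= chunkSize {
			if err := db.Write(wo, wb); err != nil {
				return err
			}
			wb.Clear() // reuse the same batch for the next chunk
			count = 0
		}
	}
	return db.Write(wo, wb) // write whatever is left in the final partial chunk
}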
Example 9: deleteRangeOfSeriesCommon
func (self *LevelDbShard) deleteRangeOfSeriesCommon(database, series string, startTimeBytes, endTimeBytes []byte) error {
	columns := self.getColumnNamesForSeries(database, series)
	fields, err := self.getFieldsForSeries(database, series, columns)
	if err != nil {
		// because a db is distributed across the cluster, it's possible we don't have the series indexed here. ignore
		switch err := err.(type) {
		case FieldLookupError:
			return nil
		default:
			return err
		}
	}
	ro := levigo.NewReadOptions()
	defer ro.Close()
	ro.SetFillCache(false)
	for _, field := range fields {
		it := self.db.NewIterator(ro)
		defer it.Close()
		wb := levigo.NewWriteBatch()
		defer wb.Close()
		startKey := append(field.Id, startTimeBytes...)
		it.Seek(startKey)
		if it.Valid() {
			if !bytes.Equal(it.Key()[:8], field.Id) {
				it.Next()
				if it.Valid() {
					startKey = it.Key()
				}
			}
		}
		count := 0
		for it = it; it.Valid(); it.Next() {
			k := it.Key()
			if len(k) < 16 || !bytes.Equal(k[:8], field.Id) || bytes.Compare(k[8:16], endTimeBytes) == 1 {
				break
			}
			wb.Delete(k)
			count++
			if count >= SIXTY_FOUR_KILOBYTES {
				err = self.db.Write(self.writeOptions, wb)
				if err != nil {
					return err
				}
				count = 0
				wb.Clear()
			}
		}
		err = self.db.Write(self.writeOptions, wb)
		if err != nil {
			return err
		}
	}
	return nil
}
Example 10: BatchPut
func (db LevelDB) BatchPut(writes []Write) error {
	wb := levigo.NewWriteBatch()
	defer wb.Close()
	for _, w := range writes {
		if w.Value == nil {
			wb.Delete(w.Key)
			continue
		}
		wb.Put(w.Key, w.Value)
	}
	return db.db.Write(db.wopts, wb)
}
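A caller of Example 10 builds a slice of Write values, where a nil Value marks a delete. A small usage sketch, assuming Write is a plain struct with exported Key and Value byte-slice fields (consistent with the accesses above) and that db is an initialized LevelDB:

// Stage two upserts and one delete in a single atomic batch.
writes := []Write{
	{Key: []byte("user:1"), Value: []byte("alice")},
	{Key: []byte("user:2"), Value: []byte("bob")},
	{Key: []byte("user:3"), Value: nil}, // nil Value is applied as a delete
}
if err := db.BatchPut(writes); err != nil {
	// handle the write error
}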
Example 11: deleteRangeOfSeriesCommon
func (self *LevelDbDatastore) deleteRangeOfSeriesCommon(database, series string, startTimeBytes, endTimeBytes []byte) error {
	columns := self.getColumnNamesForSeries(database, series)
	fields, err := self.getFieldsForSeries(database, series, columns)
	if err != nil {
		// because a db is distributed across the cluster, it's possible we don't have the series indexed here. ignore
		switch err := err.(type) {
		case FieldLookupError:
			return nil
		default:
			return err
		}
	}
	ro := levigo.NewReadOptions()
	defer ro.Close()
	ro.SetFillCache(false)
	rangesToCompact := make([]*levigo.Range, 0)
	for _, field := range fields {
		it := self.db.NewIterator(ro)
		defer it.Close()
		wb := levigo.NewWriteBatch()
		defer wb.Close()
		startKey := append(field.Id, startTimeBytes...)
		endKey := startKey
		it.Seek(startKey)
		if it.Valid() {
			if !bytes.Equal(it.Key()[:8], field.Id) {
				it.Next()
				if it.Valid() {
					startKey = it.Key()
				}
			}
		}
		for it = it; it.Valid(); it.Next() {
			k := it.Key()
			if len(k) < 16 || !bytes.Equal(k[:8], field.Id) || bytes.Compare(k[8:16], endTimeBytes) == 1 {
				break
			}
			wb.Delete(k)
			endKey = k
		}
		err = self.db.Write(self.writeOptions, wb)
		if err != nil {
			return err
		}
		rangesToCompact = append(rangesToCompact, &levigo.Range{startKey, endKey})
	}
	for _, r := range rangesToCompact {
		self.db.CompactRange(*r)
	}
	return nil
}
Example 12: Write
func (context *levelDBContext) Write() error {
	var wb = levigo.NewWriteBatch()
	defer wb.Close()
	for _, entry := range context.batch {
		if entry.Delete {
			wb.Delete(entry.Key)
		} else {
			wb.Put(entry.Key, entry.Val)
		}
	}
	return context.ldbStore.db.Write(defaultWriteOptions, wb)
}
Example 13: Puts
func (connection *LevelDbConnection) Puts(options *proto.DbWriteOptions, keys [][]byte, values [][]byte) error {
	wo := levigo.NewWriteOptions()
	defer wo.Close()
	if options != nil {
		wo.SetSync(options.Sync)
	}
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	for i, key := range keys {
		batch.Put(key, values[i])
	}
	return connection.db.Write(wo, batch)
}
Example 14: DeleteMembers
func (p *InsertedWaysCache) DeleteMembers(members []element.Member) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	for _, m := range members {
		if m.Type != element.WAY {
			continue
		}
		keyBuf := idToKeyBuf(m.Id)
		batch.Delete(keyBuf)
	}
	return p.db.Write(p.wo, batch)
}
Example 15: PutWays
func (p *WaysCache) PutWays(ways []element.Way) error {
	batch := levigo.NewWriteBatch()
	defer batch.Close()
	for _, way := range ways {
		keyBuf := idToKeyBuf(way.Id)
		data, err := binary.MarshalWay(&way)
		if err != nil {
			return err
		}
		batch.Put(keyBuf, data)
	}
	return p.db.Write(p.wo, batch)
}