This page collects typical usage examples of the Errorf function from the Golang package github.com/janelia-flyem/dvid/dvid. If you are unsure what Errorf does, how to call it, or want to see it in context, the curated examples below may help.
Fifteen code examples of Errorf are shown, ordered by popularity by default.
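Across all of the examples below, dvid.Errorf is used like a Printf-style logger for error-level messages (its siblings dvid.Infof, dvid.Debugf, and dvid.Criticalf also appear). It is typically called where no error value can be returned to a caller, such as inside goroutines and callbacks. A minimal hedged sketch of that pattern; processAsync and doWork are hypothetical names, not part of dvid:

func processAsync(data []byte) {
    go func() {
        if err := doWork(data); err != nil { // doWork is a placeholder, not a dvid API
            // There is no caller to return an error to inside the goroutine, so log it.
            dvid.Errorf("error processing %d bytes asynchronously: %v\n", len(data), err)
            return
        }
        dvid.Infof("processed %d bytes\n", len(data))
    }()
}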
Example 1: getValue
func getValue(r api.Row, verKey []byte) ([]byte, error) {
    for _, readItem := range r[familyName] {
        // readItem.Column contains the family prefix as part of the key,
        // so we have to trim the prefix and decode to bytes.
        // Comparing against an encoded verKey directly would not work.
        itemVer, err := decodeKey(readItem.Column)
        if err != nil {
            return nil, fmt.Errorf("Error in getValue(): %s\n", err)
        }
        if bytes.Equal(itemVer, verKey) {
            return readItem.Value, nil
        }
    }

    // Debug
    dvid.Errorf("Row available %s", r[familyName])
    for _, readItem := range r[familyName] {
        itemVer, _ := decodeKey(readItem.Column)
        dvid.Errorf("Versions available %s", itemVer)
    }
    return nil, fmt.Errorf("Failed to find version %s in Row", verKey)
}
Example 2: MigrateInstance
// MigrateInstance migrates a data instance locally from an old storage
// engine to the current configured storage. After completion of the copy,
// the data instance in the old storage is deleted.
func MigrateInstance(uuid dvid.UUID, source dvid.InstanceName, oldStore dvid.Store, c dvid.Config) error {
    if manager == nil {
        return ErrManagerNotInitialized
    }

    // Get flatten or not
    transmit, _, err := c.GetString("transmit")
    if err != nil {
        return err
    }
    var flatten bool
    if transmit == "flatten" {
        flatten = true
    }

    // Get the source data instance.
    d, err := manager.getDataByUUIDName(uuid, source)
    if err != nil {
        return err
    }

    // Get the current store for this data instance.
    storer, ok := d.(storage.Accessor)
    if !ok {
        return fmt.Errorf("unable to migrate data %q: unable to access backing store", d.DataName())
    }
    curKV, err := storer.GetOrderedKeyValueDB()
    if err != nil {
        return fmt.Errorf("unable to get backing store for data %q: %v\n", source, err)
    }

    // Get the old store.
    oldKV, ok := oldStore.(storage.OrderedKeyValueDB)
    if !ok {
        return fmt.Errorf("unable to migrate data %q from store %s which isn't ordered kv store", source, storer)
    }

    // Abort if the two stores are the same.
    if curKV == oldKV {
        return fmt.Errorf("old store for data %q seems same as current store", source)
    }

    // Migrate data asynchronously.
    go func() {
        if err := copyData(oldKV, curKV, d, nil, uuid, nil, flatten); err != nil {
            dvid.Errorf("error in migration of data %q: %v\n", source, err)
            return
        }
        // delete data off old store.
        dvid.Infof("Starting delete of instance %q from old storage %q\n", d.DataName(), oldKV)
        ctx := storage.NewDataContext(d, 0)
        if err := oldKV.DeleteAll(ctx, true); err != nil {
            dvid.Errorf("deleting instance %q from %q after copy to %q: %v\n", d.DataName(), oldKV, curKV, err)
            return
        }
    }()

    dvid.Infof("Migrating data %q from store %q to store %q ...\n", d.DataName(), oldKV, curKV)
    return nil
}
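A hedged invocation sketch of the function above: the uuid, the old store handle, and the config value c (whose "transmit" key would be set to "flatten" to copy a flattened version history) are assumed to come from the caller and are not constructed here, and "grayscale" is only an illustrative instance name.

func migrateGrayscale(uuid dvid.UUID, oldStore dvid.Store, c dvid.Config) {
    // Hypothetical call site; MigrateInstance starts the copy asynchronously.
    if err := MigrateInstance(uuid, "grayscale", oldStore, c); err != nil {
        dvid.Errorf("could not start migration of %q: %v\n", "grayscale", err)
    }
}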
Example 3: PutRange
// Put key-value pairs. Note that it could be more efficient to use the Batcher
// interface so you don't have to create and keep a slice of KeyValue. Some
// databases like leveldb will copy on batch put anyway.
func (db *BigTable) PutRange(ctx storage.Context, TKeyValue []storage.TKeyValue) error {
    if db == nil {
        return fmt.Errorf("Can't call PutRange() on nil BigTable")
    }
    if ctx == nil {
        return fmt.Errorf("Received nil context in PutRange()")
    }
    for _, tkeyvalue := range TKeyValue {
        unvKey, verKey, err := ctx.SplitKey(tkeyvalue.K)
        if err != nil {
            dvid.Errorf("Error in PutRange(): %v\n", err)
        }
        mut := api.NewMutation()
        mut.Set(familyName, encodeKey(verKey), 0, tkeyvalue.V)
        err = tbl.Apply(db.ctx, encodeKey(unvKey), mut)
        if err != nil {
            dvid.Errorf("Failed to Put value in PutRange()")
        }
    }
    return nil
}
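The doc comment above notes that the Batcher interface can avoid materializing a slice of key-values; examples 4, 6, and 13 below use that interface against other stores. A minimal sketch of the batched form, assuming the store behind the context satisfies storage.KeyValueBatcher and that putBatched is a hypothetical helper, not part of dvid:

func putBatched(store dvid.Store, ctx storage.Context, tkvs []storage.TKeyValue) error {
    batcher, ok := store.(storage.KeyValueBatcher)
    if !ok {
        return fmt.Errorf("store %s does not support batching", store)
    }
    batch := batcher.NewBatch(ctx)
    for _, tkv := range tkvs {
        batch.Put(tkv.K, tkv.V) // queue writes instead of applying one mutation per key
    }
    return batch.Commit()
}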
Example 4: handleBlockEvent
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time? This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent(in <-chan datastore.SyncMessage, done <-chan struct{}) {
    store, err := storage.SmallDataStore()
    if err != nil {
        dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
        return
    }
    batcher, ok := store.(storage.KeyValueBatcher)
    if !ok {
        dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
        return
    }
    for msg := range in {
        select {
        case <-done:
            return
        default:
            ctx := datastore.NewVersionedCtx(d, msg.Version)
            switch delta := msg.Delta.(type) {
            case imageblk.Block:
                d.ingestBlock(ctx, delta, batcher)
            case labels.DeleteBlock:
                d.deleteBlock(ctx, delta, batcher)
            default:
                dvid.Criticalf("Cannot sync labelvol from block event. Got unexpected delta: %v\n", msg)
            }
        }
    }
}
Example 5: MergeStart
// MergeStart handles label map caches during an active merge operation. Note that if there are
// multiple synced label instances, the InstanceVersion will always be the labelblk instance.
// Multiple merges into a single label are allowed, but chained merges are not. For example,
// you can merge label 1, 2, and 3 into 4, then later merge 6 into 4. However, you cannot
// concurrently merge label 4 into some other label because there can be a race condition between
// 3 -> 4 and 4 -> X.
func MergeStart(iv dvid.InstanceVersion, op MergeOp) error {
    dvid.Infof("MergeStart starting for iv %v with op %v. mergeCache: %v\n", iv, op, mc.m)

    // Don't allow a merge to start in the middle of a concurrent merge/split.
    if labelsSplitting.IsDirty(iv, op.Target) { // we might be able to relax this one.
        return fmt.Errorf("can't merge into label %d while it has an ongoing split", op.Target)
    }
    if mc.MergingToOther(iv, op.Target) {
        dvid.Errorf("can't merge label %d while it is currently merging into another label", op.Target)
        return fmt.Errorf("can't merge label %d while it is currently merging into another label", op.Target)
    }
    for merged := range op.Merged {
        if labelsSplitting.IsDirty(iv, merged) {
            return fmt.Errorf("can't merge label %d while it has an ongoing split", merged)
        }
        if labelsMerging.IsDirty(iv, merged) {
            dvid.Errorf("can't merge label %d while it is currently involved in a merge", merged)
            return fmt.Errorf("can't merge label %d while it is currently involved in a merge", merged)
        }
    }

    // Add the merge to the mapping.
    if err := mc.Add(iv, op); err != nil {
        return err
    }

    // Adjust the dirty counts on the involved labels.
    labelsMerging.AddMerge(iv, op)
    return nil
}
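A hedged usage sketch of the scenario from the doc comment (labels 1, 2, and 3 merged into label 4). The construction of MergeOp.Merged and of the dvid.InstanceVersion value iv is assumed here rather than taken from this example; mergedSet is a hypothetical constructor standing in for whatever concrete set type MergeOp.Merged is.

func startExampleMerge(iv dvid.InstanceVersion) error {
    // Hypothetical sketch; the code above only requires that Merged can be
    // ranged over and looked up by label ID.
    op := MergeOp{Target: 4, Merged: mergedSet(1, 2, 3)}
    if err := MergeStart(iv, op); err != nil {
        dvid.Errorf("cannot start merge of labels 1, 2, 3 into 4: %v\n", err)
        return err
    }
    // ... relabel affected blocks (see mergeBlock in examples 7 and 10), then
    // clear the merge bookkeeping once the operation completes.
    return nil
}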
Example 6: writeBlocks
// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks ingests blocks of voxel data asynchronously using batch writes.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
    batcher, err := d.GetKeyValueBatcher()
    if err != nil {
        return err
    }

    preCompress, postCompress := 0, 0
    ctx := datastore.NewVersionedCtx(d, v)
    evt := datastore.SyncEvent{d.DataUUID(), IngestBlockEvent}

    <-server.HandlerToken
    go func() {
        defer func() {
            wg1.Done()
            wg2.Done()
            dvid.Debugf("Wrote voxel blocks. Before %s: %d bytes. After: %d bytes\n", d.Compression(), preCompress, postCompress)
            server.HandlerToken <- 1
        }()

        mutID := d.NewMutationID()
        batch := batcher.NewBatch(ctx)
        for i, block := range b {
            serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
            preCompress += len(block.V)
            postCompress += len(serialization)
            if err != nil {
                dvid.Errorf("Unable to serialize block: %v\n", err)
                return
            }
            batch.Put(block.K, serialization)

            indexZYX, err := DecodeTKey(block.K)
            if err != nil {
                dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
                return
            }
            msg := datastore.SyncMessage{IngestBlockEvent, v, Block{indexZYX, block.V, mutID}}
            if err := datastore.NotifySubscribers(evt, msg); err != nil {
                dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
                return
            }

            // Check if we should commit
            if i%KVWriteSize == KVWriteSize-1 {
                if err := batch.Commit(); err != nil {
                    dvid.Errorf("Error on trying to write batch: %v\n", err)
                    return
                }
                batch = batcher.NewBatch(ctx)
            }
        }
        if err := batch.Commit(); err != nil {
            dvid.Errorf("Error on trying to write batch: %v\n", err)
            return
        }
    }()
    return nil
}
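In the loop above, the batch is committed every KVWriteSize puts and a fresh batch is started, so a single batch never grows without bound; a final commit picks up the remainder. The same pattern reduced to a standalone sketch; writeInChunks, flushEvery, and the kvs argument are placeholders introduced only for illustration:

func writeInChunks(batcher storage.KeyValueBatcher, ctx storage.Context, kvs storage.TKeyValues) error {
    const flushEvery = 500 // placeholder; the example above uses KVWriteSize
    batch := batcher.NewBatch(ctx)
    for i, kv := range kvs {
        batch.Put(kv.K, kv.V)
        if i%flushEvery == flushEvery-1 {
            if err := batch.Commit(); err != nil {
                return err
            }
            batch = batcher.NewBatch(ctx) // a committed batch is not reused
        }
    }
    // Final commit for any puts after the last flush boundary.
    return batch.Commit()
}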
Example 7: mergeBlock
// Goroutine that handles relabeling of blocks during a merge operation.
// Since the same block coordinate always gets mapped to the same goroutine, we handle
// concurrency by serializing GET/PUT for a particular block coordinate.
func (d *Data) mergeBlock(in <-chan mergeOp) {
    store, err := storage.MutableStore()
    if err != nil {
        dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
        return
    }
    blockBytes := int(d.BlockSize().Prod() * 8)
    for op := range in {
        tk := NewTKeyByCoord(op.izyx)
        data, err := store.Get(op.ctx, tk)
        if err != nil {
            dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.izyx)
            op.wg.Done()
            continue
        }
        if data == nil {
            dvid.Errorf("nil label block where merge was done!\n")
            op.wg.Done()
            continue
        }
        blockData, _, err := dvid.DeserializeData(data, true)
        if err != nil {
            dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
            op.wg.Done()
            continue
        }
        if len(blockData) != blockBytes {
            dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
            op.wg.Done()
            continue
        }

        // Iterate through this block of labels and relabel if label in merge.
        for i := 0; i < blockBytes; i += 8 {
            label := binary.LittleEndian.Uint64(blockData[i : i+8])
            if _, merged := op.Merged[label]; merged {
                binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
            }
        }

        // Store this block.
        serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
        if err != nil {
            dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
            op.wg.Done()
            continue
        }
        if err := store.Put(op.ctx, tk, serialization); err != nil {
            dvid.Errorf("Error in putting key %v: %v\n", tk, err)
        }
        op.wg.Done()
    }
}
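The comment above relies on a dispatcher that always routes a given block coordinate to the same goroutine so GET/PUT on that block is serialized; the dispatcher itself is not part of this example. One common way to get that property is to hash the coordinate into a fixed pool of channels, sketched here with hypothetical names (assumes hash/fnv is imported and that izyx is a string-typed block coordinate):

func dispatchMerge(chans []chan mergeOp, op mergeOp) {
    // Hash the block coordinate so the same coordinate always lands on the
    // same worker channel, serializing access to that block.
    h := fnv.New32a()
    h.Write([]byte(op.izyx))
    idx := h.Sum32() % uint32(len(chans))
    chans[idx] <- op
}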
Example 8: GetRange
// GetRange returns a range of values spanning (TkBeg, TkEnd) keys.
func (db *BigTable) GetRange(ctx storage.Context, TkBeg, TkEnd storage.TKey) ([]*storage.TKeyValue, error) {
    if db == nil {
        return nil, fmt.Errorf("Can't call GetRange() on nil BigTable")
    }
    if ctx == nil {
        return nil, fmt.Errorf("Received nil context in GetRange()")
    }
    unvKeyBeg, _, err := ctx.SplitKey(TkBeg)
    if err != nil {
        dvid.Errorf("Error in GetRange(): %v\n", err)
    }
    unvKeyEnd, _, err := ctx.SplitKey(TkEnd)
    if err != nil {
        dvid.Errorf("Error in GetRange(): %v\n", err)
    }

    tKeyValues := make([]*storage.TKeyValue, 0)
    rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd))
    err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool {
        unvKeyRow, err := decodeKey(r.Key())
        if err != nil {
            dvid.Errorf("Error in GetRange() decodeKey(r.Key()): %v\n", err)
            return false
        }
        // dvid.Infof("GetRange() with row key= %v", r.Key())
        for _, readItem := range r[familyName] {
            verKey, err := decodeKey(readItem.Column)
            if err != nil {
                dvid.Errorf("Error in GetRange() decodeKey(readItem.Column): %v\n", err)
                return false
            }
            fullKey := storage.MergeKey(unvKeyRow, verKey)
            // dvid.Infof("column key= %v , timestamp = %v", verKey, readItem.Timestamp)
            tkey, err := storage.TKeyFromKey(fullKey)
            if err != nil {
                dvid.Errorf("Error in GetRange() storage.TKeyFromKey(fullKey): %v\n", err)
                return false
            }
            kv := storage.TKeyValue{tkey, readItem.Value}
            tKeyValues = append(tKeyValues, &kv)
        }
        return true // keep going
    })
    return tKeyValues, err
}
Example 9: Run
// Runs a queue of post-processing commands, calling functions previously registered
// through RegisterPostProcessing(). If a command has not been registered, it will
// be skipped and noted in log.
func (q PostProcQueue) Run() {
    for _, command := range q {
        callback, found := registeredOps.postproc[command.name]
        if !found {
            dvid.Errorf("Skipping unregistered post-processing command %q\n", command.name)
            continue
        }
        if err := callback(command.data); err != nil {
            dvid.Errorf("Error in post-proc command %q: %v\n", command.data, err)
        }
    }
}
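Run assumes each command name was registered up front via RegisterPostProcessing, which this example does not show. A hedged registration sketch: the callback signature here is only inferred from how callback(command.data) is invoked above, and the command name and body are hypothetical.

// Hypothetical registration; the real RegisterPostProcessing signature and the
// concrete type of command.data are assumptions, not shown in the example above.
func registerRecompute() {
    RegisterPostProcessing("recompute-index", func(data interface{}) error {
        dvid.Infof("recomputing index for %v\n", data)
        return nil
    })
}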
Example 10: mergeBlock
// handles relabeling of blocks during a merge operation.
func (d *Data) mergeBlock(ctx *datastore.VersionedCtx, op mergeOp) {
    defer d.MutDone(op.mutID)

    store, err := d.GetKeyValueDB()
    if err != nil {
        dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
        return
    }

    tk := NewTKeyByCoord(op.block)
    data, err := store.Get(ctx, tk)
    if err != nil {
        dvid.Errorf("Error on GET of labelblk with coord string %q\n", op.block)
        return
    }
    if data == nil {
        dvid.Errorf("nil label block where merge was done!\n")
        return
    }

    blockData, _, err := dvid.DeserializeData(data, true)
    if err != nil {
        dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
        return
    }
    blockBytes := int(d.BlockSize().Prod() * 8)
    if len(blockData) != blockBytes {
        dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
        return
    }

    // Iterate through this block of labels and relabel if label in merge.
    for i := 0; i < blockBytes; i += 8 {
        label := binary.LittleEndian.Uint64(blockData[i : i+8])
        if _, merged := op.Merged[label]; merged {
            binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
        }
    }

    // Store this block.
    serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
    if err != nil {
        dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
        return
    }
    if err := store.Put(ctx, tk, serialization); err != nil {
        dvid.Errorf("Error in putting key %v: %v\n", tk, err)
    }

    // Notify any downstream downres instance.
    d.publishBlockChange(ctx.VersionID(), op.mutID, op.block, blockData)
}
Example 11: init
func init() {
    // Set default Host name for understandability from user perspective.
    // Assumes Linux or Mac. From stackoverflow suggestion.
    cmd := exec.Command("/bin/hostname", "-f")
    var out bytes.Buffer
    cmd.Stdout = &out
    if err := cmd.Run(); err != nil {
        dvid.Errorf("Unable to get default Host name via /bin/hostname: %v\n", err)
        dvid.Errorf("Using 'localhost' as default Host name.\n")
        return
    }
    DefaultHost = out.String()
    DefaultHost = DefaultHost[:len(DefaultHost)-1] // removes EOL
}
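Shelling out to /bin/hostname only works where that binary exists (Linux or macOS, as the comment notes). A portable sketch using the standard library's os.Hostname instead, not what this example does; note os.Hostname may return a short name rather than the fully qualified name that hostname -f gives.

func init() {
    // Portable alternative sketch using the standard library.
    host, err := os.Hostname()
    if err != nil {
        dvid.Errorf("Unable to get default Host name via os.Hostname(): %v\n", err)
        dvid.Errorf("Using 'localhost' as default Host name.\n")
        return
    }
    DefaultHost = host
}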
Example 12: SendKeysInRange
// SendKeysInRange sends a range of full keys down a key channel.
func (db *BigTable) SendKeysInRange(ctx storage.Context, TkBeg, TkEnd storage.TKey, ch storage.KeyChan) error {
    if db == nil {
        return fmt.Errorf("Can't call SendKeysInRange() on nil BigTable")
    }
    if ctx == nil {
        return fmt.Errorf("Received nil context in SendKeysInRange()")
    }
    unvKeyBeg, _, err := ctx.SplitKey(TkBeg)
    if err != nil {
        dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
    }
    unvKeyEnd, _, err := ctx.SplitKey(TkEnd)
    if err != nil {
        dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
    }

    rr := api.NewRange(encodeKey(unvKeyBeg), encodeKey(unvKeyEnd))
    err = tbl.ReadRows(db.ctx, rr, func(r api.Row) bool {
        unvKeyRow, err := decodeKey(r.Key())
        if err != nil {
            dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
            return false
        }
        // We need the versioned key so we can merge it with the unversioned
        // key and send the full key through the channel.
        for _, readItem := range r[familyName] {
            verKey, err := decodeKey(readItem.Column)
            if err != nil {
                dvid.Errorf("Error in SendKeysInRange(): %v\n", err)
                return false
            }
            fullKey := storage.MergeKey(unvKeyRow, verKey)
            ch <- fullKey
        }
        return true // keep going
    }, api.RowFilter(api.StripValueFilter()))
    return err
}
Example 13: handleBlockEvent
// Processes each change as we get it.
// TODO -- accumulate larger # of changes before committing to prevent
// excessive compaction time? This assumes LSM storage engine, which
// might not always hold in future, so stick with incremental update
// until proven to be a bottleneck.
func (d *Data) handleBlockEvent() {
    store, err := d.GetOrderedKeyValueDB()
    if err != nil {
        dvid.Errorf("Data type labelvol had error initializing store: %v\n", err)
        return
    }
    batcher, ok := store.(storage.KeyValueBatcher)
    if !ok {
        dvid.Errorf("Data type labelvol requires batch-enabled store, which %q is not\n", store)
        return
    }
    var stop bool
    var wg *sync.WaitGroup
    for {
        select {
        case wg = <-d.syncDone:
            queued := len(d.syncCh)
            if queued > 0 {
                dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
                stop = true
            } else {
                dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
                wg.Done()
                return
            }
        case msg := <-d.syncCh:
            d.StartUpdate()
            ctx := datastore.NewVersionedCtx(d, msg.Version)
            switch delta := msg.Delta.(type) {
            case imageblk.Block:
                d.ingestBlock(ctx, delta, batcher)
            case imageblk.MutatedBlock:
                d.mutateBlock(ctx, delta, batcher)
            case labels.DeleteBlock:
                d.deleteBlock(ctx, delta, batcher)
            default:
                dvid.Criticalf("Cannot sync labelvol from block event. Got unexpected delta: %v\n", msg)
            }
            d.StopUpdate()

            if stop && len(d.syncCh) == 0 {
                dvid.Infof("Shutting down sync event handler for instance %q after draining sync events.\n", d.DataName())
                wg.Done()
                return
            }
        }
    }
}
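The select loop above implements a drain-then-stop shutdown: a signal on syncDone flips a flag, and the handler keeps consuming syncCh until the queue is empty before calling wg.Done(). The same pattern reduced to its skeleton; drainLoop, the work type, and process are placeholders introduced only for illustration:

func drainLoop(workCh chan work, doneCh chan *sync.WaitGroup) {
    var (
        stopping bool
        wg       *sync.WaitGroup
    )
    for {
        select {
        case wg = <-doneCh: // shutdown requested
            if len(workCh) == 0 {
                wg.Done()
                return
            }
            stopping = true // keep draining queued work before exiting
        case msg := <-workCh:
            process(msg) // placeholder for the delta type switch above
            if stopping && len(workCh) == 0 {
                wg.Done()
                return
            }
        }
    }
}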
Example 14: Get
// Get returns a value given a key.
func (db *BigTable) Get(ctx storage.Context, tk storage.TKey) ([]byte, error) {
    if db == nil {
        return nil, fmt.Errorf("Can't call Get() on nil BigTable")
    }
    if ctx == nil {
        return nil, fmt.Errorf("Received nil context in Get()")
    }
    unvKey, verKey, err := ctx.SplitKey(tk)
    if err != nil {
        dvid.Errorf("Error in Get(): %v\n", err)
    }

    r, err := tbl.ReadRow(db.ctx, encodeKey(unvKey))
    // A missing row will return a zero-length map and a nil error
    if len(r) == 0 {
        return nil, err
    }
    if err != nil {
        return nil, err
    }

    value, err := getValue(r, verKey)
    if err != nil {
        return nil, err
    }
    return value, nil
}
Example 15: GetAssignedStore
// GetAssignedStore returns the store assigned based on (instance name, root uuid) or type.
// In some cases, this store may include a caching wrapper if the data instance has been
// configured to use groupcache.
func GetAssignedStore(dataname dvid.InstanceName, root dvid.UUID, typename dvid.TypeString) (dvid.Store, error) {
    if !manager.setup {
        return nil, fmt.Errorf("Storage manager not initialized before requesting store for %s/%s", dataname, root)
    }
    dataid := dvid.GetDataSpecifier(dataname, root)
    store, found := manager.instanceStore[dataid]
    var err error
    if !found {
        store, err = assignedStoreByType(typename)
        if err != nil {
            return nil, fmt.Errorf("Cannot get assigned store for data %q, type %q", dataname, typename)
        }
    }

    // See if this is using caching and if so, establish a wrapper around it.
    if _, supported := manager.gcache.supported[dataid]; supported {
        store, err = wrapGroupcache(store, manager.gcache.cache)
        if err != nil {
            dvid.Errorf("Unable to wrap groupcache around store %s for data instance %q (uuid %s): %v\n", store, dataname, root, err)
        } else {
            dvid.Infof("Returning groupcache-wrapped store %s for data instance %q @ %s\n", store, dataname, root)
        }
    }
    return store, nil
}