本文整理汇总了Golang中github.com/janelia-flyem/dvid/dvid.Debugf函数的典型用法代码示例。如果您正苦于以下问题:Golang Debugf函数的具体用法?Golang Debugf怎么用?Golang Debugf使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Debugf函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: mainHandler
// mainHandler serves the web client and other static content. If a web
// client directory was configured, files are served from disk; otherwise
// they are served from resources embedded in the executable.
func mainHandler(w http.ResponseWriter, r *http.Request) {
	urlPath := r.URL.Path
	if config == nil {
		log.Fatalf("mainHandler() called when server was not configured!\n")
	}
	if config.WebClient() != "" {
		// A web client directory was specified; serve straight from disk.
		fullPath := filepath.Join(config.WebClient(), urlPath)
		dvid.Debugf("[%s] Serving from webclient directory: %s\n", r.Method, fullPath)
		http.ServeFile(w, r, fullPath)
		return
	}
	// No web client directory: fall back to embedded resources, which are
	// keyed without the leading slash.
	if len(urlPath) > 0 && urlPath[0:1] == "/" {
		urlPath = urlPath[1:]
	}
	dvid.Debugf("[%s] Serving from embedded files: %s\n", r.Method, urlPath)
	resource := nrsc.Get(urlPath)
	if resource == nil {
		http.NotFound(w, r)
		return
	}
	rsrc, err := resource.Open()
	if err != nil {
		BadRequest(w, r, err)
		return
	}
	data, err := ioutil.ReadAll(rsrc)
	if err != nil {
		BadRequest(w, r, err)
		return
	}
	dvid.SendHTTP(w, r, urlPath, data)
}
示例2: RawPut
// RawPut stores a key-value pair in KVAutobus by streaming the
// msgpack-encoded value to the remote HTTP endpoint through a pipe.
// It returns an error on encoding failure, transport failure, a key
// conflict (HTTP 409), or any non-200 status.
func (db *KVAutobus) RawPut(key storage.Key, value []byte) error {
	b64key := encodeKey(key)
	url := fmt.Sprintf("%s/kvautobus/api/value/%s/", db.host, b64key)
	bin := Binary(value)

	dvid.Debugf("Begin RawPut on key %s (%d bytes)\n", hex.EncodeToString(key), len(bin))

	// Create pipe from encoding to posting
	pr, pw := io.Pipe()
	w := msgp.NewWriter(pw)
	go func() {
		dvid.Debugf("Starting msgpack encoding...\n")
		// Propagate any encode/flush error to the reading side so the POST
		// fails instead of silently sending truncated data. (The original
		// code discarded these errors and always closed the pipe cleanly.)
		err := bin.EncodeMsg(w)
		if err == nil {
			err = w.Flush()
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
		dvid.Debugf("Done msgpack encoding.\n")
	}()

	dvid.Debugf("Beginning POST to kvautobus: %s\n", url)
	resp, err := http.Post(url, "application/x-msgpack", pr)
	dvid.Debugf("Done POST with err %v\n", err)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusConflict {
		return fmt.Errorf("Can't POST to an already stored key. KVAutobus returned status %d (%s)", resp.StatusCode, url)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("Bad status code returned (%d) from put request: %s", resp.StatusCode, url)
	}
	return nil
}
示例3: Close
// Close waits for any outstanding jobs, tells the remote that this session
// has ended, and stops the underlying client. The error (if any) from the
// end-session RPC is returned.
func (s *Session) Close() error {
	dvid.Debugf("session %d close: waiting for any jobs to complete...\n", s.id)
	s.Wait()

	dvid.Debugf("sending session end to remote...\n")
	_, callErr := s.dc.Call(sendEndSession, s.id)

	// Stop the client regardless of whether the remote call succeeded.
	dvid.Debugf("stopping client...\n")
	s.c.Stop()
	return callErr
}
示例4: NewFilter
// NewFilter returns a Filter for use with a push of key-value pairs.
// A nil Filter with nil error means no ROI applied, so callers should fall
// back to a generic data push.
func (d *Data) NewFilter(fs storage.FilterSpec) (storage.Filter, error) {
	it, _, found, err := roi.NewIteratorBySpec(fs, d)
	switch {
	case err != nil:
		dvid.Debugf("No filter found that was parsable: %s\n", fs)
		return nil, err
	case !found || it == nil:
		dvid.Debugf("No ROI found so using generic data push for data %q.\n", d.DataName())
		return nil, nil
	default:
		return &Filter{d, fs, it}, nil
	}
}
示例5: getDeltaAll
// compares remote Repo with local one, determining a list of versions that
// need to be sent from remote to bring the local DVID up-to-date.
//
// NOTE(review): the uuid parameter is unused in this function body.
func getDeltaAll(remote *repoT, uuid dvid.UUID) (map[dvid.VersionID]struct{}, error) {
	// Determine all version ids of remote DAG nodes that aren't in the local DAG.
	// Since VersionID can differ among DVID servers, we need to compare using UUIDs
	// then convert to VersionID.
	delta := make(map[dvid.VersionID]struct{})
	for _, rnode := range remote.dag.nodes {
		lv, found := manager.uuidToVersion[rnode.uuid]
		if found {
			// Local server already has this version; nothing to transfer.
			dvid.Debugf("Both remote and local have uuid %s... skipping\n", rnode.uuid)
		} else {
			// NOTE(review): when !found, lv is the zero VersionID, so every
			// remote-only version collapses onto the same map key and the
			// "local version id" logged below is meaningless. This looks like
			// a bug — confirm which identifier should represent a version
			// that does not yet exist locally.
			dvid.Debugf("Found version %s in remote not in local: sending local version id %d\n", rnode.uuid, lv)
			delta[lv] = struct{}{}
		}
	}
	return delta, nil
}
示例6: MergeLabels
// MergeLabels handles merging of any number of labels throughout the various label data
// structures. It assumes that the merges aren't cascading, e.g., there is no attempt
// to merge label 3 into 4 and also 4 into 5. The caller should have flattened the merges.
// TODO: Provide some indication that subset of labels are under evolution, returning
// an "unavailable" status or 203 for non-authoritative response. This might not be
// feasible for clustered DVID front-ends due to coordination issues.
//
// EVENTS
//
// labels.MergeStartEvent occurs at very start of merge and transmits labels.DeltaMergeStart struct.
//
// labels.MergeBlockEvent occurs for every block of a merged label and transmits labels.DeltaMerge struct.
//
// labels.MergeEndEvent occurs at end of merge and transmits labels.DeltaMergeEnd struct.
//
func (d *Data) MergeLabels(v dvid.VersionID, m labels.MergeOp) error {
	dvid.Debugf("Merging %s into label %d ...\n", m.Merged, m.Target)

	// Broadcast the start-of-merge event to all subscribers.
	startEvt := datastore.SyncEvent{d.DataUUID(), labels.MergeStartEvent}
	startMsg := datastore.SyncMessage{labels.MergeStartEvent, v, labels.DeltaMergeStart{m}}
	if err := datastore.NotifySubscribers(startEvt, startMsg); err != nil {
		return err
	}

	// Mark the affected labels dirty for the duration of the asynchronous
	// merge, so concurrent requests are served via the cache map until
	// labelvol and labelblk are updated and consistent.
	if err := labels.MergeStart(d.getMergeIV(v), m); err != nil {
		return err
	}
	go func() {
		d.asyncMergeLabels(v, m)
		// Clear the dirty labels and updating flag when done.
		labels.MergeStop(d.getMergeIV(v), m)
	}()
	return nil
}
示例7: SetWriteBufferSize
// SetWriteBufferSize sets the amount of data to build up in memory (backed
// by an unsorted log on disk) before converting to a sorted on-disk file.
//
// Larger values increase performance, especially during bulk loads.
// Up to two write buffers may be held in memory at the same time,
// so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time
// the next time the database is opened.
func (opts *leveldbOptions) SetWriteBufferSize(nBytes int) {
	if nBytes == opts.writeBufferSize {
		return // no change requested
	}
	dvid.Debugf("Write buffer set to %d bytes.\n", nBytes)
	opts.Options.SetWriteBufferSize(nBytes)
	opts.writeBufferSize = nBytes
}
示例8: SetBlockSize
// SetBlockSize sets the approximate size of user data packed per block.
// Note that the block size specified here corresponds to uncompressed data;
// the actual size of the unit read from disk may be smaller if compression
// is enabled. This parameter can be changed dynamically.
func (opts *leveldbOptions) SetBlockSize(nBytes int) {
	if nBytes == opts.blockSize {
		return // no change requested
	}
	dvid.Debugf("Block size set to %d bytes.\n", nBytes)
	opts.Options.SetBlockSize(nBytes)
	opts.blockSize = nBytes
}
示例9: GetSparseCoarseVol
// GetSparseCoarseVol returns an encoded sparse volume given a label. The encoding has the
// following format where integers are little endian:
//    byte     Set to 0
//    uint8    Number of dimensions
//    uint8    Dimension of run (typically 0 = X)
//    byte     Reserved (to be used later)
//    uint32   # Blocks [TODO. 0 for now]
//    uint32   # Spans
//    Repeating unit of:
//        int32   Block coordinate of run start (dimension 0)
//        int32   Block coordinate of run start (dimension 1)
//        int32   Block coordinate of run start (dimension 2)
//        int32   Length of run
//
func GetSparseCoarseVol(ctx storage.Context, label uint64) ([]byte, error) {
	store, err := storage.SmallDataStore()
	if err != nil {
		return nil, fmt.Errorf("Data type labelvol had error initializing store: %v\n", err)
	}

	// Create the sparse volume header.
	buf := new(bytes.Buffer)
	buf.WriteByte(dvid.EncodingBinary)
	binary.Write(buf, binary.LittleEndian, uint8(3))  // # of dimensions
	binary.Write(buf, binary.LittleEndian, byte(0))   // dimension of run (X = 0)
	buf.WriteByte(byte(0))                            // reserved for later
	binary.Write(buf, binary.LittleEndian, uint32(0)) // Placeholder for # blocks
	encoding := buf.Bytes()

	// Get the start/end indices for this body's KeyLabelSpatialMap (b + s) keys.
	begTKey := NewTKey(label, dvid.MinIndexZYX.ToIZYXString())
	endTKey := NewTKey(label, dvid.MaxIndexZYX.ToIZYXString())

	// Process all the b+s keys and their values, which contain RLE runs for that label.
	var numBlocks uint32
	var span *dvid.Span
	var spans dvid.Spans
	keys, err := store.KeysInRange(ctx, begTKey, endTKey)
	if err != nil {
		return nil, fmt.Errorf("Cannot get keys for coarse sparse volume: %v", err)
	}
	for _, tk := range keys {
		numBlocks++
		_, blockStr, err := DecodeTKey(tk)
		if err != nil {
			return nil, fmt.Errorf("Error retrieving RLE runs for label %d: %v", label, err)
		}
		indexZYX, err := blockStr.IndexZYX()
		if err != nil {
			return nil, fmt.Errorf("Error decoding block coordinate (%v) for sparse volume: %v\n", blockStr, err)
		}
		x, y, z := indexZYX.Unpack()
		if span == nil {
			span = &dvid.Span{z, y, x, x}
		} else if !span.Extends(x, y, z) {
			// Current run ended: save it and start a new run at this block.
			spans = append(spans, *span)
			span = &dvid.Span{z, y, x, x}
		}
	}
	// NOTE: a redundant `if err != nil` check formerly sat here; every error
	// inside the loop returns immediately, so it was dead code and is removed.
	if span != nil {
		spans = append(spans, *span) // flush the final open run
	}
	// Spans.MarshalBinary is assumed to emit the uint32 span count followed by
	// the int32 run records described in the header comment — TODO confirm.
	spansBytes, err := spans.MarshalBinary()
	if err != nil {
		return nil, err
	}
	encoding = append(encoding, spansBytes...)
	dvid.Debugf("[%s] coarse subvol for label %d: found %d blocks\n", ctx, label, numBlocks)
	return encoding, nil
}
示例10: writeBlocks
// TODO -- Clean up all the writing and simplify now that we have block-aligned writes.
// writeBlocks ingests blocks of voxel data asynchronously using batch writes.
// It returns immediately after launching the write goroutine; wg1 and wg2 are
// decremented when the asynchronous write completes. A server handler token
// is held for the duration of the write to bound concurrency.
func (d *Data) writeBlocks(v dvid.VersionID, b storage.TKeyValues, wg1, wg2 *sync.WaitGroup) error {
	batcher, err := d.GetKeyValueBatcher()
	if err != nil {
		return err
	}
	// Byte totals before/after compression, reported in the final debug log.
	preCompress, postCompress := 0, 0

	ctx := datastore.NewVersionedCtx(d, v)
	evt := datastore.SyncEvent{d.DataUUID(), IngestBlockEvent}

	// Acquire a handler token; it is returned in the deferred func below.
	<-server.HandlerToken
	go func() {
		defer func() {
			wg1.Done()
			wg2.Done()
			dvid.Debugf("Wrote voxel blocks. Before %s: %d bytes. After: %d bytes\n", d.Compression(), preCompress, postCompress)
			server.HandlerToken <- 1
		}()
		mutID := d.NewMutationID()
		batch := batcher.NewBatch(ctx)
		for i, block := range b {
			serialization, err := dvid.SerializeData(block.V, d.Compression(), d.Checksum())
			preCompress += len(block.V)
			postCompress += len(serialization)
			if err != nil {
				// Errors inside the goroutine are logged, not returned: the
				// caller has already received nil from writeBlocks.
				dvid.Errorf("Unable to serialize block: %v\n", err)
				return
			}
			batch.Put(block.K, serialization)
			indexZYX, err := DecodeTKey(block.K)
			if err != nil {
				dvid.Errorf("Unable to recover index from block key: %v\n", block.K)
				return
			}
			// Notify subscribers (e.g. sync'd label data) of each ingested block.
			msg := datastore.SyncMessage{IngestBlockEvent, v, Block{indexZYX, block.V, mutID}}
			if err := datastore.NotifySubscribers(evt, msg); err != nil {
				dvid.Errorf("Unable to notify subscribers of ChangeBlockEvent in %s\n", d.DataName())
				return
			}
			// Check if we should commit
			if i%KVWriteSize == KVWriteSize-1 {
				if err := batch.Commit(); err != nil {
					dvid.Errorf("Error on trying to write batch: %v\n", err)
					return
				}
				batch = batcher.NewBatch(ctx)
			}
		}
		// Commit whatever remains in the final (possibly empty) batch.
		if err := batch.Commit(); err != nil {
			dvid.Errorf("Error on trying to write batch: %v\n", err)
			return
		}
	}()
	return nil
}
示例11: RegisterEngine
// RegisterEngine registers an Engine for DVID use, keyed by its name.
// A later registration with the same name replaces the earlier one.
func RegisterEngine(e Engine) {
	dvid.Debugf("Engine %q registered with DVID server.\n", e)
	if availEngines == nil {
		availEngines = make(map[string]Engine)
	}
	availEngines[e.GetName()] = e
}
示例12: DeleteRange
// DeleteRange removes all key-value pairs with keys in the given range.
// For versioned contexts the deletes are tombstones at the current version;
// unversioned contexts get direct deletes.
func (db *LevelDB) DeleteRange(ctx storage.Context, kStart, kEnd storage.TKey) error {
	if ctx == nil {
		return fmt.Errorf("Received nil context in DeleteRange()")
	}
	// For leveldb, we just iterate over keys in range and delete each one using batch.
	const BATCH_SIZE = 10000
	batch := db.NewBatch(ctx).(*goBatch)

	ch := make(chan errorableKV)

	// Run the keys-only range query in a goroutine.
	// NOTE(review): an early return from the consuming loop below leaves this
	// producer goroutine blocked sending on the unbuffered channel — confirm
	// whether the range helpers bail out, or this leaks on error paths.
	go func() {
		// NOTE(review): ctx == nil is impossible here (checked above), so the
		// first half of this condition is dead code.
		if ctx == nil || !ctx.Versioned() {
			db.unversionedRange(ctx, kStart, kEnd, ch, true)
		} else {
			db.versionedRange(ctx.(storage.VersionedCtx), kStart, kEnd, ch, true)
		}
	}()

	// Consume the key-value pairs.
	numKV := 0
	for {
		result := <-ch
		if result.KeyValue == nil {
			// A nil KeyValue signals the end of the range query.
			break
		}
		if result.error != nil {
			return result.error
		}
		// The key coming down channel is not index but full key, so no need to construct key using context.
		// If versioned, write a tombstone using current version id since we don't want to delete locked ancestors.
		// If unversioned, just delete.
		tk, err := ctx.TKeyFromKey(result.KeyValue.K)
		if err != nil {
			return err
		}
		batch.Delete(tk)
		// Commit in chunks of BATCH_SIZE to bound batch memory.
		if (numKV+1)%BATCH_SIZE == 0 {
			if err := batch.Commit(); err != nil {
				batch.Close()
				return fmt.Errorf("Error on batch DELETE at key-value pair %d: %v\n", numKV, err)
			}
			batch = db.NewBatch(ctx).(*goBatch)
		}
		numKV++
	}
	// Commit any final partial batch.
	if numKV%BATCH_SIZE != 0 {
		if err := batch.Commit(); err != nil {
			batch.Close()
			return fmt.Errorf("Error on last batch DELETE: %v\n", err)
		}
	}
	// NOTE(review): batch is never Close()d on success paths — confirm whether
	// goBatch requires Close after Commit to avoid a resource leak.
	dvid.Debugf("Deleted %d key-value pairs via delete range for %s.\n", numKV, ctx)
	return nil
}
示例13: DoRPC
// DoRPC acts as a switchboard for RPC commands.
// Supported commands: "load" (ingest image files at an offset into a version)
// and "composite" (delegated to CreateComposite). Unknown commands error.
func (d *Data) DoRPC(req datastore.Request, reply *datastore.Response) error {
	switch req.TypeCommand() {
	case "load":
		if len(req.Command) < 5 {
			return fmt.Errorf("Poorly formatted load command. See command-line help.")
		}
		// Parse the request
		var uuidStr, dataName, cmdStr, offsetStr string
		filenames, err := req.FilenameArgs(1, &uuidStr, &dataName, &cmdStr, &offsetStr)
		if err != nil {
			return err
		}
		if len(filenames) == 0 {
			return fmt.Errorf("Need to include at least one file to add: %s", req)
		}

		// Get offset
		offset, err := dvid.StringToPoint(offsetStr, ",")
		if err != nil {
			return fmt.Errorf("Illegal offset specification: %s: %v", offsetStr, err)
		}

		// Get list of files to add
		var addedFiles string
		if len(filenames) == 1 {
			addedFiles = filenames[0]
		} else {
			addedFiles = fmt.Sprintf("filenames: %s [%d more]", filenames[0], len(filenames)-1)
		}
		// Use an explicit format string: filenames may contain '%', which a
		// printf-style logger would misinterpret if passed as the format.
		dvid.Debugf("%s\n", addedFiles)

		uuid, versionID, err := datastore.MatchingUUID(uuidStr)
		if err != nil {
			return err
		}
		if err = datastore.AddToNodeLog(uuid, []string{req.Command.String()}); err != nil {
			return err
		}
		if err = d.LoadImages(versionID, offset, filenames); err != nil {
			return err
		}
		if err := datastore.SaveDataByUUID(uuid, d); err != nil {
			return err
		}
		return nil

	case "composite":
		if len(req.Command) < 6 {
			return fmt.Errorf("Poorly formatted composite command. See command-line help.")
		}
		return d.CreateComposite(req, reply)

	default:
		// All cases return, so no trailing return is needed (the original's
		// final `return nil` was unreachable).
		return fmt.Errorf("Unknown command. Data type '%s' [%s] does not support '%s' command.",
			d.DataName(), d.TypeName(), req.TypeCommand())
	}
}
示例14: Close
// Close completes a push: it logs transfer statistics and registers the
// received repo with the current DVID server.
func (p *pusher) Close() error {
	const bytesPerGB = 1000000000
	gbReceived := float64(p.received) / bytesPerGB
	dvid.Debugf("Closing push of uuid %s: received %.1f GBytes in %s\n", p.repo.uuid, gbReceived, time.Since(p.startTime))
	// Add this repo to current DVID server
	return manager.addRepo(p.repo)
}
示例15: SetLRUCacheSize
// SetLRUCacheSize sets the size of the LRU cache that caches frequently used
// uncompressed blocks. (The comment previously named the wrong method, "SetCache".)
func (opts *leveldbOptions) SetLRUCacheSize(nBytes int) {
	if nBytes != opts.nLRUCacheBytes {
		// NOTE(review): the old cache is closed before the replacement is
		// installed on opts.Options — confirm levigo tolerates this ordering
		// while the options still reference the closed cache.
		if opts.cache != nil {
			opts.cache.Close()
		}
		dvid.Debugf("LRU cache size set to %d bytes.\n", nBytes)
		opts.cache = levigo.NewLRUCache(nBytes)
		opts.nLRUCacheBytes = nBytes
		opts.Options.SetCache(opts.cache)
	}
}