This article collects typical code examples of Golang's restic/debug.Log function. If you have been wondering what the Log function does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following shows 15 code examples of the Log function, taken from the restic code base and ordered by popularity by default.
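Before the examples, here is a minimal sketch of the call pattern they all share; it is not taken from restic itself. It assumes the in-tree import path restic/debug used by the restic versions these examples were extracted from, and that debug.Log accepts a printf-style format string followed by its arguments. (Some of the older examples below pass an extra leading string such as "sftp.Save" naming the calling function; that was the signature of earlier restic versions.) Note also that restic normally emits debug output only when debug logging is enabled, for example via the DEBUG_LOG environment variable, depending on version and build.

package example

import "restic/debug"

// logExample is a hypothetical function (not from restic); it only shows the
// printf-style call pattern used throughout the examples below.
func logExample(name string, size int) {
	debug.Log("processing %v (%d bytes)", name, size)
}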
Example 1: LoadTree
// LoadTree loads a tree from the repository.
func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) {
debug.Log("load tree %v", id.Str())
size, err := r.idx.LookupSize(id, restic.TreeBlob)
if err != nil {
return nil, err
}
debug.Log("size is %d, create buffer", size)
buf := make([]byte, size)
n, err := r.loadBlob(id, restic.TreeBlob, buf)
if err != nil {
return nil, err
}
buf = buf[:n]
t := &restic.Tree{}
err = json.Unmarshal(buf, t)
if err != nil {
return nil, err
}
return t, nil
}
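A hedged usage sketch for LoadTree: the helper below is hypothetical and not part of the listing; it assumes a *Repository value from the same package, the Nodes field of restic.Tree, and an imported "fmt" package.

// printTree is a hypothetical helper that loads a tree and prints the names
// of its entries.
func printTree(repo *Repository, id restic.ID) error {
	tree, err := repo.LoadTree(id)
	if err != nil {
		return err
	}
	for _, node := range tree.Nodes {
		fmt.Println(node.Name)
	}
	return nil
}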
Example 2: lockRepository
func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock, error) {
	lockFn := restic.NewLock
	if exclusive {
		lockFn = restic.NewExclusiveLock
	}

	lock, err := lockFn(repo)
	if err != nil {
		return nil, err
	}
	debug.Log("create lock %p (exclusive %v)", lock, exclusive)

	globalLocks.Lock()
	if globalLocks.cancelRefresh == nil {
		debug.Log("start goroutine for lock refresh")
		globalLocks.cancelRefresh = make(chan struct{})
		globalLocks.refreshWG = sync.WaitGroup{}
		globalLocks.refreshWG.Add(1)
		go refreshLocks(&globalLocks.refreshWG, globalLocks.cancelRefresh)
	}

	globalLocks.locks = append(globalLocks.locks, lock)
	globalLocks.Unlock()

	return lock, err
}
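A hedged sketch of how lockRepository might be wrapped; the helper is hypothetical, and the deferred Unlock call assumes the restic.Lock API.

// withExclusiveLock is a hypothetical wrapper: it takes an exclusive lock,
// runs fn while the lock is held, and releases the lock afterwards.
func withExclusiveLock(repo *repository.Repository, fn func() error) error {
	lock, err := lockRepository(repo, true)
	if err != nil {
		return err
	}
	defer lock.Unlock()
	return fn()
}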
Example 3: refreshLocks
func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("start")
defer func() {
wg.Done()
globalLocks.Lock()
globalLocks.cancelRefresh = nil
globalLocks.Unlock()
}()
ticker := time.NewTicker(refreshInterval)
for {
select {
case <-done:
debug.Log("terminate")
return
case <-ticker.C:
debug.Log("refreshing locks")
globalLocks.Lock()
for _, lock := range globalLocks.locks {
err := lock.Refresh()
if err != nil {
fmt.Fprintf(os.Stderr, "unable to refresh lock: %v\n", err)
}
}
globalLocks.Unlock()
}
}
}
Example 4: Save
// Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	filename, tmpfile, err := r.tempFile()
	if err != nil {
		return err
	}

	debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename)

	n, err := tmpfile.Write(p)
	if err != nil {
		return err
	}

	if n != len(p) {
		return errors.New("not all bytes written")
	}

	err = tmpfile.Close()
	if err != nil {
		return err
	}

	err = r.renameFile(filename, h.Type, h.Name)
	debug.Log("sftp.Save", "save %v: rename %v: %v",
		h, filepath.Base(filename), err)
	if err != nil {
		return fmt.Errorf("sftp: renameFile: %v", err)
	}

	return nil
}
Example 5: Tree
// Tree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func Tree(repo TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) {
debug.Log("start on %v, start workers", id.Str())
load := func(id restic.ID) (*restic.Tree, error) {
tree, err := repo.LoadTree(id)
if err != nil {
return nil, err
}
return tree, nil
}
ch := make(chan loadTreeJob)
var wg sync.WaitGroup
for i := 0; i < loadTreeWorkers; i++ {
wg.Add(1)
go loadTreeWorker(&wg, ch, load, done)
}
tw := NewTreeWalker(ch, jobCh)
tw.Walk("", id, done)
close(jobCh)
close(ch)
wg.Wait()
debug.Log("done")
}
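A hedged consumer sketch for the walker above; the helper is hypothetical and relies only on the convention visible in Tree itself, namely that jobCh is closed once the walk is finished.

// walkAll is a hypothetical consumer: it starts the walker in a goroutine and
// drains every TreeJob until the walker closes the job channel.
func walkAll(repo TreeLoader, id restic.ID) {
	done := make(chan struct{})
	defer close(done)

	jobCh := make(chan TreeJob)
	go Tree(repo, id, done, jobCh)

	for job := range jobCh {
		_ = job // process each file/directory job here
	}
}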
Example 6: Stale
// Stale returns true if the lock is stale. A lock is stale if the timestamp is
// older than 30 minutes or if it was created on the current machine and the
// process isn't alive any more.
func (l *Lock) Stale() bool {
debug.Log("testing if lock %v for process %d is stale", l, l.PID)
if time.Since(l.Time) > staleTimeout {
debug.Log("lock is stale, timestamp is too old: %v\n", l.Time)
return true
}
hn, err := os.Hostname()
if err != nil {
debug.Log("unable to find current hostnanme: %v", err)
// since we cannot find the current hostname, assume that the lock is
// not stale.
return false
}
if hn != l.Hostname {
// lock was created on a different host, assume the lock is not stale.
return false
}
// check if we can reach the process retaining the lock
exists := l.processExists()
if !exists {
debug.Log("could not reach process, %d, lock is probably stale\n", l.PID)
return true
}
debug.Log("lock not stale\n")
return false
}
Example 7: repackBlob
// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
	blob, err := src.Index().Lookup(id)
	if err != nil {
		return err
	}

	debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())

	buf := make([]byte, 0, blob.PlaintextLength())
	buf, err = src.LoadBlob(blob.Type, id, buf)
	if err != nil {
		return err
	}

	if uint(len(buf)) != blob.PlaintextLength() {
		debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength())
		return errors.New("LoadBlob returned wrong data, len() doesn't match")
	}

	_, err = dst.SaveAndEncrypt(blob.Type, buf, &id)
	if err != nil {
		return err
	}

	return nil
}
Example 8: Lookup
// Lookup queries the index for the blob ID and returns a restic.PackedBlob.
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	h := restic.BlobHandle{ID: id, Type: tpe}

	if packs, ok := idx.pack[h]; ok {
		blobs = make([]restic.PackedBlob, 0, len(packs))

		for _, p := range packs {
			debug.Log("id %v found in pack %v at %d, length %d",
				id.Str(), p.packID.Str(), p.offset, p.length)

			blob := restic.PackedBlob{
				Blob: restic.Blob{
					Type:   tpe,
					Length: p.length,
					ID:     id,
					Offset: p.offset,
				},
				PackID: p.packID,
			}

			blobs = append(blobs, blob)
		}

		return blobs, nil
	}

	debug.Log("id %v not found", id.Str())
	return nil, errors.Errorf("id %v not found in index", id)
}
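A hedged sketch of calling Lookup; the helper is hypothetical, restic.DataBlob is one of the standard blob types, and the PackID and Offset fields are the ones populated by the code above. An imported "fmt" package is assumed.

// findDataBlob is a hypothetical helper that prints where a data blob is
// stored.
func findDataBlob(idx *Index, id restic.ID) error {
	blobs, err := idx.Lookup(id, restic.DataBlob)
	if err != nil {
		return err
	}
	for _, pb := range blobs {
		fmt.Printf("blob %v in pack %v at offset %d\n", id.Str(), pb.PackID.Str(), pb.Offset)
	}
	return nil
}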
Example 9: Open
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
debug.Log("open, config %#v", cfg)
client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
if err != nil {
return nil, errors.Wrap(err, "minio.New")
}
be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
be.createConnections()
ok, err := client.BucketExists(cfg.Bucket)
if err != nil {
debug.Log("BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
return nil, errors.Wrap(err, "client.BucketExists")
}
if !ok {
// create new bucket with default ACL in default region
err = client.MakeBucket(cfg.Bucket, "")
if err != nil {
return nil, errors.Wrap(err, "client.MakeBucket")
}
}
return be, nil
}
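A hedged sketch of building the Config passed to Open; the field names are inferred from their use inside Open above, and all values are placeholders.

// openExample is a hypothetical helper that wires up a Config with placeholder
// credentials and opens the backend.
func openExample() (restic.Backend, error) {
	cfg := Config{
		Endpoint: "s3.amazonaws.com",
		KeyID:    "ACCESS-KEY-ID",
		Secret:   "SECRET-ACCESS-KEY",
		Bucket:   "restic-backups",
		Prefix:   "restic",
		UseHTTP:  false,
	}
	return Open(cfg)
}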
Example 10: Stat
// Stat returns information about a blob.
func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("%v", h)
path := be.s3path(h.Type, h.Name)
var obj *minio.Object
obj, err = be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("GetObject() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
// make sure that the object is closed properly.
defer func() {
e := obj.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
fi, err := obj.Stat()
if err != nil {
debug.Log("Stat() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return restic.FileInfo{Size: fi.Size}, nil
}
Example 11: Save
// Save stores data in the backend at the handle.
func (be s3) Save(h restic.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	debug.Log("%v with %d bytes", h, len(p))

	path := be.s3path(h.Type, h.Name)

	// Check key does not already exist
	_, err = be.client.StatObject(be.bucketname, path)
	if err == nil {
		debug.Log("%v already exists", h)
		return errors.New("key already exists")
	}

	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("PutObject(%v, %v, %v, %v)",
		be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("%v -> %v bytes, err %#v", path, n, err)

	return errors.Wrap(err, "client.PutObject")
}
Example 12: findPacker
// findPacker returns a packer for a new blob of size bytes. Either a new one is
// created or one is returned that already has some blobs.
func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
	r.pm.Lock()
	defer r.pm.Unlock()

	// search for a suitable packer
	if len(r.packs) > 0 {
		debug.Log("searching packer for %d bytes\n", size)
		for i, p := range r.packs {
			if p.Size()+size < maxPackSize {
				debug.Log("found packer %v", p)
				// remove from list
				r.packs = append(r.packs[:i], r.packs[i+1:]...)
				return p, nil
			}
		}
	}

	// no suitable packer found, return new
	debug.Log("create new pack for %d bytes", size)
	tmpfile, err := ioutil.TempFile("", "restic-temp-pack-")
	if err != nil {
		return nil, errors.Wrap(err, "ioutil.TempFile")
	}

	return pack.NewPacker(r.key, tmpfile), nil
}
Example 13: Dump
// Dump writes the pretty-printed JSON representation of the index to w.
func (idx *Index) Dump(w io.Writer) error {
debug.Log("Index.Dump", "dumping index")
idx.m.Lock()
defer idx.m.Unlock()
list, err := idx.generatePackList()
if err != nil {
return err
}
outer := jsonIndex{
Supersedes: idx.Supersedes(),
Packs: list,
}
buf, err := json.MarshalIndent(outer, "", " ")
if err != nil {
return err
}
_, err = w.Write(append(buf, '\n'))
if err != nil {
return err
}
debug.Log("Index.Dump", "done")
return nil
}
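A hedged usage sketch for Dump; the helper is hypothetical and os.Stdout is just an example io.Writer.

// dumpIndex is a hypothetical helper that writes the index as indented JSON to
// standard output.
func dumpIndex(idx *Index) error {
	return idx.Dump(os.Stdout)
}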
Example 14: savePacker
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
data, err := p.Finalize()
if err != nil {
return err
}
id := backend.Hash(data)
h := backend.Handle{Type: backend.Data, Name: id.String()}
err = r.be.Save(h, data)
if err != nil {
debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
return err
}
debug.Log("Repo.savePacker", "saved as %v", h)
// update blobs in the index
for _, b := range p.Blobs() {
debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str())
r.idx.Current().Store(PackedBlob{
Type: b.Type,
ID: b.ID,
PackID: id,
Offset: b.Offset,
Length: uint(b.Length),
})
}
return nil
}
Example 15: Copy
func (j archiveJob) Copy() pipe.Job {
	if !j.hasOld {
		return j.new
	}

	// handle files
	if isRegularFile(j.new.Info()) {
		debug.Log("archiveJob.Copy", " job %v is file", j.new.Path())

		// if type has changed, return new job directly
		if j.old.Node == nil {
			return j.new
		}

		// if file is newer, return the new job
		if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) {
			debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path())
			return j.new
		}

		debug.Log("archiveJob.Copy", " job %v add old data", j.new.Path())
		// otherwise annotate job with old data
		e := j.new.(pipe.Entry)
		e.Node = j.old.Node
		return e
	}

	// dirs and other types are just returned
	return j.new
}