This article collects typical usage examples of the Repository.Flush method from the Golang package restic/repository. If you are asking yourself: what exactly does Repository.Flush do, how is it used, and what do real calls look like? then the curated code examples below may help. You can also explore further usage examples of the containing type, restic/repository.Repository.
Below are 3 code examples of the Repository.Flush method, sorted by popularity by default. You can vote for the examples you like or find useful; your feedback helps the system recommend better Golang code samples.
Example 1: TestCreateSnapshot
// TestCreateSnapshot creates a snapshot filled with fake data. The
// fake data is generated deterministically from the timestamp `at`, which is
// also used as the snapshot's timestamp.
func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) backend.ID {
	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
	snapshot, err := NewSnapshot([]string{fakedir})
	if err != nil {
		t.Fatal(err)
	}
	snapshot.Time = at

	treeID := saveTree(t, repo, at.UnixNano())
	snapshot.Tree = &treeID

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
	if err != nil {
		t.Fatal(err)
	}

	t.Logf("saved snapshot %v", id.Str())

	err = repo.Flush()
	if err != nil {
		t.Fatal(err)
	}

	err = repo.SaveIndex()
	if err != nil {
		t.Fatal(err)
	}

	return id
}
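A usage sketch follows. It assumes a test helper such as repository.TestRepository that hands out a throwaway repository plus a cleanup function; the actual helper name and signature vary across restic versions, so treat this as illustrative rather than the project's API:

func TestCreateSnapshotSmoke(t *testing.T) {
	// repository.TestRepository is assumed here: a hypothetical test helper
	// returning a temporary repository and a cleanup function.
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// The timestamp seeds the deterministic fake data and becomes the
	// snapshot's own timestamp.
	at := time.Date(2016, 1, 2, 15, 4, 5, 0, time.UTC)
	id := TestCreateSnapshot(t, repo, at)

	// The returned ID should refer to a stored snapshot; a zero ID would
	// mean nothing was saved (backend.ID is assumed comparable here).
	if id == (backend.ID{}) {
		t.Fatalf("expected non-zero snapshot ID")
	}
}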
Example 2: RepackBlobs
// RepackBlobs reads all blobs in blobIDs from src and saves them into new pack
// files in dst. Source and destination repo may be the same.
func RepackBlobs(src, dst *repository.Repository, blobIDs backend.IDSet) (err error) {
	for id := range blobIDs {
		err = repackBlob(src, dst, id)
		if err != nil {
			return err
		}
	}

	err = dst.Flush()
	if err != nil {
		return err
	}

	return nil
}
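The unexported helper repackBlob is not shown on this page. Here is a minimal sketch of what it plausibly does, assuming a LoadBlob method of the form LoadBlob(t pack.BlobType, id backend.ID, buf []byte) ([]byte, error) and the SaveAndEncrypt method seen in the other examples; the real helper may instead look up the blob's type in the index rather than assuming pack.Data:

func repackBlob(src, dst *repository.Repository, id backend.ID) error {
	// Load and decrypt the blob from the source repository; the nil buffer
	// asks LoadBlob to allocate one of the right size (assumed behavior).
	buf, err := src.LoadBlob(pack.Data, id, nil)
	if err != nil {
		return err
	}

	// Re-encrypt the plaintext and stage it in a new pack file in the
	// destination; dst.Flush() in RepackBlobs writes out the finished packs.
	_, err = dst.SaveAndEncrypt(pack.Data, buf, &id)
	return err
}

Note how RepackBlobs calls Flush once after the loop rather than per blob, so many small blobs can be combined into fewer pack files before anything is written to the backend.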
Example 3: ArchiveReader
// ArchiveReader reads from the reader and archives the data. It returns
// the resulting snapshot and its ID.
func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
	debug.Log("ArchiveReader", "start archiving %s", name)
	sn, err := NewSnapshot([]string{name})
	if err != nil {
		return nil, backend.ID{}, err
	}

	p.Start()
	defer p.Done()

	chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)

	var ids backend.IDs
	var fileSize uint64

	for {
		chunk, err := chnker.Next(getBuf())
		if err == io.EOF {
			break
		}

		if err != nil {
			return nil, backend.ID{}, err
		}

		id := backend.Hash(chunk.Data)

		if !repo.Index().Has(id) {
			_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
			if err != nil {
				return nil, backend.ID{}, err
			}
			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
		} else {
			debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
		}

		freeBuf(chunk.Data)

		ids = append(ids, id)

		p.Report(Stat{Bytes: uint64(chunk.Length)})
		fileSize += uint64(chunk.Length)
	}

	tree := &Tree{
		Nodes: []*Node{
			&Node{
				Name:       name,
				AccessTime: time.Now(),
				ModTime:    time.Now(),
				Type:       "file",
				Mode:       0644,
				Size:       fileSize,
				UID:        sn.UID,
				GID:        sn.GID,
				User:       sn.Username,
				Content:    ids,
			},
		},
	}

	treeID, err := saveTreeJSON(repo, tree)
	if err != nil {
		return nil, backend.ID{}, err
	}

	sn.Tree = &treeID
	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
	if err != nil {
		return nil, backend.ID{}, err
	}

	sn.id = &id
	debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())

	err = repo.Flush()
	if err != nil {
		return nil, backend.ID{}, err
	}

	err = repo.SaveIndex()
	if err != nil {
		return nil, backend.ID{}, err
	}

	return sn, id, nil
}
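A usage sketch: archiving the contents of an arbitrary io.Reader as a single-file snapshot (in restic this code path backs reading a backup from stdin). Passing nil for the Progress relies on the Progress methods being nil-safe, which is stated here as an assumption about this restic version:

// repo is assumed to be an already-open *repository.Repository; strings,
// log, and fmt are standard-library imports.
sn, id, err := ArchiveReader(repo, nil, strings.NewReader("example data"), "stdin")
if err != nil {
	log.Fatal(err)
}
fmt.Printf("saved snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())

As in the other examples, the blobs are only staged by SaveAndEncrypt; repo.Flush() writes the final pack files and repo.SaveIndex() persists the index, so both must run before the snapshot is usable.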