This article collects typical code examples of the Golang FileInfo.Size method from github.com/syncthing/syncthing/lib/protocol. If you are wondering what FileInfo.Size does, how to call it, or what real-world usage looks like, the selected examples below should help. You can also explore further usage examples of the containing type, github.com/syncthing/syncthing/lib/protocol.FileInfo.
Five code examples of the FileInfo.Size method are shown below, ordered by popularity by default.
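For orientation, here is a minimal sketch of the call itself, modelled on examples 1 and 5 below and assuming the XDR-era lib/protocol package they use; the data variable is only a placeholder for a raw value read from the index database:

    var f protocol.FileInfo
    if err := f.UnmarshalXDR(data); err != nil { // data: illustrative placeholder
        log.Fatal(err)
    }
    // Size() reports the size, in bytes, that the index records for this file.
    fmt.Println(f.Name, f.Size())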
Example 1: main
func main() {
    log.SetFlags(0)
    log.SetOutput(os.Stdout)
    flag.Parse()
    ldb, err := leveldb.OpenFile(flag.Arg(0), &opt.Options{
        ErrorIfMissing:         true,
        Strict:                 opt.StrictAll,
        OpenFilesCacheCapacity: 100,
    })
    if err != nil {
        log.Fatal(err)
    }
    it := ldb.NewIterator(nil, nil)
    var dev protocol.DeviceID
    for it.Next() {
        key := it.Key()
        switch key[0] {
        case db.KeyTypeDevice:
            folder := nulString(key[1 : 1+64])
            devBytes := key[1+64 : 1+64+32]
            name := nulString(key[1+64+32:])
            copy(dev[:], devBytes)
            fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)
            var f protocol.FileInfo
            err := f.UnmarshalXDR(it.Value())
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf(" N:%q\n F:%#o\n M:%d\n V:%v\n S:%d\n B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
        case db.KeyTypeGlobal:
            folder := nulString(key[1 : 1+64])
            name := nulString(key[1+64:])
            fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())
        case db.KeyTypeBlock:
            folder := nulString(key[1 : 1+64])
            hash := key[1+64 : 1+64+32]
            name := nulString(key[1+64+32:])
            fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
        case db.KeyTypeDeviceStatistic:
            fmt.Printf("[dstat]\n %x\n %x\n", it.Key(), it.Value())
        case db.KeyTypeFolderStatistic:
            fmt.Printf("[fstat]\n %x\n %x\n", it.Key(), it.Value())
        default:
            fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
        }
    }
}
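Examples 1 and 5 rely on a nulString helper that is not shown here. A plausible sketch, assuming the fixed-width key fields are padded with NUL bytes as the slicing above suggests:

    // nulString returns the string content of a NUL-padded, fixed-width field.
    func nulString(bs []byte) string {
        for i, b := range bs {
            if b == 0 {
                return string(bs[:i])
            }
        }
        return string(bs)
    }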
Example 2: handleFile
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
    curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)
    if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
        // We are supposed to copy the entire file, and then fetch nothing. We
        // are only updating metadata, so we don't actually *need* to make the
        // copy.
        l.Debugln(p, "taking shortcut on", file.Name)
        events.Default.Log(events.ItemStarted, map[string]string{
            "folder": p.folder,
            "item": file.Name,
            "type": "file",
            "action": "metadata",
        })
        p.queue.Done(file.Name)
        var err error
        if file.IsSymlink() {
            err = p.shortcutSymlink(file)
        } else {
            err = p.shortcutFile(file)
        }
        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item": file.Name,
            "error": events.Error(err),
            "type": "file",
            "action": "metadata",
        })
        if err != nil {
            l.Infoln("Puller: shortcut:", err)
            p.newError(file.Name, err)
        } else {
            p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
        }
        return
    }
    if free, err := osutil.DiskFreeBytes(p.dir); err == nil && free < file.Size() {
        l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, p.folder, p.dir, file.Name, float64(free)/1024/1024, float64(file.Size())/1024/1024)
        p.newError(file.Name, errors.New("insufficient space"))
        return
    }
    events.Default.Log(events.ItemStarted, map[string]string{
        "folder": p.folder,
        "item": file.Name,
        "type": "file",
        "action": "update",
    })
    scanner.PopulateOffsets(file.Blocks)
    // Figure out the absolute filenames we need once and for all
    tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
    realName := filepath.Join(p.dir, file.Name)
    reused := 0
    var blocks []protocol.BlockInfo
    // Check for an old temporary file which might have some blocks we could
    // reuse.
    tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, 0, nil)
    if err == nil {
        // Check for any reusable blocks in the temp file
        tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)
        // block.String() returns a string unique to the block
        existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
        for _, block := range tempCopyBlocks {
            existingBlocks[block.String()] = struct{}{}
        }
        // Since the blocks are already there, we don't need to get them.
        for _, block := range file.Blocks {
            _, ok := existingBlocks[block.String()]
            if !ok {
                blocks = append(blocks, block)
            }
        }
        // The sharedpullerstate will know which flags to use when opening the
        // temp file depending if we are reusing any blocks or not.
        reused = len(file.Blocks) - len(blocks)
        if reused == 0 {
            // Otherwise, discard the file ourselves in order for the
            // sharedpuller not to panic when it fails to exclusively create a
            // file which already exists
            osutil.InWritableDir(osutil.Remove, tempName)
        }
    } else {
        blocks = file.Blocks
    }
//......... (part of the code omitted here) .........
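The free-space check in this example budgets file.Size() bytes before pulling. The implementation of Size() is not shown in the source; as a rough intuition (an assumption, not the library's actual code), the value should agree with the sum of the sizes in file.Blocks:

    // totalBlockSize is a hypothetical stand-in showing how a file's size
    // relates to its block list; it is not part of syncthing.
    func totalBlockSize(f protocol.FileInfo) int64 {
        var n int64
        for _, b := range f.Blocks {
            n += int64(b.Size)
        }
        return n
    }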
Example 3: handleFile
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
    curFile, hasCurFile := p.model.CurrentFolderFile(p.folder, file.Name)
    if hasCurFile && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
        // We are supposed to copy the entire file, and then fetch nothing. We
        // are only updating metadata, so we don't actually *need* to make the
        // copy.
        l.Debugln(p, "taking shortcut on", file.Name)
        events.Default.Log(events.ItemStarted, map[string]string{
            "folder": p.folder,
            "item": file.Name,
            "type": "file",
            "action": "metadata",
        })
        p.queue.Done(file.Name)
        var err error
        if file.IsSymlink() {
            err = p.shortcutSymlink(file)
        } else {
            err = p.shortcutFile(file)
        }
        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item": file.Name,
            "error": events.Error(err),
            "type": "file",
            "action": "metadata",
        })
        if err != nil {
            l.Infoln("Puller: shortcut:", err)
            p.newError(file.Name, err)
        } else {
            p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
        }
        return
    }
    // Figure out the absolute filenames we need once and for all
    tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
    realName := filepath.Join(p.dir, file.Name)
    if hasCurFile && !curFile.IsDirectory() && !curFile.IsSymlink() {
        // Check that the file on disk is what we expect it to be according to
        // the database. If there's a mismatch here, there might be local
        // changes that we don't know about yet and we should scan before
        // touching the file. If we can't stat the file we'll just pull it.
        if info, err := osutil.Lstat(realName); err == nil {
            mtime := p.virtualMtimeRepo.GetMtime(file.Name, info.ModTime())
            if mtime.Unix() != curFile.Modified || info.Size() != curFile.Size() {
                l.Debugln("file modified but not rescanned; not pulling:", realName)
                // Scan() is synchronous (i.e. blocks until the scan is
                // completed and returns an error), but a scan can't happen
                // while we're in the puller routine. Request the scan in the
                // background and it'll be handled when the current pulling
                // sweep is complete. As we do retries, we'll queue the scan
                // for this file up to ten times, but the last nine of those
                // scans will be cheap...
                go p.Scan([]string{file.Name})
                return
            }
        }
    }
    scanner.PopulateOffsets(file.Blocks)
    reused := 0
    var blocks []protocol.BlockInfo
    var blocksSize int64
    // Check for an old temporary file which might have some blocks we could
    // reuse.
    tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, 0, nil)
    if err == nil {
        // Check for any reusable blocks in the temp file
        tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)
        // block.String() returns a string unique to the block
        existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
        for _, block := range tempCopyBlocks {
            existingBlocks[block.String()] = struct{}{}
        }
        // Since the blocks are already there, we don't need to get them.
        for _, block := range file.Blocks {
            _, ok := existingBlocks[block.String()]
            if !ok {
                blocks = append(blocks, block)
                blocksSize += int64(block.Size)
            }
        }
        // The sharedpullerstate will know which flags to use when opening the
//......... (part of the code omitted here) .........
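Compared with example 2, this variant first checks whether the on-disk file still matches the database record before pulling, using the virtual mtime and the two Size() values. Isolated as a hypothetical helper (the name and signature are illustrative, not part of syncthing), the check amounts to:

    // looksLocallyModified mirrors the comparison above: the file is treated as
    // locally changed when its mtime or size disagrees with the database entry.
    func looksLocallyModified(onDisk os.FileInfo, dbFile protocol.FileInfo, mtime time.Time) bool {
        return mtime.Unix() != dbFile.Modified || onDisk.Size() != dbFile.Size()
    }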
Example 4: walkAndHashFiles
//......... (part of the code omitted here) .........
            l.Infof(`File "%s" has UTF8 encoding conflict with another file; ignoring.`, rn)
            return skip
        }
        rn = normalizedRn
    }
    var cf protocol.FileInfo
    var ok bool
    // Index wise symlinks are always files, regardless of what the target
    // is, because symlinks carry their target path as their content.
    if info.Mode()&os.ModeSymlink == os.ModeSymlink {
        // If the target is a directory, do NOT descend down there. This
        // will cause files to get tracked, and removing the symlink will
        // as a result remove files in their real location.
        if !symlinks.Supported {
            return skip
        }
        // We always rehash symlinks as they have no modtime or
        // permissions. We check if they point to the old target by
        // checking that their existing blocks match with the blocks in
        // the index.
        target, targetType, err := symlinks.Read(p)
        if err != nil {
            if debug {
                l.Debugln("readlink error:", p, err)
            }
            return skip
        }
        blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0, nil)
        if err != nil {
            if debug {
                l.Debugln("hash link error:", p, err)
            }
            return skip
        }
        if w.CurrentFiler != nil {
            // A symlink is "unchanged", if
            // - it exists
            // - it wasn't deleted (because it isn't now)
            // - it was a symlink
            // - it wasn't invalid
            // - the symlink type (file/dir) was the same
            // - the block list (i.e. hash of target) was the same
            cf, ok = w.CurrentFiler.CurrentFile(rn)
            if ok && !cf.IsDeleted() && cf.IsSymlink() && !cf.IsInvalid() && SymlinkTypeEqual(targetType, cf) && BlocksEqual(cf.Blocks, blocks) {
                return skip
            }
        }
        f := protocol.FileInfo{
            Name: rn,
            Version: cf.Version.Update(w.ShortID),
            Flags: uint32(protocol.FlagSymlink | protocol.FlagNoPermBits | 0666 | SymlinkFlags(targetType)),
            Modified: 0,
            Blocks: blocks,
        }
        if debug {
            l.Debugln("symlink changedb:", p, f)
        }
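In this example a symlink is indexed as a file whose blocks are hashed from its target path, so its Size() reflects the length of that target rather than anything on disk. A hedged sketch reusing the Blocks helper from the example above (with protocol.BlockSize in place of w.BlockSize; the target literal is made up, and the equality rests on the block-sum reading of Size() mentioned earlier, which is an assumption):

    target := "/some/target/path" // illustrative only
    blocks, err := Blocks(strings.NewReader(target), protocol.BlockSize, 0, nil)
    if err == nil {
        f := protocol.FileInfo{Name: "some-link", Blocks: blocks}
        // Expected: f.Size() == len(target), since the single block covers the target string.
        fmt.Println(f.Size(), len(target))
    }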
Example 5: dump
func dump(ldb *leveldb.DB) {
    it := ldb.NewIterator(nil, nil)
    var dev protocol.DeviceID
    for it.Next() {
        key := it.Key()
        switch key[0] {
        case db.KeyTypeDevice:
            folder := nulString(key[1 : 1+64])
            devBytes := key[1+64 : 1+64+32]
            name := nulString(key[1+64+32:])
            copy(dev[:], devBytes)
            fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)
            var f protocol.FileInfo
            err := f.UnmarshalXDR(it.Value())
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf(" N:%q\n F:%#o\n M:%d\n V:%v\n S:%d\n B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
        case db.KeyTypeGlobal:
            folder := nulString(key[1 : 1+64])
            name := nulString(key[1+64:])
            fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())
        case db.KeyTypeBlock:
            folder := nulString(key[1 : 1+64])
            hash := key[1+64 : 1+64+32]
            name := nulString(key[1+64+32:])
            fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
        case db.KeyTypeDeviceStatistic:
            fmt.Printf("[dstat]\n %x\n %x\n", it.Key(), it.Value())
        case db.KeyTypeFolderStatistic:
            fmt.Printf("[fstat]\n %x\n %x\n", it.Key(), it.Value())
        case db.KeyTypeVirtualMtime:
            fmt.Printf("[mtime]\n %x\n %x\n", it.Key(), it.Value())
        default:
            fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
        }
    }
}
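For reference, the key slicing used in examples 1 and 5 implies a device-key layout of one type byte, a 64-byte NUL-padded folder name, a 32-byte device ID and the file name. A hypothetical helper (not part of syncthing) that unpacks such a key, using the same nulString helper as the examples:

    // parseDeviceKey mirrors the slicing in the db.KeyTypeDevice case above.
    func parseDeviceKey(key []byte) (folder string, device protocol.DeviceID, name string) {
        folder = nulString(key[1 : 1+64])
        copy(device[:], key[1+64:1+64+32])
        name = nulString(key[1+64+32:])
        return
    }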