This page collects typical usage examples of the ProgressBar.Prefix method from the Golang package github.com/cheggaaa/pb. If you are wondering what ProgressBar.Prefix does, how to call it, or how it is used in practice, the curated examples below should help; you can also explore further usage examples for the github.com/cheggaaa/pb.ProgressBar type that the method belongs to.
Three code examples of ProgressBar.Prefix are shown below, sorted by popularity by default.
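Before the project examples, it may help to see the method in isolation. Below is a minimal sketch, assuming the v1-style pb API that all three examples use (Prefix sets the text displayed to the left of the bar); the loop body is a stand-in for real work:

package main

import (
    "time"

    "github.com/cheggaaa/pb"
)

func main() {
    const nItems = 100
    bar := pb.New(nItems)      // the bar's total is nItems
    bar.Prefix("Processing: ") // text shown to the left of the bar
    bar.Start()
    for i := 0; i < nItems; i++ {
        time.Sleep(10 * time.Millisecond) // stand-in for real work
        bar.Increment()
    }
    bar.Finish()
}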
Example 1: CheckMetadata
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
    idToFile, err := gd.getIdToFile(filename)
    if err != nil {
        return err
    }

    // This will almost certainly take a while, so put up a progress bar.
    var bar *pb.ProgressBar
    if !gd.quiet {
        bar = pb.New(len(idToFile))
        bar.ShowBar = true
        bar.ShowCounters = false
        bar.Output = os.Stderr
        bar.Prefix("Checking metadata cache: ")
        bar.Start()
    }

    err = gd.runQuery("trashed=false", func(f *drive.File) {
        if file, ok := idToFile[f.Id]; ok {
            df := newFile(f.Title, f)
            if !filesEqual(df, file) {
                report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
                    file.Path, file, df))
            }
            if bar != nil {
                bar.Increment()
            }
            delete(idToFile, f.Id)
        } else {
            // It'd be preferable to have "sharedWithMe=false" included in
            // the query string above, but the combination of that with
            // "trashed=false" seems to lead to no results being returned.
            if !f.Shared {
                report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]",
                    f.Title, f))
            }
        }
    })
    if err != nil {
        return err
    }

    for _, f := range idToFile {
        report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]",
            f.Path, f))
    }

    if bar != nil {
        bar.Finish()
    }
    return nil
}
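Note the pattern in Example 1: the bar is allocated only when gd.quiet is false, and every Increment and Finish call is guarded by a nil check. If that pattern repeats across a codebase, a small constructor keeps the call sites tidy. A sketch under the same pb v1 assumptions; maybeProgressBar is a hypothetical helper, not part of the project above:

package main

import (
    "os"

    "github.com/cheggaaa/pb"
)

// maybeProgressBar is a hypothetical helper: it returns a started bar,
// or nil in quiet mode. Callers nil-check before Increment/Finish,
// exactly as Example 1 does.
func maybeProgressBar(quiet bool, count int, prefix string) *pb.ProgressBar {
    if quiet {
        return nil
    }
    bar := pb.New(count)
    bar.Output = os.Stderr // keep stdout free for program output
    bar.Prefix(prefix)
    bar.Start()
    return bar
}

func main() {
    bar := maybeProgressBar(false, 10, "Checking: ")
    for i := 0; i < 10; i++ {
        if bar != nil {
            bar.Increment()
        }
    }
    if bar != nil {
        bar.Finish()
    }
}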
Example 2: syncHierarchyUp
// Synchronize a local directory hierarchy with Google Drive.
// localPath is the file or directory to start with, driveRoot is
// the directory into which the file/directory will be sent.
func syncHierarchyUp(localPath string, driveRoot string, encrypt bool, trustTimes bool,
    maxSymlinkDepth int) int {
    if encrypt && key == nil {
        key = decryptEncryptionKey()
    }

    fileMappings, nUploadErrors := compileUploadFileTree(localPath, driveRoot,
        encrypt, trustTimes, maxSymlinkDepth)
    if len(fileMappings) == 0 {
        message("No files to be uploaded.")
        return 0
    }

    nBytesToUpload := int64(0)
    for _, info := range fileMappings {
        if !info.LocalFileInfo.IsDir() {
            nBytesToUpload += info.LocalFileInfo.Size()
        }
    }

    // Given the list of files to sync, first find all of the directories and
    // then either get or create a Drive folder for each one.
    directoryMappingMap := make(map[string]localToRemoteFileMapping)
    var directoryNames []string
    for _, localfile := range fileMappings {
        if localfile.LocalFileInfo.IsDir() {
            directoryNames = append(directoryNames, localfile.DrivePath)
            directoryMappingMap[localfile.DrivePath] = localfile
        }
    }

    // Now sort the directories by name, which ensures that the parent of each
    // directory has already been created if we need to create its children.
    sort.Strings(directoryNames)

    if len(directoryNames) > 0 {
        // Actually create/update the directories.
        var dirProgressBar *pb.ProgressBar
        if !quiet {
            dirProgressBar = pb.New(len(directoryNames))
            dirProgressBar.Output = os.Stderr
            dirProgressBar.Prefix("Directories: ")
            dirProgressBar.Start()
        }

        // Sync each of the directories, which serves to create any missing ones.
        for _, dirName := range directoryNames {
            file := directoryMappingMap[dirName]
            err := syncFileUp(file.LocalPath, file.LocalFileInfo, file.DrivePath,
                encrypt, dirProgressBar)
            if err != nil {
                // Errors creating directories are basically unrecoverable,
                // as they'll prevent us from later uploading any files in
                // them.
                printErrorAndExit(err)
            }
        }
        if dirProgressBar != nil {
            dirProgressBar.Finish()
        }
    }

    var fileProgressBar *pb.ProgressBar
    if !quiet {
        fileProgressBar = pb.New64(nBytesToUpload).SetUnits(pb.U_BYTES)
        fileProgressBar.Output = os.Stderr
        fileProgressBar.Prefix("Files: ")
        fileProgressBar.Start()
    }

    // Sort the files by size, small to large.
    sort.Sort(localToRemoteBySize(fileMappings))

    // The two indices uploadFrontIndex and uploadBackIndex point to the
    // range of elements in the fileMappings array that haven't yet been
    // uploaded.
    uploadFrontIndex := 0
    uploadBackIndex := len(fileMappings) - 1

    // First, upload any large files that will use the resumable upload
    // protocol using a single thread; more threads here doesn't generally
    // help improve bandwidth utilization and seems to make rate limit
    // errors from the Drive API more frequent...
    for ; uploadBackIndex >= 0; uploadBackIndex-- {
        if fileMappings[uploadBackIndex].LocalFileInfo.Size() < resumableUploadMinSize {
            break
        }

        fm := fileMappings[uploadBackIndex]
        if fm.LocalFileInfo.IsDir() {
            continue
        }

        if err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt,
            fileProgressBar); err != nil {
            addErrorAndPrintMessage(&nUploadErrors, fm.LocalPath, err)
        }
//......... some code omitted here .........
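Example 2 measures its file bar in bytes (pb.New64(nBytesToUpload).SetUnits(pb.U_BYTES)) rather than in files, so progress advances by upload volume instead of jumping file by file. The same byte-unit setup pairs naturally with pb v1's proxy reader for any byte stream; a minimal sketch, with the input file name a placeholder:

package main

import (
    "io"
    "os"

    "github.com/cheggaaa/pb"
)

func main() {
    f, err := os.Open("payload.bin") // placeholder file name
    if err != nil {
        panic(err)
    }
    defer f.Close()

    st, err := f.Stat()
    if err != nil {
        panic(err)
    }

    bar := pb.New64(st.Size()).SetUnits(pb.U_BYTES)
    bar.Output = os.Stderr
    bar.Prefix("Reading: ")
    bar.Start()

    // The proxy reader advances the bar as bytes are read through it.
    if _, err := io.Copy(io.Discard, bar.NewProxyReader(f)); err != nil {
        panic(err)
    }
    bar.Finish()
}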
Example 3: getMetadataChanges
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64,
    changeChan chan<- []*drive.Change, errorChan chan<- error) {
    var about *drive.About
    var err error

    // Get the Drive About information in order to figure out how many
    // changes we need to download to get up to date.
    for try := 0; ; try++ {
        about, err = svc.About.Get().Do()
        if err == nil {
            break
        } else {
            err = gd.tryToHandleDriveAPIError(err, try)
        }
        if err != nil {
            errorChan <- err
            return
        }
    }

    // Don't clutter the output with a progress bar unless it looks like
    // downloading changes may take a while.
    // TODO: consider using timer.AfterFunc to put up the progress bar if
    // we're not done after a few seconds? It's not clear if this is worth
    // the trouble.
    var bar *pb.ProgressBar
    numChanges := about.LargestChangeId - startChangeId
    if numChanges > 1000 && !gd.quiet {
        bar = pb.New64(numChanges)
        bar.ShowBar = true
        bar.ShowCounters = false
        bar.Output = os.Stderr
        bar.Prefix("Updating metadata cache: ")
        bar.Start()
    }

    pageToken := ""
    try := 0
    // Keep asking Drive for more changes until we get through them all.
    for {
        // Only ask for the fields in the drive.Change structure that we
        // actually need to have filled in, to save some bandwidth...
        fields := []googleapi.Field{"nextPageToken",
            "items/id", "items/fileId", "items/deleted",
            "items/file/id", "items/file/parents", "items/file/title",
            "items/file/fileSize", "items/file/mimeType", "items/file/properties",
            "items/file/modifiedDate", "items/file/md5Checksum", "items/file/labels"}
        q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
        if startChangeId >= 0 {
            q = q.StartChangeId(startChangeId + 1)
        }
        if pageToken != "" {
            q = q.PageToken(pageToken)
        }

        r, err := q.Do()
        if err != nil {
            err = gd.tryToHandleDriveAPIError(err, try)
            if err != nil {
                errorChan <- err
                return
            }
            try++
            continue
        }

        // Success. Reset the try counter in case we had errors leading up
        // to this.
        try = 0

        if len(r.Items) > 0 {
            // Send the changes along to the goroutine that's updating the
            // local cache.
            changeChan <- r.Items

            if bar != nil {
                bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
            }
        }

        pageToken = string(r.NextPageToken)
        if pageToken == "" {
            break
        }
    }

    // Signal that no more changes are coming.
    close(changeChan)

    if bar != nil {
        bar.Finish()
    }
    gd.debug("Done updating metadata from Drive")
}
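Unlike the first two examples, Example 3 cannot simply call Increment: changes arrive in variable-sized pages, so it computes an absolute position from the last change ID in each page and calls bar.Set. A minimal sketch of that pattern, with made-up page sizes standing in for the Drive responses:

package main

import "github.com/cheggaaa/pb"

func main() {
    const totalChanges = 5000
    bar := pb.New64(totalChanges)
    bar.ShowCounters = false
    bar.Prefix("Updating: ")
    bar.Start()

    processed := 0
    for _, pageSize := range []int{1000, 1000, 2500, 500} { // made-up pages
        processed += pageSize
        bar.Set(processed) // jump to an absolute position, as Example 3 does
    }
    bar.Finish()
}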