This article collects typical usage examples of the Golang method github.com/cheggaaa/pb.ProgressBar.Output. If you are wondering what ProgressBar.Output does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore other uses of its containing type, github.com/cheggaaa/pb.ProgressBar.
The following presents 7 code examples of ProgressBar.Output, sorted by popularity by default.
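As a quick orientation before the full examples, here is a minimal sketch of the basic pattern, written against the v1 API of github.com/cheggaaa/pb (it is not taken from any of the projects below). The Output field redirects where the bar is rendered; pointing it at os.Stderr keeps the bar out of stdout whenever that stream carries real data.

package main

import (
    "os"
    "time"

    "github.com/cheggaaa/pb"
)

func main() {
    bar := pb.New(100)     // 100 total units of work
    bar.Output = os.Stderr // render on stderr instead of the default stdout
    bar.Start()
    for i := 0; i < 100; i++ {
        time.Sleep(10 * time.Millisecond) // stand-in for real work
        bar.Increment()
    }
    bar.Finish()
}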
Example 1: actionRunner
// actionRunner handles running an action which may take a while to complete,
// providing progress bars and signal handling.
func actionRunner(cmd *cli.Cmd, action action) func() {
    cmd.Spec = "[--silent] [--no-progress] " + cmd.Spec
    silent := cmd.BoolOpt("silent", false, "Set to true to disable all non-error output")
    noProgress := cmd.BoolOpt("no-progress", false, "Set to true to disable the progress bar")
    return func() {
        var infoWriter io.Writer = os.Stderr
        if *silent {
            // Discard info output before handing the writer to the action.
            infoWriter = ioutil.Discard
        }
        var ticker <-chan time.Time
        if err := action.init(); err != nil {
            fail("Initialization failed: %v", err)
        }
        done, err := action.start(infoWriter)
        if err != nil {
            fail("Startup failed: %v", err)
        }
        var bar *pb.ProgressBar
        if !*silent && !*noProgress {
            bar = action.newProgressBar()
            if bar != nil {
                // Only start ticking once we know there is a bar to update.
                ticker = time.Tick(statsFrequency)
                bar.Output = os.Stderr
                bar.ShowSpeed = true
                bar.ManualUpdate = true
                bar.SetMaxWidth(78)
                bar.Start()
                bar.Update()
            }
        }
        sigchan := make(chan os.Signal, 1)
        // Note: SIGKILL cannot be caught, so there is no point in listening for it.
        signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT)
    LOOP:
        for {
            select {
            case <-ticker:
                action.updateProgress(bar)
                bar.Update()
            case <-sigchan:
                if bar != nil {
                    bar.Finish()
                }
                fmt.Fprintf(os.Stderr, "\nAborting...")
                action.abort()
                <-done
                fmt.Fprintf(os.Stderr, "Aborted.\n")
                break LOOP
            case err := <-done:
                if err != nil {
                    fail("Processing failed: %v", err)
                }
                break LOOP
            }
        }
        if bar != nil {
            bar.Finish()
        }
        if !*silent {
            action.printFinalStats(infoWriter)
        }
    }
}
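Example 1 sets ManualUpdate and then drives redraws from its own ticker, so rendering, progress updates, and signal handling all stay on one goroutine. A stripped-down sketch of that pattern, assuming pb v1 semantics (with ManualUpdate set, Start launches no internal refresher and Update must be called explicitly):

package main

import (
    "os"
    "time"

    "github.com/cheggaaa/pb"
)

func main() {
    bar := pb.New(200)
    bar.Output = os.Stderr
    bar.ManualUpdate = true // we redraw explicitly; no background refresh goroutine
    bar.Start()

    ticker := time.Tick(100 * time.Millisecond)
    for i := 0; i < 200; i++ {
        time.Sleep(5 * time.Millisecond) // stand-in for real work
        bar.Increment()
        select {
        case <-ticker:
            bar.Update() // redraw only on our own schedule
        default:
        }
    }
    bar.Finish()
}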
Example 2: CheckMetadata
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
    idToFile, err := gd.getIdToFile(filename)
    if err != nil {
        return err
    }
    // This will almost certainly take a while, so put up a progress bar.
    var bar *pb.ProgressBar
    if !gd.quiet {
        bar = pb.New(len(idToFile))
        bar.ShowBar = true
        bar.ShowCounters = false
        bar.Output = os.Stderr
        bar.Prefix("Checking metadata cache: ")
        bar.Start()
    }
    err = gd.runQuery("trashed=false", func(f *drive.File) {
        if file, ok := idToFile[f.Id]; ok {
            df := newFile(f.Title, f)
            if !filesEqual(df, file) {
                report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
                    file.Path, file, df))
            }
            if bar != nil {
                bar.Increment()
            }
            delete(idToFile, f.Id)
        } else {
            // It'd be preferable to have "sharedWithMe=false" included in
            // the query string above, but the combination of that with
            // "trashed=false" seems to lead to no results being returned.
            if !f.Shared {
                report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]",
                    f.Title, f))
            }
        }
    })
    if err != nil {
        // Don't report stale cache entries if the query itself failed.
        return err
    }
    for _, f := range idToFile {
        report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]",
            f.Path, f))
    }
    if bar != nil {
        bar.Finish()
    }
    return nil
}
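A pattern this example shares with most of the ones below is leaving bar nil in quiet mode and guarding every call on it. If the guards start to sprawl, a tiny helper can centralize them; the function below is a hypothetical sketch, not part of the original code:

// step advances a progress bar that may be nil (e.g. when a quiet flag
// suppressed its creation), so callers don't need to repeat the nil check.
func step(bar *pb.ProgressBar) {
    if bar != nil {
        bar.Increment()
    }
}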
Example 3: runClusterBackup
func runClusterBackup(args *docopt.Args) error {
    client, err := getClusterClient()
    if err != nil {
        return err
    }
    var bar *pb.ProgressBar
    var progress backup.ProgressBar
    if term.IsTerminal(os.Stderr.Fd()) {
        bar = pb.New(0)
        bar.SetUnits(pb.U_BYTES)
        bar.ShowBar = false
        bar.ShowSpeed = true
        bar.Output = os.Stderr
        bar.Start()
        progress = bar
    }
    var dest io.Writer = os.Stdout
    if filename := args.String["--file"]; filename != "" {
        f, err := os.Create(filename)
        if err != nil {
            return err
        }
        defer f.Close()
        dest = f
    }
    fmt.Fprintln(os.Stderr, "Creating cluster backup...")
    if err := backup.Run(client, dest, progress); err != nil {
        return err
    }
    if bar != nil {
        bar.Finish()
    }
    fmt.Fprintln(os.Stderr, "Backup complete.")
    return nil
}
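Example 3 streams the backup itself to stdout while the bar renders on stderr; setting Output is what keeps the two streams from interleaving. A minimal sketch of the same separation for a plain byte copy, assuming pb v1's NewProxyReader (the input file name is illustrative):

package main

import (
    "io"
    "os"

    "github.com/cheggaaa/pb"
)

func main() {
    f, err := os.Open("dump.bin") // illustrative input
    if err != nil {
        panic(err)
    }
    defer f.Close()

    fi, err := f.Stat()
    if err != nil {
        panic(err)
    }
    bar := pb.New64(fi.Size()).SetUnits(pb.U_BYTES)
    bar.Output = os.Stderr // progress on stderr; stdout stays clean for the data
    bar.Start()

    // NewProxyReader wraps f so the bar advances as bytes are read through it.
    if _, err := io.Copy(os.Stdout, bar.NewProxyReader(f)); err != nil {
        panic(err)
    }
    bar.Finish()
}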
Example 4: runImport
//......... portions of this code omitted .........
            if err != nil {
                return fmt.Errorf("error creating db tempfile: %s", err)
            }
            defer f.Close()
            defer os.Remove(f.Name())
            if _, err := io.Copy(f, tr); err != nil {
                return fmt.Errorf("error reading db dump: %s", err)
            }
            if _, err := f.Seek(0, os.SEEK_SET); err != nil {
                return fmt.Errorf("error seeking db tempfile: %s", err)
            }
            mysqlDump = f
            uploadSize += header.Size
        }
    }
    if app == nil {
        return fmt.Errorf("missing app.json")
    }
    oldName := app.Name
    if name := args.String["--name"]; name != "" {
        app.Name = name
    }
    if err := client.CreateApp(app); err != nil {
        return fmt.Errorf("error creating app: %s", err)
    }
    var bar *pb.ProgressBar
    if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
        bar = pb.New(0)
        bar.SetUnits(pb.U_BYTES)
        bar.Total = uploadSize
        bar.ShowSpeed = true
        bar.Output = os.Stderr
        bar.Start()
        defer bar.Finish()
    }
    if pgDump != nil && release != nil {
        res, err := client.ProvisionResource(&ct.ResourceReq{
            ProviderID: "postgres",
            Apps:       []string{app.ID},
        })
        if err != nil {
            return fmt.Errorf("error provisioning postgres resource: %s", err)
        }
        numResources++
        if release.Env == nil {
            release.Env = make(map[string]string, len(res.Env))
        }
        for k, v := range res.Env {
            release.Env[k] = v
        }
        config, err := getPgRunConfig(client, app.ID, release)
        if err != nil {
            return fmt.Errorf("error getting postgres config: %s", err)
        }
        config.Stdin = pgDump
        if bar != nil {
            config.Stdin = bar.NewProxyReader(config.Stdin)
        }
        config.Exit = false
        if err := pgRestore(client, config); err != nil {
            return fmt.Errorf("error restoring postgres database: %s", err)
//......... portions of this code omitted .........
Example 5: syncHierarchyUp
// Synchronize a local directory hierarchy with Google Drive.
// localPath is the file or directory to start with; driveRoot is
// the directory into which the file/directory will be sent.
func syncHierarchyUp(localPath string, driveRoot string, encrypt bool, trustTimes bool,
    maxSymlinkDepth int) int {
    if encrypt && key == nil {
        key = decryptEncryptionKey()
    }
    fileMappings, nUploadErrors := compileUploadFileTree(localPath, driveRoot,
        encrypt, trustTimes, maxSymlinkDepth)
    if len(fileMappings) == 0 {
        message("No files to be uploaded.")
        return 0
    }
    nBytesToUpload := int64(0)
    for _, info := range fileMappings {
        if !info.LocalFileInfo.IsDir() {
            nBytesToUpload += info.LocalFileInfo.Size()
        }
    }
    // Given the list of files to sync, first find all of the directories and
    // then either get or create a Drive folder for each one.
    directoryMappingMap := make(map[string]localToRemoteFileMapping)
    var directoryNames []string
    for _, localfile := range fileMappings {
        if localfile.LocalFileInfo.IsDir() {
            directoryNames = append(directoryNames, localfile.DrivePath)
            directoryMappingMap[localfile.DrivePath] = localfile
        }
    }
    // Now sort the directories by name, which ensures that the parent of each
    // directory has already been created if we need to create its children.
    sort.Strings(directoryNames)
    if len(directoryNames) > 0 {
        // Actually create/update the directories.
        var dirProgressBar *pb.ProgressBar
        if !quiet {
            dirProgressBar = pb.New(len(directoryNames))
            dirProgressBar.Output = os.Stderr
            dirProgressBar.Prefix("Directories: ")
            dirProgressBar.Start()
        }
        // Sync each of the directories, which serves to create any missing ones.
        for _, dirName := range directoryNames {
            file := directoryMappingMap[dirName]
            err := syncFileUp(file.LocalPath, file.LocalFileInfo, file.DrivePath,
                encrypt, dirProgressBar)
            if err != nil {
                // Errors creating directories are basically unrecoverable,
                // as they'll prevent us from later uploading any files in
                // them.
                printErrorAndExit(err)
            }
        }
        if dirProgressBar != nil {
            dirProgressBar.Finish()
        }
    }
    var fileProgressBar *pb.ProgressBar
    if !quiet {
        fileProgressBar = pb.New64(nBytesToUpload).SetUnits(pb.U_BYTES)
        fileProgressBar.Output = os.Stderr
        fileProgressBar.Prefix("Files: ")
        fileProgressBar.Start()
    }
    // Sort the files by size, small to large.
    sort.Sort(localToRemoteBySize(fileMappings))
    // The two indices uploadFrontIndex and uploadBackIndex point to the
    // range of elements in the fileMappings array that haven't yet been
    // uploaded.
    uploadFrontIndex := 0
    uploadBackIndex := len(fileMappings) - 1
    // First, upload any large files that will use the resumable upload
    // protocol using a single thread; more threads here don't generally
    // improve bandwidth utilization and seem to make rate limit
    // errors from the Drive API more frequent...
    for ; uploadBackIndex >= 0; uploadBackIndex-- {
        if fileMappings[uploadBackIndex].LocalFileInfo.Size() < resumableUploadMinSize {
            break
        }
        fm := fileMappings[uploadBackIndex]
        if fm.LocalFileInfo.IsDir() {
            continue
        }
        if err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt,
            fileProgressBar); err != nil {
            addErrorAndPrintMessage(&nUploadErrors, fm.LocalPath, err)
        }
//......... portions of this code omitted .........
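The comment about sorting directory names rests on a simple property: a parent path is a proper prefix of its children's paths, and a proper prefix always sorts first lexicographically, so creating folders in sorted order never hits a missing parent. A tiny illustration:

package main

import (
    "fmt"
    "sort"
)

func main() {
    dirs := []string{"photos/2015/raw", "photos", "docs", "photos/2015"}
    sort.Strings(dirs)
    // Parents always precede their children:
    fmt.Println(dirs) // [docs photos photos/2015 photos/2015/raw]
}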
Example 6: getMetadataChanges
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64,
    changeChan chan<- []*drive.Change, errorChan chan<- error) {
    var about *drive.About
    var err error
    // Get the Drive About information in order to figure out how many
    // changes we need to download to get up to date.
    for try := 0; ; try++ {
        about, err = svc.About.Get().Do()
        if err == nil {
            break
        } else {
            err = gd.tryToHandleDriveAPIError(err, try)
        }
        if err != nil {
            errorChan <- err
            return
        }
    }
    // Don't clutter the output with a progress bar unless it looks like
    // downloading changes may take a while.
    // TODO: consider using time.AfterFunc to put up the progress bar if
    // we're not done after a few seconds? It's not clear if this is worth
    // the trouble.
    var bar *pb.ProgressBar
    numChanges := about.LargestChangeId - startChangeId
    if numChanges > 1000 && !gd.quiet {
        bar = pb.New64(numChanges)
        bar.ShowBar = true
        bar.ShowCounters = false
        bar.Output = os.Stderr
        bar.Prefix("Updating metadata cache: ")
        bar.Start()
    }
    pageToken := ""
    try := 0
    // Keep asking Drive for more changes until we get through them all.
    for {
        // Only ask for the fields in the drive.Change structure that we
        // actually need to have filled in, to save some bandwidth...
        fields := []googleapi.Field{"nextPageToken",
            "items/id", "items/fileId", "items/deleted",
            "items/file/id", "items/file/parents", "items/file/title",
            "items/file/fileSize", "items/file/mimeType", "items/file/properties",
            "items/file/modifiedDate", "items/file/md5Checksum", "items/file/labels"}
        q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
        if startChangeId >= 0 {
            q = q.StartChangeId(startChangeId + 1)
        }
        if pageToken != "" {
            q = q.PageToken(pageToken)
        }
        r, err := q.Do()
        if err != nil {
            err = gd.tryToHandleDriveAPIError(err, try)
            if err != nil {
                errorChan <- err
                return
            }
            try++
            continue
        }
        // Success. Reset the try counter in case we had errors leading up
        // to this.
        try = 0
        if len(r.Items) > 0 {
            // Send the changes along to the goroutine that's updating the
            // local cache.
            changeChan <- r.Items
            if bar != nil {
                bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
            }
        }
        pageToken = r.NextPageToken
        if pageToken == "" {
            break
        }
    }
    // Signal that no more changes are coming.
    close(changeChan)
    if bar != nil {
        bar.Finish()
    }
    gd.debug("Done updating metadata from Drive")
}
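Example 6 positions the bar absolutely with Set rather than calling Increment once per change, which is the natural fit when progress arrives in variable-sized batches. A minimal sketch of that pattern, again assuming pb v1:

package main

import (
    "os"

    "github.com/cheggaaa/pb"
)

func main() {
    total := 5000
    bar := pb.New(total)
    bar.Output = os.Stderr
    bar.Start()

    done := 0
    for done < total {
        batch := 750 // stand-in for a variable-sized batch of processed items
        if done+batch > total {
            batch = total - done
        }
        done += batch
        bar.Set(done) // jump to the absolute position instead of incrementing
    }
    bar.Finish()
}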
Example 7: runImport
//......... portions of this code omitted .........
            if err != nil {
                return fmt.Errorf("error creating db tempfile: %s", err)
            }
            defer f.Close()
            defer os.Remove(f.Name())
            if _, err := io.Copy(f, tr); err != nil {
                return fmt.Errorf("error reading db dump: %s", err)
            }
            if _, err := f.Seek(0, os.SEEK_SET); err != nil {
                return fmt.Errorf("error seeking db tempfile: %s", err)
            }
            mysqlDump = f
            uploadSize += header.Size
        }
    }
    if app == nil {
        return fmt.Errorf("missing app.json")
    }
    oldName := app.Name
    if name := args.String["--name"]; name != "" {
        app.Name = name
    }
    if err := client.CreateApp(app); err != nil {
        return fmt.Errorf("error creating app: %s", err)
    }
    var bar *pb.ProgressBar
    if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
        bar = pb.New(0)
        bar.SetUnits(pb.U_BYTES)
        bar.Total = uploadSize
        bar.ShowSpeed = true
        bar.Output = os.Stderr
        bar.Start()
        defer bar.Finish()
    }
    if pgDump != nil && release != nil {
        res, err := client.ProvisionResource(&ct.ResourceReq{
            ProviderID: "postgres",
            Apps:       []string{app.ID},
        })
        if err != nil {
            return fmt.Errorf("error provisioning postgres resource: %s", err)
        }
        numResources++
        if release.Env == nil {
            release.Env = make(map[string]string, len(res.Env))
        }
        for k, v := range res.Env {
            release.Env[k] = v
        }
        config, err := getPgRunConfig(client, app.ID, release)
        if err != nil {
            return fmt.Errorf("error getting postgres config: %s", err)
        }
        config.Stdin = pgDump
        if bar != nil {
            config.Stdin = bar.NewProxyReader(config.Stdin)
        }
        config.Exit = false
        if err := pgRestore(client, config, jobs); err != nil {
            return fmt.Errorf("error restoring postgres database: %s", err)