本文整理匯總了Golang中github.com/cheggaaa/pb.ProgressBar.Finish方法的典型用法代碼示例。如果您正苦於以下問題:Golang ProgressBar.Finish方法的具體用法?Golang ProgressBar.Finish怎麽用?Golang ProgressBar.Finish使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/cheggaaa/pb.ProgressBar
的用法示例。
在下文中一共展示了ProgressBar.Finish方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: actionRunner
// actionRunner handles running an action which may take a while to complete,
// providing progress bars and signal handling. It returns the closure that
// the CLI framework invokes when the command runs.
func actionRunner(cmd *cli.Cmd, action action) func() {
	cmd.Spec = "[--silent] [--no-progress] " + cmd.Spec
	silent := cmd.BoolOpt("silent", false, "Set to true to disable all non-error output")
	noProgress := cmd.BoolOpt("no-progress", false, "Set to true to disable the progress bar")
	return func() {
		var infoWriter io.Writer = os.Stderr
		var ticker <-chan time.Time
		if err := action.init(); err != nil {
			fail("Initialization failed: %v", err)
		}
		done, err := action.start(infoWriter)
		if err != nil {
			fail("Startup failed: %v", err)
		}
		var bar *pb.ProgressBar
		if !*silent && !*noProgress {
			bar = action.newProgressBar()
			// Only arm the ticker when a bar actually exists: previously the
			// ticker fired even when newProgressBar returned nil, and the
			// select below dereferenced the nil bar. A nil ticker channel
			// simply blocks forever, which is exactly what we want.
			if bar != nil {
				ticker = time.Tick(statsFrequency)
				bar.Output = os.Stderr
				bar.ShowSpeed = true
				bar.ManualUpdate = true
				bar.SetMaxWidth(78)
				bar.Start()
				bar.Update()
			}
		}
		if *silent {
			infoWriter = ioutil.Discard
		}
		sigchan := make(chan os.Signal, 1)
		// SIGKILL cannot be caught or handled, so registering it (as the
		// previous version did) was a no-op; only catchable signals remain.
		signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT)
	LOOP:
		for {
			select {
			case <-ticker:
				// ticker is only non-nil when bar is non-nil (see above).
				action.updateProgress(bar)
				bar.Update()
			case <-sigchan:
				if bar != nil {
					bar.Finish()
				}
				fmt.Fprintf(os.Stderr, "\nAborting..")
				action.abort()
				// Wait for the action to acknowledge the abort.
				<-done
				fmt.Fprintf(os.Stderr, "Aborted.\n")
				break LOOP
			case err := <-done:
				if err != nil {
					fail("Processing failed: %v", err)
				}
				break LOOP
			}
		}
		if bar != nil {
			bar.Finish()
		}
		if !*silent {
			action.printFinalStats(infoWriter)
		}
	}
}
示例2: HostMerge
// HostMerge merges consecutive hostnames from List into "|"-joined groups,
// keeping each group at 255 characters or fewer. ShowBar controls whether a
// progress bar is rendered while merging. It returns the list of groups.
func HostMerge(List []Host, ShowBar bool) []string {
	count := 0
	filterList := []string{""}
	var bar *pb.ProgressBar
	if ShowBar {
		bar = pb.StartNew(len(List))
		bar.SetMaxWidth(80)
	}
	for _, Host := range List {
		current := len(filterList[count])
		switch {
		case current == 0:
			// First entry of a fresh group.
			filterList[count] = Host.Hostname
		case current+Host.length() <= 255:
			// Still room in the current group; join with "|".
			filterList[count] += "|"
			filterList[count] += Host.Hostname
		default:
			// Group full: start a new one with this hostname.
			count++
			filterList = append(filterList, Host.Hostname)
		}
		if ShowBar {
			bar.Increment()
			// NOTE(review): this sleep exists only to make the bar animation
			// visible; it slows merging by 50ms per host — consider removing
			// it for large inputs.
			time.Sleep(time.Millisecond * 50)
		}
	}
	if ShowBar {
		bar.Finish()
	}
	return filterList
}
示例3: CheckMetadata
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache, invoking report for
// every discrepancy found (mismatched metadata, Drive-only files, and
// cache-only files). It returns an error if the cache or the Drive query
// cannot be read.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
	idToFile, err := gd.getIdToFile(filename)
	if err != nil {
		return err
	}
	// This will almost certainly take a while, so put up a progress bar.
	var bar *pb.ProgressBar
	if !gd.quiet {
		bar = pb.New(len(idToFile))
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Checking metadata cache: ")
		bar.Start()
	}
	err = gd.runQuery("trashed=false", func(f *drive.File) {
		if file, ok := idToFile[f.Id]; ok {
			df := newFile(f.Title, f)
			if !filesEqual(df, file) {
				report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
					file.Path, file, df))
			}
			if bar != nil {
				bar.Increment()
			}
			// Remove entries seen on Drive; whatever remains afterwards
			// exists only in the local cache.
			delete(idToFile, f.Id)
		} else {
			// It'd be preferable to have "sharedWithMe=false" included in
			// the query string above, but the combination of that with
			// "trashed=false" seems to lead to no results being returned.
			if !f.Shared {
				report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]",
					f.Title, f))
			}
		}
	})
	// BUG FIX: the error from runQuery was previously assigned and then
	// silently dropped, so a failed query looked like a clean check.
	if err != nil {
		if bar != nil {
			bar.Finish()
		}
		return err
	}
	for _, f := range idToFile {
		report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]",
			f.Path, f))
	}
	if bar != nil {
		bar.Finish()
	}
	return nil
}
示例4: download
// download a file with the HTTP/HTTPS protocol showing a progress bar. The
// data is written to a ".tmp" sibling first and only renamed over
// destinationPath once the download succeeds, so the destination is always
// overwritten atomically. Any failure is fatal (log.Fatalf).
func download(rawurl string, destinationPath string) {
	tempDestinationPath := destinationPath + ".tmp"
	destination, err := os.Create(tempDestinationPath)
	if err != nil {
		log.Fatalf("Unable to open the destination file: %s", tempDestinationPath)
	}
	defer destination.Close()
	response, err := customGet(rawurl)
	if err != nil {
		log.Fatalf("Unable to open a connection to %s", rawurl)
	}
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK {
		log.Fatalf("Unexpected HTTP response code. Wanted 200 but got %d", response.StatusCode)
	}
	var progressBar *pb.ProgressBar
	contentLength, err := strconv.Atoi(response.Header.Get("Content-Length"))
	if err == nil {
		progressBar = pb.New(contentLength)
	} else {
		// Unknown Content-Length: fall back to an indeterminate bar.
		progressBar = pb.New(0)
	}
	defer progressBar.Finish()
	progressBar.ShowSpeed = true
	progressBar.SetRefreshRate(time.Millisecond * 1000)
	progressBar.SetUnits(pb.U_BYTES)
	progressBar.Start()
	writer := io.MultiWriter(destination, progressBar)
	// BUG FIX: copy, close, and rename errors were previously ignored, so a
	// truncated download could be renamed into place as if it succeeded.
	if _, err := io.Copy(writer, response.Body); err != nil {
		log.Fatalf("Download of %s failed: %v", rawurl, err)
	}
	// Close explicitly before renaming so all data is flushed; the deferred
	// Close on the already-closed file is harmless.
	if err := destination.Close(); err != nil {
		log.Fatalf("Unable to close the destination file: %s", tempDestinationPath)
	}
	if err := os.Rename(tempDestinationPath, destinationPath); err != nil {
		log.Fatalf("Unable to rename %s to %s: %v", tempDestinationPath, destinationPath, err)
	}
}
示例5: runClusterBackup
// runClusterBackup streams a backup of the cluster to stdout, or to the file
// named by the --file argument, reporting byte-count progress on stderr when
// stderr is attached to a terminal. Returns any error from connecting,
// creating the output file, or running the backup.
func runClusterBackup(args *docopt.Args) error {
	client, err := getClusterClient()
	if err != nil {
		return err
	}
	var bar *pb.ProgressBar
	var progress backup.ProgressBar
	// Only render a progress bar when stderr is a terminal; otherwise
	// progress stays nil and backup.Run receives no progress sink.
	if term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		progress = bar
	}
	// Backup data goes to stdout by default; --file redirects it to a file.
	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		dest = f
	}
	// Status messages go to stderr so they don't corrupt a stdout backup.
	fmt.Fprintln(os.Stderr, "Creating cluster backup...")
	if err := backup.Run(client, dest, progress); err != nil {
		return err
	}
	if bar != nil {
		bar.Finish()
	}
	fmt.Fprintln(os.Stderr, "Backup complete.")
	return nil
}
示例6: processSystemArchives
// processSystemArchives processes archives for given system, optionally
// showing extraction progress, and reports per-system statistics.
func (h *Harvester) processSystemArchives(s *system.System, archives []string) error {
	nb := len(archives)
	// A bar is shown only when output is enabled and debug logging is off.
	showBar := !s.Options.Quiet && !s.Options.Debug
	var bar *pb.ProgressBar
	if !s.Options.Quiet {
		fmt.Printf("[%s] Extracting %v archive(s)\n", s.Infos.Name, nb)
	}
	if showBar {
		bar = pb.StartNew(nb)
		bar.ShowCounters = true
		bar.ShowPercent = false
		bar.ShowTimeLeft = true
		bar.SetMaxWidth(80)
	}
	for _, archive := range archives {
		if showBar {
			bar.Increment()
		}
		if err := s.ProcessArchive(archive, h.Options.Output); err != nil {
			return err
		}
	}
	if showBar {
		bar.Finish()
		fmt.Printf("[%s] Processed %v files (skipped: %v)\n", s.Infos.Name, s.Processed, s.Skipped)
	}
	// NOTE(review): this summary prints even in quiet mode — confirm that is
	// intentional before changing it.
	fmt.Printf("[%s] Selected %v games\n", s.Infos.Name, len(s.Games))
	return nil
}
示例7: FetchHTTPFile
// FetchHTTPFile fetches url to destination dest, with or without a progress
// bar. The bar is shown only when progress is true and the response carries
// a positive Content-Length. Returns any error from creating the file,
// fetching the URL, or copying the body.
func FetchHTTPFile(url string, dest string, progress bool) (err error) {
	gologit.Debugf("Creating file: %s\n", dest)
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	var r io.Reader
	gologit.Debugf("Fetching url: %s\n", url)
	resp, err := http.Get(url)
	// BUG FIX: the error was previously never checked and resp.Body.Close()
	// was deferred unconditionally, panicking on a nil response whenever the
	// request failed.
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("Server return non-200 status: %v", resp.Status)
	}
	msgPrefix := fmt.Sprintf("%s: ", path.Base(dest))
	var bar *pb.ProgressBar
	i, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
	if i > 0 && progress {
		bar = pb.New(i).Prefix(msgPrefix).SetUnits(pb.U_BYTES)
		bar.ShowSpeed = true
		bar.RefreshRate = time.Millisecond * 700
		bar.ShowFinalTime = false
		bar.ShowTimeLeft = false
		bar.Start()
		defer bar.Finish()
		// Route the body through the bar so progress tracks bytes read.
		r = bar.NewProxyReader(resp.Body)
	} else {
		r = resp.Body
	}
	_, err = io.Copy(out, r)
	return err
}
示例8: Load
// Load populates the context from the on-disk database in three passes:
// teams, match outcomes (indexed by ID), and matches linked to their
// outcomes. A missing database file is not an error. When the package-level
// Verbose flag is set, progress bars and log messages are emitted.
func (ctx *Context) Load() error {
	var bar *pb.ProgressBar
	if Verbose {
		log.Println("loading database")
	}
	db, err := LoadDB(ctx.DatabaseName, *decompress)
	if os.IsNotExist(err) {
		// No database yet simply means there is nothing to load.
		log.Printf("database not found")
		return nil
	} else if err != nil {
		log.Printf("error loading database: %s", err)
		return err
	}
	if Verbose {
		log.Println("loading teams")
		bar = pb.StartNew(len(db.Teams))
	}
	for _, team := range db.Teams {
		ctx.AddTeam(team)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}
	if Verbose {
		log.Println("loading match history")
		bar = pb.StartNew(len(db.Outcomes))
	}
	// Index outcomes by ID so the match pass below can link each match to
	// its outcome.
	loadedOutcomes := map[string]*Outcome{}
	for _, outcome := range db.Outcomes {
		loadedOutcomes[outcome.ID] = outcome
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}
	if Verbose {
		bar = pb.StartNew(len(db.Matches))
	}
	for _, match := range db.Matches {
		outcome, ok := loadedOutcomes[match.OutcomeID]
		if !ok {
			// A match referencing an unknown outcome means the stored
			// history is internally inconsistent; treated as fatal.
			log.Panicf("corrupted history %q", match.ID)
		}
		ctx.AddMatch(match, outcome)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}
	return nil
}
示例9: main
// main reads a package list (from --file or stdin), asks gddoexp which
// packages are fast forks, and logs the results to the --output file. With
// --progress a bar is shown; without it, fast-fork package paths are also
// echoed to stdout.
func main() {
	clientID := flag.String("id", "", "Github client ID")
	clientSecret := flag.String("secret", "", "Github client secret")
	file := flag.String("file", "", "File containing the list of packages")
	output := flag.String("output", "gddofork.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	// flag.String/flag.Bool never return nil, so the previous `!= nil`
	// pointer checks were dead code and have been removed.
	var auth *gddoexp.GithubAuth
	if *clientID != "" || *clientSecret != "" {
		// Both credentials are required together.
		if *clientID == "" || *clientSecret == "" {
			fmt.Println("to enable Github authentication, you need to inform the id and secret")
			flag.PrintDefaults()
			return
		}
		auth = &gddoexp.GithubAuth{
			ID:     *clientID,
			Secret: *clientSecret,
		}
	}
	var pkgs []database.Package
	var err error
	if *file != "" {
		pkgs, err = readFromFile(*file)
	} else {
		pkgs, err = readFromStdin()
	}
	if err != nil {
		fmt.Println(err)
		return
	}
	// Append to the output file so repeated runs accumulate results.
	o, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer o.Close()
	log.SetOutput(o)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed", len(pkgs))
	var progressBar *pb.ProgressBar
	if *progress {
		progressBar = pb.StartNew(len(pkgs))
	}
	var cache int
	for response := range gddoexp.AreFastForkPackages(pkgs, auth) {
		if *progress {
			progressBar.Increment()
		}
		if response.Cache {
			cache++
		}
		if response.Error != nil {
			log.Println(response.Error)
		} else if response.FastFork {
			log.Printf("package “%s” is a fast fork\n", response.Path)
			// Without a bar, echo fast forks to stdout as well.
			if !*progress {
				fmt.Println(response.Path)
			}
		} else {
			log.Printf("package “%s” is not a fast fork\n", response.Path)
		}
	}
	if *progress {
		progressBar.Finish()
	}
	log.Println("Cache hits:", cache)
	log.Println("END")
}
示例10: syncHierarchyUp
// Synchronize a local directory hierarchy with Google Drive.
// localPath is the file or directory to start with, driveRoot is
// the directory into which the file/directory will be sent
func syncHierarchyUp(localPath string, driveRoot string, encrypt bool, trustTimes bool,
maxSymlinkDepth int) int {
if encrypt && key == nil {
key = decryptEncryptionKey()
}
fileMappings, nUploadErrors := compileUploadFileTree(localPath, driveRoot,
encrypt, trustTimes, maxSymlinkDepth)
if len(fileMappings) == 0 {
message("No files to be uploaded.")
return 0
}
nBytesToUpload := int64(0)
for _, info := range fileMappings {
if !info.LocalFileInfo.IsDir() {
nBytesToUpload += info.LocalFileInfo.Size()
}
}
// Given the list of files to sync, first find all of the directories and
// then either get or create a Drive folder for each one.
directoryMappingMap := make(map[string]localToRemoteFileMapping)
var directoryNames []string
for _, localfile := range fileMappings {
if localfile.LocalFileInfo.IsDir() {
directoryNames = append(directoryNames, localfile.DrivePath)
directoryMappingMap[localfile.DrivePath] = localfile
}
}
// Now sort the directories by name, which ensures that the parent of each
// directory has already been created if we need to create its children.
sort.Strings(directoryNames)
if len(directoryNames) > 0 {
// Actually create/update the directories.
var dirProgressBar *pb.ProgressBar
if !quiet {
dirProgressBar = pb.New(len(directoryNames))
dirProgressBar.Output = os.Stderr
dirProgressBar.Prefix("Directories: ")
dirProgressBar.Start()
}
// Sync each of the directories, which serves to create any missing ones.
for _, dirName := range directoryNames {
file := directoryMappingMap[dirName]
err := syncFileUp(file.LocalPath, file.LocalFileInfo, file.DrivePath,
encrypt, dirProgressBar)
if err != nil {
// Errors creating directories are basically unrecoverable,
// as they'll prevent us from later uploading any files in
// them.
printErrorAndExit(err)
}
}
if dirProgressBar != nil {
dirProgressBar.Finish()
}
}
var fileProgressBar *pb.ProgressBar
if !quiet {
fileProgressBar = pb.New64(nBytesToUpload).SetUnits(pb.U_BYTES)
fileProgressBar.Output = os.Stderr
fileProgressBar.Prefix("Files: ")
fileProgressBar.Start()
}
// Sort the files by size, small to large.
sort.Sort(localToRemoteBySize(fileMappings))
// The two indices uploadFrontIndex and uploadBackIndex point to the
// range of elements in the fileMappings array that haven't yet been
// uploaded.
uploadFrontIndex := 0
uploadBackIndex := len(fileMappings) - 1
// First, upload any large files that will use the resumable upload
// protocol using a single thread; more threads here doesn't generally
// help improve bandwidth utilizaiton and seems to make rate limit
// errors from the Drive API more frequent...
for ; uploadBackIndex >= 0; uploadBackIndex-- {
if fileMappings[uploadBackIndex].LocalFileInfo.Size() < resumableUploadMinSize {
break
}
fm := fileMappings[uploadBackIndex]
if fm.LocalFileInfo.IsDir() {
continue
}
if err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt,
fileProgressBar); err != nil {
addErrorAndPrintMessage(&nUploadErrors, fm.LocalPath, err)
}
//.........這裏部分代碼省略.........
示例11: runImport
//.........這裏部分代碼省略.........
}
defer f.Close()
defer os.Remove(f.Name())
if _, err := io.Copy(f, tr); err != nil {
return fmt.Errorf("error reading db dump: %s", err)
}
if _, err := f.Seek(0, os.SEEK_SET); err != nil {
return fmt.Errorf("error seeking db tempfile: %s", err)
}
mysqlDump = f
uploadSize += header.Size
}
}
if app == nil {
return fmt.Errorf("missing app.json")
}
oldName := app.Name
if name := args.String["--name"]; name != "" {
app.Name = name
}
if err := client.CreateApp(app); err != nil {
return fmt.Errorf("error creating app: %s", err)
}
var bar *pb.ProgressBar
if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
bar = pb.New(0)
bar.SetUnits(pb.U_BYTES)
bar.Total = uploadSize
bar.ShowSpeed = true
bar.Output = os.Stderr
bar.Start()
defer bar.Finish()
}
if pgDump != nil && release != nil {
res, err := client.ProvisionResource(&ct.ResourceReq{
ProviderID: "postgres",
Apps: []string{app.ID},
})
if err != nil {
return fmt.Errorf("error provisioning postgres resource: %s", err)
}
numResources++
if release.Env == nil {
release.Env = make(map[string]string, len(res.Env))
}
for k, v := range res.Env {
release.Env[k] = v
}
config, err := getPgRunConfig(client, app.ID, release)
if err != nil {
return fmt.Errorf("error getting postgres config: %s", err)
}
config.Stdin = pgDump
if bar != nil {
config.Stdin = bar.NewProxyReader(config.Stdin)
}
config.Exit = false
if err := pgRestore(client, config, jobs); err != nil {
return fmt.Errorf("error restoring postgres database: %s", err)
}
}
示例12: getMetadataChanges
// getMetadataChanges pages through the Drive changes feed starting just
// after startChangeId, sending each page's changes on changeChan and any
// unrecoverable error on errorChan. changeChan is closed once all changes
// have been delivered. Intended to run as a goroutine feeding a cache
// updater on the other end of changeChan.
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64,
	changeChan chan<- []*drive.Change, errorChan chan<- error) {
	var about *drive.About
	var err error
	// Get the Drive About information in order to figure out how many
	// changes we need to download to get up to date.
	for try := 0; ; try++ {
		about, err = svc.About.Get().Do()
		if err == nil {
			break
		} else {
			err = gd.tryToHandleDriveAPIError(err, try)
		}
		if err != nil {
			errorChan <- err
			return
		}
	}
	// Don't clutter the output with a progress bar unless it looks like
	// downloading changes may take a while.
	// TODO: consider using timer.AfterFunc to put up the progress bar if
	// we're not done after a few seconds? It's not clear if this is worth
	// the trouble.
	var bar *pb.ProgressBar
	numChanges := about.LargestChangeId - startChangeId
	if numChanges > 1000 && !gd.quiet {
		bar = pb.New64(numChanges)
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Updating metadata cache: ")
		bar.Start()
	}
	pageToken := ""
	try := 0
	// Keep asking Drive for more changes until we get through them all.
	for {
		// Only ask for the fields in the drive.Change structure that we
		// actually to be filled in to save some bandwidth...
		fields := []googleapi.Field{"nextPageToken",
			"items/id", "items/fileId", "items/deleted",
			"items/file/id", "items/file/parents", "items/file/title",
			"items/file/fileSize", "items/file/mimeType", "items/file/properties",
			"items/file/modifiedDate", "items/file/md5Checksum", "items/file/labels"}
		q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
		// A negative startChangeId means "from the beginning".
		if startChangeId >= 0 {
			q = q.StartChangeId(startChangeId + 1)
		}
		if pageToken != "" {
			q = q.PageToken(pageToken)
		}
		r, err := q.Do()
		if err != nil {
			// Retry with backoff handling; give up only if the handler says so.
			err = gd.tryToHandleDriveAPIError(err, try)
			if err != nil {
				errorChan <- err
				return
			}
			try++
			continue
		}
		// Success. Reset the try counter in case we had errors leading up
		// to this.
		try = 0
		if len(r.Items) > 0 {
			// Send the changes along to the goroutine that's updating the
			// local cache.
			changeChan <- r.Items
			if bar != nil {
				// Progress is measured by the ID of the last change seen.
				bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
			}
		}
		pageToken = string(r.NextPageToken)
		if pageToken == "" {
			break
		}
	}
	// Signal that no more changes are coming.
	close(changeChan)
	if bar != nil {
		bar.Finish()
	}
	gd.debug("Done updating metadata from Drive")
}
示例13: finishProgressBar
// finishProgressBar jumps the given progress bar to 100% and stops it.
// The parameter is named bar rather than pb: the original name shadowed the
// pb package inside the function body.
func finishProgressBar(bar *pb.ProgressBar) {
	bar.Set64(bar.Total)
	bar.Finish()
}
示例14: PostMultipartP
// PostMultipartP posts a multipart message in the MIME internet format with
// a callback function receiving a string stating the upload progress.
// files maps form-field names to file contents, params become plain form
// fields, and the JSON response body is unmarshaled into out when non-nil.
func (c *Client) PostMultipartP(path string, files map[string][]byte, params Params, out interface{}, callback func(s string)) error {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for name, source := range files {
		part, err := writer.CreateFormFile(name, "source.tgz")
		if err != nil {
			return err
		}
		_, err = io.Copy(part, bytes.NewReader(source))
		if err != nil {
			return err
		}
	}
	for name, value := range params {
		// BUG FIX: WriteField errors were previously ignored, which could
		// silently produce a truncated multipart body.
		if err := writer.WriteField(name, value); err != nil {
			return err
		}
	}
	err := writer.Close()
	if err != nil {
		return err
	}
	var bodyReader io.Reader
	bodyReader = body
	var bar *pb.ProgressBar
	if callback != nil {
		// Wrap the body so reads by the HTTP client drive the callback.
		bar = pb.New(body.Len()).SetUnits(pb.U_BYTES)
		bar.NotPrint = true
		bar.ShowBar = false
		bar.Callback = callback
		bar.Start()
		bodyReader = bar.NewProxyReader(body)
	}
	req, err := c.request("POST", path, bodyReader)
	if err != nil {
		return err
	}
	req.SetBasicAuth("convox", string(c.Password))
	req.Header.Set("Content-Type", writer.FormDataContentType())
	res, err := c.client().Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if err := responseError(res); err != nil {
		return err
	}
	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}
	if out != nil {
		err = json.Unmarshal(data, out)
		if err != nil {
			return err
		}
	}
	if callback != nil {
		bar.Finish()
	}
	return nil
}
示例15: importCSV
// importCSV imports CSV rows from filename (or stdin when filename is empty)
// into schema.tableName, showing a progress bar for file input. skipHeader
// skips the first row, fields overrides the column list, delimiter's first
// rune separates fields, and ignoreErrors reports bad rows to stderr instead
// of aborting. Returns an error with the failing line number on failure.
func importCSV(filename string, connStr string, schema string, tableName string, ignoreErrors bool, skipHeader bool, fields string, delimiter string) error {
	db, err := connect(connStr, schema)
	if err != nil {
		return err
	}
	defer db.Close()
	var reader *csv.Reader
	var bar *pb.ProgressBar
	if filename != "" {
		file, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer file.Close()
		// TeeReader feeds the bar as bytes are consumed from the file.
		bar = NewProgressBar(file)
		reader = csv.NewReader(io.TeeReader(file, bar))
	} else {
		reader = csv.NewReader(os.Stdin)
	}
	// NOTE(review): an empty delimiter yields utf8.RuneError here — confirm
	// callers always pass at least one character.
	reader.Comma, _ = utf8.DecodeRuneInString(delimiter)
	reader.LazyQuotes = true
	columns, err := parseColumns(reader, skipHeader, fields)
	if err != nil {
		return err
	}
	reader.FieldsPerRecord = len(columns)
	i, err := NewCSVImport(db, schema, tableName, columns)
	if err != nil {
		return err
	}
	var success, failed int
	if filename != "" {
		bar.Start()
		err, success, failed = copyCSVRows(i, reader, ignoreErrors, delimiter, columns)
		bar.Finish()
	} else {
		err, success, failed = copyCSVRows(i, reader, ignoreErrors, delimiter, columns)
	}
	if err != nil {
		// Report the 1-based input line that failed; account for the header
		// row only when it was not skipped.
		lineNumber := success + failed
		if !skipHeader {
			lineNumber++
		}
		return fmt.Errorf("line %d: %s", lineNumber, err)
	}
	fmt.Printf("%d rows imported into %s.%s\n", success, schema, tableName)
	if ignoreErrors && failed > 0 {
		fmt.Printf("%d rows could not be imported into %s.%s and have been written to stderr.\n", failed, schema, tableName)
	}
	return i.Commit()
}