This article collects typical code examples of the WaitGroup.Done method from Go's sync package. If you are wondering how to use Golang's WaitGroup.Done, how it is called in practice, or what real-world examples look like, the curated examples below may help. You can also explore further usage examples of sync.WaitGroup, the type this method belongs to.
Below are 15 code examples of the WaitGroup.Done method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
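As a quick orientation before the examples, here is a minimal, self-contained sketch of the Add/Done/Wait pattern that all of the examples below build on: register each goroutine with Add before starting it, signal completion with a deferred Done, and block in Wait until every goroutine has finished. This is an illustrative sketch using only the standard library, not one of the collected examples.

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    results := make([]int, 5)
    for i := 0; i < 5; i++ {
        wg.Add(1) // register one unit of work before the goroutine starts
        go func(i int) {
            defer wg.Done() // pair every Add with a deferred Done
            results[i] = i * i // each goroutine writes its own index, so no extra locking is needed
        }(i)
    }
    wg.Wait() // block until every goroutine has called Done
    fmt.Println(results)
}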
Example 1: createRunningPod
func createRunningPod(wg *sync.WaitGroup, c *client.Client, name, ns, image string, labels map[string]string) {
    defer GinkgoRecover()
    defer wg.Done()
    pod := &api.Pod{
        TypeMeta: unversioned.TypeMeta{
            Kind: "Pod",
        },
        ObjectMeta: api.ObjectMeta{
            Name:   name,
            Labels: labels,
        },
        Spec: api.PodSpec{
            Containers: []api.Container{
                {
                    Name:  name,
                    Image: image,
                },
            },
            DNSPolicy: api.DNSDefault,
        },
    }
    _, err := c.Pods(ns).Create(pod)
    expectNoError(err)
    expectNoError(waitForPodRunningInNamespace(c, name, ns))
}
Example 2: Run
// Run runs the query concurrently, and returns the results.
func (q *Query) Run() []interface{} {
    rand.Seed(time.Now().UnixNano())
    var w sync.WaitGroup
    var l sync.Mutex
    places := make([]interface{}, len(q.Journey))
    for i, r := range q.Journey {
        w.Add(1)
        go func(types string, i int) {
            defer w.Done()
            response, err := q.find(types)
            if err != nil {
                log.Println("Failed to find places:", err)
                return
            }
            if len(response.Results) == 0 {
                log.Println("No places found for", types)
                return
            }
            for _, result := range response.Results {
                for _, photo := range result.Photos {
                    photo.URL = "https://maps.googleapis.com/maps/api/place/photo?" +
                        "maxwidth=1000&photoreference=" + photo.PhotoRef + "&key=" + APIKey
                }
            }
            randI := rand.Intn(len(response.Results))
            l.Lock()
            places[i] = response.Results[randI]
            l.Unlock()
        }(r, i)
    }
    w.Wait() // wait for everything to finish
    return places
}
Example 3: Work
// Work turns on the worker
func (w *Worker) Work(wg *sync.WaitGroup) {
    defer wg.Done()
    for {
        select {
        // safely stop the worker
        case <-w.stop:
            return
        case task := <-w.reader:
            tasks, err := w.processFn(task)
            if err != nil {
                if task.Retries < MaxRetries-1 {
                    task.Retries++
                    w.writer <- task
                    continue
                }
            }
            // submit any new tasks returned by the old one
            if tasks != nil {
                for _, t := range tasks {
                    w.writer <- t
                }
            }
        }
    }
}
Example 4: Serve
func (hp *httpProxy) Serve(wg *sync.WaitGroup) {
    defer func() {
        wg.Done()
    }()
    ln, err := net.Listen("tcp", hp.addr)
    if err != nil {
        fmt.Println("listen http failed:", err)
        return
    }
    host, _, _ := net.SplitHostPort(hp.addr)
    var pacURL string
    if host == "" || host == "0.0.0.0" {
        pacURL = fmt.Sprintf("http://<hostip>:%s/pac", hp.port)
    } else if hp.addrInPAC == "" {
        pacURL = fmt.Sprintf("http://%s/pac", hp.addr)
    } else {
        pacURL = fmt.Sprintf("http://%s/pac", hp.addrInPAC)
    }
    info.Printf("listen http %s, PAC url %s\n", hp.addr, pacURL)
    for {
        conn, err := ln.Accept()
        if err != nil {
            errl.Printf("http proxy(%s) accept %v\n", ln.Addr(), err)
            if isErrTooManyOpenFd(err) {
                connPool.CloseAll()
            }
            time.Sleep(time.Millisecond)
            continue
        }
        c := newClientConn(conn, hp)
        go c.serve()
    }
}
Example 5: runPipeline
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
    ctx, cancel := context.WithCancel(context.Background())
    a.lock.Lock()
    a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
    a.lock.Unlock()
    var loopErr error
    //TODO this gets really weird with branching... we need to figure out what that looks like.
    mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
    var lock sync.Mutex
    var wg sync.WaitGroup
    for _, inputRepo := range pipelineInfo.InputRepo {
        inputRepo := inputRepo
        wg.Add(1)
        go func() {
            defer wg.Done()
            var lastCommit *pfs.Commit
            listCommitRequest := &pfs.ListCommitRequest{
                Repo:       inputRepo,
                CommitType: pfs.CommitType_COMMIT_TYPE_READ,
                From:       lastCommit,
                Block:      true,
            }
            commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
            if err != nil && loopErr == nil {
                loopErr = err
                return
            }
            for _, commitInfo := range commitInfos.CommitInfo {
                lock.Lock()
                mostRecentCommit[*inputRepo] = commitInfo.Commit
                var commits []*pfs.Commit
                for _, commit := range mostRecentCommit {
                    commits = append(commits, commit)
                }
                lock.Unlock()
                if len(commits) < len(pipelineInfo.InputRepo) {
                    // we don't yet have a commit for every input repo so there's no way to run the job
                    continue
                }
                outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
                if err != nil && loopErr == nil {
                    loopErr = err
                    return
                }
                _, err = a.jobAPIClient.CreateJob(
                    ctx,
                    &pps.CreateJobRequest{
                        Spec: &pps.CreateJobRequest_Pipeline{
                            Pipeline: pipelineInfo.Pipeline,
                        },
                        InputCommit:  []*pfs.Commit{commitInfo.Commit},
                        OutputParent: outParentCommit,
                    },
                )
            }
        }()
    }
    wg.Wait()
    return loopErr
}
Example 6: checkPutResponse
func (s *managedStorageSuite) checkPutResponse(c *gc.C, index int, wg *sync.WaitGroup,
    requestId int64, sha384Hash string, blob []byte) {
    // After a random time, respond to a previously queued put request and check the result.
    go func() {
        delay := rand.Intn(3)
        time.Sleep(time.Duration(delay) * time.Millisecond)
        expectError := index == 2
        if expectError {
            sha384Hash = "bad"
        }
        response := blobstore.NewPutResponse(requestId, sha384Hash)
        err := s.managedStorage.ProofOfAccessResponse(response)
        if expectError {
            c.Check(err, gc.NotNil)
        } else {
            c.Check(err, gc.IsNil)
            if err == nil {
                r, length, err := s.managedStorage.GetForEnvironment("env", fmt.Sprintf("path/to/blob%d", index))
                c.Check(err, gc.IsNil)
                if err == nil {
                    data, err := ioutil.ReadAll(r)
                    c.Check(err, gc.IsNil)
                    c.Check(data, gc.DeepEquals, blob)
                    c.Check(int(length), gc.DeepEquals, len(blob))
                }
            }
        }
        wg.Done()
    }()
}
Example 7: GetData
func (cd *CheckDocker) GetData() error {
    errChan := make(chan error)
    var err error
    var wg sync.WaitGroup
    wg.Add(2)
    go func(cd *CheckDocker, errChan chan error) {
        defer wg.Done()
        cd.dockerInfoData, err = cd.dockerclient.Info()
        if err != nil {
            errChan <- err
        }
    }(cd, errChan)
    go func(cd *CheckDocker, errChan chan error) {
        defer wg.Done()
        cd.dockerContainersData, err = cd.dockerclient.ListContainers(dockerlib.ListContainersOptions{})
        if err != nil {
            errChan <- err
        }
    }(cd, errChan)
    go func() {
        wg.Wait()
        close(errChan)
    }()
    err = <-errChan
    return err
}
Example 8: Run
func (n *network) Run(ctx context.Context) {
    wg := sync.WaitGroup{}
    log.Info("Watching for new subnet leases")
    evts := make(chan []subnet.Event)
    wg.Add(1)
    go func() {
        subnet.WatchLeases(ctx, n.sm, n.name, n.lease, evts)
        wg.Done()
    }()
    n.rl = make([]netlink.Route, 0, 10)
    wg.Add(1)
    go func() {
        n.routeCheck(ctx)
        wg.Done()
    }()
    defer wg.Wait()
    for {
        select {
        case evtBatch := <-evts:
            n.handleSubnetEvents(evtBatch)
        case <-ctx.Done():
            return
        }
    }
}
Example 9: ReadWrite
// ReadWrite does read and write in parallel.
// qRead is num goroutines for reading.
// qWrite is num goroutines for writing.
// Assume n divisible by (qRead + qWrite).
func ReadWrite(n, qRead, qWrite int, newFunc func() HashMap, b *testing.B) {
    q := qRead + qWrite
    check(n, q)
    work := intPairArray(n)
    b.StartTimer()
    for i := 0; i < b.N; i++ { // N reps.
        h := newFunc()
        var wg sync.WaitGroup
        for j := 0; j < qRead; j++ { // Read goroutines.
            wg.Add(1)
            go func(j int) {
                defer wg.Done()
                start, end := workRange(n, q, j)
                for k := start; k < end; k++ {
                    h.Get(work[k].Key)
                }
            }(j)
        }
        for j := qRead; j < q; j++ { // Write goroutines.
            wg.Add(1)
            go func(j int) {
                defer wg.Done()
                start, end := workRange(n, q, j)
                for k := start; k < end; k++ {
                    h.Put(work[k].Key, work[k].Val)
                }
            }(j)
        }
        wg.Wait()
    }
}
Example 10: Gather
// Gathers data for all servers.
func (h *HttpJson) Gather(acc telegraf.Accumulator) error {
    var wg sync.WaitGroup
    errorChannel := make(chan error, len(h.Servers))
    for _, server := range h.Servers {
        wg.Add(1)
        go func(server string) {
            defer wg.Done()
            if err := h.gatherServer(acc, server); err != nil {
                errorChannel <- err
            }
        }(server)
    }
    wg.Wait()
    close(errorChannel)
    // Get all errors and return them as one giant error
    errorStrings := []string{}
    for err := range errorChannel {
        errorStrings = append(errorStrings, err.Error())
    }
    if len(errorStrings) == 0 {
        return nil
    }
    return errors.New(strings.Join(errorStrings, "\n"))
}
Example 11: TestFatalRxError
func TestFatalRxError(t *testing.T) {
    t.Parallel()
    conn := mustConnect(t, *defaultConnConfig)
    defer closeConn(t, conn)
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        var n int32
        var s string
        err := conn.QueryRow("select 1::int4, pg_sleep(10)::varchar").Scan(&n, &s)
        if err, ok := err.(pgx.PgError); !ok || err.Severity != "FATAL" {
            t.Fatalf("Expected QueryRow Scan to return fatal PgError, but instead received %v", err)
        }
    }()
    otherConn, err := pgx.Connect(*defaultConnConfig)
    if err != nil {
        t.Fatalf("Unable to establish connection: %v", err)
    }
    defer otherConn.Close()
    if _, err := otherConn.Exec("select pg_terminate_backend($1)", conn.Pid); err != nil {
        t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
    }
    wg.Wait()
    if conn.IsAlive() {
        t.Fatal("Connection should not be live but was")
    }
}
Example 12: getTribValuesFromHashIds
func (ts *tribServer) getTribValuesFromHashIds(user string, hashIds []string) ([]string, error) {
    var err error
    tribValues := make([]string, len(hashIds))
    returnValues := make([]string, len(hashIds))
    var wg sync.WaitGroup
    wg.Add(len(hashIds))
    for i := range hashIds {
        go func(i int) {
            defer wg.Done()
            tribValues[i], err = ts.Libstore.Get(makeTribId(user, hashIds[i]))
            if err != nil {
                if err != libstore.ErrorKeyNotFound { // ignore cross inconsistency
                    panic(err)
                }
            }
        }(i)
    }
    wg.Wait()
    j := 0
    for i := range tribValues {
        if tribValues[i] != "" {
            returnValues[j] = tribValues[i]
            j++
        }
    }
    return returnValues[0:j], nil
}
Example 13: cWriteShards
// cWriteShards writes shards concurrently
func cWriteShards(out []io.Writer, in [][]byte) error {
    if len(out) != len(in) {
        panic("internal error: in and out size does not match")
    }
    var errs = make(chan error, len(out))
    var wg sync.WaitGroup
    wg.Add(len(out))
    for i := range in {
        go func(i int) {
            defer wg.Done()
            if out[i] == nil {
                errs <- nil
                return
            }
            n, err := out[i].Write(in[i])
            if err != nil {
                errs <- StreamWriteError{Err: err, Stream: i}
                return
            }
            if n != len(in[i]) {
                errs <- StreamWriteError{Err: io.ErrShortWrite, Stream: i}
            }
        }(i)
    }
    wg.Wait()
    close(errs)
    for err := range errs {
        if err != nil {
            return err
        }
    }
    return nil
}
Example 14: Serve
// Serve serves SFTP connections until the streams stop or the SFTP subsystem
// is stopped.
func (svr *Server) Serve() error {
    var wg sync.WaitGroup
    wg.Add(sftpServerWorkerCount)
    for i := 0; i < sftpServerWorkerCount; i++ {
        go func() {
            defer wg.Done()
            if err := svr.sftpServerWorker(); err != nil {
                svr.rwc.Close() // shuts down recvPacket
            }
        }()
    }
    var err error
    for {
        var pktType uint8
        var pktBytes []byte
        pktType, pktBytes, err = recvPacket(svr.rwc)
        if err != nil {
            break
        }
        svr.pktChan <- rxPacket{fxp(pktType), pktBytes}
    }
    close(svr.pktChan) // shuts down sftpServerWorkers
    wg.Wait()          // wait for all workers to exit
    // close any still-open files
    for handle, file := range svr.openFiles {
        fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name())
        file.Close()
    }
    return err // error from recvPacket
}
Example 15: TestStoreRangeUpReplicate
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
    defer leaktest.AfterTest(t)
    mtc := startMultiTestContext(t, 3)
    defer mtc.Stop()
    // Initialize the gossip network.
    var wg sync.WaitGroup
    wg.Add(len(mtc.stores))
    key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
    mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
    for _, s := range mtc.stores {
        s.GossipStore()
    }
    wg.Wait()
    // Once we know our peers, trigger a scan.
    mtc.stores[0].ForceReplicationScanAndProcess()
    // The range should become available on every node.
    if err := util.IsTrueWithin(func() bool {
        for _, s := range mtc.stores {
            r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
            if r == nil {
                return false
            }
        }
        return true
    }, replicationTimeout); err != nil {
        t.Fatal(err)
    }
}