

Golang Value.Load Method Code Examples

This article collects typical code examples of the Value.Load method from Go's sync/atomic package. If you have been wondering what Value.Load does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples of sync/atomic.Value, the type this method belongs to.


Fifteen code examples of the Value.Load method are shown below, sorted by popularity by default.
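
Before the project-sourced examples, here is a minimal, self-contained sketch of the basic Store/Load pattern (illustrative only; not taken from any of the projects below). Load returns an interface{} value, which is nil until the first Store, and every Store to a given Value must use the same concrete type:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Value
	v.Store("hello") // every Store to v must use the same concrete type

	// Load returns interface{} (nil before the first Store), so guard
	// the type assertion when a prior Store is not guaranteed.
	if s, ok := v.Load().(string); ok {
		fmt.Println(s) // prints "hello"
	}
}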

Example 1: TestReportStats

func TestReportStats(t *testing.T) {
	md, nr := startWithMockReporter()
	defer md.Stop()
	var remoteAddr atomic.Value

	// start server with byte counting
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if !assert.NoError(t, err, "Listen should not fail") {
		return
	}

	// large enough interval so it will only report stats in Close()
	ml := md.Listener(l, 10*time.Second)
	s := http.Server{
		Handler: http.NotFoundHandler(),
		ConnState: func(c net.Conn, s http.ConnState) {
			if s == http.StateClosed {
				remoteAddr.Store(c.RemoteAddr().String())
			}
		},
	}
	go func() { _ = s.Serve(ml) }()

	time.Sleep(100 * time.Millisecond)
	// start client with byte counting
	c := http.Client{
		Transport: &http.Transport{
			// carefully chosen interval to report another once before Close()
			Dial: md.Dialer(net.Dial, 160*time.Millisecond),
		},
	}
	req, _ := http.NewRequest("GET", "http://"+l.Addr().String(), nil)
	resp, err := c.Do(req)
	if !assert.NoError(t, err, "GET should not fail") {
		return
	}
	assert.Equal(t, 404, resp.StatusCode)

	// Close without reading from body, to force server to close connection
	_ = resp.Body.Close()
	time.Sleep(100 * time.Millisecond)
	nr.Lock()
	defer nr.Unlock()
	t.Logf("Traffic entries: %+v", nr.traffic)
	if assert.Equal(t, 2, len(nr.traffic)) {
		ct := nr.traffic[l.Addr().String()]
		st := nr.traffic[remoteAddr.Load().(string)]

		if assert.NotNil(t, ct) {
			assert.Equal(t, 0, int(ct.MinOut), "client stats should only report increased byte count")
			assert.Equal(t, 0, int(ct.MinIn), "client stats should only report increased byte count")
			assert.Equal(t, 96, int(ct.MaxOut), "client stats should only report increased byte count")
			assert.Equal(t, 176, int(ct.MaxIn), "client stats should only report increased byte count")
			assert.Equal(t, 96, int(ct.TotalOut), "client stats should only report increased byte count")
			assert.Equal(t, 176, int(ct.TotalIn), "client stats should only report increased byte count")
		}

		if assert.NotNil(t, st) {
			assert.Equal(t, ct.TotalOut, st.TotalIn, "should report server stats with bytes in")
			assert.Equal(t, ct.TotalIn, st.TotalOut, "should report server stats with bytes out")
		}
	}
}
Author: getlantern, Project: measured, Lines: 60, Source: measured_test.go

Example 2: LeaderReqFn

func LeaderReqFn(name, port string) ReqFn {
	events := make(chan *discoverd.Event)
	if _, err := discoverd.NewService(name).Watch(events); err != nil {
		log.Fatalf("error creating %s cache: %s", name, err)
	}
	var leader atomic.Value // addr string
	leader.Store("")
	go func() {
		for e := range events {
			if e.Kind != discoverd.EventKindLeader || e.Instance == nil {
				continue
			}
			leader.Store(e.Instance.Addr)
		}
	}()
	return func() (*http.Request, error) {
		addr := leader.Load().(string)
		if addr == "" {
			return nil, errors.New("no leader")
		}
		if port != "" {
			host, _, _ := net.SplitHostPort(addr)
			addr = net.JoinHostPort(host, port)
		}
		return http.NewRequest("GET", fmt.Sprintf("http://%s%s", addr, status.Path), nil)
	}
}
Author: ably-forks, Project: flynn, Lines: 27, Source: status.go

Example 3: TailStagingLogs

func (cmd *Start) TailStagingLogs(app models.Application, stopChan chan bool, startWait, doneWait *sync.WaitGroup) {
	var connectionStatus atomic.Value
	connectionStatus.Store(NoConnection)

	onConnect := func() {
		if connectionStatus.Load() != StoppedTrying {
			connectionStatus.Store(ConnectionWasEstablished)
			startWait.Done()
		}
	}

	timer := time.NewTimer(cmd.LogServerConnectionTimeout)

	c := make(chan logs.Loggable)
	e := make(chan error)

	defer doneWait.Done()

	go cmd.logRepo.TailLogsFor(app.GUID, onConnect, c, e)

	for {
		select {
		case <-timer.C:
			if connectionStatus.Load() == NoConnection {
				connectionStatus.Store(StoppedTrying)
				cmd.ui.Warn("timeout connecting to log server, no log will be shown")
				startWait.Done()
				return
			}
		case msg, ok := <-c:
			if !ok {
				return
			} else if msg.GetSourceName() == LogMessageTypeStaging {
				cmd.ui.Say(msg.ToSimpleLog())
			}

		case err, ok := <-e:
			if ok {
				if connectionStatus.Load() != ConnectionWasClosed {
					cmd.ui.Warn(T("Warning: error tailing logs"))
					cmd.ui.Say("%s", err)
					if connectionStatus.Load() == NoConnection {
						startWait.Done()
					}
					return
				}
			}

		case <-stopChan:
			if connectionStatus.Load() == ConnectionWasEstablished {
				connectionStatus.Store(ConnectionWasClosed)
				cmd.logRepo.Close()
			} else {
				return
			}
		}
	}
}
Author: Reejoshi, Project: cli, Lines: 58, Source: start.go

Example 4: TestStoreScanInconsistentResolvesIntents

// TestStoreScanInconsistentResolvesIntents lays down 10 intents,
// commits the txn without resolving intents, then does repeated
// inconsistent reads until the data shows up, showing that the
// inconsistent reads are triggering intent resolution.
func TestStoreScanInconsistentResolvesIntents(t *testing.T) {
	defer leaktest.AfterTest(t)
	// This test relies on having a committed Txn record and open intents on
	// the same Range. This only works with auto-gc turned off; alternatively
	// the test could move to splitting its underlying Range.
	defer withoutTxnAutoGC()()
	var intercept atomic.Value
	intercept.Store(true)
	TestingCommandFilter = func(args proto.Request) error {
		if _, ok := args.(*proto.ResolveIntentRequest); ok && intercept.Load().(bool) {
			return util.Errorf("error on purpose")
		}
		return nil
	}
	store, _, stopper := createTestStore(t)
	defer func() { TestingCommandFilter = nil }()
	defer stopper.Stop()

	// Lay down 10 intents to scan over.
	txn := newTransaction("test", proto.Key("foo"), 1, proto.SERIALIZABLE, store.ctx.Clock)
	keys := []proto.Key{}
	for j := 0; j < 10; j++ {
		key := proto.Key(fmt.Sprintf("key%02d", j))
		keys = append(keys, key)
		args := putArgs(key, []byte(fmt.Sprintf("value%02d", j)), 1, store.StoreID())
		args.Txn = txn
		if _, err := store.ExecuteCmd(context.Background(), &args); err != nil {
			t.Fatal(err)
		}
	}

	// Now, commit txn without resolving intents. If we hadn't disabled auto-gc
	// of Txn entries in this test, the Txn entry would be removed and later
	// attempts to resolve the intents would fail.
	etArgs := endTxnArgs(txn, true, 1, store.StoreID())
	etArgs.Timestamp = txn.Timestamp
	if _, err := store.ExecuteCmd(context.Background(), &etArgs); err != nil {
		t.Fatal(err)
	}

	intercept.Store(false) // allow async intent resolution

	// Scan the range repeatedly until we've verified count.
	sArgs := scanArgs(keys[0], keys[9].Next(), 1, store.StoreID())
	sArgs.ReadConsistency = proto.INCONSISTENT
	util.SucceedsWithin(t, time.Second, func() error {
		if reply, err := store.ExecuteCmd(context.Background(), &sArgs); err != nil {
			return err
		} else if sReply := reply.(*proto.ScanResponse); len(sReply.Rows) != 10 {
			return util.Errorf("could not read rows as expected")
		}
		return nil
	})
}
Author: knorwood, Project: cockroach, Lines: 58, Source: store_test.go

Example 5: DesiredLRPs

func (db *ETCDDB) DesiredLRPs(logger lager.Logger, filter models.DesiredLRPFilter) (*models.DesiredLRPs, *models.Error) {
	root, bbsErr := db.fetchRecursiveRaw(logger, DesiredLRPSchemaRoot)
	if bbsErr.Equal(models.ErrResourceNotFound) {
		return &models.DesiredLRPs{}, nil
	}
	if bbsErr != nil {
		return nil, bbsErr
	}
	if root.Nodes.Len() == 0 {
		return &models.DesiredLRPs{}, nil
	}

	desiredLRPs := models.DesiredLRPs{}

	lrpsLock := sync.Mutex{}
	var workErr atomic.Value
	works := []func(){}

	for _, node := range root.Nodes {
		node := node

		works = append(works, func() {
			var lrp models.DesiredLRP
			deserializeErr := models.FromJSON([]byte(node.Value), &lrp)
			if deserializeErr != nil {
				logger.Error("failed-parsing-desired-lrp", deserializeErr)
				workErr.Store(fmt.Errorf("cannot parse lrp JSON for key %s: %s", node.Key, deserializeErr.Error()))
				return
			}

			if filter.Domain == "" || lrp.GetDomain() == filter.Domain {
				lrpsLock.Lock()
				desiredLRPs.DesiredLrps = append(desiredLRPs.DesiredLrps, &lrp)
				lrpsLock.Unlock()
			}
		})
	}

	throttler, err := workpool.NewThrottler(maxDesiredLRPGetterWorkPoolSize, works)
	if err != nil {
		logger.Error("failed-constructing-throttler", err, lager.Data{"max-workers": maxDesiredLRPGetterWorkPoolSize, "num-works": len(works)})
		return &models.DesiredLRPs{}, models.ErrUnknownError
	}

	logger.Debug("performing-deserialization-work")
	throttler.Work()
	if err, ok := workErr.Load().(error); ok {
		logger.Error("failed-performing-deserialization-work", err)
		return &models.DesiredLRPs{}, models.ErrUnknownError
	}
	logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num-desired-lrps": len(desiredLRPs.GetDesiredLrps())})

	return &desiredLRPs, nil
}
Author: Gerg, Project: bbs, Lines: 54, Source: desired_lrp_db.go

Example 6: TestLeaseNotUsedAfterRestart

// Test that leases held before a restart are not used after the restart.
// See replica.mu.minLeaseProposedTS for the reasons why this isn't allowed.
func TestLeaseNotUsedAfterRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()
	sc := storage.TestStoreConfig(nil)
	var leaseAcquisitionTrap atomic.Value
	// Disable the split queue so that no ranges are split. This makes it easy
	// below to trap any lease request and infer that it refers to the range we're
	// interested in.
	sc.TestingKnobs.DisableSplitQueue = true
	sc.TestingKnobs.LeaseRequestEvent = func(ts hlc.Timestamp) {
		val := leaseAcquisitionTrap.Load()
		if val == nil {
			return
		}
		trapCallback := val.(func(ts hlc.Timestamp))
		if trapCallback != nil {
			trapCallback(ts)
		}
	}
	mtc := &multiTestContext{storeConfig: &sc}
	mtc.Start(t, 1)
	defer mtc.Stop()

	// Send a read, to acquire a lease.
	getArgs := getArgs([]byte("a"))
	if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
		t.Fatal(err)
	}

	// Restart the mtc. Before we do that, we're installing a callback used to
	// assert that a new lease has been requested. The callback is installed
	// before the restart, as the lease might be requested at any time and for
	// many reasons by background processes, even before we send the read below.
	leaseAcquisitionCh := make(chan error)
	var once sync.Once
	leaseAcquisitionTrap.Store(func(_ hlc.Timestamp) {
		once.Do(func() {
			close(leaseAcquisitionCh)
		})
	})
	mtc.restart()

	// Send another read and check that the pre-existing lease has not been used.
	// Concretely, we check that a new lease is requested.
	if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
		t.Fatal(err)
	}
	// Check that the Send above triggered a lease acquisition.
	select {
	case <-leaseAcquisitionCh:
	case <-time.After(time.Second):
		t.Fatalf("read did not acquire a new lease")
	}
}
Author: bdarnell, Project: cockroach, Lines: 55, Source: client_replica_test.go
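
The pattern in the example above — publishing a callback through an atomic.Value and nil-checking on the consumer side before the type assertion — can be distilled to a minimal sketch (illustrative only, not from the cockroach codebase):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var trap atomic.Value // holds a func(int), installed later

	fire := func(n int) {
		val := trap.Load()
		if val == nil {
			return // no callback installed yet
		}
		if cb := val.(func(int)); cb != nil {
			cb(n)
		}
	}

	fire(1) // no-op: nothing stored yet
	trap.Store(func(n int) { fmt.Println("trapped", n) })
	fire(2) // prints "trapped 2"
}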

Example 7: ActualLRPGroups

func (db *ETCDDB) ActualLRPGroups(logger lager.Logger, filter models.ActualLRPFilter) ([]*models.ActualLRPGroup, error) {
	node, err := db.fetchRecursiveRaw(logger, ActualLRPSchemaRoot)
	bbsErr := models.ConvertError(err)
	if bbsErr != nil {
		if bbsErr.Type == models.Error_ResourceNotFound {
			return []*models.ActualLRPGroup{}, nil
		}
		return nil, err
	}
	if len(node.Nodes) == 0 {
		return []*models.ActualLRPGroup{}, nil
	}

	groups := []*models.ActualLRPGroup{}

	var workErr atomic.Value
	groupChan := make(chan []*models.ActualLRPGroup, len(node.Nodes))
	wg := sync.WaitGroup{}

	logger.Debug("performing-deserialization-work")
	for _, node := range node.Nodes {
		node := node

		wg.Add(1)
		go func() {
			defer wg.Done()
			g, err := db.parseActualLRPGroups(logger, node, filter)
			if err != nil {
				workErr.Store(err)
				return
			}
			groupChan <- g
		}()
	}

	go func() {
		wg.Wait()
		close(groupChan)
	}()

	for g := range groupChan {
		groups = append(groups, g...)
	}

	if err, ok := workErr.Load().(error); ok {
		logger.Error("failed-performing-deserialization-work", err)
		return []*models.ActualLRPGroup{}, models.ErrUnknownError
	}
	logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num_actual_lrp_groups": len(groups)})

	return groups, nil
}
Author: timani, Project: bbs, Lines: 52, Source: actual_lrp_db.go

Example 8: ActualLRPGroups

func (db *ETCDDB) ActualLRPGroups(logger lager.Logger, filter models.ActualLRPFilter) (*models.ActualLRPGroups, *models.Error) {
	node, bbsErr := db.fetchRecursiveRaw(logger, ActualLRPSchemaRoot)
	if bbsErr.Equal(models.ErrResourceNotFound) {
		return &models.ActualLRPGroups{}, nil
	}
	if bbsErr != nil {
		return nil, bbsErr
	}
	if node.Nodes.Len() == 0 {
		return &models.ActualLRPGroups{}, nil
	}

	groups := &models.ActualLRPGroups{}

	groupsLock := sync.Mutex{}
	var workErr atomic.Value
	works := []func(){}

	for _, node := range node.Nodes {
		node := node

		works = append(works, func() {
			g, err := parseActualLRPGroups(logger, node, filter)
			if err != nil {
				workErr.Store(err)
				return
			}
			groupsLock.Lock()
			groups.ActualLrpGroups = append(groups.ActualLrpGroups, g.ActualLrpGroups...)
			groupsLock.Unlock()
		})
	}

	throttler, err := workpool.NewThrottler(maxActualGroupGetterWorkPoolSize, works)
	if err != nil {
		logger.Error("failed-constructing-throttler", err, lager.Data{"max-workers": maxActualGroupGetterWorkPoolSize, "num-works": len(works)})
		return &models.ActualLRPGroups{}, models.ErrUnknownError
	}

	logger.Debug("performing-deserialization-work")
	throttler.Work()
	if err, ok := workErr.Load().(error); ok {
		logger.Error("failed-performing-deserialization-work", err)
		return &models.ActualLRPGroups{}, models.ErrUnknownError
	}
	logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num-actual-lrp-groups": len(groups.ActualLrpGroups)})

	return groups, nil
}
Author: Gerg, Project: bbs, Lines: 49, Source: actual_lrp_db.go

Example 9: processDir

func (tagger *Tagger) processDir(src, dst string) error {
	utils.Log(utils.INFO, "Start processing directory '%v'", src)

	allFiles := getAllFiles(src)
	tagger.counter.setTotal(len(allFiles))
	utils.Log(utils.INFO, "Found %v files", len(allFiles))

	var result atomic.Value
	var index int32 = -1
	var wg sync.WaitGroup

	for i := 0; i < numberOfThreads; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				if tagger.stop.Load().(bool) {
					utils.Log(utils.WARNING, "Processing directory '%v' interrupted by application stop", src)
					return
				}

				i := atomic.AddInt32(&index, 1)
				if i >= int32(len(allFiles)) {
					return
				}

				fmt.Printf("\rProcessing %v/%v", i, len(allFiles))
				destination, err := tagger.getDestinationPath(allFiles[i])
				if err != nil {
					result.Store(err)
					utils.Log(utils.ERROR, "Failed to get destination path: %v", err)
					continue
				}
				if err := tagger.processFile(allFiles[i], destination); err != nil {
					utils.Log(utils.ERROR, "Failed to process file '%v': %v", allFiles[i], err)
					result.Store(err)
				}
			}
		}()
	}
	wg.Wait()
	fmt.Printf("\r                        \r")

	if result.Load() != nil {
		return result.Load().(error)
	}
	return nil
}
Author: mzinin, Project: tagger, Lines: 48, Source: tagger.go

Example 10: clientHandler

func clientHandler(c *Client) {
	defer c.stopWg.Done()

	var conn io.ReadWriteCloser
	var err error
	var stopping atomic.Value

	for {
		dialChan := make(chan struct{})
		go func() {
			if conn, err = c.Dial(c.Addr); err != nil {
				if stopping.Load() == nil {
					c.LogError("gorpc.Client: [%s]. Cannot establish rpc connection: [%s]", c.Addr, err)
				}
			}
			close(dialChan)
		}()

		select {
		case <-c.clientStopChan:
			stopping.Store(true)
			<-dialChan
			return
		case <-dialChan:
			c.Stats.incDialCalls()
		}

		if err != nil {
			c.Stats.incDialErrors()
			select {
			case <-c.clientStopChan:
				return
			case <-time.After(time.Second):
			}
			continue
		}

		clientHandleConnection(c, conn)

		select {
		case <-c.clientStopChan:
			return
		default:
		}
	}
}
Author: BobbWu, Project: gorpc, Lines: 46, Source: client.go

Example 11: intercept

func (f *LanternProFilter) intercept(key []byte, atomicClient atomic.Value, w http.ResponseWriter, req *http.Request) {
	var err error
	if req.Method == "CONNECT" {
		var clientConn net.Conn
		var connOut net.Conn

		utils.RespondOK(w, req)
		if clientConn, _, err = w.(http.Hijacker).Hijack(); err != nil {
			utils.RespondBadGateway(w, req, fmt.Sprintf("Unable to hijack connection: %s", err))
			return
		}
		connOut, err = net.Dial("tcp", req.Host)
		if err != nil {
			// The client connection has already been hijacked, so just close it.
			_ = clientConn.Close()
			return
		}
		// Pipe data through CONNECT tunnel
		closeConns := func() {
			if clientConn != nil {
				if err := clientConn.Close(); err != nil {
					fmt.Printf("Error closing the client connection: %s", err)
				}
			}
			if connOut != nil {
				if err := connOut.Close(); err != nil {
					fmt.Printf("Error closing the out connection: %s", err)
				}
			}
		}
		var closeOnce sync.Once
		go func() {
			n, _ := io.Copy(connOut, clientConn)
			client := atomicClient.Load().(*Client)
			atomic.AddInt64(&client.TransferIn, n)
			closeOnce.Do(closeConns)

		}()
		n, _ := io.Copy(clientConn, connOut)

		client := atomicClient.Load().(*Client)
		atomic.AddInt64(&client.TransferOut, n)

		closeOnce.Do(closeConns)
		fmt.Println("== CONNECT DONE ==")
	} else {
		f.next.ServeHTTP(w, req)
		// TODO: byte counting in this case
	}
}
Author: Nuos, Project: chained-server, Lines: 45, Source: lanternpro.go

Example 12: serverHandler

func serverHandler(s *Server, workersCh chan struct{}) {
	defer s.stopWg.Done()

	var conn io.ReadWriteCloser
	var clientAddr string
	var err error
	var stopping atomic.Value

	for {
		acceptChan := make(chan struct{})
		go func() {
			if conn, clientAddr, err = s.Listener.Accept(); err != nil {
				if stopping.Load() == nil {
					s.LogError("gorpc.Server: [%s]. Cannot accept new connection: [%s]", s.Addr, err)
				}
			}
			close(acceptChan)
		}()

		select {
		case <-s.serverStopChan:
			stopping.Store(true)
			s.Listener.Close()
			<-acceptChan
			return
		case <-acceptChan:
			s.Stats.incAcceptCalls()
		}

		if err != nil {
			s.Stats.incAcceptErrors()
			select {
			case <-s.serverStopChan:
				return
			case <-time.After(time.Second):
			}
			continue
		}

		s.stopWg.Add(1)
		go serverHandleConnection(s, conn, clientAddr, workersCh)
	}
}
Author: running-fish, Project: gorpc, Lines: 43, Source: server.go

Example 13: Start

func Start(cfg *config.Config, version string) func() {
	var addr atomic.Value
	go func() {
		ip := geolookup.GetIP(maxWaitForIP)
		if ip == "" {
			log.Errorf("No IP found within %v, not starting analytics session", maxWaitForIP)
			return
		}
		addr.Store(ip)
		log.Debugf("Starting analytics session with ip %v", ip)
		startSession(ip, version, client.Addr, cfg.Client.DeviceID)
	}()

	stop := func() {
		if addr.Load() != nil {
			ip := addr.Load().(string)
			log.Debugf("Ending analytics session with ip %v", ip)
			endSession(ip, version, client.Addr, cfg.Client.DeviceID)
		}
	}
	return stop
}
Author: 2722, Project: lantern, Lines: 22, Source: analytics.go

Example 14: doWithError

// doWithError spawns workers with indices 0 to n-1, limiting the number of
// concurrent goroutines to max. Similar to do, but with error handling.
// Once a worker returns an error, no new work is started and the stored
// error is returned; workers already running are not interrupted.
func doWithError(n int, worker func(int) error, max int) error {
	var (
		errv atomic.Value // worker error
		wg   sync.WaitGroup
	)

	if n <= max {
		// spawn as many goroutines as number of workers
		wg.Add(n)
		for i := 0; i < n; i++ {
			go func(idx int) {
				if errv.Load() == nil {
					if err := worker(idx); err != nil {
						errv.Store(err)
					}
				}
				wg.Done()
			}(i)
		}
		wg.Wait()

		if err := errv.Load(); err != nil {
			return err.(error)
		}
		return nil
	}

	// spawn the maximum number of goroutines
	wg.Add(max)
	for i := 0; i < max; i++ {
		go func(idx int) {
			for ; idx < n && errv.Load() == nil; idx += max {
				if err := worker(idx); err != nil {
					errv.Store(err)
					break
				}
			}
			wg.Done()
		}(i)
	}
	wg.Wait()

	if err := errv.Load(); err != nil {
		return err.(error)
	}
	return nil
}
Author: pierrec, Project: go-work, Lines: 50, Source: work.go
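
A hypothetical caller of doWithError might look like the following sketch (processPath and paths are illustrative stand-ins, not part of the original package):

// Process all paths with at most 8 concurrent goroutines; scheduling of
// new work stops after the first error, which is then returned.
func processAll(paths []string, processPath func(string) error) error {
	return doWithError(len(paths), func(i int) error {
		return processPath(paths[i])
	}, 8)
}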

Example 15: natsOptions

func natsOptions(logger lager.Logger, c *config.Config, natsHost *atomic.Value, startMsg chan<- struct{}) nats.Options {
	natsServers := c.NatsServers()

	options := nats.DefaultOptions
	options.Servers = natsServers
	options.PingInterval = c.NatsClientPingInterval
	options.ClosedCB = func(conn *nats.Conn) {
		logger.Fatal("nats-connection-closed", errors.New("unexpected close"), lager.Data{"last_error": conn.LastError()})
	}

	options.DisconnectedCB = func(conn *nats.Conn) {
		hostStr := natsHost.Load().(string)
		logger.Info("nats-connection-disconnected", lager.Data{"nats-host": hostStr})
	}

	options.ReconnectedCB = func(conn *nats.Conn) {
		natsURL, err := url.Parse(conn.ConnectedUrl())
		natsHostStr := ""
		if err != nil {
			logger.Error("nats-url-parse-error", err)
		} else {
			natsHostStr = natsURL.Host
		}
		natsHost.Store(natsHostStr)

		data := lager.Data{"nats-host": natsHostStr}
		logger.Info("nats-connection-reconnected", data)
		startMsg <- struct{}{}
	}

	// in the case of suspending pruning, we need to ensure we retry reconnects indefinitely
	if c.SuspendPruningIfNatsUnavailable {
		options.MaxReconnect = -1
	}

	return options
}
Author: cloudfoundry, Project: gorouter, Lines: 37, Source: main.go


Note: The sync/atomic.Value.Load examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.