This article collects typical usage examples of the Value type from Go's sync/atomic package. If you are wondering what the Value type is for, how to use it, or what it looks like in real code, the curated examples here may help.
Below are 15 code examples of the Value type, drawn from open-source projects and sorted by popularity by default.
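Before the project examples, here is a minimal, self-contained sketch of the basic pattern they all build on: one goroutine publishes a value with Store, and readers fetch it with Load plus a type assertion. It is not taken from any of the projects below; the Config type and the endpoint string are made up purely for illustration.

package main

import (
    "fmt"
    "sync/atomic"
)

// Config is a hypothetical value type used only for this sketch.
type Config struct {
    Endpoint string
}

func main() {
    var current atomic.Value // always holds a *Config

    // Store atomically publishes the new value; concurrent readers see
    // either the old *Config or the new one, never a torn write.
    current.Store(&Config{Endpoint: "127.0.0.1:4222"})

    // Load returns an interface{}, so callers type-assert it back.
    if cfg, ok := current.Load().(*Config); ok {
        fmt.Println(cfg.Endpoint)
    }
}

Note that every Store on a given Value must use the same concrete type (otherwise Store panics), which is why each example below consistently stores a single pointer, string, or function type.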
Example 1: connectToNatsServer
func connectToNatsServer(logger lager.Logger, c *config.Config, startMsg chan<- struct{}) *nats.Conn {
    var natsClient *nats.Conn
    var natsHost atomic.Value
    var err error

    options := natsOptions(logger, c, &natsHost, startMsg)
    attempts := 3
    for attempts > 0 {
        natsClient, err = options.Connect()
        if err == nil {
            break
        } else {
            attempts--
            time.Sleep(100 * time.Millisecond)
        }
    }
    if err != nil {
        logger.Fatal("nats-connection-error", err)
    }

    var natsHostStr string
    natsUrl, err := url.Parse(natsClient.ConnectedUrl())
    if err == nil {
        natsHostStr = natsUrl.Host
    }

    logger.Info("Successfully-connected-to-nats", lager.Data{"host": natsHostStr})

    natsHost.Store(natsHostStr)
    return natsClient
}
Example 2: TestReportStats
func TestReportStats(t *testing.T) {
    md, nr := startWithMockReporter()
    defer md.Stop()

    var remoteAddr atomic.Value

    // start server with byte counting
    l, err := net.Listen("tcp", "127.0.0.1:0")
    if !assert.NoError(t, err, "Listen should not fail") {
        return
    }
    // large enough interval so it will only report stats in Close()
    ml := md.Listener(l, 10*time.Second)
    s := http.Server{
        Handler: http.NotFoundHandler(),
        ConnState: func(c net.Conn, s http.ConnState) {
            if s == http.StateClosed {
                remoteAddr.Store(c.RemoteAddr().String())
            }
        },
    }
    go func() { _ = s.Serve(ml) }()
    time.Sleep(100 * time.Millisecond)

    // start client with byte counting
    c := http.Client{
        Transport: &http.Transport{
            // carefully chosen interval to report another once before Close()
            Dial: md.Dialer(net.Dial, 160*time.Millisecond),
        },
    }
    req, _ := http.NewRequest("GET", "http://"+l.Addr().String(), nil)
    resp, _ := c.Do(req)
    assert.Equal(t, 404, resp.StatusCode)
    // Close without reading from body, to force server to close connection
    _ = resp.Body.Close()
    time.Sleep(100 * time.Millisecond)

    nr.Lock()
    defer nr.Unlock()
    t.Logf("Traffic entries: %+v", nr.traffic)
    if assert.Equal(t, 2, len(nr.traffic)) {
        ct := nr.traffic[l.Addr().String()]
        st := nr.traffic[remoteAddr.Load().(string)]
        if assert.NotNil(t, ct) {
            assert.Equal(t, 0, int(ct.MinOut), "client stats should only report increased byte count")
            assert.Equal(t, 0, int(ct.MinIn), "client stats should only report increased byte count")
            assert.Equal(t, 96, int(ct.MaxOut), "client stats should only report increased byte count")
            assert.Equal(t, 176, int(ct.MaxIn), "client stats should only report increased byte count")
            assert.Equal(t, 96, int(ct.TotalOut), "client stats should only report increased byte count")
            assert.Equal(t, 176, int(ct.TotalIn), "client stats should only report increased byte count")
        }
        if assert.NotNil(t, st) {
            assert.Equal(t, ct.TotalOut, st.TotalIn, "should report server stats with bytes in")
            assert.Equal(t, ct.TotalIn, st.TotalOut, "should report server stats with bytes out")
        }
    }
}
Example 3: NewLocalResolver
func NewLocalResolver() Resolver {
    var cache atomic.Value
    cache.Store(make(cacheMap))
    return &localCocaineResolver{
        cache: cache,
    }
}
Example 4: ServeHTTP
func (f *LanternProFilter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    lanternUID := req.Header.Get("X-Lantern-UID")
    lanternProToken := req.Header.Get("X-Lantern-Pro-Token")

    // If a Pro token is found in the header, test if it's valid and then let
    // the request pass
    if lanternProToken != "" {
        if f.proTokens.Exists(lanternProToken) {
            f.next.ServeHTTP(w, req)
        } else {
            w.WriteHeader(http.StatusBadGateway)
        }
        return
    }

    // A UID must always be provided by the client
    if lanternUID == "" {
        w.WriteHeader(http.StatusNotFound)
        return
    }

    // If this point is reached, handle the request as a non-Pro user
    var client *Client
    key := []byte(lanternUID)

    // Try first in the cache
    // TODO: Actually, leave optimizations for later
    /*
        if client, ok := clientCache.Get(key); ok {
            client.(*Client).LastAccess = time.Now()
            // TODO: numbytes
            clientRegistry.Insert(key, *(client.(*Client)))
            return
        } else {
            clientCache.Set(key, *client)
        }
    */

    if val, ok := f.clientRegistry.Lookup(key); ok {
        client = val.(*Client)
        //client.LastAccess = time.Now()
        //f.clientRegistry.Insert(key, client)
    } else {
        client = &Client{
            Created:     time.Now(),
            LastAccess:  time.Now(),
            TransferIn:  0,
            TransferOut: 0,
        }
        f.clientRegistry.Insert(key, client)
    }

    var atomicClient atomic.Value
    atomicClient.Store(client)
    f.intercept(key, atomicClient, w, req)

    //clientCache.Add(key, client)
}
Example 5: DesiredLRPs
func (db *ETCDDB) DesiredLRPs(logger lager.Logger, filter models.DesiredLRPFilter) (*models.DesiredLRPs, *models.Error) {
    root, bbsErr := db.fetchRecursiveRaw(logger, DesiredLRPSchemaRoot)
    if bbsErr.Equal(models.ErrResourceNotFound) {
        return &models.DesiredLRPs{}, nil
    }
    if bbsErr != nil {
        return nil, bbsErr
    }
    if root.Nodes.Len() == 0 {
        return &models.DesiredLRPs{}, nil
    }

    desiredLRPs := models.DesiredLRPs{}

    lrpsLock := sync.Mutex{}
    var workErr atomic.Value
    works := []func(){}

    for _, node := range root.Nodes {
        node := node

        works = append(works, func() {
            var lrp models.DesiredLRP
            deserializeErr := models.FromJSON([]byte(node.Value), &lrp)
            if deserializeErr != nil {
                logger.Error("failed-parsing-desired-lrp", deserializeErr)
                workErr.Store(fmt.Errorf("cannot parse lrp JSON for key %s: %s", node.Key, deserializeErr.Error()))
                return
            }
            if filter.Domain == "" || lrp.GetDomain() == filter.Domain {
                lrpsLock.Lock()
                desiredLRPs.DesiredLrps = append(desiredLRPs.DesiredLrps, &lrp)
                lrpsLock.Unlock()
            }
        })
    }

    throttler, err := workpool.NewThrottler(maxDesiredLRPGetterWorkPoolSize, works)
    if err != nil {
        logger.Error("failed-constructing-throttler", err, lager.Data{"max-workers": maxDesiredLRPGetterWorkPoolSize, "num-works": len(works)})
        return &models.DesiredLRPs{}, models.ErrUnknownError
    }

    logger.Debug("performing-deserialization-work")
    throttler.Work()
    if err, ok := workErr.Load().(error); ok {
        logger.Error("failed-performing-deserialization-work", err)
        return &models.DesiredLRPs{}, models.ErrUnknownError
    }
    logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num-desired-lrps": len(desiredLRPs.GetDesiredLrps())})

    return &desiredLRPs, nil
}
Example 6: TestLeaseNotUsedAfterRestart
// Test that leases held before a restart are not used after the restart.
// See replica.mu.minLeaseProposedTS for the reasons why this isn't allowed.
func TestLeaseNotUsedAfterRestart(t *testing.T) {
    defer leaktest.AfterTest(t)()
    sc := storage.TestStoreConfig(nil)
    var leaseAcquisitionTrap atomic.Value
    // Disable the split queue so that no ranges are split. This makes it easy
    // below to trap any lease request and infer that it refers to the range we're
    // interested in.
    sc.TestingKnobs.DisableSplitQueue = true
    sc.TestingKnobs.LeaseRequestEvent = func(ts hlc.Timestamp) {
        val := leaseAcquisitionTrap.Load()
        if val == nil {
            return
        }
        trapCallback := val.(func(ts hlc.Timestamp))
        if trapCallback != nil {
            trapCallback(ts)
        }
    }
    mtc := &multiTestContext{storeConfig: &sc}
    mtc.Start(t, 1)
    defer mtc.Stop()

    // Send a read, to acquire a lease.
    getArgs := getArgs([]byte("a"))
    if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
        t.Fatal(err)
    }

    // Restart the mtc. Before we do that, we're installing a callback used to
    // assert that a new lease has been requested. The callback is installed
    // before the restart, as the lease might be requested at any time and for
    // many reasons by background processes, even before we send the read below.
    leaseAcquisitionCh := make(chan error)
    var once sync.Once
    leaseAcquisitionTrap.Store(func(_ hlc.Timestamp) {
        once.Do(func() {
            close(leaseAcquisitionCh)
        })
    })
    mtc.restart()

    // Send another read and check that the pre-existing lease has not been used.
    // Concretely, we check that a new lease is requested.
    if _, err := client.SendWrapped(context.Background(), rg1(mtc.stores[0]), &getArgs); err != nil {
        t.Fatal(err)
    }

    // Check that the Send above triggered a lease acquisition.
    select {
    case <-leaseAcquisitionCh:
    case <-time.After(time.Second):
        t.Fatalf("read did not acquire a new lease")
    }
}
Example 7: ActualLRPGroups
func (db *ETCDDB) ActualLRPGroups(logger lager.Logger, filter models.ActualLRPFilter) ([]*models.ActualLRPGroup, error) {
    node, err := db.fetchRecursiveRaw(logger, ActualLRPSchemaRoot)
    bbsErr := models.ConvertError(err)
    if bbsErr != nil {
        if bbsErr.Type == models.Error_ResourceNotFound {
            return []*models.ActualLRPGroup{}, nil
        }
        return nil, err
    }
    if len(node.Nodes) == 0 {
        return []*models.ActualLRPGroup{}, nil
    }

    groups := []*models.ActualLRPGroup{}

    var workErr atomic.Value
    groupChan := make(chan []*models.ActualLRPGroup, len(node.Nodes))
    wg := sync.WaitGroup{}

    logger.Debug("performing-deserialization-work")
    for _, node := range node.Nodes {
        node := node

        wg.Add(1)
        go func() {
            defer wg.Done()
            g, err := db.parseActualLRPGroups(logger, node, filter)
            if err != nil {
                workErr.Store(err)
                return
            }
            groupChan <- g
        }()
    }

    go func() {
        wg.Wait()
        close(groupChan)
    }()

    for g := range groupChan {
        groups = append(groups, g...)
    }

    if err, ok := workErr.Load().(error); ok {
        logger.Error("failed-performing-deserialization-work", err)
        return []*models.ActualLRPGroup{}, models.ErrUnknownError
    }
    logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num_actual_lrp_groups": len(groups)})

    return groups, nil
}
Example 8: ActualLRPGroups
func (db *ETCDDB) ActualLRPGroups(logger lager.Logger, filter models.ActualLRPFilter) (*models.ActualLRPGroups, *models.Error) {
    node, bbsErr := db.fetchRecursiveRaw(logger, ActualLRPSchemaRoot)
    if bbsErr.Equal(models.ErrResourceNotFound) {
        return &models.ActualLRPGroups{}, nil
    }
    if bbsErr != nil {
        return nil, bbsErr
    }
    if node.Nodes.Len() == 0 {
        return &models.ActualLRPGroups{}, nil
    }

    groups := &models.ActualLRPGroups{}
    groupsLock := sync.Mutex{}
    var workErr atomic.Value
    works := []func(){}

    for _, node := range node.Nodes {
        node := node

        works = append(works, func() {
            g, err := parseActualLRPGroups(logger, node, filter)
            if err != nil {
                workErr.Store(err)
                return
            }
            groupsLock.Lock()
            groups.ActualLrpGroups = append(groups.ActualLrpGroups, g.ActualLrpGroups...)
            groupsLock.Unlock()
        })
    }

    throttler, err := workpool.NewThrottler(maxActualGroupGetterWorkPoolSize, works)
    if err != nil {
        logger.Error("failed-constructing-throttler", err, lager.Data{"max-workers": maxActualGroupGetterWorkPoolSize, "num-works": len(works)})
        return &models.ActualLRPGroups{}, models.ErrUnknownError
    }

    logger.Debug("performing-deserialization-work")
    throttler.Work()
    if err, ok := workErr.Load().(error); ok {
        logger.Error("failed-performing-deserialization-work", err)
        return &models.ActualLRPGroups{}, models.ErrUnknownError
    }
    logger.Debug("succeeded-performing-deserialization-work", lager.Data{"num-actual-lrp-groups": len(groups.ActualLrpGroups)})

    return groups, nil
}
Example 9: updateQueryCacheTask
func (a *HotActor) updateQueryCacheTask(from, to time.Time, btyp proto.TopicListRequest_BucketType, ret *atomic.Value, wg *utils.WaitGroup) {
    var (
        results []FetchResult
        err     error
    )
    defer func() {
        wg.Done(err)
    }()
    if results, err = a.fetcher(btyp, from, to, a.locale); err != nil {
        return
    }
    // store new fetch results to current top 10000
    next := NewHotBucket(DEFAULT_HOTBUCKET_SIZE).addResults(results).sort(btyp).truncate(ALLTIME_SUMMERY_LIMIT).markUpdate(to)
    ret.Store(next)
}
Example 10: clientHandler
func clientHandler(c *Client) {
    defer c.stopWg.Done()

    var conn io.ReadWriteCloser
    var err error
    var stopping atomic.Value

    for {
        dialChan := make(chan struct{})
        go func() {
            if conn, err = c.Dial(c.Addr); err != nil {
                if stopping.Load() == nil {
                    c.LogError("gorpc.Client: [%s]. Cannot establish rpc connection: [%s]", c.Addr, err)
                }
            }
            close(dialChan)
        }()

        select {
        case <-c.clientStopChan:
            stopping.Store(true)
            <-dialChan
            return
        case <-dialChan:
            c.Stats.incDialCalls()
        }

        if err != nil {
            c.Stats.incDialErrors()
            select {
            case <-c.clientStopChan:
                return
            case <-time.After(time.Second):
            }
            continue
        }

        clientHandleConnection(c, conn)

        select {
        case <-c.clientStopChan:
            return
        default:
        }
    }
}
Example 11: LeaderReqFn
func LeaderReqFn(name, port string) ReqFn {
    events := make(chan *discoverd.Event)
    if _, err := discoverd.NewService(name).Watch(events); err != nil {
        log.Fatalf("error creating %s cache: %s", name, err)
    }

    var leader atomic.Value // addr string
    leader.Store("")
    go func() {
        for e := range events {
            if e.Kind != discoverd.EventKindLeader || e.Instance == nil {
                continue
            }
            leader.Store(e.Instance.Addr)
        }
    }()

    return func() (*http.Request, error) {
        addr := leader.Load().(string)
        if addr == "" {
            return nil, errors.New("no leader")
        }
        if port != "" {
            host, _, _ := net.SplitHostPort(addr)
            addr = net.JoinHostPort(host, port)
        }
        return http.NewRequest("GET", fmt.Sprintf("http://%s%s", addr, status.Path), nil)
    }
}
Example 12: intercept
func (f *LanternProFilter) intercept(key []byte, atomicClient atomic.Value, w http.ResponseWriter, req *http.Request) {
    var err error

    if req.Method == "CONNECT" {
        var clientConn net.Conn
        var connOut net.Conn

        utils.RespondOK(w, req)
        if clientConn, _, err = w.(http.Hijacker).Hijack(); err != nil {
            utils.RespondBadGateway(w, req, fmt.Sprintf("Unable to hijack connection: %s", err))
            return
        }
        connOut, err = net.Dial("tcp", req.Host)

        // Pipe data through CONNECT tunnel
        closeConns := func() {
            if clientConn != nil {
                if err := clientConn.Close(); err != nil {
                    fmt.Printf("Error closing the client connection: %s", err)
                }
            }
            if connOut != nil {
                if err := connOut.Close(); err != nil {
                    fmt.Printf("Error closing the out connection: %s", err)
                }
            }
        }
        var closeOnce sync.Once
        go func() {
            n, _ := io.Copy(connOut, clientConn)
            client := atomicClient.Load().(*Client)
            atomic.AddInt64(&client.TransferIn, n)
            closeOnce.Do(closeConns)
        }()
        n, _ := io.Copy(clientConn, connOut)
        client := atomicClient.Load().(*Client)
        atomic.AddInt64(&client.TransferOut, n)
        closeOnce.Do(closeConns)
        fmt.Println("== CONNECT DONE ==")
    } else {
        f.next.ServeHTTP(w, req)
        // TODO: byte counting in this case
    }
}
Example 13: processDir
func (tagger *Tagger) processDir(src, dst string) error {
    utils.Log(utils.INFO, "Start processing directory '%v'", src)
    allFiles := getAllFiles(src)
    tagger.counter.setTotal(len(allFiles))
    utils.Log(utils.INFO, "Found %v files", len(allFiles))

    var result atomic.Value
    var index int32 = -1
    var wg sync.WaitGroup
    for i := 0; i < numberOfThreads; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for {
                if tagger.stop.Load().(bool) {
                    utils.Log(utils.WARNING, "Processing directory '%v' interrupted by application stop", src)
                    return
                }
                i := atomic.AddInt32(&index, 1)
                if i >= int32(len(allFiles)) {
                    return
                }
                fmt.Printf("\rProcessing %v/%v", i, len(allFiles))
                destination, err := tagger.getDestinationPath(allFiles[i])
                if err != nil {
                    result.Store(err)
                    utils.Log(utils.ERROR, "Failed to get destination path: %v", err)
                    continue
                }
                if err := tagger.processFile(allFiles[i], destination); err != nil {
                    utils.Log(utils.ERROR, "Failed to process file '%v': %v", allFiles[i], err)
                    result.Store(err)
                }
            }
        }()
    }
    wg.Wait()
    fmt.Printf("\r \r")

    if result.Load() != nil {
        return result.Load().(error)
    }
    return nil
}
Example 14: serverHandler
func serverHandler(s *Server, workersCh chan struct{}) {
    defer s.stopWg.Done()

    var conn io.ReadWriteCloser
    var clientAddr string
    var err error
    var stopping atomic.Value

    for {
        acceptChan := make(chan struct{})
        go func() {
            if conn, clientAddr, err = s.Listener.Accept(); err != nil {
                if stopping.Load() == nil {
                    s.LogError("gorpc.Server: [%s]. Cannot accept new connection: [%s]", s.Addr, err)
                }
            }
            close(acceptChan)
        }()

        select {
        case <-s.serverStopChan:
            stopping.Store(true)
            s.Listener.Close()
            <-acceptChan
            return
        case <-acceptChan:
            s.Stats.incAcceptCalls()
        }

        if err != nil {
            s.Stats.incAcceptErrors()
            select {
            case <-s.serverStopChan:
                return
            case <-time.After(time.Second):
            }
            continue
        }

        s.stopWg.Add(1)
        go serverHandleConnection(s, conn, clientAddr, workersCh)
    }
}
Example 15: TestStoreScanInconsistentResolvesIntents
// TestStoreScanInconsistentResolvesIntents lays down 10 intents,
// commits the txn without resolving intents, then does repeated
// inconsistent reads until the data shows up, showing that the
// inconsistent reads are triggering intent resolution.
func TestStoreScanInconsistentResolvesIntents(t *testing.T) {
    defer leaktest.AfterTest(t)
    // This test relies on having a committed Txn record and open intents on
    // the same Range. This only works with auto-gc turned off; alternatively
    // the test could move to splitting its underlying Range.
    defer withoutTxnAutoGC()()
    var intercept atomic.Value
    intercept.Store(true)
    TestingCommandFilter = func(args proto.Request) error {
        if _, ok := args.(*proto.ResolveIntentRequest); ok && intercept.Load().(bool) {
            return util.Errorf("error on purpose")
        }
        return nil
    }
    store, _, stopper := createTestStore(t)
    defer func() { TestingCommandFilter = nil }()
    defer stopper.Stop()

    // Lay down 10 intents to scan over.
    txn := newTransaction("test", proto.Key("foo"), 1, proto.SERIALIZABLE, store.ctx.Clock)
    keys := []proto.Key{}
    for j := 0; j < 10; j++ {
        key := proto.Key(fmt.Sprintf("key%02d", j))
        keys = append(keys, key)
        args := putArgs(key, []byte(fmt.Sprintf("value%02d", j)), 1, store.StoreID())
        args.Txn = txn
        if _, err := store.ExecuteCmd(context.Background(), &args); err != nil {
            t.Fatal(err)
        }
    }

    // Now, commit txn without resolving intents. If we hadn't disabled auto-gc
    // of Txn entries in this test, the Txn entry would be removed and later
    // attempts to resolve the intents would fail.
    etArgs := endTxnArgs(txn, true, 1, store.StoreID())
    etArgs.Timestamp = txn.Timestamp
    if _, err := store.ExecuteCmd(context.Background(), &etArgs); err != nil {
        t.Fatal(err)
    }

    intercept.Store(false) // allow async intent resolution

    // Scan the range repeatedly until we've verified count.
    sArgs := scanArgs(keys[0], keys[9].Next(), 1, store.StoreID())
    sArgs.ReadConsistency = proto.INCONSISTENT
    util.SucceedsWithin(t, time.Second, func() error {
        if reply, err := store.ExecuteCmd(context.Background(), &sArgs); err != nil {
            return err
        } else if sReply := reply.(*proto.ScanResponse); len(sReply.Rows) != 10 {
            return util.Errorf("could not read rows as expected")
        }
        return nil
    })
}