This article collects typical usage examples of the StoreUint32 function from Go's sync/atomic package. If you have been wondering what StoreUint32 does, how to call it, or what real code using it looks like, the curated examples below should help.
Fifteen code examples of the StoreUint32 function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Go code examples.
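Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: a uint32 used as an atomic boolean flag, written with atomic.StoreUint32 and read with atomic.LoadUint32. The names below are illustrative and do not come from any of the projects quoted later.

package main

import (
	"fmt"
	"sync/atomic"
)

// ready is a flag that may be read and written by several goroutines;
// 0 means false and 1 means true.
var ready uint32

func setReady(v bool) {
	if v {
		atomic.StoreUint32(&ready, 1)
	} else {
		atomic.StoreUint32(&ready, 0)
	}
}

func isReady() bool {
	return atomic.LoadUint32(&ready) == 1
}

func main() {
	setReady(true)
	fmt.Println(isReady()) // prints: true
}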
Example 1: SetSync
// SetSync controls synchronous event mode. When set to true, a function call
// to generate an event does not return until the event has been processed.
func (logger *Logger) SetSync(enabled bool) {
	if enabled {
		atomic.StoreUint32(&logger.syncEnabled, 1)
	} else {
		atomic.StoreUint32(&logger.syncEnabled, 0)
	}
}
Example 2: setBinaryOption
// setBinaryOption atomically stores 1 into *opt when value is true and 0 otherwise.
func (c *conf) setBinaryOption(opt *uint32, value bool) {
	if value {
		atomic.StoreUint32(opt, 1)
		return
	}
	atomic.StoreUint32(opt, 0)
}
Example 3: Wait
// Wait waits for the queue to finish processing all transfers. Once Wait is
// called, Add will no longer add transferables to the queue. Any failed
// transfers will be automatically retried once.
func (q *TransferQueue) Wait() {
	if q.batcher != nil {
		q.batcher.Exit()
	}
	q.wait.Wait()

	// Handle any retries
	close(q.retriesc)
	q.retrywait.Wait()
	atomic.StoreUint32(&q.retrying, 1)

	if len(q.retries) > 0 && q.batcher != nil {
		tracerx.Printf("tq: retrying %d failed transfers", len(q.retries))
		for _, t := range q.retries {
			q.Add(t)
		}
		q.batcher.Exit()
		q.wait.Wait()
	}
	atomic.StoreUint32(&q.retrying, 0)

	close(q.apic)
	close(q.transferc)
	close(q.errorc)

	for _, watcher := range q.watchers {
		close(watcher)
	}

	q.meter.Finish()
	q.errorwait.Wait()
}
Example 4: Set
// Set stores the boolean value atomically, encoding true as 1 and false as 0.
func (b *Bool) Set(v bool) {
	if v {
		atomic.StoreUint32(&b.v, 1)
		return
	}
	atomic.StoreUint32(&b.v, 0)
}
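The original excerpt shows only the writer half of this small atomic boolean type. Assuming the usual definition of the wrapper (a struct around a single uint32, which is not shown in the excerpt), the matching reader would look like this:

// Assumed definition of the wrapper; the real project's struct is not shown above.
type Bool struct {
	v uint32
}

// Get reports the current value, pairing with Set above.
func (b *Bool) Get() bool {
	return atomic.LoadUint32(&b.v) == 1
}

On Go 1.19 and later, sync/atomic.Bool provides this behaviour out of the box.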
Example 5: testThrottling
func testThrottling(t *testing.T, protocol int) {
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester()
	tester.newPeer("peer", protocol, hashes, blocks)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(blocks []*Block) {
		atomic.StoreUint32(&blocked, uint32(len(blocks)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached int
		for start := time.Now(); time.Since(start) < time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.downloader.queue.lock.RLock()
			cached = len(tester.downloader.queue.blockPool)
			tester.downloader.queue.lock.RUnlock()

			if cached == blockCacheLimit || len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		if cached != blockCacheLimit && len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, target %v)", cached, blockCacheLimit, len(tester.ownBlocks), targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	if len(tester.ownBlocks) > targetBlocks+1 {
		t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1)
	}
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
Example 6: SetAllHeadersDone
// SetAllHeadersDone atomically records whether all headers have been received.
func SetAllHeadersDone(res bool) {
	if res {
		atomic.StoreUint32(&allheadersdone, 1)
	} else {
		atomic.StoreUint32(&allheadersdone, 0)
	}
}
Example 7: Run
func (j *Job) Run() {
	// If the job panics, just print a stack trace.
	// Don't let the whole process die.
	defer func() {
		if err := recover(); err != nil {
			if revelError := revel.NewErrorFromPanic(err); revelError != nil {
				revel.ERROR.Print(err, "\n", revelError.Stack)
			} else {
				revel.ERROR.Print(err, "\n", string(debug.Stack()))
			}
		}
	}()

	if !selfConcurrent {
		j.running.Lock()
		defer j.running.Unlock()
	}

	if workPermits != nil {
		workPermits <- struct{}{}
		defer func() { <-workPermits }()
	}

	// Mark the job as running for the duration of inner.Run.
	atomic.StoreUint32(&j.status, 1)
	defer atomic.StoreUint32(&j.status, 0)

	j.inner.Run()
}
Example 8: SetMode
// SetMode switches between startup (fast) and maintenance (slow) sampling speeds.
func (b *Bootstrapper) SetMode(startup bool) {
	if startup {
		atomic.StoreUint32(&b.phase, uint32(0))
	} else {
		atomic.StoreUint32(&b.phase, uint32(1))
	}
}
Example 9: EnableDebugLogs
// EnableDebugLogs has log messages directed to standard output if enable is true.
func EnableDebugLogs(enable bool) {
	if enable {
		atomic.StoreUint32(&enableDebugLogs, 1)
	} else {
		atomic.StoreUint32(&enableDebugLogs, 0)
	}
}
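The excerpt only sets the flag; a plausible call site (debugf below is illustrative and not taken from the original package) would gate the formatting and output work on an atomic read of the same variable:

// debugf is a hypothetical helper: it returns immediately unless debug logging
// has been switched on via EnableDebugLogs.
func debugf(format string, args ...interface{}) {
	if atomic.LoadUint32(&enableDebugLogs) == 0 {
		return
	}
	fmt.Printf(format+"\n", args...)
}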
Example 10: Call
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, params ...interface{}) error {
	// Don't crash on a lazy user
	if opts == nil {
		opts = new(CallOpts)
	}
	// Make sure we have a contract to operate on, and bail out otherwise
	if (opts.Pending && atomic.LoadUint32(&c.pendingHasCode) == 0) || (!opts.Pending && atomic.LoadUint32(&c.latestHasCode) == 0) {
		if code, err := c.caller.HasCode(c.address, opts.Pending); err != nil {
			return err
		} else if !code {
			return ErrNoCode
		}
		if opts.Pending {
			atomic.StoreUint32(&c.pendingHasCode, 1)
		} else {
			atomic.StoreUint32(&c.latestHasCode, 1)
		}
	}
	// Pack the input, call and unpack the results
	input, err := c.abi.Pack(method, params...)
	if err != nil {
		return err
	}
	output, err := c.caller.ContractCall(c.address, input, opts.Pending)
	if err != nil {
		return err
	}
	return c.abi.Unpack(result, method, output)
}
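The pendingHasCode/latestHasCode flags above implement a "check once, then remember" cache: in the worst case two goroutines both call HasCode and both store 1, which is harmless because the flag only ever moves from 0 to 1. A stripped-down sketch of the same idiom (all names here are illustrative, not from go-ethereum):

import (
	"errors"
	"sync/atomic"
)

// checked caches the fact that an expensive verification has succeeded once.
var checked uint32

func ensureChecked(verify func() (bool, error)) error {
	if atomic.LoadUint32(&checked) == 1 {
		return nil // already verified by an earlier call
	}
	ok, err := verify()
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("verification failed")
	}
	// A duplicate store from a racing goroutine is harmless: the flag only
	// ever transitions from 0 to 1.
	atomic.StoreUint32(&checked, 1)
	return nil
}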
Example 11: Run
func (j *Job) Run() {
	start := time.Now()
	// If the job panics, just print a stack trace.
	// Don't let the whole process die.
	defer func() {
		if err := recover(); err != nil {
			var buf bytes.Buffer
			logger := log.New(&buf, "JobRunner Log: ", log.Lshortfile)
			logger.Panic(err, "\n", string(debug.Stack()))
		}
	}()

	if !selfConcurrent {
		j.running.Lock()
		defer j.running.Unlock()
	}

	if workPermits != nil {
		workPermits <- struct{}{}
		defer func() { <-workPermits }()
	}

	atomic.StoreUint32(&j.status, 1)
	j.StatusUpdate()
	defer j.StatusUpdate()
	defer atomic.StoreUint32(&j.status, 0)

	j.inner.Run()

	// Record how long the run took.
	end := time.Now()
	j.Latency = end.Sub(start).String()
}
Example 12: synchronise
// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash())

	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		mode = downloader.FastSync
	}
	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
		return
	}
	atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done

	// If fast sync was enabled, and we synced up, disable it
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Disable fast sync if we indeed have something in our chain
		if pm.blockchain.CurrentBlock().NumberU64() > 0 {
			glog.V(logger.Info).Infof("fast sync complete, auto disabling")
			atomic.StoreUint32(&pm.fastSync, 0)
		}
	}
}
Example 13: updateNow
// updateNow caches the current wall-clock date (packed as YYMMDD) and time
// (packed as HHMMSS) in atomically readable uint32 values.
func updateNow() {
	t := time.Now()
	dt := uint32(t.Year()%100*10000 + int(t.Month())*100 + t.Day())
	tm := uint32(t.Hour()*10000 + t.Minute()*100 + t.Second())
	atomic.StoreUint32(&lastDate, dt)
	atomic.StoreUint32(&lastTime, tm)
	lastDateTimeStr = fmt.Sprintf("%04d %06d", dt%10000, tm)
}
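The excerpt does not show how updateNow is driven. A plausible arrangement (the ticker loop and accessor names below are assumptions, not part of the original package) refreshes the cached values once per second so hot paths can read them with atomic.LoadUint32 instead of calling time.Now and reformatting on every use:

// startClock is a hypothetical driver for updateNow.
func startClock() {
	updateNow() // prime the cached values
	go func() {
		for range time.Tick(time.Second) {
			updateNow()
		}
	}()
}

// currentDate and currentTime are hypothetical readers of the cached values.
func currentDate() uint32 { return atomic.LoadUint32(&lastDate) }
func currentTime() uint32 { return atomic.LoadUint32(&lastTime) }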
Example 14: Toggle
// Toggle enables or disables logging.
func (el *ErrLog) Toggle(toggle bool) {
	// Reinterpret the ErrLog pointer as *uint32; this stores into the first
	// word of the struct, so it assumes the enabled flag is the first field.
	el32 := unsafe.Pointer(el)
	if toggle {
		atomic.StoreUint32((*uint32)(el32), 1)
	} else {
		atomic.StoreUint32((*uint32)(el32), 0)
	}
}
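Because the unsafe.Pointer cast works only when the flag is the struct's first field, a more conventional version of the same toggle addresses the field directly. The struct layout below is an assumption for illustration, not the original ErrLog type:

// errLog is an illustrative stand-in for ErrLog with an explicit flag field.
type errLog struct {
	enabled uint32 // 1 = logging on, 0 = logging off
}

func (el *errLog) Toggle(on bool) {
	if on {
		atomic.StoreUint32(&el.enabled, 1)
	} else {
		atomic.StoreUint32(&el.enabled, 0)
	}
}

func (el *errLog) Enabled() bool {
	return atomic.LoadUint32(&el.enabled) == 1
}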
Example 15: Get
// Get returns buffer with length of n.
func (p *BufferPool) Get(n int) []byte {
	atomic.AddUint32(&p.get, 1)

	if poolNum := p.poolNum(n); poolNum == 0 {
		// Fast path.
		if b, ok := p.pool[0].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				panic("not reached")
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		return make([]byte, n, p.baseline0)
	} else {
		sizePtr := &p.size[poolNum-1]

		if b, ok := p.pool[poolNum].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				atomic.AddUint32(&p.greater, 1)
				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
					p.pool[poolNum].Put(b)
				}
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
			if size == 0 {
				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
			} else {
				sizeMissPtr := &p.sizeMiss[poolNum-1]
				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
					atomic.StoreUint32(sizePtr, uint32(n))
					atomic.StoreUint32(sizeMissPtr, 0)
				}
			}
			return make([]byte, n)
		} else {
			return make([]byte, n, size)
		}
	}
}