This article collects typical usage examples of Go's sync/atomic.AddInt32 function. If you have been wondering what exactly AddInt32 does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.
Fifteen code examples of the AddInt32 function are shown below, sorted by popularity by default.
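As a baseline before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core AddInt32 pattern: a positive delta atomically increments a shared int32, a negative delta decrements it, and the updated value is read back with atomic.LoadInt32. AddInt32 also returns the new value, which several examples below rely on (the concurrency check in Example 1, the reference counting in Example 14).

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var counter int32 // shared by all goroutines below
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt32(&counter, 2)  // a positive delta increments
			atomic.AddInt32(&counter, -1) // a negative delta decrements
		}()
	}
	wg.Wait()
	// Values updated with AddInt32 should also be read atomically.
	fmt.Println(atomic.LoadInt32(&counter)) // prints 100
}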
Example 1: Upload

// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)
		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}
	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}
	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}
	return distribution.Descriptor{}, nil
}
Example 2: inBulkSet

// inBulkSet actually processes incoming bulk-set messages; there may be more
// than one of these workers.
func (store *defaultGroupStore) inBulkSet(wg *sync.WaitGroup) {
	for {
		bsm := <-store.bulkSetState.inMsgChan
		if bsm == nil {
			break
		}
		body := bsm.body
		var err error
		ring := store.msgRing.Ring()
		var rightwardPartitionShift uint64
		var bsam *groupBulkSetAckMsg
		var ptimestampbits uint64
		if ring != nil {
			rightwardPartitionShift = 64 - uint64(ring.PartitionBitCount())
			// Only ack if there is someone to ack to.
			if bsm.nodeID() != 0 {
				bsam = store.newOutBulkSetAckMsg()
			}
		}
		for len(body) > _GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH {
			keyA := binary.BigEndian.Uint64(body)
			keyB := binary.BigEndian.Uint64(body[8:])
			childKeyA := binary.BigEndian.Uint64(body[16:])
			childKeyB := binary.BigEndian.Uint64(body[24:])
			timestampbits := binary.BigEndian.Uint64(body[32:])
			l := binary.BigEndian.Uint32(body[40:])
			atomic.AddInt32(&store.inBulkSetWrites, 1)
			/*
				// REMOVEME logging when we get zero-length values.
				if l == 0 && timestampbits&_TSB_DELETION == 0 {
					fmt.Printf("REMOVEME inbulkset got a zero-length value, %x %x %x %x %x\n", keyA, keyB, childKeyA, childKeyB, timestampbits)
				}
			*/
			// Attempt to store everything received...
			// Note that deletions are acted upon as internal requests (work
			// even if writes are disabled due to disk fullness) and new data
			// writes are not.
			ptimestampbits, err = store.write(keyA, keyB, childKeyA, childKeyB, timestampbits, body[_GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH:_GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH+l], timestampbits&_TSB_DELETION != 0)
			if err != nil {
				atomic.AddInt32(&store.inBulkSetWriteErrors, 1)
			} else if ptimestampbits >= timestampbits {
				atomic.AddInt32(&store.inBulkSetWritesOverridden, 1)
			}
			// But only ack if the write succeeded, there is someone to ack
			// to, and the local node is responsible for the data.
			if err == nil && bsam != nil && ring != nil && ring.Responsible(uint32(keyA>>rightwardPartitionShift)) {
				bsam.add(keyA, keyB, childKeyA, childKeyB, timestampbits)
			}
			body = body[_GROUP_BULK_SET_MSG_ENTRY_HEADER_LENGTH+l:]
		}
		if bsam != nil {
			atomic.AddInt32(&store.outBulkSetAcks, 1)
			store.msgRing.MsgToNode(bsam, bsm.nodeID(), store.bulkSetState.inResponseMsgTimeout)
		}
		store.bulkSetState.inFreeMsgChan <- bsm
	}
	wg.Done()
}
Example 3: sendCommandAsync

func (w *Producer) sendCommandAsync(cmd *Command, doneChan chan *ProducerTransaction,
	args []interface{}) error {
	// keep track of how many outstanding producers we're dealing with
	// in order to later ensure that we clean them all up...
	atomic.AddInt32(&w.concurrentProducers, 1)
	defer atomic.AddInt32(&w.concurrentProducers, -1)
	if atomic.LoadInt32(&w.state) != StateConnected {
		err := w.connect()
		if err != nil {
			return err
		}
	}
	t := &ProducerTransaction{
		cmd:      cmd,
		doneChan: doneChan,
		Args:     args,
	}
	select {
	case w.transactionChan <- t:
	case <-w.exitChan:
		return ErrStopped
	}
	return nil
}
Example 4: TestMock_Ticker_Multi

// Ensure that multiple tickers can be used together.
func TestMock_Ticker_Multi(t *testing.T) {
	var n int32
	clock := NewMock()
	go func() {
		a := clock.Ticker(1 * time.Microsecond)
		b := clock.Ticker(3 * time.Microsecond)
		for {
			select {
			case <-a.C:
				atomic.AddInt32(&n, 1)
			case <-b.C:
				atomic.AddInt32(&n, 100)
			}
		}
	}()
	gosched()
	// Move clock forward.
	clock.Add(10 * time.Microsecond)
	gosched()
	if atomic.LoadInt32(&n) != 310 {
		t.Fatalf("unexpected: %d", n)
	}
}
Example 5: attrHelper

func (c *ConcurrentSolver) attrHelper(G *graphs.Graph, removed []bool, tmpMap []int32, flags []uint32, ch chan int, i int, node int, wg *sync.WaitGroup) {
	for _, v0 := range G.Nodes[node].Inc {
		if !removed[v0] {
			flag := G.Nodes[v0].Player == i
			if atomic.CompareAndSwapUint32(&flags[v0], 0, 1) {
				if flag {
					ch <- v0
					atomic.AddInt32(&tmpMap[v0], 1)
				} else {
					adj_counter := 0
					for _, x := range G.Nodes[v0].Adj {
						if !removed[x] {
							adj_counter += 1
						}
					}
					atomic.AddInt32(&tmpMap[v0], int32(adj_counter))
					if adj_counter == 1 {
						ch <- v0
					}
				}
			} else if !flag {
				if atomic.AddInt32(&tmpMap[v0], -1) == 1 {
					ch <- v0
				}
			}
		}
	}
	wg.Done()
}
Example 6: inBulkSetAck

// inBulkSetAck actually processes incoming bulk-set-ack messages; there may be
// more than one of these workers.
func (store *DefaultGroupStore) inBulkSetAck(wg *sync.WaitGroup) {
	for {
		bsam := <-store.bulkSetAckState.inMsgChan
		if bsam == nil {
			break
		}
		ring := store.msgRing.Ring()
		var rightwardPartitionShift uint64
		if ring != nil {
			rightwardPartitionShift = 64 - uint64(ring.PartitionBitCount())
		}
		b := bsam.body
		// div mul just ensures any trailing bytes are dropped
		l := len(b) / _GROUP_BULK_SET_ACK_MSG_ENTRY_LENGTH * _GROUP_BULK_SET_ACK_MSG_ENTRY_LENGTH
		for o := 0; o < l; o += _GROUP_BULK_SET_ACK_MSG_ENTRY_LENGTH {
			keyA := binary.BigEndian.Uint64(b[o:])
			if ring != nil && !ring.Responsible(uint32(keyA>>rightwardPartitionShift)) {
				atomic.AddInt32(&store.inBulkSetAckWrites, 1)
				timestampbits := binary.BigEndian.Uint64(b[o+32:]) | _TSB_LOCAL_REMOVAL
				ptimestampbits, err := store.write(keyA, binary.BigEndian.Uint64(b[o+8:]), binary.BigEndian.Uint64(b[o+16:]), binary.BigEndian.Uint64(b[o+24:]), timestampbits, nil, true)
				if err != nil {
					atomic.AddInt32(&store.inBulkSetAckWriteErrors, 1)
				} else if ptimestampbits >= timestampbits {
					atomic.AddInt32(&store.inBulkSetAckWritesOverridden, 1)
				}
			}
		}
		store.bulkSetAckState.inFreeMsgChan <- bsam
	}
	wg.Done()
}
Example 7: AddWindow

func (g *Group) AddWindow(w Window) (err error) {
	if atomic.LoadInt32(&g.count) >= 10 {
		return ErrTooMany
	}
	if g.ids.exists(w.Id()) {
		return ErrNotUnique
	}
	g.ids.add(w.Id())
	atomic.AddInt32(&g.count, 1)
	g.Add(1)
	go func() {
		for ch := range w.Childs() {
			if err := g.AddWindow(ch); err != nil {
				w.SetError(err)
			}
		}
	}()
	go func() {
		<-w.Show()
		g.ids.remove(w.Id())
		atomic.AddInt32(&g.count, -1)
		g.Done()
	}()
	return
}
Example 8: compactionWorker

func (store *defaultValueStore) compactionWorker(jobChan chan *valueCompactionJob, controlChan chan struct{}, wg *sync.WaitGroup) {
	for c := range jobChan {
		select {
		case <-controlChan:
			break
		default:
		}
		total, err := valueTOCStat(path.Join(store.pathtoc, c.nametoc), store.stat, store.openReadSeeker)
		if err != nil {
			store.logError("compaction: unable to stat %s because: %v", path.Join(store.pathtoc, c.nametoc), err)
			continue
		}
		// TODO: This 1000 should be in the Config.
		// If total is less than 1000, it'll automatically get compacted.
		if total < 1000 {
			atomic.AddInt32(&store.smallFileCompactions, 1)
		} else {
			toCheck := uint32(total)
			// If there are more than a million entries, we'll just check the
			// first million and extrapolate.
			if toCheck > 1000000 {
				toCheck = 1000000
			}
			if !store.needsCompaction(c.nametoc, c.candidateBlockID, total, toCheck) {
				continue
			}
			atomic.AddInt32(&store.compactions, 1)
		}
		store.compactFile(c.nametoc, c.candidateBlockID, controlChan, "compactionWorker")
	}
	wg.Done()
}
Example 9: TestRun

func TestRun(t *testing.T) {
	i := int32(0)
	errs := Run(
		func() error {
			atomic.AddInt32(&i, 1)
			return nil
		},
		func() error {
			atomic.AddInt32(&i, 5)
			return nil
		},
	)
	if len(errs) != 0 || i != 6 {
		t.Error("unexpected run")
	}
	testErr := fmt.Errorf("an error")
	i = int32(0)
	errs = Run(
		func() error {
			return testErr
		},
		func() error {
			atomic.AddInt32(&i, 5)
			return nil
		},
	)
	if len(errs) != 1 || errs[0] != testErr || i != 5 {
		t.Error("unexpected run")
	}
}
Example 10: msgToAddr

func (t *TCPMsgRing) msgToAddr(msg Msg, addr string, timeout time.Duration) {
	atomic.AddInt32(&t.msgToAddrs, 1)
	msgChan, created := t.msgChanForAddr(addr)
	if created {
		go t.connection(addr, nil, msgChan, true)
	}
	timer := time.NewTimer(timeout)
	select {
	case <-t.controlChan:
		atomic.AddInt32(&t.msgToAddrShutdownDrops, 1)
		timer.Stop()
		msg.Free()
	case msgChan <- msg:
		atomic.AddInt32(&t.msgToAddrQueues, 1)
		timer.Stop()
	case <-timer.C:
		atomic.AddInt32(&t.msgToAddrTimeoutDrops, 1)
		msg.Free()
	}
	// TODO: Uncertain the code block above is better than that below.
	//       Seems reasonable to Stop a timer if it won't be used; but
	//       perhaps that's more expensive than just letting it fire.
	// select {
	// case <-t.controlChan:
	// 	msg.Free()
	// case msgChan <- msg:
	// case <-time.After(timeout):
	// 	msg.Free()
	// }
}
Example 11: MsgToOtherReplicas

// MsgToOtherReplicas queues the message for delivery to all other replicas of
// a partition; the timeout should be considered for queueing, not for actual
// delivery.
//
// If the ring is not bound to a specific node (LocalNode() returns nil) then
// the delivery attempts will be to all replicas.
//
// When the msg has actually been sent or has been discarded due to delivery
// errors or delays, msg.Free() will be called.
func (t *TCPMsgRing) MsgToOtherReplicas(msg Msg, partition uint32, timeout time.Duration) {
	atomic.AddInt32(&t.msgToOtherReplicas, 1)
	ring := t.Ring()
	if ring == nil {
		atomic.AddInt32(&t.msgToOtherReplicasNoRings, 1)
		msg.Free()
		return
	}
	nodes := ring.ResponsibleNodes(partition)
	mmsg := &multiMsg{msg: msg, freerChan: make(chan struct{}, len(nodes))}
	toAddrChan := make(chan struct{}, len(nodes))
	toAddr := func(addr string) {
		t.msgToAddr(mmsg, addr, timeout)
		toAddrChan <- struct{}{}
	}
	var localID uint64
	if localNode := ring.LocalNode(); localNode != nil {
		localID = localNode.ID()
	}
	toAddrs := 0
	for _, node := range nodes {
		if node.ID() != localID {
			go toAddr(node.Address(t.addressIndex))
			toAddrs++
		}
	}
	if toAddrs == 0 {
		msg.Free()
		return
	}
	for i := 0; i < toAddrs; i++ {
		<-toAddrChan
	}
	go mmsg.freer(toAddrs)
}
Example 12: inBulkSetAck

// inBulkSetAck actually processes incoming bulk-set-ack messages; there may be
// more than one of these workers.
func (vs *DefaultValueStore) inBulkSetAck(doneChan chan struct{}) {
	for {
		bsam := <-vs.bulkSetAckState.inMsgChan
		if bsam == nil {
			break
		}
		ring := vs.msgRing.Ring()
		var rightwardPartitionShift uint64
		if ring != nil {
			rightwardPartitionShift = 64 - uint64(ring.PartitionBitCount())
		}
		b := bsam.body
		// div mul just ensures any trailing bytes are dropped
		l := len(b) / _BULK_SET_ACK_MSG_ENTRY_LENGTH * _BULK_SET_ACK_MSG_ENTRY_LENGTH
		for o := 0; o < l; o += _BULK_SET_ACK_MSG_ENTRY_LENGTH {
			keyA := binary.BigEndian.Uint64(b[o:])
			if ring != nil && !ring.Responsible(uint32(keyA>>rightwardPartitionShift)) {
				atomic.AddInt32(&vs.inBulkSetAckWrites, 1)
				timestampbits := binary.BigEndian.Uint64(b[o+16:]) | _TSB_LOCAL_REMOVAL
				rtimestampbits, err := vs.write(keyA, binary.BigEndian.Uint64(b[o+8:]), timestampbits, nil)
				if err != nil {
					atomic.AddInt32(&vs.inBulkSetAckWriteErrors, 1)
				} else if rtimestampbits != timestampbits {
					atomic.AddInt32(&vs.inBulkSetAckWritesOverridden, 1)
				}
			}
		}
		vs.bulkSetAckState.inFreeMsgChan <- bsam
	}
	doneChan <- struct{}{}
}
Example 13: Start

func (v *basePE) Start() {
	v.mutex.Lock()
	defer v.mutex.Unlock()
	if v.started {
		// already started
		return
	}
	cs := make(chan bool, v.parallel)
	for i := 0; i < v.parallel; i++ {
		go func(vv *basePE) {
			vv.wg.Add(1)
			atomic.AddInt32(&vv.running, 1)
			cs <- true
			defer func() {
				atomic.AddInt32(&vv.running, -1)
				vv.wg.Done()
			}()
			for vf := range vv.queue {
				vf()
				vv.completed++
			}
		}(v)
	}
	for i := 0; i < v.parallel; i++ {
		<-cs
	}
	v.started = true
}
Example 14: decRef

func (r *IndexReaderImpl) decRef() error {
	// only check refcount here (don't call ensureOpen()), so we can
	// still close the reader if it was made invalid by a child:
	if r.refCount <= 0 {
		return errors.New("this IndexReader is closed")
	}
	rc := atomic.AddInt32(&r.refCount, -1)
	if rc == 0 {
		success := false
		defer func() {
			if !success {
				// Put reference back on failure
				atomic.AddInt32(&r.refCount, 1)
			}
		}()
		r.doClose()
		success = true
		r.reportCloseToParentReaders()
		r.notifyReaderClosedListeners()
	} else if rc < 0 {
		panic(fmt.Sprintf("too many decRef calls: refCount is %v after decrement", rc))
	}
	return nil
}
Example 15: renderRows

func renderRows(wg *sync.WaitGroup, s32 int32) {
	var y int32
	for y = atomic.AddInt32(&yAt, 1); y < s32; y = atomic.AddInt32(&yAt, 1) {
		fields[y] = renderRow(&y)
	}
	wg.Done()
}