This article collects typical usage examples of the Go function runtime/internal/atomic.Load. If you are unsure what Load does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following shows 15 code examples of the Load function, taken from the Go runtime and ordered by popularity.
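Because runtime/internal/atomic is internal to the runtime, user code cannot import it; its public counterpart is sync/atomic. As a warm-up, here is a minimal self-contained sketch (an illustration added for this article, not taken from the runtime) of the load-a-shared-flag pattern that the examples below rely on:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

func main() {
    var ready uint32 // shared flag: written by one goroutine, polled by another

    go func() {
        time.Sleep(10 * time.Millisecond)
        atomic.StoreUint32(&ready, 1) // publish the flag
    }()

    // Spin until the store becomes visible. LoadUint32 gives this read
    // the synchronization that a plain read of ready would lack.
    for atomic.LoadUint32(&ready) == 0 {
        time.Sleep(time.Millisecond)
    }
    fmt.Println("flag observed")
}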
Example 1: semrelease
func semrelease(addr *uint32) {
    root := semroot(addr)
    atomic.Xadd(addr, 1)

    // Easy case: no waiters?
    // This check must happen after the xadd, to avoid a missed wakeup
    // (see loop in semacquire).
    if atomic.Load(&root.nwait) == 0 {
        return
    }

    // Harder case: search for a waiter and wake it.
    lock(&root.lock)
    if atomic.Load(&root.nwait) == 0 {
        // The count is already consumed by another goroutine,
        // so no need to wake up another goroutine.
        unlock(&root.lock)
        return
    }
    s := root.head
    for ; s != nil; s = s.next {
        if s.elem == unsafe.Pointer(addr) {
            atomic.Xadd(&root.nwait, -1)
            root.dequeue(s)
            break
        }
    }
    unlock(&root.lock)
    if s != nil {
        if s.releasetime != 0 {
            s.releasetime = cputicks()
        }
        goready(s.g, 4)
    }
}
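The ordering above (Xadd first, then the nwait check) is what prevents a missed wakeup. Here is a minimal user-level sketch of the same ordering, assuming the public sync/atomic package; release, count, and nwait are illustrative names, not runtime identifiers:

import "sync/atomic"

var count uint32 // semaphore value (illustrative)
var nwait uint32 // number of sleeping waiters (illustrative)

func release() {
    atomic.AddUint32(&count, 1) // publish the token first
    if atomic.LoadUint32(&nwait) == 0 {
        // No waiter can have missed the token: it was already
        // visible before this load executed.
        return
    }
    // Slow path: lock, find a waiter, wake it (elided).
}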
Example 2: notifyListNotifyAll
// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList) {
    // Fast-path: if there are no new waiters since the last notification
    // we don't need to acquire the lock.
    if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
        return
    }

    // Pull the list out into a local variable, waiters will be readied
    // outside the lock.
    lock(&l.lock)
    s := l.head
    l.head = nil
    l.tail = nil

    // Update the next ticket to be notified. We can set it to the current
    // value of wait because any previous waiters are already in the list
    // or will notice that they have already been notified when trying to
    // add themselves to the list.
    atomic.Store(&l.notify, atomic.Load(&l.wait))
    unlock(&l.lock)

    // Go through the local list and ready all waiters.
    for s != nil {
        next := s.next
        s.next = nil
        readyWithTime(s, 4)
        s = next
    }
}
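The //go:linkname directive above wires this function into the sync package: it is the runtime half of sync.Cond.Broadcast. User code only ever reaches it through the standard library, for example:

package main

import "sync"

func main() {
    var mu sync.Mutex
    cond := sync.NewCond(&mu)
    done := false

    go func() {
        mu.Lock()
        done = true
        mu.Unlock()
        cond.Broadcast() // ends up in runtime_notifyListNotifyAll
    }()

    mu.Lock()
    for !done {
        cond.Wait()
    }
    mu.Unlock()
}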
Example 3: notetsleep_internal
// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
    gp := getg()

    if ns < 0 {
        for atomic.Load(key32(&n.key)) == 0 {
            gp.m.blocked = true
            futexsleep(key32(&n.key), 0, -1)
            gp.m.blocked = false
        }
        return true
    }

    if atomic.Load(key32(&n.key)) != 0 {
        return true
    }

    deadline := nanotime() + ns
    for {
        gp.m.blocked = true
        futexsleep(key32(&n.key), 0, ns)
        gp.m.blocked = false
        if atomic.Load(key32(&n.key)) != 0 {
            break
        }
        now := nanotime()
        if now >= deadline {
            break
        }
        ns = deadline - now
    }
    return atomic.Load(key32(&n.key)) != 0
}
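Note how the loop recomputes ns from a fixed deadline on every pass, so a spurious futex wakeup never extends the total sleep. A user-level sketch of the same deadline pattern follows; waitFlag is a made-up name and time.Sleep stands in for futexsleep:

import (
    "sync/atomic"
    "time"
)

// waitFlag blocks until *flag becomes nonzero or the timeout elapses.
// Like notetsleep_internal, it re-derives the remaining time from a
// fixed deadline on every iteration.
func waitFlag(flag *uint32, timeout time.Duration) bool {
    deadline := time.Now().Add(timeout)
    for atomic.LoadUint32(flag) == 0 {
        if time.Now().After(deadline) {
            return false
        }
        time.Sleep(time.Millisecond) // stand-in for futexsleep
    }
    return true
}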
Example 4: RunSchedLocalQueueEmptyTest
func RunSchedLocalQueueEmptyTest(iters int) {
    // Test that runq is not spuriously reported as empty.
    // Runq emptiness affects scheduling decisions and spurious emptiness
    // can lead to underutilization (both runnable Gs and idle Ps coexist
    // for arbitrary long time).
    done := make(chan bool, 1)
    p := new(p)
    gs := make([]g, 2)
    ready := new(uint32)
    for i := 0; i < iters; i++ {
        *ready = 0
        next0 := (i & 1) == 0
        next1 := (i & 2) == 0
        runqput(p, &gs[0], next0)
        go func() {
            for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
            }
            if runqempty(p) {
                println("next:", next0, next1)
                throw("queue is empty")
            }
            done <- true
        }()
        for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
        }
        runqput(p, &gs[1], next1)
        runqget(p)
        <-done
        runqget(p)
    }
}
Example 5: signal_recv
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
func signal_recv() uint32 {
    for {
        // Serve any signals from local copy.
        for i := uint32(0); i < _NSIG; i++ {
            if sig.recv[i/32]&(1<<(i&31)) != 0 {
                sig.recv[i/32] &^= 1 << (i & 31)
                return i
            }
        }

        // Wait for updates to be available from signal sender.
    Receive:
        for {
            switch atomic.Load(&sig.state) {
            default:
                throw("signal_recv: inconsistent state")
            case sigIdle:
                if atomic.Cas(&sig.state, sigIdle, sigReceiving) {
                    notetsleepg(&sig.note, -1)
                    noteclear(&sig.note)
                    break Receive
                }
            case sigSending:
                if atomic.Cas(&sig.state, sigSending, sigIdle) {
                    break Receive
                }
            }
        }

        // Incorporate updates from sender into local copy.
        for i := range sig.mask {
            sig.recv[i] = atomic.Xchg(&sig.mask[i], 0)
        }
    }
}
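signal_recv is linked to os/signal.signal_recv; its return values are what the os/signal package turns into deliveries on the channel registered with signal.Notify:

package main

import (
    "fmt"
    "os"
    "os/signal"
    "syscall"
)

func main() {
    c := make(chan os.Signal, 1)
    signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
    s := <-c // each value was produced by signal_recv's scan loop
    fmt.Println("got signal:", s)
}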
Example 6: block
// block returns the spans in the i'th block of buffer b. block is
// safe to call concurrently with push.
func (b *gcSweepBuf) block(i int) []*mspan {
    // Perform bounds check before loading spine address since
    // push ensures the allocated length is at least spineLen.
    if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
        throw("block index out of range")
    }

    // Get block i.
    spine := atomic.Loadp(unsafe.Pointer(&b.spine))
    blockp := add(spine, sys.PtrSize*uintptr(i))
    block := (*gcSweepBlock)(atomic.Loadp(blockp))

    // Slice the block if necessary.
    cursor := uintptr(atomic.Load(&b.index))
    top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
    var spans []*mspan
    if uintptr(i) < top {
        spans = block.spans[:]
    } else {
        spans = block.spans[:bottom]
    }

    // push may have reserved a slot but not filled it yet, so
    // trim away unused entries.
    for len(spans) > 0 && spans[len(spans)-1] == nil {
        spans = spans[:len(spans)-1]
    }
    return spans
}
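atomic.Loadp has a public counterpart in sync/atomic's LoadPointer. A sketch of the same publish-then-index pattern follows; the block type and spine variable here are illustrative, not the runtime's:

import (
    "sync/atomic"
    "unsafe"
)

type block struct {
    spans [512]*int // illustrative payload
}

// spine points at a []*block that is only ever replaced, never
// mutated in place, so readers can snapshot it with one atomic load.
var spine unsafe.Pointer

func loadBlock(i int) *block {
    s := (*[]*block)(atomic.LoadPointer(&spine)) // like atomic.Loadp
    return (*s)[i]
}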
Example 7: semasleep
//go:nosplit
func semasleep(ns int64) int32 {
    _g_ := getg()

    // Compute sleep deadline.
    var tsp *timespec
    if ns >= 0 {
        var ts timespec
        var nsec int32
        ns += nanotime()
        ts.set_sec(timediv(ns, 1000000000, &nsec))
        ts.set_nsec(nsec)
        tsp = &ts
    }

    for {
        v := atomic.Load(&_g_.m.waitsemacount)
        if v > 0 {
            if atomic.Cas(&_g_.m.waitsemacount, v, v-1) {
                return 0 // semaphore acquired
            }
            continue
        }

        // Sleep until unparked by semawakeup or timeout.
        ret := lwp_park(tsp, 0, unsafe.Pointer(&_g_.m.waitsemacount), nil)
        if ret == _ETIMEDOUT {
            return -1
        }
    }
}
Example 8: minit
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
    if atomic.Load(&exiting) != 0 {
        exits(&emptystatus[0])
    }

    // Mask all SSE floating-point exceptions
    // when running on the 64-bit kernel.
    setfpmasks()
}
Example 9: semrelease (mutex-profiling variant)
func semrelease(addr *uint32) {
    root := semroot(addr)
    atomic.Xadd(addr, 1)

    // Easy case: no waiters?
    // This check must happen after the xadd, to avoid a missed wakeup
    // (see loop in semacquire).
    if atomic.Load(&root.nwait) == 0 {
        return
    }

    // Harder case: search for a waiter and wake it.
    lock(&root.lock)
    if atomic.Load(&root.nwait) == 0 {
        // The count is already consumed by another goroutine,
        // so no need to wake up another goroutine.
        unlock(&root.lock)
        return
    }
    s := root.head
    for ; s != nil; s = s.next {
        if s.elem == unsafe.Pointer(addr) {
            atomic.Xadd(&root.nwait, -1)
            root.dequeue(s)
            break
        }
    }
    if s != nil {
        if s.acquiretime != 0 {
            t0 := cputicks()
            for x := root.head; x != nil; x = x.next {
                if x.elem == unsafe.Pointer(addr) {
                    x.acquiretime = t0
                }
            }
            mutexevent(t0-s.acquiretime, 3)
        }
    }
    unlock(&root.lock)
    if s != nil { // May be slow, so unlock first
        readyWithTime(s, 5)
    }
}
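This variant differs from Example 1 by timing contention: the mutexevent call feeds Go's mutex contention profile, which records nothing unless user code turns sampling on:

import "runtime"

func init() {
    // Sample roughly 1 in every 5 contention events.
    // The default rate is 0, which disables the profile entirely.
    runtime.SetMutexProfileFraction(5)
}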
Example 10: cansemacquire
func cansemacquire(addr *uint32) bool {
    for {
        v := atomic.Load(addr)
        if v == 0 {
            return false
        }
        if atomic.Cas(addr, v, v-1) {
            return true
        }
    }
}
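The same load-then-CAS retry loop works unchanged outside the runtime. A sketch with the public package (tryAcquire is an illustrative name):

import "sync/atomic"

// tryAcquire decrements *addr if it is positive, retrying on contention.
// It returns false only when the count is observed to be zero.
func tryAcquire(addr *uint32) bool {
    for {
        v := atomic.LoadUint32(addr)
        if v == 0 {
            return false
        }
        if atomic.CompareAndSwapUint32(addr, v, v-1) {
            return true
        }
    }
}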
Example 11: notesleep
func notesleep(n *note) {
    gp := getg()
    if gp != gp.m.g0 {
        throw("notesleep not on g0")
    }
    for atomic.Load(key32(&n.key)) == 0 {
        gp.m.blocked = true
        futexsleep(key32(&n.key), 0, -1)
        gp.m.blocked = false
    }
}
Example 12: gotraceback
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
    _g_ := getg()
    all = _g_.m.throwing > 0
    if _g_.m.traceback != 0 {
        level = int32(_g_.m.traceback)
        return
    }
    t := atomic.Load(&traceback_cache)
    crash = t&tracebackCrash != 0
    all = all || t&tracebackAll != 0
    level = int32(t >> tracebackShift)
    return
}
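traceback_cache is seeded from the GOTRACEBACK environment variable; runtime/debug.SetTraceback can raise the level from within the program:

package main

import "runtime/debug"

func main() {
    // Roughly equivalent to running with GOTRACEBACK=all: the panic
    // below prints the stacks of all goroutines, not just this one.
    debug.SetTraceback("all")
    panic("demonstrate traceback level")
}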
Example 13: notifyListNotifyOne
// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList) {
    // Fast-path: if there are no new waiters since the last notification
    // we don't need to acquire the lock at all.
    if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
        return
    }

    lock(&l.lock)

    // Re-check under the lock if we need to do anything.
    t := l.notify
    if t == atomic.Load(&l.wait) {
        unlock(&l.lock)
        return
    }

    // Update the next notify ticket number, and try to find the G that
    // needs to be notified. If it hasn't made it to the list yet we won't
    // find it, but it won't park itself once it sees the new notify number.
    atomic.Store(&l.notify, t+1)
    for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
        if s.ticket == t {
            n := s.next
            if p != nil {
                p.next = n
            } else {
                l.head = n
            }
            if n == nil {
                l.tail = p
            }
            unlock(&l.lock)
            s.next = nil
            readyWithTime(s, 4)
            return
        }
    }
    unlock(&l.lock)
}
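This is the runtime half of sync.Cond.Signal, just as notifyListNotifyAll in Example 2 backs Broadcast. A typical use through the standard library:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var mu sync.Mutex
    cond := sync.NewCond(&mu)
    var queue []int

    go func() {
        mu.Lock()
        queue = append(queue, 42)
        mu.Unlock()
        cond.Signal() // wakes one waiter via runtime_notifyListNotifyOne
    }()

    mu.Lock()
    for len(queue) == 0 {
        cond.Wait()
    }
    fmt.Println(queue[0])
    mu.Unlock()
}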
Example 14: ensureSwept
// Returns only when span s has been swept.
//go:nowritebarrier
func (s *mspan) ensureSwept() {
    // Caller must disable preemption.
    // Otherwise when this function returns the span can become unswept again
    // (if GC is triggered on another goroutine).
    _g_ := getg()
    if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
        throw("MSpan_EnsureSwept: m is not locked")
    }

    sg := mheap_.sweepgen
    if atomic.Load(&s.sweepgen) == sg {
        return
    }
    // The caller must be sure that the span is a MSpanInUse span.
    if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
        s.sweep(false)
        return
    }
    // unfortunate condition, and we don't have efficient means to wait
    for atomic.Load(&s.sweepgen) != sg {
        osyield()
    }
}
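The closing loop is a spin wait with an OS-level yield, used because the runtime has no cheap way to block until another goroutine finishes sweeping. A user-level analogue, with runtime.Gosched standing in for osyield and an illustrative generation counter:

import (
    "runtime"
    "sync/atomic"
)

// waitForGeneration spins until *gen reaches want, yielding the
// processor on each miss instead of burning a full time slice.
func waitForGeneration(gen *uint32, want uint32) {
    for atomic.LoadUint32(gen) != want {
        runtime.Gosched() // user-level stand-in for osyield
    }
}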
Example 15: recordForPanic
// recordForPanic maintains a circular buffer of messages written by the
// runtime leading up to a process crash, allowing the messages to be
// extracted from a core dump.
//
// The text written during a process crash (following "panic" or "fatal
// error") is not saved, since the goroutine stacks will generally be readable
// from the runtime data structures in the core file.
func recordForPanic(b []byte) {
    printlock()

    if atomic.Load(&panicking) == 0 {
        // Not actively crashing: maintain circular buffer of print output.
        for i := 0; i < len(b); {
            n := copy(printBacklog[printBacklogIndex:], b[i:])
            i += n
            printBacklogIndex += n
            printBacklogIndex %= len(printBacklog)
        }
    }

    printunlock()
}