This article collects typical usage examples of the runtime/internal/atomic.Loadp function in Golang. If you have been wondering what exactly Loadp does, how to call it, or what real-world uses look like, the hand-picked examples below should help.
A total of 14 code examples of the Loadp function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
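Before diving into the examples, note that runtime/internal/atomic is an internal package: it can only be imported from inside the Go runtime, so you cannot call Loadp from ordinary code. Its closest public counterpart is sync/atomic.LoadPointer. The minimal sketch below (the config type and the publish/load helpers are invented for illustration, not part of any standard API) shows the publish-then-read pattern that most of the runtime examples on this page rely on:

package main

import (
    "fmt"
    "sync/atomic"
    "unsafe"
)

// config is a hypothetical immutable value published by one goroutine
// and read by many others without taking a lock.
type config struct {
    name string
}

// current holds the latest *config as an unsafe.Pointer.
var current unsafe.Pointer

// publish installs a new configuration. The pointer store is atomic,
// so readers can never observe a half-written pointer.
func publish(c *config) {
    atomic.StorePointer(&current, unsafe.Pointer(c))
}

// load retrieves the latest configuration with a single atomic load,
// mirroring what the runtime does internally with atomic.Loadp.
func load() *config {
    return (*config)(atomic.LoadPointer(&current))
}

func main() {
    publish(&config{name: "v1"})
    fmt.Println(load().name) // prints "v1"
}

Every example that follows is the same idea at a larger scale: one side publishes a fully built structure with an atomic pointer store, and readers pick it up with a single Loadp.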
Example 1: block
// block returns the spans in the i'th block of buffer b. block is
// safe to call concurrently with push.
func (b *gcSweepBuf) block(i int) []*mspan {
    // Perform bounds check before loading spine address since
    // push ensures the allocated length is at least spineLen.
    if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
        throw("block index out of range")
    }
    // Get block i.
    spine := atomic.Loadp(unsafe.Pointer(&b.spine))
    blockp := add(spine, sys.PtrSize*uintptr(i))
    block := (*gcSweepBlock)(atomic.Loadp(blockp))
    // Slice the block if necessary.
    cursor := uintptr(atomic.Load(&b.index))
    top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
    var spans []*mspan
    if uintptr(i) < top {
        spans = block.spans[:]
    } else {
        spans = block.spans[:bottom]
    }
    // push may have reserved a slot but not filled it yet, so
    // trim away unused entries.
    for len(spans) > 0 && spans[len(spans)-1] == nil {
        spans = spans[:len(spans)-1]
    }
    return spans
}
Example 2: mapaccess2
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
    if raceenabled && h != nil {
        callerpc := getcallerpc(unsafe.Pointer(&t))
        pc := funcPC(mapaccess2)
        racereadpc(unsafe.Pointer(h), callerpc, pc)
        raceReadObjectPC(t.key, key, callerpc, pc)
    }
    if msanenabled && h != nil {
        msanread(key, t.key.size)
    }
    if h == nil || h.count == 0 {
        return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
    }
    if h.flags&hashWriting != 0 {
        throw("concurrent map read and map write")
    }
    alg := t.key.alg
    hash := alg.hash(key, uintptr(h.hash0))
    m := uintptr(1)<<h.B - 1
    b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
    if c := h.oldbuckets; c != nil {
        oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
        if !evacuated(oldb) {
            b = oldb
        }
    }
    top := uint8(hash >> (sys.PtrSize*8 - 8))
    if top < minTopHash {
        top += minTopHash
    }
    for {
        for i := uintptr(0); i < bucketCnt; i++ {
            if b.tophash[i] != top {
                continue
            }
            k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
            if t.indirectkey {
                k = *((*unsafe.Pointer)(k))
            }
            if alg.equal(key, k) {
                v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
                if t.indirectvalue {
                    v = *((*unsafe.Pointer)(v))
                }
                return v, true
            }
        }
        b = b.overflow(t)
        if b == nil {
            return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
        }
    }
}
Example 3: NumCgoCall
// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
    var n int64
    for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
        n += int64(mp.ncgocall)
    }
    return n
}
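In NumCgoCall, allm is the head of a linked list of m structures; new entries are linked in at the front and published with an atomic store, so a single atomic load of the head gives a consistent snapshot to walk. A rough user-level analogue with sync/atomic (the node type and the push/sum helpers are hypothetical, not runtime APIs) could look like this:

package listexample

import (
    "sync/atomic"
    "unsafe"
)

// node is an intrusive list element. Nodes are fully initialized
// before they are published, and next never changes afterwards, so
// readers may follow it with plain loads.
type node struct {
    value int
    next  *node
}

// head is the atomically published head of the list.
var head unsafe.Pointer

// push links a new node in front of the list with a compare-and-swap,
// retrying if another goroutine published first.
func push(v int) {
    for {
        old := atomic.LoadPointer(&head)
        n := &node{value: v, next: (*node)(old)}
        if atomic.CompareAndSwapPointer(&head, old, unsafe.Pointer(n)) {
            return
        }
    }
}

// sum walks the list after one atomic load of the head, just as
// NumCgoCall walks allm after one atomic.Loadp.
func sum() int {
    total := 0
    for n := (*node)(atomic.LoadPointer(&head)); n != nil; n = n.next {
        total += n.value
    }
    return total
}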
Example 4: activeModules
// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
func activeModules() []*moduledata {
    p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
    if p == nil {
        return nil
    }
    return *p
}
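activeModules reads a pointer to a slice with one atomic load; writers build a complete new slice and swap the pointer in, so readers never see a partially updated slice header. A hedged sketch of the same idea in user code (setModules, activeModules and the []string element type are assumptions made for this example):

package modexample

import (
    "sync/atomic"
    "unsafe"
)

// modules points to the current, immutable []string. It is only ever
// replaced wholesale, never modified in place.
var modules unsafe.Pointer

// setModules publishes a new slice by swapping the pointer.
func setModules(m []string) {
    atomic.StorePointer(&modules, unsafe.Pointer(&m))
}

// activeModules returns the current slice, or nil if none has been
// published yet, mirroring the nil check in the runtime version.
func activeModules() []string {
    p := (*[]string)(atomic.LoadPointer(&modules))
    if p == nil {
        return nil
    }
    return *p
}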
Example 5: mapaccess2_fast32
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
    if raceenabled && h != nil {
        callerpc := getcallerpc(unsafe.Pointer(&t))
        racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
    }
    if h == nil || h.count == 0 {
        return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
    }
    if h.flags&hashWriting != 0 {
        throw("concurrent map read and map write")
    }
    var b *bmap
    if h.B == 0 {
        // One-bucket table. No need to hash.
        b = (*bmap)(h.buckets)
    } else {
        hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
        m := uintptr(1)<<h.B - 1
        b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
        if c := h.oldbuckets; c != nil {
            oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
            if !evacuated(oldb) {
                b = oldb
            }
        }
    }
    for {
        for i := uintptr(0); i < bucketCnt; i++ {
            k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
            if k != key {
                continue
            }
            x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
            if x == empty {
                continue
            }
            return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
        }
        b = b.overflow(t)
        if b == nil {
            return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
        }
    }
}
Example 6: getitab
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
    if len(inter.mhdr) == 0 {
        throw("internal error - misuse of itab")
    }
    // easy case
    if typ.tflag&tflagUncommon == 0 {
        if canfail {
            return nil
        }
        name := inter.typ.nameOff(inter.mhdr[0].name)
        panic(&TypeAssertionError{"", typ.string(), inter.typ.string(), name.name()})
    }
    h := itabhash(inter, typ)
    // look twice - once without lock, once with.
    // common case will be no lock contention.
    var m *itab
    var locked int
    for locked = 0; locked < 2; locked++ {
        if locked != 0 {
            lock(&ifaceLock)
        }
        for m = (*itab)(atomic.Loadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
            if m.inter == inter && m._type == typ {
                if m.bad != 0 {
                    if !canfail {
                        // this can only happen if the conversion
                        // was already done once using the , ok form
                        // and we have a cached negative result.
                        // the cached result doesn't record which
                        // interface function was missing, so try
                        // adding the itab again, which will throw an error.
                        additab(m, locked != 0, false)
                    }
                    m = nil
                }
                if locked != 0 {
                    unlock(&ifaceLock)
                }
                return m
            }
        }
    }
    m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
    m.inter = inter
    m._type = typ
    additab(m, true, canfail)
    unlock(&ifaceLock)
    if m.bad != 0 {
        return nil
    }
    return m
}
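The comment in getitab sums up the pattern: look twice, once without the lock and once with it. The fast path is a lock-free read of the hash chain via atomic.Loadp, and only a miss falls back to the mutex and a second check. A user-level version of that read-mostly pattern, sketched here as a copy-on-write map cache (lookup, cachePtr and the map type are all invented names, not a real API):

package cacheexample

import (
    "sync"
    "sync/atomic"
    "unsafe"
)

var (
    cacheMu  sync.Mutex     // serializes writers
    cachePtr unsafe.Pointer // *map[string]int, replaced wholesale
)

// lookup first probes the cache without any lock, then rechecks and
// inserts under the mutex, publishing a fresh copy of the map.
func lookup(key string, compute func(string) int) int {
    // Fast path: one atomic pointer load, no lock.
    if p := (*map[string]int)(atomic.LoadPointer(&cachePtr)); p != nil {
        if v, ok := (*p)[key]; ok {
            return v
        }
    }
    // Slow path: look again with the lock held, as getitab does.
    cacheMu.Lock()
    defer cacheMu.Unlock()
    old := (*map[string]int)(atomic.LoadPointer(&cachePtr))
    if old != nil {
        if v, ok := (*old)[key]; ok {
            return v
        }
    }
    next := make(map[string]int, 1)
    if old != nil {
        for k, v := range *old {
            next[k] = v
        }
    }
    v := compute(key)
    next[key] = v
    atomic.StorePointer(&cachePtr, unsafe.Pointer(&next))
    return v
}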
Example 7: goexitsall
func goexitsall(status *byte) {
    var buf [_ERRMAX]byte
    n := copy(buf[:], goexits)
    n = copy(buf[n:], gostringnocopy(status))
    pid := getpid()
    for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
        if mp.procid != pid {
            postnote(mp.procid, buf[:])
        }
    }
}
Example 8: mapzero
// mapzero ensures that zeroptr points to a buffer large enough to
// serve as the zero value for t.
func mapzero(t *_type) {
    // Is the type small enough for existing buffer?
    cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
    if t.size <= cursize {
        return
    }
    // Allocate a new buffer.
    lock(&zerolock)
    cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
    if cursize < t.size {
        for cursize < t.size {
            cursize *= 2
            if cursize == 0 {
                // need >2GB zero on 32-bit machine
                throw("map element too large")
            }
        }
        atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
        atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(cursize))
    }
    unlock(&zerolock)
}
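Note the order of the two stores at the end of mapzero: the new buffer pointer is published before the new size, so any reader that observes the larger zerosize is guaranteed to also observe a buffer at least that large. A sketch of the same grow-and-publish idea with sync/atomic (ensureZero and zero are hypothetical helpers; the runtime uses persistentalloc and Storep1 instead of make and StorePointer):

package zeroexample

import (
    "sync"
    "sync/atomic"
    "unsafe"
)

var (
    zeroLock sync.Mutex
    zeroBuf  unsafe.Pointer // *[]byte, always all zeros
    zeroSize uintptr        // current length of the buffer
)

// ensureZero grows the shared zero buffer to at least n bytes.
func ensureZero(n uintptr) {
    if atomic.LoadUintptr(&zeroSize) >= n {
        return
    }
    zeroLock.Lock()
    defer zeroLock.Unlock()
    size := atomic.LoadUintptr(&zeroSize)
    if size >= n {
        return // another goroutine already grew it
    }
    if size == 0 {
        size = 1024
    }
    for size < n {
        size *= 2
    }
    buf := make([]byte, size)
    // Publish the buffer first, then the size: a reader that sees the
    // new size must also see the new buffer.
    atomic.StorePointer(&zeroBuf, unsafe.Pointer(&buf))
    atomic.StoreUintptr(&zeroSize, size)
}

// zero returns an all-zero slice of length n.
func zero(n uintptr) []byte {
    ensureZero(n)
    buf := (*[]byte)(atomic.LoadPointer(&zeroBuf))
    return (*buf)[:n]
}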
Example 9: ThreadCreateProfile
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
    first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
    for mp := first; mp != nil; mp = mp.alllink {
        n++
    }
    if n <= len(p) {
        ok = true
        i := 0
        for mp := first; mp != nil; mp = mp.alllink {
            p[i].Stack0 = mp.createstack
            i++
        }
    }
    return
}
Example 10: goexitsall
func goexitsall(status *byte) {
    var buf [_ERRMAX]byte
    if !atomic.Cas(&exiting, 0, 1) {
        return
    }
    getg().m.locks++
    n := copy(buf[:], goexits)
    n = copy(buf[n:], gostringnocopy(status))
    pid := getpid()
    for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
        if mp.procid != 0 && mp.procid != pid {
            postnote(mp.procid, buf[:])
        }
    }
    getg().m.locks--
}
Example 11: profileloop1
func profileloop1(param uintptr) uint32 {
    stdcall2(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
    for {
        stdcall2(_WaitForSingleObject, profiletimer, _INFINITE)
        first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
        for mp := first; mp != nil; mp = mp.alllink {
            thread := atomic.Loaduintptr(&mp.thread)
            // Do not profile threads blocked on Notes,
            // this includes idle worker threads,
            // idle timer thread, idle heap scavenger, etc.
            if thread == 0 || mp.profilehz == 0 || mp.blocked {
                continue
            }
            stdcall1(_SuspendThread, thread)
            if mp.profilehz != 0 && !mp.blocked {
                profilem(mp)
            }
            stdcall1(_ResumeThread, thread)
        }
    }
}
Example 12: push
// push adds span s to buffer b. push is safe to call concurrently
// with other push operations, but NOT to call concurrently with pop.
func (b *gcSweepBuf) push(s *mspan) {
    // Obtain our slot.
    cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
    top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
    // Do we need to add a block?
    spineLen := atomic.Loaduintptr(&b.spineLen)
    var block *gcSweepBlock
retry:
    if top < spineLen {
        spine := atomic.Loadp(unsafe.Pointer(&b.spine))
        blockp := add(spine, sys.PtrSize*top)
        block = (*gcSweepBlock)(atomic.Loadp(blockp))
    } else {
        // Add a new block to the spine, potentially growing
        // the spine.
        lock(&b.spineLock)
        // spineLen cannot change until we release the lock,
        // but may have changed while we were waiting.
        spineLen = atomic.Loaduintptr(&b.spineLen)
        if top < spineLen {
            unlock(&b.spineLock)
            goto retry
        }
        if spineLen == b.spineCap {
            // Grow the spine.
            newCap := b.spineCap * 2
            if newCap == 0 {
                newCap = gcSweepBufInitSpineCap
            }
            newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys)
            if b.spineCap != 0 {
                // Blocks are allocated off-heap, so
                // no write barriers.
                memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
            }
            // Spine is allocated off-heap, so no write barrier.
            atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
            b.spineCap = newCap
            // We can't immediately free the old spine
            // since a concurrent push with a lower index
            // could still be reading from it. We let it
            // leak because even a 1TB heap would waste
            // less than 2MB of memory on old spines. If
            // this is a problem, we could free old spines
            // during STW.
        }
        // Allocate a new block and add it to the spine.
        block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys))
        blockp := add(b.spine, sys.PtrSize*top)
        // Blocks are allocated off-heap, so no write barrier.
        atomic.StorepNoWB(blockp, unsafe.Pointer(block))
        atomic.Storeuintptr(&b.spineLen, spineLen+1)
        unlock(&b.spineLock)
    }
    // We have a block. Insert the span.
    block.spans[bottom] = s
}
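push reserves a slot with an atomic add and only afterwards writes the span into it, which is exactly why block in example 1 has to trim trailing nil entries. A much simplified, fixed-size sketch of that reserve-then-fill idea (the buf type and its methods are hypothetical; the real gcSweepBuf grows a two-level spine instead of using a fixed array):

package bufexample

import (
    "sync/atomic"
    "unsafe"
)

// buf is a fixed-capacity append-only buffer. Writers reserve a slot
// index with an atomic add and fill it afterwards.
type buf struct {
    index uint32
    slots [1024]unsafe.Pointer
}

// push appends p. Safe to call from many goroutines at once.
func (b *buf) push(p unsafe.Pointer) {
    i := atomic.AddUint32(&b.index, 1) - 1 // reserve a slot
    if i >= uint32(len(b.slots)) {
        panic("buf overflow") // the runtime grows a spine instead
    }
    atomic.StorePointer(&b.slots[i], p) // then fill it
}

// snapshot returns the entries pushed so far. A slot may have been
// reserved but not yet filled, so nil entries are skipped, just as
// gcSweepBuf.block trims trailing nils.
func (b *buf) snapshot() []unsafe.Pointer {
    n := atomic.LoadUint32(&b.index)
    if n > uint32(len(b.slots)) {
        n = uint32(len(b.slots))
    }
    out := make([]unsafe.Pointer, 0, n)
    for i := uint32(0); i < n; i++ {
        if p := atomic.LoadPointer(&b.slots[i]); p != nil {
            out = append(out, p)
        }
    }
    return out
}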
Example 13: getitab
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
    if len(inter.mhdr) == 0 {
        throw("internal error - misuse of itab")
    }
    // easy case
    x := typ.x
    if x == nil {
        if canfail {
            return nil
        }
        panic(&TypeAssertionError{"", typ._string, inter.typ._string, *inter.mhdr[0].name})
    }
    // compiler has provided some good hash codes for us.
    h := inter.typ.hash
    h += 17 * typ.hash
    // TODO(rsc): h += 23 * x.mhash ?
    h %= hashSize
    // look twice - once without lock, once with.
    // common case will be no lock contention.
    var m *itab
    var locked int
    for locked = 0; locked < 2; locked++ {
        if locked != 0 {
            lock(&ifaceLock)
        }
        for m = (*itab)(atomic.Loadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
            if m.inter == inter && m._type == typ {
                if m.bad != 0 {
                    m = nil
                    if !canfail {
                        // this can only happen if the conversion
                        // was already done once using the , ok form
                        // and we have a cached negative result.
                        // the cached result doesn't record which
                        // interface function was missing, so jump
                        // down to the interface check, which will
                        // do more work but give a better error.
                        goto search
                    }
                }
                if locked != 0 {
                    unlock(&ifaceLock)
                }
                return m
            }
        }
    }
    m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
    m.inter = inter
    m._type = typ
search:
    // both inter and typ have method sorted by name,
    // and interface names are unique,
    // so can iterate over both in lock step;
    // the loop is O(ni+nt) not O(ni*nt).
    ni := len(inter.mhdr)
    nt := len(x.mhdr)
    j := 0
    for k := 0; k < ni; k++ {
        i := &inter.mhdr[k]
        iname := i.name
        ipkgpath := i.pkgpath
        itype := i._type
        for ; j < nt; j++ {
            t := &x.mhdr[j]
            if t.mtyp == itype && (t.name == iname || *t.name == *iname) && t.pkgpath == ipkgpath {
                if m != nil {
                    *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
                }
                goto nextimethod
            }
        }
        // didn't find method
        if !canfail {
            if locked != 0 {
                unlock(&ifaceLock)
            }
            panic(&TypeAssertionError{"", typ._string, inter.typ._string, *iname})
        }
        m.bad = 1
        break
    nextimethod:
    }
    if locked == 0 {
        throw("invalid itab locking")
    }
    m.link = hash[h]
    atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
    unlock(&ifaceLock)
    if m.bad != 0 {
        return nil
    }
    return m
}
//......... part of the code is omitted here .........
Example 14: mapaccess2_faststr
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
    if raceenabled && h != nil {
        callerpc := getcallerpc(unsafe.Pointer(&t))
        racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
    }
    if h == nil || h.count == 0 {
        return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
    }
    key := stringStructOf(&ky)
    if h.B == 0 {
        // One-bucket table.
        b := (*bmap)(h.buckets)
        if key.len < 32 {
            // short key, doing lots of comparisons is ok
            for i := uintptr(0); i < bucketCnt; i++ {
                x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
                if x == empty {
                    continue
                }
                k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
                if k.len != key.len {
                    continue
                }
                if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
                    return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
                }
            }
            return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
        }
        // long key, try not to do more comparisons than necessary
        keymaybe := uintptr(bucketCnt)
        for i := uintptr(0); i < bucketCnt; i++ {
            x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
            if x == empty {
                continue
            }
            k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
            if k.len != key.len {
                continue
            }
            if k.str == key.str {
                return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
            }
            // check first 4 bytes
            if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
                continue
            }
            // check last 4 bytes
            if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
                continue
            }
            if keymaybe != bucketCnt {
                // Two keys are potential matches. Use hash to distinguish them.
                goto dohash
            }
            keymaybe = i
        }
        if keymaybe != bucketCnt {
            k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
            if memeq(k.str, key.str, uintptr(key.len)) {
                return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
            }
        }
        return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
    }
dohash:
    hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
    m := uintptr(1)<<h.B - 1
    b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
    if c := h.oldbuckets; c != nil {
        oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
        if !evacuated(oldb) {
            b = oldb
        }
    }
    top := uint8(hash >> (ptrSize*8 - 8))
    if top < minTopHash {
        top += minTopHash
    }
    for {
        for i := uintptr(0); i < bucketCnt; i++ {
            x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
            if x != top {
                continue
            }
            k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
            if k.len != key.len {
                continue
            }
            if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
                return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
            }
        }
        b = b.overflow(t)
        if b == nil {
            return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
        }
    }
}