This article collects typical usage examples of the SetGCPercent function from Go's runtime/debug package. If you have been wondering what SetGCPercent does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help.
A total of 15 SetGCPercent code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
Example 1: TestGoroutineParallelism
func TestGoroutineParallelism(t *testing.T) {
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
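The one-liner defer debug.SetGCPercent(debug.SetGCPercent(-1)) is the idiom this test (and several of the examples below) relies on: SetGCPercent returns the previous setting, a negative value disables collection entirely, and the defer restores whatever was in effect once the function returns. A minimal standalone sketch of the same pattern (doWork is just a placeholder, not part of the original test):

package main

import (
	"fmt"
	"runtime/debug"
)

// doWork stands in for allocation-heavy work that should not be
// interrupted by the garbage collector.
func doWork() {}

func main() {
	// SetGCPercent(-1) disables the collector and returns the previous
	// setting; the inner call runs immediately, the deferred outer call
	// restores the old value when main returns.
	old := debug.SetGCPercent(-1)
	defer debug.SetGCPercent(old)

	doWork()
	fmt.Println("GC percentage will be restored to", old)
}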
Example 2: main
func main() {
	// Disable GC and make sure it is re-enabled before main returns.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var count int32
	newFunc := func() interface{} {
		return atomic.AddInt32(&count, 1)
	}
	pool := sync.Pool{New: newFunc}
	// What the New field is used for.
	v1 := pool.Get()
	fmt.Printf("v1: %v\n", v1)
	// Putting values into and getting them back from the pool.
	pool.Put(newFunc())
	pool.Put(newFunc())
	pool.Put(newFunc())
	v2 := pool.Get()
	fmt.Printf("v2: %v\n", v2)
	// The effect of garbage collection on the pool.
	debug.SetGCPercent(100)
	runtime.GC()
	v3 := pool.Get()
	fmt.Printf("v3: %v\n", v3)
	pool.New = nil
	v4 := pool.Get()
	fmt.Printf("v4: %v\n", v4)
}
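One caveat when running this example on newer toolchains: since Go 1.13 sync.Pool keeps evicted objects in a victim cache for one extra collection cycle, so a single runtime.GC() may not empty the pool the way it did when this example was written, and the printed values for v3 and v4 can differ between Go versions.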
Example 3: LoadTxt
func (t *MSTree) LoadTxt(filename string, limit int) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	// Turn GC off
	prevGC := debug.SetGCPercent(-1)
	// Defer to turn GC back on
	defer debug.SetGCPercent(prevGC)
	scanner := bufio.NewScanner(f)
	count := 0
	for scanner.Scan() {
		line := strings.TrimRight(scanner.Text(), "\n")
		t.AddNoSync(line)
		count++
		if count%1000000 == 0 {
			log.Info("Reindexed %d items", count)
		}
		if limit != -1 && count == limit {
			break
		}
	}
	log.Info("Reindexed %d items", count)
	err = t.DumpIndex()
	if err != nil {
		return err
	}
	return nil
}
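LoadTxt disables the collector for the duration of a bulk load and restores the previous percentage afterwards. If the same pattern is needed in several places it can be factored into a tiny helper; the sketch below uses a made-up name, withGCDisabled, and only relies on the standard runtime/debug API:

package main

import (
	"fmt"
	"runtime/debug"
)

// withGCDisabled runs fn with the garbage collector turned off and
// restores the previous GC percentage when fn returns, even on panic.
func withGCDisabled(fn func()) {
	prev := debug.SetGCPercent(-1)
	defer debug.SetGCPercent(prev)
	fn()
}

func main() {
	withGCDisabled(func() {
		// Bulk-load work would go here.
		fmt.Println("loading without GC pauses")
	})
}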
Example 4: mine
func mine(numProcs int) {
	runtime.GOMAXPROCS(numProcs)
	updateWork()
	updateLastBlock()
	debug.SetGCPercent(-1)
	log.Println("using", numProcs, "processes")
	switch {
	case cpuid.AVX2:
		log.Println("using AVX2 optimisations")
	case cpuid.AVX:
		log.Println("using AVX optimisations")
	case cpuid.SSSE3:
		log.Println("using SSSE3 optimisations")
	case cpuid.ArmSha:
		log.Println("using ARMSHA optimisations")
	default:
		log.Println("your CPU isn't supported for optimised mining")
		log.Println("please use v1.1 or get a new CPU.")
		os.Exit(1)
	}
	for proc := 0; proc < numProcs; proc++ {
		// decide on miner and execute
		switch {
		case cpuid.AVX2:
			go mineAVX2()
		case cpuid.AVX:
			go mineAVX()
		case cpuid.SSSE3:
			go mineSSSE3()
		case cpuid.ArmSha:
			go mineARM()
		}
	}
	log.Println("mining for address " + address + "...")
	previousTime := time.Now()
	for {
		for i := 0; i < 10; i++ {
			time.Sleep(time.Second * 5)
			log.Printf("%.2f MH/s\n", float64(hashesThisPeriod)/
				time.Now().Sub(previousTime).Seconds())
			previousTime = time.Now()
			hashesThisPeriod = 0
			updateWork()
			updateLastBlock()
		}
		debug.SetGCPercent(10)
		debug.SetGCPercent(-1)
	}
}
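At the bottom of the mining loop the example briefly raises the GC percentage to 10 and immediately disables the collector again, giving the runtime a chance to run an overdue collection between batches. Assuming nothing else depends on that temporary setting, the same intent can be expressed more directly by keeping GC disabled and forcing a single collection with runtime.GC(); a minimal sketch:

package main

import (
	"runtime"
	"runtime/debug"
	"time"
)

func main() {
	debug.SetGCPercent(-1) // keep the collector off during the hot loop

	for i := 0; i < 3; i++ {
		time.Sleep(50 * time.Millisecond) // stand-in for a batch of work

		// Run exactly one collection between batches instead of briefly
		// toggling the GC percentage up and back down.
		runtime.GC()
	}
}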
Example 5: monitor
func monitor() {
	c := time.Tick(1 * time.Second)
	mem := new(runtime.MemStats)
	origPct := debug.SetGCPercent(100)
	debug.SetGCPercent(origPct)
	for _ = range c {
		runtime.ReadMemStats(mem)
		mu.Lock()
		defer mu.Unlock()
		if tSize < 0 {
			continue
		}
		// Occupancy fraction: 70%. Don't GC before hitting this.
		softLimit := float64(tSize) * 0.7
		pct := softLimit / float64(mem.Alloc) * 100
		fmt.Printf("gctune: pct: %0.5f, target: %d, softLimit: %0.2f, Alloc: %d, Sys: %d\n", pct, tSize, softLimit, mem.Alloc, mem.Sys)
		if pct < 50 {
			// If this is too low, GC frequency increases too much.
			pct = 50
		}
		debug.SetGCPercent(int(pct))
		if mem.Sys > uint64(tSize*70/100) {
			fmt.Println("freeing")
			debug.FreeOSMemory()
		}
	}
}
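The tuning formula sets the GC percentage to softLimit/Alloc*100, so the collector is allowed to grow the heap toward the soft limit rather than toward twice the current live set. As a purely illustrative calculation: with a 1 GiB target the soft limit is about 717 MiB; at 200 MiB of allocations the percentage comes out near 358, at 700 MiB it drops to roughly 102, and once Alloc exceeds twice the soft limit the 50 floor kicks in to keep collections from becoming too frequent.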
Example 6: TestPool
func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	p.Put("c")
	debug.SetGCPercent(100) // to allow following GC to actually run
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after GC", g)
	}
}
Example 7: TestPoolNew
func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	i := 0
	p := Pool{
		New: func() interface{} {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}
Example 8: applyGcPercent
func (s *Server) applyGcPercent(c *core.Config) (err error) {
	if c.Go.GcPercent == 0 {
		debug.SetGCPercent(100)
		return
	}
	pv := debug.SetGCPercent(c.Go.GcPercent)
	core.Trace.Println("set gc percent from", pv, "to", c.Go.GcPercent)
	return
}
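A zero value in the configuration falls back to 100, which is also the runtime's default GC percentage. The initial value can likewise come from the GOGC environment variable, so a configuration loader might consult that before the hard-coded default. The sketch below uses a made-up helper name, gcPercentFromEnv, and ignores the special GOGC=off value for brevity:

package main

import (
	"fmt"
	"os"
	"runtime/debug"
	"strconv"
)

// gcPercentFromEnv applies a configured GC percentage, falling back to
// the GOGC environment variable and finally to the Go default of 100.
// It returns the previous setting, like debug.SetGCPercent itself.
func gcPercentFromEnv(configured int) int {
	if configured != 0 {
		return debug.SetGCPercent(configured)
	}
	if v, err := strconv.Atoi(os.Getenv("GOGC")); err == nil {
		return debug.SetGCPercent(v)
	}
	return debug.SetGCPercent(100)
}

func main() {
	prev := gcPercentFromEnv(0)
	fmt.Println("previous GC percentage:", prev)
}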
Example 9: garbageCollection
func garbageCollection() {
	log.Printf("Starting garbageCollection()\n")
	h.broadcastSys <- []byte("{\"gc\":\"starting\"}")
	memoryStats()
	debug.SetGCPercent(100)
	debug.FreeOSMemory()
	debug.SetGCPercent(-1)
	log.Printf("Done with garbageCollection()\n")
	h.broadcastSys <- []byte("{\"gc\":\"done\"}")
	memoryStats()
}
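debug.FreeOSMemory forces a garbage collection and then tries to return as much freed memory to the operating system as possible, which is why the example turns the collector back on around the call. A small standalone sketch of the same on-demand cleanup, printing a couple of MemStats fields before and after (the field choice is only illustrative):

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

// printHeap reports two heap counters so the effect of the cleanup is visible.
func printHeap(label string) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Printf("%s: HeapAlloc=%d HeapReleased=%d\n", label, m.HeapAlloc, m.HeapReleased)
}

func main() {
	// Allocate a few hundred megabytes so the cleanup has work to do.
	buf := make([][]byte, 0, 256)
	for i := 0; i < 256; i++ {
		buf = append(buf, make([]byte, 1<<20))
	}
	// Drop the reference so the collector can reclaim the slices.
	buf = nil

	printHeap("before")
	debug.FreeOSMemory() // runs a GC and returns freed pages to the OS
	printHeap("after")
}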
Example 10: TestGcHashmapIndirection
func TestGcHashmapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}
Example 11: TestPoolsPutGet
func TestPoolsPutGet(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	N := 10000 * 100
	var p = Pools{PrivateSize: N}
	for i := 0; i < N; i++ {
		p.Put(i)
	}
	for i := N - 1; i > 0; i-- {
		if n := p.Get(); n != i {
			t.Fatalf("got %v; want %d", n, i)
		}
	}
}
Example 12: GCFairness2
func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]interface{}
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	time.Sleep(30 * time.Millisecond)
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fmt.Printf("goroutine %d did not run\n", i)
			return
		}
	}
	fmt.Println("OK")
}
Example 13: proxyServer
func proxyServer() {
	loadConfigOrDie()
	debug.SetGCPercent(config.GCPercent)
	db, err := kvl.Open(config.Proxy.Database.Type, config.Proxy.Database.DSN)
	if err != nil {
		log.Fatalf("Couldn't connect to %v database: %v",
			config.Proxy.Database.Type, err)
	}
	defer db.Close()
	var h http.Handler
	h, err = proxyserver.New(db, config.Proxy.Scrubbers, config.Proxy.CacheSize)
	if err != nil {
		log.Fatalf("Couldn't initialize handler: %v", err)
	}
	h = httputil.NewLimitParallelism(config.Proxy.ParallelRequests, h)
	h = httputil.AddDebugHandlers(h, config.Proxy.Debug)
	if !config.Proxy.DisableHTTPLogging {
		h = httputil.LogHTTPRequests(h)
	}
	if config.Proxy.Listen == "none" {
		for {
			time.Sleep(time.Hour)
		}
	} else {
		serveOrDie(config.Proxy.Listen, h)
	}
}
Example 14: main
func main() {
	// Use all processor cores.
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Block and transaction processing can cause bursty allocations. This
	// limits the garbage collector from excessively overallocating during
	// bursts. This value was arrived at with the help of profiling live
	// usage.
	debug.SetGCPercent(10)
	// Up some limits.
	if err := limits.SetLimits(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to set limits: %v\n", err)
		os.Exit(1)
	}
	// Call serviceMain on Windows to handle running as a service. When
	// the return isService flag is true, exit now since we ran as a
	// service. Otherwise, just fall through to normal operation.
	if runtime.GOOS == "windows" {
		isService, err := winServiceMain()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		if isService {
			os.Exit(0)
		}
	}
	// Work around defer not working after os.Exit()
	if err := btcdMain(nil); err != nil {
		os.Exit(1)
	}
}
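Setting the percentage to 10 tells the runtime to start a collection once the heap has grown roughly 10% beyond the live data retained by the previous cycle, instead of the 100% headroom the default allows. With, say, 500 MB of live data (an illustrative figure, not one taken from btcd), the next collection starts near 550 MB rather than around 1 GB, trading extra GC CPU time for a much smaller peak heap during allocation bursts.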
Example 15: handleMem
func handleMem(g *Req) error {
	if g.R.Method == "POST" {
		type memParams struct {
			GCNow     int `schema:"gc_now"`
			GCPercent int `schema:"gc_percent"`
		}
		params := memParams{}
		err := g.Decoder.Decode(&params, g.R.Form)
		if err != nil {
			g.Error("Failed to decode params: " + err.Error())
			return ServerError("Failed to decode params: " + err.Error())
		}
		msg := "Adjusting mem system\n"
		if params.GCNow > 0 {
			info := "Running GC by request to handler"
			g.Info(info)
			msg += info + "\n"
			runtime.GC()
		}
		if params.GCPercent > 0 {
			oldVal := debug.SetGCPercent(params.GCPercent)
			info := fmt.Sprintf("Set GC%% to [%d] was [%d]", params.GCPercent, oldVal)
			g.Info(info)
			msg += info + "\n"
		}
		return g.SendText([]byte(msg))
	}
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	return g.SendJson("memstats", memStats)
}
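Assuming the handler above is mounted at an admin endpoint (the path below is hypothetical; the real route is not shown in the example) and that the surrounding framework has already parsed the request form, it could be exercised from a small Go client like this sketch:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical base URL; adjust to wherever handleMem is actually routed.
	base := "http://localhost:8080/debug/mem"

	// Ask the handler to set the GC percentage and run a collection now.
	resp, err := http.PostForm(base, url.Values{
		"gc_percent": {"50"},
		"gc_now":     {"1"},
	})
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))

	// A GET on the same endpoint returns the current runtime.MemStats as JSON.
	resp, err = http.Get(base)
	if err != nil {
		panic(err)
	}
	body, _ = io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))
}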