

Golang runtime.MemProfile Function Code Examples

This article collects typical usage examples of the runtime.MemProfile function in Golang. If you have been wondering what exactly MemProfile does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.


A total of 12 code examples of the MemProfile function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
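
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the calling convention they all share: runtime.MemProfile(p, inuseZero) reports how many records are in the current heap profile and fills p only when it is large enough, so callers typically size the slice in a retry loop with a little slack.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// A first call with a nil slice just reports how many records exist.
	n, _ := runtime.MemProfile(nil, true)

	// Retry with a slightly larger slice until MemProfile reports success;
	// the slack absorbs records added between the two calls.
	var records []runtime.MemProfileRecord
	for {
		records = make([]runtime.MemProfileRecord, n+50)
		var ok bool
		n, ok = runtime.MemProfile(records, true)
		if ok {
			records = records[:n]
			break
		}
	}

	fmt.Printf("heap profile currently holds %d records\n", len(records))
}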

Example 1: MemoryStatistics

func MemoryStatistics() string {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, false)
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, false)
		if ok {
			p = p[0:n]
			break
		}
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	return fmt.Sprintf("%d in use objects (%d in use bytes) | Alloc: %d TotalAlloc: %d",
		total.InUseObjects(), total.InUseBytes(), m.Alloc, m.TotalAlloc)
}
Author: Norbell, Project: dyndnscd, Lines: 27, Source: main.go

Example 2: getMemProfileRecords

func getMemProfileRecords() []runtime.MemProfileRecord {
	// Force the runtime to update the object and byte counts.
	// This can take up to two GC cycles to get a complete
	// snapshot of the current point in time.
	runtime.GC()
	runtime.GC()

	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}
	return p
}
Author: 2thetop, Project: go, Lines: 29, Source: heapsampling.go

Example 3: mem_in_go

func mem_in_go(include_zero bool) runtime.MemProfileRecord {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, include_zero)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, include_zero)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}
	return total
}
Author: JWZH, Project: caskdb, Lines: 26, Source: stats.go

Example 4: writeHeap

// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	sort.Sort(byInUseBytes(p))

	b := bufio.NewWriter(w)
	var tw *tabwriter.Writer
	w = b
	if debug > 0 {
		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		w = tw
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		if debug > 0 {
			printStackRecord(w, r.Stack(), false)
		}
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)

	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)

	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
//.........the rest of this code is omitted here.........
Author: yhtsnda, Project: go, Lines: 101, Source: pprof.go

Example 5: main

func main() {
	runtime.MemProfileRate = 1
	// Allocate 1M 4-byte objects and set a finalizer for every third object.
	// Assuming that the tiny block size is 16, some objects get finalizers set up
	// only for middle bytes. The finalizer resurrects that object.
	// As a result, all allocated memory must stay alive.
	const (
		N             = 1 << 20
		tinyBlockSize = 16 // runtime._TinySize
	)
	hold := make([]*int32, 0, N)
	for i := 0; i < N; i++ {
		x := new(int32)
		if i%3 == 0 {
			runtime.SetFinalizer(x, func(p *int32) {
				hold = append(hold, p)
			})
		}
	}
	// Finalize as much as possible.
	// Note: the sleep only increases the probability of bug detection;
	// it cannot lead to a false failure.
	for i := 0; i < 5; i++ {
		runtime.GC()
		time.Sleep(10 * time.Millisecond)
	}
	// Read memory profile.
	var prof []runtime.MemProfileRecord
	for {
		if n, ok := runtime.MemProfile(prof, false); ok {
			prof = prof[:n]
			break
		} else {
			prof = make([]runtime.MemProfileRecord, n+10)
		}
	}
	// See how much memory in tiny objects is profiled.
	var totalBytes int64
	for _, p := range prof {
		bytes := p.AllocBytes - p.FreeBytes
		nobj := p.AllocObjects - p.FreeObjects
		size := bytes / nobj
		if size == tinyBlockSize {
			totalBytes += bytes
		}
	}
	// 2*tinyBlockSize slack is for any boundary effects.
	if want := N*int64(unsafe.Sizeof(int32(0))) - 2*tinyBlockSize; totalBytes < want {
		println("got", totalBytes, "want >=", want)
		panic("some of the tiny objects are not profiled")
	}
	// Just to keep hold alive.
	if len(hold) != 0 && hold[0] == nil {
		panic("bad")
	}
}
Author: Xiahl1990, Project: go, Lines: 56, Source: finprofiled.go

Example 6: MonitorMemProfile

func MonitorMemProfile() {
	go func() {
		for {
			time.Sleep(10e9)
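			// Note: the first return value of MemProfile is the number of
			// profile records, not the amount of memory in use; that record
			// count is what gets logged below.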
			mem, _ := runtime.MemProfile(nil, false)
			runtime.GC()
			log.Printf("Mem# %d\n", mem)
		}
	}()
}
Author: andradeandrey, Project: Faff, Lines: 10, Source: dbg.go

Example 7: TestGITForLeaks

func TestGITForLeaks(t *testing.T) {
	gitcmd := envOrDefault("git", "/usr/local/git/bin/git")
	repo := "/Users/petar/popalg.org-git"
	filename := "sparsification-by-spanners"
	for i := 0; i < 10000; i++ {
		_, _, err := GITGetCreateUpdateTime(gitcmd, repo, filename)
		if err != nil {
			t.Errorf("git: %s", err)
		}
		if i%100 == 0 {
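			// As in MonitorMemProfile above, this is the number of profile
			// records, not bytes of memory in use.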
			mem, _ := runtime.MemProfile(nil, false)
			fmt.Printf("i= %d,    mem= %d             \r", i, mem)
		}
	}
	fmt.Printf("\n")
}
Author: andradeandrey, Project: Faff, Lines: 16, Source: gitleak_test.go

Example 8: writeHeap

// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
	}

	sort.Slice(p, func(i, j int) bool { return p[i].InUseBytes() > p[j].InUseBytes() })

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	prof := &profile.Profile{
		PeriodType: &profile.ValueType{Type: "space", Unit: "bytes"},
		SampleType: []*profile.ValueType{
			{Type: "alloc_objects", Unit: "count"},
			{Type: "alloc_space", Unit: "bytes"},
			{Type: "inuse_objects", Unit: "count"},
			{Type: "inuse_space", Unit: "bytes"},
		},
		Period: int64(runtime.MemProfileRate),
	}

	locs := make(map[uint64]*(profile.Location))
	for i := range p {
		var v1, v2, v3, v4, blocksize int64
		r := &p[i]
		v1, v2 = int64(r.InUseObjects()), int64(r.InUseBytes())
		v3, v4 = int64(r.AllocObjects), int64(r.AllocBytes)
		if (v1 == 0 && v2 != 0) || (v3 == 0 && v4 != 0) {
			return fmt.Errorf("error writing memory profile: object count was 0 but byte count was nonzero (inuse bytes %d, alloc bytes %d)", v2, v4)
		}
		if v1 != 0 {
			blocksize = v2 / v1
			v1, v2 = scaleHeapSample(v1, v2, prof.Period)
		}
		if v3 != 0 {
			v3, v4 = scaleHeapSample(v3, v4, prof.Period)
		}
		value := []int64{v1, v2, v3, v4}
		var sloc []*profile.Location
		for _, pc := range r.Stack() {
			addr := uint64(pc)
			addr--
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &(profile.Location{
					Address: addr,
				})
				prof.Location = append(prof.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}
		prof.Sample = append(prof.Sample, &profile.Sample{
			Value:    value,
			Location: sloc,
			NumLabel: map[string][]int64{"bytes": {blocksize}},
		})
	}
	prof.RemapAll()
	protopprof.Symbolize(prof)
	return prof.Write(w)
}
Author: achanda, Project: go, Lines: 83, Source: pprof.go
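
The scaleHeapSample helper called above is not part of this excerpt. As a rough sketch of what such a helper typically does (this follows the unsampling estimator used by Go's pprof for heap samples, and is an assumption rather than the exact omitted code), it scales each sampled (count, bytes) pair by the inverse probability that an allocation of that average size was sampled at the given rate:

package heapscale

import "math"

// scaleHeapSample adjusts a sampled (count, size) pair taken at the given
// profiling rate (runtime.MemProfileRate) to estimate the true totals.
// Sketch only; the real helper in the project above may differ.
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	if count == 0 || size == 0 {
		return 0, 0
	}
	if rate <= 1 {
		// A rate of 1 means every allocation was sampled, so no scaling is needed.
		return count, size
	}
	avgSize := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
	return int64(float64(count) * scale), int64(float64(size) * scale)
}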

Example 9: countHeap

// countHeap returns the number of records in the heap profile.
func countHeap() int {
	n, _ := runtime.MemProfile(nil, true)
	return n
}
Author: achanda, Project: go, Lines: 5, Source: pprof.go

Example 10: writeHeap

// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	// Find out how many records there are (MemProfile(nil, true)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	sort.Sort(byInUseBytes(p))

	b := bufio.NewWriter(w)
	var tw *tabwriter.Writer
	w = b
	if debug > 0 {
		tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		w = tw
	}

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		if debug > 0 {
			printStackRecord(w, r.Stack(), false)
		}
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)

	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)

	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	if tw != nil {
		tw.Flush()
	}
	return b.Flush()
}
Author: RBEGamer, Project: go, Lines: 99, Source: pprof.go

Example 11: init

func init() {

	store.DefaultDBManager.AddFunc("select", func(db *store.DB, args []string) string {
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("info", func(db *store.DB, args []string) string {
		return reply.BulkReply("# Server\r\nredis_version:2.6.7\r\n")
	})

	store.DefaultDBManager.AddFunc("meminuse", func(db *store.DB, args []string) string {
		return reply.IntReply(int(store.MemInUse()))
	})

	store.DefaultDBManager.AddFunc("objectsinuse", func(db *store.DB, args []string) string {
		var memProfiles []runtime.MemProfileRecord
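		// Note: no slack is added to the slice and the ok result of the second
		// call is ignored; if records are added between the two calls,
		// MemProfile leaves the slice untouched and the count below reads zero.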
		n, _ := runtime.MemProfile(memProfiles, false)
		memProfiles = make([]runtime.MemProfileRecord, n)
		n, _ = runtime.MemProfile(memProfiles, false)

		objects := int64(0)
		for _, prof := range memProfiles {
			objects += prof.InUseObjects()
		}

		return reply.IntReply(int(objects))
	})

	store.DefaultDBManager.AddFunc("gc", func(db *store.DB, args []string) string {
		runtime.GC()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("freeosmemory", func(db *store.DB, args []string) string {
		debug.FreeOSMemory()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("save", func(db *store.DB, args []string) string {
		err := db.SaveToDiskSync()
		if err != nil {
			return reply.ErrorReply(err)
		} else {
			return reply.OKReply
		}
	})

	store.DefaultDBManager.AddFunc("bgsave", func(db *store.DB, args []string) string {
		store.DefaultDBManager.SaveToDiskAsync(nil)
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("__end_save_mode__", func(db *store.DB, args []string) string {
		db.EndSaveMode()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("dbsize", func(db *store.DB, args []string) string {
		return reply.IntReply(store.DefaultDBManager.DBSize())
	})

	store.DefaultDBManager.AddFunc("lastdump", func(db *store.DB, args []string) string {
		elem, ok, _ := db.StoreGet(args[0], data.Any)
		if ok {
			return reply.IntReply(int(elem.LastDump()))
		} else {
			return reply.NilReply
		}
	})

	store.DefaultDBManager.AddFunc("lastdumpdb", func(db *store.DB, args []string) string {
		return reply.IntReply(int(db.LastDump()))
	})

	store.DefaultDBManager.AddFunc("flush", func(db *store.DB, args []string) string {
		db.Flush()
		return reply.OKReply
	})

	store.DefaultDBManager.AddFunc("keysperdb", func(db *store.DB, args []string) string {
		var w reply.MultiBulkWriter
		dbs := store.DefaultDBManager.GetDBs()
		w.WriteCount(len(dbs))
		for _, db := range dbs {
			w.WriteString(fmt.Sprintf("%d", len(db.Store)))
		}
		return w.String()
	})

}
Author: brianbrunner, Project: omg, Lines: 90, Source: server.go

Example 12: Heap

// Based on: https://github.com/golang/go/blob/6b8762104a90c93ebd51149e7a031738832c5cdc/src/runtime/pprof/pprof.go#L387
func Heap(w io.Writer, sortorder string) {
	var p []runtime.MemProfileRecord
	n, ok := runtime.MemProfile(nil, true)
	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to MemProfile.
		p = make([]runtime.MemProfileRecord, n+50)
		n, ok = runtime.MemProfile(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	pm := make(map[uintptr]runtime.MemProfileRecord, len(p))

	for _, r := range p {
		// Based on: https://github.com/golang/go/blob/f9ed2f75c43cb8745a1593ec3e4208c46419216a/src/runtime/mprof.go#L150
		var h uintptr
		for _, pc := range r.Stack0 {
			h += pc
			h += h << 10
			h ^= h >> 6
		}
		h += h << 3
		h ^= h >> 11

		if _, ok := pm[h]; ok {
			r.AllocBytes += pm[h].AllocBytes
			r.FreeBytes += pm[h].FreeBytes
			r.AllocObjects += pm[h].AllocObjects
			r.FreeObjects += pm[h].FreeObjects
		}
		pm[h] = r
	}

	p = make([]runtime.MemProfileRecord, 0, len(pm))

	for _, r := range pm {
		p = append(p, r)
	}

	switch sortorder {
	default:
		sort.Sort(byInUseBytes(p))
	case "allocbytes":
		sort.Sort(byAllocBytes(p))
	case "allocobjects":
		sort.Sort(byAllocObjects(p))
	case "inuseobjects":
		sort.Sort(byInUseObjects(p))
	}

	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)

	var total runtime.MemProfileRecord
	for _, r := range p {
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported 2*MemProfileRate,
	// so that's what pprof has come to expect.
	fmt.Fprintf(tw, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), total.InUseBytes(),
		total.AllocObjects, total.AllocBytes,
		2*runtime.MemProfileRate)

	fmt.Fprintf(tw, "# heap profile: %d: %s [%d: %s] @ heap/%d\n\n",
		total.InUseObjects(), formatSize(total.InUseBytes()),
		total.AllocObjects, formatSize(total.AllocBytes),
		2*runtime.MemProfileRate)

	for _, r := range p {
		fmt.Fprintf(tw, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack() {
			fmt.Fprintf(tw, " %#x", pc)
		}
		fmt.Fprintf(tw, "\n# %d: %s [%d: %s]\n",
			r.InUseObjects(), formatSize(r.InUseBytes()),
			r.AllocObjects, formatSize(r.AllocBytes))
		printStackRecord(tw, r.Stack(), false)
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)

	// Sort pauseNs in newer first,
	// and make it a nice to print duration.
	pauseNs := make([]time.Duration, 0, len(s.PauseNs))
//.........the rest of this code is omitted here.........
Author: erikdubbelboer, Project: bpprof, Lines: 101, Source: bpprof.go
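
Examples 4, 10 and 12 sort the records with helper types such as byInUseBytes, byAllocBytes, byAllocObjects and byInUseObjects that are not shown in the excerpts. They are ordinary sort.Interface wrappers over []runtime.MemProfileRecord; the following is a sketch of one of them (the others differ only in the field they compare), together with a small usage helper:

package memprofsort

import (
	"runtime"
	"sort"
)

// byInUseBytes orders memory profile records by in-use bytes, largest first.
// Sketch of the kind of helper the examples above rely on.
type byInUseBytes []runtime.MemProfileRecord

func (x byInUseBytes) Len() int           { return len(x) }
func (x byInUseBytes) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
func (x byInUseBytes) Less(i, j int) bool { return x[i].InUseBytes() > x[j].InUseBytes() }

// SortByInUseBytes sorts a profile snapshot in place, biggest consumers first.
func SortByInUseBytes(p []runtime.MemProfileRecord) {
	sort.Sort(byInUseBytes(p))
}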


Note: The runtime.MemProfile examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors, who retain copyright over the source code; consult each project's license before redistributing or reusing it. Do not reproduce this article without permission.