本文整理匯總了Golang中github.com/cockroachdb/cockroach/pkg/build.GetInfo函數的典型用法代碼示例。如果您正苦於以下問題:Golang GetInfo函數的具體用法?Golang GetInfo怎麽用?Golang GetInfo使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了GetInfo函數的14個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: runGenManCmd
// runGenManCmd generates man pages for all CockroachDB commands rooted at
// cmd and writes them into manPath, creating the directory if necessary.
func runGenManCmd(cmd *cobra.Command, args []string) error {
	info := build.GetInfo()
	header := &doc.GenManHeader{
		Section: "1",
		Manual:  "CockroachDB Manual",
		Source:  fmt.Sprintf("CockroachDB %s", info.Tag),
	}

	// GenManTree expects the target directory path to end with a separator.
	if !strings.HasSuffix(manPath, string(os.PathSeparator)) {
		manPath += string(os.PathSeparator)
	}

	// os.MkdirAll is a no-op when the directory already exists, so the
	// previous os.Stat/os.IsNotExist check was redundant.
	if err := os.MkdirAll(manPath, 0755); err != nil {
		return err
	}

	if err := doc.GenManTree(cmd.Root(), header, manPath); err != nil {
		return err
	}

	// TODO(cdo): The man page generated by the cobra package doesn't include a
	// list of commands, so one has to notice the "See Also" section at the
	// bottom of the page to know which commands are supported. I'd like to
	// make this better somehow.
	fmt.Println("Generated CockroachDB man pages in", manPath)
	return nil
}
示例2: reportUsage
// reportUsage POSTs this node's anonymized reporting information (version,
// cluster UUID, and metrics) to the CockroachDB usage-reporting endpoint.
// All failures are logged and swallowed; reporting is best-effort.
func (s *Server) reportUsage(ctx context.Context) {
	b := new(bytes.Buffer)
	if err := json.NewEncoder(b).Encode(s.getReportingInfo()); err != nil {
		log.Warning(ctx, err)
		return
	}

	q := reportingURL.Query()
	q.Set("version", build.GetInfo().Tag)
	q.Set("uuid", s.node.ClusterID.String())
	reportingURL.RawQuery = q.Encode()

	res, err := http.Post(reportingURL.String(), "application/json", b)
	if err != nil {
		// BUG FIX: the original wrote `if err != nil && log.V(2)`, so when the
		// POST failed but verbosity was below 2 it fell through and
		// dereferenced the nil `res` below. Always return on error; only the
		// logging is gated on verbosity.
		//
		// This is probably going to be relatively common in production
		// environments where network access is usually curtailed.
		if log.V(2) {
			log.Warning(ctx, "Failed to report node usage metrics: ", err)
		}
		return
	}
	// Close the body so the transport can reuse the connection.
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		b, err := ioutil.ReadAll(res.Body)
		log.Warningf(ctx, "Failed to report node usage metrics: status: %s, body: %s, "+
			"error: %v", res.Status, b, err)
	}
}
示例3: GetStatusSummary
// GetStatusSummary returns a status summary messages for the node. The summary
// includes the recent values of metrics for both the node and all of its
// component stores.
func (mr *MetricsRecorder) GetStatusSummary() *NodeStatus {
	mr.mu.Lock()
	defer mr.mu.Unlock()

	if mr.mu.nodeRegistry == nil {
		// We haven't yet processed initialization information; do nothing.
		if log.V(1) {
			log.Warning(context.TODO(), "attempt to generate status summary before NodeID allocation.")
		}
		return nil
	}

	now := mr.mu.clock.PhysicalNow()

	// Generate an node status with no store data.
	// The last*Count fields cache the sizes observed on the previous call so
	// the slice/maps below can be pre-sized and avoid re-growing.
	nodeStat := &NodeStatus{
		Desc:          mr.mu.desc,
		BuildInfo:     build.GetInfo(),
		UpdatedAt:     now,
		StartedAt:     mr.mu.startedAt,
		StoreStatuses: make([]StoreStatus, 0, mr.mu.lastSummaryCount),
		Metrics:       make(map[string]float64, mr.mu.lastNodeMetricCount),
	}

	// Collect node-level metric values.
	eachRecordableValue(mr.mu.nodeRegistry, func(name string, val float64) {
		nodeStat.Metrics[name] = val
	})

	// Generate status summaries for stores.
	for storeID, r := range mr.mu.storeRegistries {
		storeMetrics := make(map[string]float64, mr.mu.lastStoreMetricCount)
		eachRecordableValue(r, func(name string, val float64) {
			storeMetrics[name] = val
		})

		// Gather descriptor from store.
		// A store that cannot produce a descriptor is logged and omitted from
		// the summary rather than failing the whole call.
		descriptor, err := mr.mu.stores[storeID].Descriptor()
		if err != nil {
			log.Errorf(context.TODO(), "Could not record status summaries: Store %d could not return descriptor, error: %s", storeID, err)
			continue
		}

		nodeStat.StoreStatuses = append(nodeStat.StoreStatuses, StoreStatus{
			Desc:    *descriptor,
			Metrics: storeMetrics,
		})
	}

	// Refresh the pre-sizing hints for the next call.
	mr.mu.lastSummaryCount = len(nodeStat.StoreStatuses)
	mr.mu.lastNodeMetricCount = len(nodeStat.Metrics)
	if len(nodeStat.StoreStatuses) > 0 {
		// Assumes all stores expose the same metric set — TODO confirm.
		mr.mu.lastStoreMetricCount = len(nodeStat.StoreStatuses[0].Metrics)
	}
	return nodeStat
}
示例4: checkForUpdates
// checkForUpdates queries the CockroachDB update server for newer releases
// and logs any versions it reports. Network or decode failures are logged
// (subject to verbosity) and otherwise ignored.
func (s *Server) checkForUpdates(ctx context.Context) {
	params := updatesURL.Query()
	params.Set("version", build.GetInfo().Tag)
	params.Set("uuid", s.node.ClusterID.String())
	updatesURL.RawQuery = params.Encode()

	resp, err := http.Get(updatesURL.String())
	if err != nil {
		// This is probably going to be relatively common in production
		// environments where network access is usually curtailed.
		if log.V(2) {
			log.Warning(ctx, "Failed to check for updates: ", err)
		}
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, readErr := ioutil.ReadAll(resp.Body)
		log.Warningf(ctx, "Failed to check for updates: status: %s, body: %s, error: %v",
			resp.Status, body, readErr)
		return
	}

	var payload struct {
		Details []versionInfo `json:"details"`
	}
	if decodeErr := json.NewDecoder(resp.Body).Decode(&payload); decodeErr != nil && decodeErr != io.EOF {
		log.Warning(ctx, "Error decoding updates info: ", decodeErr)
		return
	}

	// Ideally the updates server only returns the most relevant updates for us,
	// but if it replied with an excessive number of updates, limit log spam by
	// only printing the last few.
	if excess := len(payload.Details) - updateMaxVersionsToReport; excess > 0 {
		payload.Details = payload.Details[excess:]
	}

	for _, v := range payload.Details {
		log.Infof(ctx, "A new version is available: %s, details: %s", v.Version, v.Details)
	}
}
示例5: TestStatusJson
// TestStatusJson verifies that status endpoints return expected Json results.
// The content type of the responses is always httputil.JSONContentType.
func TestStatusJson(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)

	nodeID := ts.Gossip().NodeID.Get()
	addr, err := ts.Gossip().GetNodeIDAddress(nodeID)
	if err != nil {
		t.Fatal(err)
	}

	var nodes serverpb.NodesResponse
	util.SucceedsSoon(t, func() error {
		// FIX: return the error instead of calling t.Fatal here. SucceedsSoon
		// retries the closure on error; t.Fatal aborted the test on the first
		// transient failure and defeated the retry loop.
		if err := getStatusJSONProto(s, "nodes", &nodes); err != nil {
			return err
		}
		if len(nodes.Nodes) == 0 {
			return errors.Errorf("expected non-empty node list, got: %v", nodes)
		}
		return nil
	})

	// The details endpoint must answer under all three spellings of "this
	// node": the health alias, "local", and the explicit node ID.
	for _, path := range []string{
		"/health",
		statusPrefix + "details/local",
		statusPrefix + "details/" + strconv.FormatUint(uint64(nodeID), 10),
	} {
		var details serverpb.DetailsResponse
		if err := serverutils.GetJSONProto(s, path, &details); err != nil {
			t.Fatal(err)
		}
		if a, e := details.NodeID, nodeID; a != e {
			t.Errorf("expected: %d, got: %d", e, a)
		}
		if a, e := details.Address, *addr; a != e {
			t.Errorf("expected: %v, got: %v", e, a)
		}
		if a, e := details.BuildInfo, build.GetInfo(); a != e {
			t.Errorf("expected: %v, got: %v", e, a)
		}
	}
}
示例6: rotateFile
// rotateFile closes the syncBuffer's file and starts a new one.
// The new file is headed with a few "[config]" lines identifying the process
// and the log line format. Each header line is written through the normal
// log-entry formatter and counted against sb.nbytes.
func (sb *syncBuffer) rotateFile(now time.Time) error {
	// Flush buffered output and close the current file before switching.
	if sb.file != nil {
		if err := sb.Flush(); err != nil {
			return err
		}
		if err := sb.file.Close(); err != nil {
			return err
		}
	}

	var err error
	sb.file, _, err = create(sb.sev, now)
	// Reset the byte counter before the error check so it is zeroed even on a
	// failed create.
	sb.nbytes = 0
	if err != nil {
		return err
	}

	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)

	// Attribute the header entries to our caller's file:line.
	f, l, _ := caller.Lookup(1)
	for _, msg := range []string{
		fmt.Sprintf("[config] file created at: %s\n", now.Format("2006/01/02 15:04:05")),
		fmt.Sprintf("[config] running on machine: %s\n", host),
		fmt.Sprintf("[config] binary: %s\n", build.GetInfo().Short()),
		fmt.Sprintf("[config] arguments: %s\n", os.Args),
		// This line has no formatting directives, so the fmt.Sprintf wrapper
		// the original used was unnecessary; the plain literal is identical.
		"line format: [IWEF]yymmdd hh:mm:ss.uuuuuu goid file:line msg\n",
	} {
		buf := formatLogEntry(Entry{
			Severity:  sb.sev,
			Time:      now.UnixNano(),
			Goroutine: goid.Get(),
			File:      f,
			Line:      int64(l),
			Message:   msg,
		}, nil, nil)
		var n int
		n, err = sb.file.Write(buf.Bytes())
		sb.nbytes += uint64(n)
		if err != nil {
			return err
		}
		logging.putBuffer(buf)
	}
	return nil
}
示例7: checkForUpdates
// checkForUpdates contacts the update server with this binary's version and
// the cluster UUID, then logs the details of any newer versions it reports.
func (s *Server) checkForUpdates(ctx context.Context) {
	values := updatesURL.Query()
	values.Set("version", build.GetInfo().Tag)
	values.Set("uuid", s.node.ClusterID.String())
	updatesURL.RawQuery = values.Encode()

	res, err := http.Get(updatesURL.String())
	if err != nil {
		// This is probably going to be relatively common in production
		// environments where network access is usually curtailed.
		if log.V(2) {
			log.Warning(ctx, "Failed to check for updates: ", err)
		}
		return
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		body, readErr := ioutil.ReadAll(res.Body)
		log.Warningf(ctx, "Failed to check for updates: status: %s, body: %s, error: %v",
			res.Status, body, readErr)
		return
	}

	var reply struct {
		Details []versionInfo `json:"details"`
	}
	// An empty body (io.EOF) is treated the same as an empty details list.
	if err := json.NewDecoder(res.Body).Decode(&reply); err != nil && err != io.EOF {
		log.Warning(ctx, "Error decoding updates info: ", err)
		return
	}

	for _, v := range reply.Details {
		log.Infof(ctx, "A new version is available: %s, details: %s", v.Version, v.Details)
	}
}
示例8: MakeRuntimeStatSampler
// MakeRuntimeStatSampler constructs a new RuntimeStatSampler object.
// The returned sampler owns a set of gauges for Go/C runtime statistics plus
// a constant build-timestamp gauge labeled with the build tag and Go version.
func MakeRuntimeStatSampler(clock *hlc.Clock) RuntimeStatSampler {
	// Construct the build info metric. It is constant.
	// We first set the labels on the metadata.
	info := build.GetInfo()
	timestamp, err := info.Timestamp()
	if err != nil {
		// We can't panic here, tests don't have a build timestamp.
		// NOTE(review): on error, `timestamp` is presumably the zero value and
		// still recorded below — confirm that is intended for test builds.
		log.Warningf(context.TODO(), "Could not parse build timestamp: %v", err)
	}
	metaBuildTimestamp.AddLabel("tag", info.Tag)
	metaBuildTimestamp.AddLabel("go_version", info.GoVersion)
	buildTimestamp := metric.NewGauge(metaBuildTimestamp)
	buildTimestamp.Update(timestamp)

	// All remaining gauges start at zero and are filled in by the sampler's
	// periodic collection.
	return RuntimeStatSampler{
		clock:          clock,
		startTimeNanos: clock.PhysicalNow(),
		CgoCalls:       metric.NewGauge(metaCgoCalls),
		Goroutines:     metric.NewGauge(metaGoroutines),
		GoAllocBytes:   metric.NewGauge(metaGoAllocBytes),
		GoTotalBytes:   metric.NewGauge(metaGoTotalBytes),
		CgoAllocBytes:  metric.NewGauge(metaCgoAllocBytes),
		CgoTotalBytes:  metric.NewGauge(metaCgoTotalBytes),
		GcCount:        metric.NewGauge(metaGCCount),
		GcPauseNS:      metric.NewGauge(metaGCPauseNS),
		GcPausePercent: metric.NewGaugeFloat64(metaGCPausePercent),
		CPUUserNS:      metric.NewGauge(metaCPUUserNS),
		CPUUserPercent: metric.NewGaugeFloat64(metaCPUUserPercent),
		CPUSysNS:       metric.NewGauge(metaCPUSysNS),
		CPUSysPercent:  metric.NewGaugeFloat64(metaCPUSysPercent),
		Rss:            metric.NewGauge(metaRSS),
		FDOpen:         metric.NewGauge(metaFDOpen),
		FDSoftLimit:    metric.NewGauge(metaFDSoftLimit),
		Uptime:         metric.NewGauge(metaUptime),
		BuildTimestamp: buildTimestamp,
	}
}
示例9: Details
// Details returns node details.
// Requests naming a remote node are proxied to that node; requests for the
// local node are answered directly from gossip and build info.
func (s *statusServer) Details(
	ctx context.Context, req *serverpb.DetailsRequest,
) (*serverpb.DetailsResponse, error) {
	ctx = s.AnnotateCtx(ctx)
	nodeID, local, err := s.parseNodeID(req.NodeId)
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
	}

	// Forward to the target node unless the request is for this one.
	if !local {
		status, err := s.dialNode(nodeID)
		if err != nil {
			return nil, err
		}
		return status.Details(ctx, req)
	}

	resp := &serverpb.DetailsResponse{
		NodeID:    s.gossip.NodeID.Get(),
		BuildInfo: build.GetInfo(),
	}
	// The address is best-effort: it is omitted when gossip cannot resolve it.
	if addr, err := s.gossip.GetNodeIDAddress(s.gossip.NodeID.Get()); err == nil {
		resp.Address = *addr
	}
	return resp, nil
}
示例10: initBacktrace
// initBacktrace enables Backtrace I/O crash tracing when the ptrace helper
// binary is installed, wiring trace capture into fatal signals, log.Fatal
// exits, and stopper panics. It always returns a usable stopper, even when
// tracing is unavailable.
func initBacktrace(logDir string) *stop.Stopper {
	const ptracePath = "/opt/backtrace/bin/ptrace"

	// Tracing requires the external ptrace helper; without it, return a plain
	// stopper and run untraced.
	if _, err := os.Stat(ptracePath); err != nil {
		log.Infof(context.TODO(), "backtrace disabled: %s", err)
		return stop.NewStopper()
	}

	if err := bcd.EnableTracing(); err != nil {
		log.Infof(context.TODO(), "unable to enable backtrace: %s", err)
		return stop.NewStopper()
	}

	bcd.UpdateConfig(bcd.GlobalConfig{
		PanicOnKillFailure: true,
		ResendSignal:       true,
		RateLimit:          time.Second * 3,
		SynchronousPut:     true,
	})

	// Use the default tracer implementation.
	// false: Exclude system goroutines.
	tracer := bcd.New(bcd.NewOptions{
		IncludeSystemGs: false,
	})
	if err := tracer.SetOutputPath(logDir, 0755); err != nil {
		log.Infof(context.TODO(), "unable to set output path: %s", err)
		// Not a fatal error, continue.
	}

	// Enable WARNING log output from the tracer.
	tracer.AddOptions(nil, "-L", "WARNING")

	// Attach build metadata to every captured trace.
	info := build.GetInfo()
	tracer.AddKV(nil, "cgo-compiler", info.CgoCompiler)
	tracer.AddKV(nil, "go-version", info.GoVersion)
	tracer.AddKV(nil, "platform", info.Platform)
	tracer.AddKV(nil, "tag", info.Tag)
	tracer.AddKV(nil, "time", info.Time)

	// Register for traces on signal reception.
	tracer.SetSigset(
		[]os.Signal{
			syscall.SIGABRT,
			syscall.SIGFPE,
			syscall.SIGSEGV,
			syscall.SIGILL,
			syscall.SIGBUS}...)
	bcd.Register(tracer)

	// Hook log.Fatal*.
	// Capture a trace before the process exits; the trace error is ignored
	// because we are exiting regardless.
	log.SetExitFunc(func(code int) {
		_ = bcd.Trace(tracer, fmt.Errorf("exit %d", code), nil)
		os.Exit(code)
	})

	// Capture a trace on stopper panics, then re-panic to preserve the
	// original crash behavior.
	stopper := stop.NewStopper(stop.OnPanic(func(val interface{}) {
		err, ok := val.(error)
		if !ok {
			err = fmt.Errorf("%v", val)
		}
		_ = bcd.Trace(tracer, err, nil)
		panic(val)
	}))

	// Internally, backtrace uses an external program (/opt/backtrace/bin/ptrace)
	// to generate traces. We direct the stdout for this program to a file for
	// debugging our usage of backtrace.
	if f, err := os.OpenFile(filepath.Join(logDir, "backtrace.out"),
		os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666); err != nil {
		log.Infof(context.TODO(), "unable to open: %s", err)
	} else {
		stopper.AddCloser(stop.CloserFn(func() {
			f.Close()
		}))
		tracer.SetPipes(nil, f)
	}

	tracer.SetLogLevel(bcd.LogMax)
	log.Infof(context.TODO(), "backtrace enabled")
	return stopper
}
示例11: checkNodeStatus
// checkNodeStatus validates the tabular output of the CLI node-status
// command: the column header row, the node's ID/address/build-tag fields,
// the freshness of its timestamps, and that each byte/range metric parses
// and stays within an expected ceiling.
func checkNodeStatus(t *testing.T, c cliTest, output string, start time.Time) {
	buf := bytes.NewBufferString(output)
	s := bufio.NewScanner(buf)

	// Skip command line.
	if !s.Scan() {
		t.Fatalf("Couldn't skip command line: %s", s.Err())
	}

	checkSeparatorLine(t, s)

	// check column names.
	if !s.Scan() {
		t.Fatalf("Error reading column names: %s", s.Err())
	}
	cols, err := extractFields(s.Text())
	if err != nil {
		t.Fatalf("%s", err)
	}
	if !reflect.DeepEqual(cols, nodesColumnHeaders) {
		t.Fatalf("columns (%s) don't match expected (%s)", cols, nodesColumnHeaders)
	}

	checkSeparatorLine(t, s)

	// Check node status.
	if !s.Scan() {
		t.Fatalf("error reading node status: %s", s.Err())
	}
	fields, err := extractFields(s.Text())
	if err != nil {
		t.Fatalf("%s", err)
	}

	// Field 0: node ID.
	nodeID := c.Gossip().NodeID.Get()
	nodeIDStr := strconv.FormatInt(int64(nodeID), 10)
	if a, e := fields[0], nodeIDStr; a != e {
		t.Errorf("node id (%s) != expected (%s)", a, e)
	}

	// Field 1: node address, resolved through gossip.
	nodeAddr, err := c.Gossip().GetNodeIDAddress(nodeID)
	if err != nil {
		t.Fatal(err)
	}
	if a, e := fields[1], nodeAddr.String(); a != e {
		t.Errorf("node address (%s) != expected (%s)", a, e)
	}

	// Verify Build Tag.
	if a, e := fields[2], build.GetInfo().Tag; a != e {
		t.Errorf("build tag (%s) != expected (%s)", a, e)
	}

	// Verify that updated_at and started_at are reasonably recent.
	// CircleCI can be very slow. This was flaky at 5s.
	checkTimeElapsed(t, fields[3], 15*time.Second, start)
	checkTimeElapsed(t, fields[4], 15*time.Second, start)

	// Verify all byte/range metrics.
	// maxval ceilings are presumably loose empirical bounds for a fresh test
	// cluster — confirm before tightening.
	testcases := []struct {
		name   string
		idx    int
		maxval int64
	}{
		{"live_bytes", 5, 40000},
		{"key_bytes", 6, 30000},
		{"value_bytes", 7, 40000},
		{"intent_bytes", 8, 30000},
		{"system_bytes", 9, 30000},
		{"leader_ranges", 10, 3},
		{"repl_ranges", 11, 3},
		{"avail_ranges", 12, 3},
	}
	for _, tc := range testcases {
		val, err := strconv.ParseInt(fields[tc.idx], 10, 64)
		if err != nil {
			t.Errorf("couldn't parse %s '%s': %v", tc.name, fields[tc.idx], err)
			continue
		}
		if val < 0 {
			t.Errorf("value for %s (%d) cannot be less than 0", tc.name, val)
			continue
		}
		if val > tc.maxval {
			t.Errorf("value for %s (%d) greater than max (%d)", tc.name, val, tc.maxval)
		}
	}

	checkSeparatorLine(t, s)
}
示例12: TestMetricsRecorder
//.........這裏部分代碼省略.........
if isNode {
expectedNodeSummaryMetrics[prefix+name] = float64(val)
} else {
// This can overwrite the previous value, but this is expected as
// all stores in our tests have identical values; when comparing
// status summaries, the same map is used as expected data for all
// stores.
expectedStoreSummaryMetrics[prefix+name] = float64(val)
}
}
for _, reg := range regList {
for _, data := range metricNames {
switch data.typ {
case "gauge":
g := metric.NewGauge(metric.Metadata{Name: reg.prefix + data.name})
reg.reg.AddMetric(g)
g.Update(data.val)
addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
case "floatgauge":
g := metric.NewGaugeFloat64(metric.Metadata{Name: reg.prefix + data.name})
reg.reg.AddMetric(g)
g.Update(float64(data.val))
addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
case "counter":
c := metric.NewCounter(metric.Metadata{Name: reg.prefix + data.name})
reg.reg.AddMetric(c)
c.Inc((data.val))
addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
case "counterwithrates":
r := metric.NewCounterWithRates(metric.Metadata{Name: reg.prefix + data.name})
reg.reg.AddMetric(r)
r.Inc(data.val)
addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
case "histogram":
h := metric.NewHistogram(metric.Metadata{Name: reg.prefix + data.name}, time.Second, 1000, 2)
reg.reg.AddMetric(h)
h.RecordValue(data.val)
for _, q := range recordHistogramQuantiles {
addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val, reg.isNode)
}
case "latency":
l := metric.NewLatency(metric.Metadata{Name: reg.prefix + data.name}, time.Hour)
reg.reg.AddMetric(l)
l.RecordValue(data.val)
// Latency is simply three histograms (at different resolution
// time scales).
for _, q := range recordHistogramQuantiles {
addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val, reg.isNode)
}
default:
t.Fatalf("unexpected: %+v", data)
}
}
}
// ========================================
// Verify time series data
// ========================================
actual := recorder.GetTimeSeriesData()
// Actual comparison is simple: sort the resulting arrays by time and name,
// and use reflect.DeepEqual.
sort.Sort(byTimeAndName(actual))
sort.Sort(byTimeAndName(expected))
if a, e := actual, expected; !reflect.DeepEqual(a, e) {
t.Errorf("recorder did not yield expected time series collection; diff:\n %v", pretty.Diff(e, a))
}
// ========================================
// Verify node summary generation
// ========================================
expectedNodeSummary := &NodeStatus{
Desc: nodeDesc,
BuildInfo: build.GetInfo(),
StartedAt: 50,
UpdatedAt: 100,
Metrics: expectedNodeSummaryMetrics,
StoreStatuses: []StoreStatus{
{
Desc: storeDesc1,
Metrics: expectedStoreSummaryMetrics,
},
{
Desc: storeDesc2,
Metrics: expectedStoreSummaryMetrics,
},
},
}
nodeSummary := recorder.GetStatusSummary()
if nodeSummary == nil {
t.Fatalf("recorder did not return nodeSummary")
}
sort.Sort(byStoreDescID(nodeSummary.StoreStatuses))
if a, e := nodeSummary, expectedNodeSummary; !reflect.DeepEqual(a, e) {
t.Errorf("recorder did not produce expected NodeSummary; diff:\n %v", pretty.Diff(e, a))
}
}
示例13: init
floatBuiltin1(func(x float64) (Datum, error) {
return NewDFloat(DFloat(math.Trunc(x))), nil
}),
decimalBuiltin1(func(x *inf.Dec) (Datum, error) {
dd := &DDecimal{}
dd.Round(x, 0, inf.RoundDown)
return dd, nil
}),
},
"version": {
Builtin{
Types: ArgTypes{},
ReturnType: TypeString,
category: categorySystemInfo,
fn: func(_ *EvalContext, args DTuple) (Datum, error) {
return NewDString(build.GetInfo().Short()), nil
},
},
},
}
// init registers an upper-case alias for every builtin so lookups succeed
// regardless of case for these names.
func init() {
	// Snapshot the keys before inserting: the Go spec leaves it unspecified
	// whether entries added during a range over a map are visited, so the
	// original add-while-ranging loop relied on ToUpper being idempotent.
	names := make([]string, 0, len(Builtins))
	for name := range Builtins {
		names = append(names, name)
	}
	for _, name := range names {
		Builtins[strings.ToUpper(name)] = Builtins[name]
	}
}
var substringImpls = []Builtin{
{
Types: ArgTypes{TypeString, TypeInt},
ReturnType: TypeString,
示例14:
"github.com/spf13/cobra"
)
// Proxy to allow overrides in tests.
var osStderr = os.Stderr

// versionIncludesDeps controls whether the version command also prints the
// build dependency list; it is wired to a command-line flag elsewhere.
var versionIncludesDeps bool
// versionCmd prints build information (tag, time, platform, toolchain and,
// optionally, the dependency list) for this binary in a tab-aligned table.
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "output version information",
	Long: `
Output build version information.
`,
	Run: func(cmd *cobra.Command, args []string) {
		info := build.GetInfo()
		w := tabwriter.NewWriter(os.Stdout, 2, 1, 2, ' ', 0)
		fmt.Fprintf(w, "Build Tag: %s\n", info.Tag)
		fmt.Fprintf(w, "Build Time: %s\n", info.Time)
		fmt.Fprintf(w, "Platform: %s\n", info.Platform)
		fmt.Fprintf(w, "Go Version: %s\n", info.GoVersion)
		fmt.Fprintf(w, "C Compiler: %s\n", info.CgoCompiler)
		if versionIncludesDeps {
			// Dependencies arrive space-separated with colon-joined fields;
			// reshape into one tab-indented entry per line.
			deps := strings.Replace(info.Dependencies, " ", "\n\t", -1)
			deps = strings.Replace(deps, ":", "\t", -1)
			fmt.Fprintf(w, "Build Deps:\n\t%s\n", deps)
		}
		_ = w.Flush()
	},
}
var cockroachCmd = &cobra.Command{