This article collects typical usage examples of the Golang Warning function from github.com/cockroachdb/cockroach/pkg/util/log. If you are unsure what the Warning function does, how to call it, or what real-world uses look like, the curated examples below should help.
The following shows 15 code examples of the Warning function, ordered by popularity.
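Before the examples, here is a minimal sketch of the call patterns they all share: log.Warning takes a context.Context followed by message arguments, log.Warningf takes a format string, and log.V gates optional verbose output. The doSomething helper and its error below are hypothetical placeholders, not part of CockroachDB.

package main

import (
	"context"
	"errors"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// doSomething is a hypothetical operation used only to produce an error for logging.
func doSomething() error {
	return errors.New("example failure")
}

func main() {
	ctx := context.Background()
	if err := doSomething(); err != nil {
		// Pass the error value directly, as most of the examples below do.
		log.Warning(ctx, err)
		// Or wrap it in a formatted message.
		log.Warningf(ctx, "operation failed: %s", err)
		// Gate chattier warnings behind a verbosity check, as several examples do.
		if log.V(2) {
			log.Warning(ctx, "verbose detail: ", err)
		}
	}
}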
Example 1: reportUsage
func (s *Server) reportUsage(ctx context.Context) {
	b := new(bytes.Buffer)
	if err := json.NewEncoder(b).Encode(s.getReportingInfo()); err != nil {
		log.Warning(ctx, err)
		return
	}
	q := reportingURL.Query()
	q.Set("version", build.GetInfo().Tag)
	q.Set("uuid", s.node.ClusterID.String())
	reportingURL.RawQuery = q.Encode()
	res, err := http.Post(reportingURL.String(), "application/json", b)
	if err != nil {
		// This is probably going to be relatively common in production
		// environments where network access is usually curtailed.
		if log.V(2) {
			log.Warning(ctx, "Failed to report node usage metrics: ", err)
		}
		// Return unconditionally on error; otherwise res would be nil below.
		return
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		b, err := ioutil.ReadAll(res.Body)
		log.Warningf(ctx, "Failed to report node usage metrics: status: %s, body: %s, "+
			"error: %v", res.Status, b, err)
	}
}
Example 2: eachRecordableValue
// eachRecordableValue visits each metric in the registry, calling the supplied
// function once for each recordable value represented by that metric. This is
// useful to expand certain metric types (such as histograms) into multiple
// recordable values.
func eachRecordableValue(reg *metric.Registry, fn func(string, float64)) {
	reg.Each(func(name string, mtr interface{}) {
		if histogram, ok := mtr.(*metric.Histogram); ok {
			// TODO(mrtracy): Where should this comment go for better
			// visibility?
			//
			// Proper support of Histograms for time series is difficult and
			// likely not worth the trouble. Instead, we aggregate a windowed
			// histogram at fixed quantiles. If the scraping window and the
			// histogram's eviction duration are similar, this should give
			// good results; if the two durations are very different, we either
			// report stale results or report only the more recent data.
			//
			// Additionally, we can only aggregate max/min of the quantiles;
			// roll-ups don't know that and so they will return mathematically
			// nonsensical values, but that seems acceptable for the time
			// being.
			curr, _ := histogram.Windowed()
			for _, pt := range recordHistogramQuantiles {
				fn(name+pt.suffix, float64(curr.ValueAtQuantile(pt.quantile)))
			}
		} else {
			val, err := extractValue(mtr)
			if err != nil {
				log.Warning(context.TODO(), err)
				return
			}
			fn(name, val)
		}
	})
}
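For orientation, the recordHistogramQuantiles table that the loop above ranges over pairs a metric-name suffix with a quantile to read from the windowed histogram. The shape below is a hypothetical stand-in inferred from how pt.suffix and pt.quantile are used; the actual suffixes and values in CockroachDB may differ.

// Hypothetical stand-in for the quantile table consumed by eachRecordableValue;
// the real recordHistogramQuantiles may use different suffixes and quantiles.
var recordHistogramQuantiles = []struct {
	suffix   string
	quantile float64
}{
	{suffix: "-max", quantile: 100},
	{suffix: "-p99", quantile: 99},
	{suffix: "-p50", quantile: 50},
}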
Example 3: removeLeaseIfExpiring
// removeLeaseIfExpiring removes a lease and returns true if it is about to expire.
// The method also resets the transaction deadline.
func (p *planner) removeLeaseIfExpiring(lease *LeaseState) bool {
	if lease == nil || lease.hasSomeLifeLeft(p.leaseMgr.clock) {
		return false
	}
	// Remove the lease from p.leases.
	idx := -1
	for i, l := range p.leases {
		if l == lease {
			idx = i
			break
		}
	}
	if idx == -1 {
		log.Warningf(p.ctx(), "lease (%s) not found", lease)
		return false
	}
	p.leases[idx] = p.leases[len(p.leases)-1]
	p.leases[len(p.leases)-1] = nil
	p.leases = p.leases[:len(p.leases)-1]
	if err := p.leaseMgr.Release(lease); err != nil {
		log.Warning(p.ctx(), err)
	}
	// Reset the deadline so that a new deadline will be set after the lease is acquired.
	p.txn.ResetDeadline()
	for _, l := range p.leases {
		p.txn.UpdateDeadlineMaybe(hlc.Timestamp{WallTime: l.Expiration().UnixNano()})
	}
	return true
}
Example 4: checkForUpdates
func (s *Server) checkForUpdates(ctx context.Context) {
	q := updatesURL.Query()
	q.Set("version", build.GetInfo().Tag)
	q.Set("uuid", s.node.ClusterID.String())
	updatesURL.RawQuery = q.Encode()
	res, err := http.Get(updatesURL.String())
	if err != nil {
		// This is probably going to be relatively common in production
		// environments where network access is usually curtailed.
		if log.V(2) {
			log.Warning(ctx, "Failed to check for updates: ", err)
		}
		return
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		b, err := ioutil.ReadAll(res.Body)
		log.Warningf(ctx, "Failed to check for updates: status: %s, body: %s, error: %v",
			res.Status, b, err)
		return
	}
	decoder := json.NewDecoder(res.Body)
	r := struct {
		Details []versionInfo `json:"details"`
	}{}
	err = decoder.Decode(&r)
	if err != nil && err != io.EOF {
		log.Warning(ctx, "Error decoding updates info: ", err)
		return
	}
	// Ideally the updates server only returns the most relevant updates for us,
	// but if it replied with an excessive number of updates, limit log spam by
	// only printing the last few.
	if len(r.Details) > updateMaxVersionsToReport {
		r.Details = r.Details[len(r.Details)-updateMaxVersionsToReport:]
	}
	for _, v := range r.Details {
		log.Infof(ctx, "A new version is available: %s, details: %s", v.Version, v.Details)
	}
}
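The anonymous struct decoded above implies a versionInfo type with at least the two fields the loop reads. A minimal stand-in consistent with that usage could look like the following; the real type in CockroachDB may carry additional fields or different JSON tags.

// versionInfo mirrors only the fields the example above actually reads
// (v.Version and v.Details); the JSON tags here are assumptions.
type versionInfo struct {
	Version string `json:"version"`
	Details string `json:"details"`
}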
Example 5: GetStatusSummary
// GetStatusSummary returns a status summary message for the node. The summary
// includes the recent values of metrics for both the node and all of its
// component stores.
func (mr *MetricsRecorder) GetStatusSummary() *NodeStatus {
	mr.mu.Lock()
	defer mr.mu.Unlock()
	if mr.mu.nodeRegistry == nil {
		// We haven't yet processed initialization information; do nothing.
		if log.V(1) {
			log.Warning(context.TODO(), "attempt to generate status summary before NodeID allocation.")
		}
		return nil
	}
	now := mr.mu.clock.PhysicalNow()
	// Generate a node status with no store data.
	nodeStat := &NodeStatus{
		Desc:          mr.mu.desc,
		BuildInfo:     build.GetInfo(),
		UpdatedAt:     now,
		StartedAt:     mr.mu.startedAt,
		StoreStatuses: make([]StoreStatus, 0, mr.mu.lastSummaryCount),
		Metrics:       make(map[string]float64, mr.mu.lastNodeMetricCount),
	}
	eachRecordableValue(mr.mu.nodeRegistry, func(name string, val float64) {
		nodeStat.Metrics[name] = val
	})
	// Generate status summaries for stores.
	for storeID, r := range mr.mu.storeRegistries {
		storeMetrics := make(map[string]float64, mr.mu.lastStoreMetricCount)
		eachRecordableValue(r, func(name string, val float64) {
			storeMetrics[name] = val
		})
		// Gather descriptor from store.
		descriptor, err := mr.mu.stores[storeID].Descriptor()
		if err != nil {
			log.Errorf(context.TODO(), "Could not record status summaries: Store %d could not return descriptor, error: %s", storeID, err)
			continue
		}
		nodeStat.StoreStatuses = append(nodeStat.StoreStatuses, StoreStatus{
			Desc:    *descriptor,
			Metrics: storeMetrics,
		})
	}
	mr.mu.lastSummaryCount = len(nodeStat.StoreStatuses)
	mr.mu.lastNodeMetricCount = len(nodeStat.Metrics)
	if len(nodeStat.StoreStatuses) > 0 {
		mr.mu.lastStoreMetricCount = len(nodeStat.StoreStatuses[0].Metrics)
	}
	return nodeStat
}
Example 6: Wait
// Wait waits for a running container to exit.
func (c *Container) Wait() error {
	exitCode, err := c.cluster.client.ContainerWait(context.Background(), c.id)
	if err == nil && exitCode != 0 {
		err = errors.Errorf("non-zero exit code: %d", exitCode)
	}
	if err != nil {
		if err := c.Logs(os.Stderr); err != nil {
			log.Warning(context.TODO(), err)
		}
	}
	return err
}
Example 7: gossipStores
// gossipStores broadcasts each store and dead replica to the gossip network.
func (n *Node) gossipStores(ctx context.Context) {
	if err := n.stores.VisitStores(func(s *storage.Store) error {
		if err := s.GossipStore(ctx); err != nil {
			return err
		}
		if err := s.GossipDeadReplicas(ctx); err != nil {
			return err
		}
		return nil
	}); err != nil {
		log.Warning(ctx, err)
	}
}
Example 8: releaseLeases
// releaseLeases implements the SchemaAccessor interface.
func (p *planner) releaseLeases() {
	if p.leases != nil {
		if log.V(2) {
			log.Infof(p.ctx(), "planner releasing %d leases", len(p.leases))
		}
		for _, lease := range p.leases {
			if err := p.leaseMgr.Release(lease); err != nil {
				log.Warning(p.ctx(), err)
			}
		}
		p.leases = nil
	}
}
Example 9: checkForUpdates
func (s *Server) checkForUpdates(ctx context.Context) {
	q := updatesURL.Query()
	q.Set("version", build.GetInfo().Tag)
	q.Set("uuid", s.node.ClusterID.String())
	updatesURL.RawQuery = q.Encode()
	res, err := http.Get(updatesURL.String())
	if err != nil {
		// This is probably going to be relatively common in production
		// environments where network access is usually curtailed.
		if log.V(2) {
			log.Warning(ctx, "Failed to check for updates: ", err)
		}
		return
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		b, err := ioutil.ReadAll(res.Body)
		log.Warningf(ctx, "Failed to check for updates: status: %s, body: %s, error: %v",
			res.Status, b, err)
		return
	}
	decoder := json.NewDecoder(res.Body)
	r := struct {
		Details []versionInfo `json:"details"`
	}{}
	err = decoder.Decode(&r)
	if err != nil && err != io.EOF {
		log.Warning(ctx, "Error decoding updates info: ", err)
		return
	}
	for _, v := range r.Details {
		log.Infof(ctx, "A new version is available: %s, details: %s", v.Version, v.Details)
	}
}
Example 10: scrapePrometheus
// scrapePrometheus updates the prometheusExporter's metrics snapshot.
func (mr *MetricsRecorder) scrapePrometheus() {
	mr.mu.Lock()
	defer mr.mu.Unlock()
	if mr.mu.nodeRegistry == nil {
		// We haven't yet processed initialization information; output nothing.
		if log.V(1) {
			log.Warning(context.TODO(), "MetricsRecorder asked to scrape metrics before NodeID allocation")
		}
	}
	mr.prometheusExporter.ScrapeRegistry(mr.mu.nodeRegistry)
	for _, reg := range mr.mu.storeRegistries {
		mr.prometheusExporter.ScrapeRegistry(reg)
	}
}
Example 11: start
func (ia *idAllocator) start() {
	ia.stopper.RunWorker(func() {
		ctx := ia.AnnotateCtx(context.Background())
		defer close(ia.ids)
		for {
			var newValue int64
			for newValue <= int64(ia.minID) {
				var err error
				var res client.KeyValue
				for r := retry.Start(base.DefaultRetryOptions()); r.Next(); {
					idKey := ia.idKey.Load().(roachpb.Key)
					if err := ia.stopper.RunTask(func() {
						res, err = ia.db.Inc(ctx, idKey, int64(ia.blockSize))
					}); err != nil {
						log.Warning(ctx, err)
						return
					}
					if err == nil {
						newValue = res.ValueInt()
						break
					}
					log.Warningf(ctx, "unable to allocate %d ids from %s: %s", ia.blockSize, idKey, err)
				}
				if err != nil {
					panic(fmt.Sprintf("unexpectedly exited id allocation retry loop: %s", err))
				}
			}
			end := newValue + 1
			start := end - int64(ia.blockSize)
			if start < int64(ia.minID) {
				start = int64(ia.minID)
			}
			// Add all new ids to the channel for consumption.
			for i := start; i < end; i++ {
				select {
				case ia.ids <- uint32(i):
				case <-ia.stopper.ShouldStop():
					return
				}
			}
		}
	})
}
Example 12: execSchemaChanges
// execSchemaChanges releases schema leases and runs the queued
// schema changers. This needs to be run after the transaction
// scheduling the schema change has finished.
//
// The list of closures is cleared after (attempting) execution.
//
// Args:
// results: The results from all statements in the group that scheduled the
// schema changes we're about to execute. Results corresponding to the
// schema change statements will be changed in case an error occurs.
func (scc *schemaChangerCollection) execSchemaChanges(
	e *Executor, planMaker *planner, results ResultList,
) {
	if planMaker.txn != nil {
		panic("trying to execute schema changes while still in a transaction")
	}
	ctx := e.AnnotateCtx(context.TODO())
	// Release the leases once a transaction is complete.
	planMaker.releaseLeases()
	if e.cfg.SchemaChangerTestingKnobs.SyncFilter != nil {
		e.cfg.SchemaChangerTestingKnobs.SyncFilter(TestingSchemaChangerCollection{scc})
	}
	// Execute any schema changes that were scheduled, in the order of the
	// statements that scheduled them.
	for _, scEntry := range scc.schemaChangers {
		sc := &scEntry.sc
		sc.db = *e.cfg.DB
		sc.testingKnobs = e.cfg.SchemaChangerTestingKnobs
		for r := retry.Start(base.DefaultRetryOptions()); r.Next(); {
			if done, err := sc.IsDone(); err != nil {
				log.Warning(ctx, err)
				break
			} else if done {
				break
			}
			if err := sc.exec(); err != nil {
				if isSchemaChangeRetryError(err) {
					// Try again
					continue
				}
				// All other errors can be reported; we report it as the result
				// corresponding to the statement that enqueued this changer.
				// There's some sketchiness here: we assume there's a single result
				// per statement and we clobber the result/error of the corresponding
				// statement.
				// There's also another subtlety: we can only report results for
				// statements in the current batch; we can't modify the results of older
				// statements.
				if scEntry.epoch == scc.curGroupNum {
					results[scEntry.idx] = Result{Err: err}
				}
				log.Warningf(ctx, "error executing schema change: %s", err)
			}
			break
		}
	}
	scc.schemaChangers = scc.schemaChangers[:0]
}
Example 13: poll
// poll retrieves data from the underlying DataSource a single time, storing any
// returned time series data on the server.
func (p *poller) poll() {
	if err := p.stopper.RunTask(func() {
		data := p.source.GetTimeSeriesData()
		if len(data) == 0 {
			return
		}
		ctx, span := p.AnnotateCtxWithSpan(context.Background(), "ts-poll")
		defer span.Finish()
		if err := p.db.StoreData(ctx, p.r, data); err != nil {
			log.Warningf(ctx, "error writing time series data: %s", err)
		}
	}); err != nil {
		log.Warning(p.AnnotateCtx(context.TODO()), err)
	}
}
Example 14: tryAsyncAbort
// tryAsyncAbort (synchronously) grabs a copy of the txn proto and the intents
// (which it then clears from txnMeta), and asynchronously tries to abort the
// transaction.
func (tc *TxnCoordSender) tryAsyncAbort(txnID uuid.UUID) {
	tc.Lock()
	txnMeta := tc.txns[txnID]
	// Clone the intents and the txn to avoid data races.
	intentSpans, _ := roachpb.MergeSpans(append([]roachpb.Span(nil), txnMeta.keys...))
	txnMeta.keys = nil
	txn := txnMeta.txn.Clone()
	tc.Unlock()
	// Since we don't hold the lock continuously, it's possible that two aborts
	// raced here. That's fine (and probably better than the alternative, which
	// is missing new intents sometimes).
	if txn.Status != roachpb.PENDING {
		return
	}
	ba := roachpb.BatchRequest{}
	ba.Txn = &txn
	et := &roachpb.EndTransactionRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
		Commit:      false,
		IntentSpans: intentSpans,
	}
	ba.Add(et)
	ctx := tc.AnnotateCtx(context.TODO())
	if err := tc.stopper.RunAsyncTask(ctx, func(ctx context.Context) {
		// Use the wrapped sender since the normal Sender does not allow
		// clients to specify intents.
		if _, pErr := tc.wrapped.Send(ctx, ba); pErr != nil {
			if log.V(1) {
				log.Warningf(ctx, "abort due to inactivity failed for %s: %s ", txn, pErr)
			}
		}
	}); err != nil {
		log.Warning(ctx, err)
	}
}
Example 15: MarshalJSON
// MarshalJSON returns an appropriate JSON representation of the current values
// of the metrics being tracked by this recorder.
func (mr *MetricsRecorder) MarshalJSON() ([]byte, error) {
	mr.mu.Lock()
	defer mr.mu.Unlock()
	if mr.mu.nodeRegistry == nil {
		// We haven't yet processed initialization information; return an empty
		// JSON object.
		if log.V(1) {
			log.Warning(context.TODO(), "MetricsRecorder.MarshalJSON() called before NodeID allocation")
		}
		return []byte("{}"), nil
	}
	topLevel := map[string]interface{}{
		fmt.Sprintf("node.%d", mr.mu.desc.NodeID): mr.mu.nodeRegistry,
	}
	// Add collection of stores to top level. JSON requires that keys be strings,
	// so we must convert the store ID to a string.
	storeLevel := make(map[string]interface{})
	for id, reg := range mr.mu.storeRegistries {
		storeLevel[strconv.Itoa(int(id))] = reg
	}
	topLevel["stores"] = storeLevel
	return json.Marshal(topLevel)
}