This article collects typical usage examples of the Errorf function from the Golang package github.com/cockroachdb/cockroach/pkg/util/log. If you have been wondering what Errorf does and how to call it, the curated examples here should help.
Fifteen code examples of the Errorf function are shown below, ordered by popularity by default.
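Before the examples, note the call shape they all share: log.Errorf takes a context.Context first, then a printf-style format string and arguments. A minimal sketch follows (the wrapper name reportFailure is illustrative, not part of the package):

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// reportFailure shows the bare call shape used throughout the examples below.
func reportFailure(ctx context.Context, err error) {
	// Logs at severity ERROR; the format verbs work exactly like fmt.Printf.
	log.Errorf(ctx, "operation failed: %s", err)
}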
Example 1: processEvent
// processEvent handles a single docker event. It returns true for events on
// known containers and false if an unexpected event causes the cluster to be
// stopped.
func (l *LocalCluster) processEvent(event events.Message) bool {
	l.mu.Lock()
	defer l.mu.Unlock()

	// If there's currently a oneshot container, ignore any die messages from
	// it because those are expected.
	if l.oneshot != nil && event.ID == l.oneshot.id && event.Status == eventDie {
		return true
	}

	for i, n := range l.Nodes {
		if n != nil && n.id == event.ID {
			if log.V(1) {
				log.Errorf(context.Background(), "node=%d status=%s", i, event.Status)
			}
			select {
			case l.events <- Event{NodeIndex: i, Status: event.Status}:
			default:
				panic("events channel filled up")
			}
			return true
		}
	}

	log.Infof(context.Background(), "received docker event for unrecognized container: %+v",
		event)

	// An event on any other container is unexpected. Die.
	select {
	case <-l.stopper:
	case <-l.monitorCtx.Done():
	default:
		// There is a very tiny race here: the signal handler might be closing the
		// stopper simultaneously.
		log.Errorf(context.Background(), "stopping due to unexpected event: %+v", event)
		if rc, err := l.client.ContainerLogs(context.Background(), event.Actor.ID, types.ContainerLogsOptions{
			ShowStdout: true,
			ShowStderr: true,
		}); err == nil {
			defer rc.Close()
			if _, err := io.Copy(os.Stderr, rc); err != nil {
				log.Infof(context.Background(), "error listing logs: %s", err)
			}
		}
		close(l.stopper)
	}
	return false
}
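The select with an empty default in processEvent is Go's standard non-blocking send: if the buffered events channel is full, the code panics instead of stalling the docker event loop. A self-contained sketch of just that pattern (all names here are hypothetical):

package main

import "fmt"

type Event struct {
	NodeIndex int
	Status    string
}

func main() {
	events := make(chan Event, 1) // deliberately tiny buffer

	send := func(ev Event) {
		select {
		case events <- ev:
			// Delivered without blocking.
		default:
			// Buffer full: fail loudly rather than block the event loop.
			panic("events channel filled up")
		}
	}

	send(Event{NodeIndex: 0, Status: "die"})
	fmt.Println(<-events) // {0 die}
}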
Example 2: shouldQueue
// shouldQueue determines whether a replica should be queued for garbage
// collection, and if so, at what priority. Returns true for shouldQ
// in the event that the cumulative ages of GC'able bytes or extant
// intents exceed thresholds.
func (gcq *gcQueue) shouldQueue(
	ctx context.Context, now hlc.Timestamp, repl *Replica, sysCfg config.SystemConfig,
) (shouldQ bool, priority float64) {
	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Errorf(ctx, "could not find zone config for range %s: %s", repl, err)
		return
	}

	ms := repl.GetMVCCStats()
	// GC score is the total GC'able bytes age normalized by 1 MB * the replica's TTL in seconds.
	gcScore := float64(ms.GCByteAge(now.WallTime)) / float64(zone.GC.TTLSeconds) / float64(gcByteCountNormalization)

	// Intent score. This computes the average age of outstanding intents
	// and normalizes.
	intentScore := ms.AvgIntentAge(now.WallTime) / float64(intentAgeNormalization.Nanoseconds()/1E9)

	// Compute priority.
	if gcScore >= considerThreshold {
		priority += gcScore
	}
	if intentScore >= considerThreshold {
		priority += intentScore
	}
	shouldQ = priority > 0
	return
}
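To make the scoring arithmetic concrete, here is the gcScore computation as a standalone function with made-up inputs; the parameter names mirror the code above, but the values are purely illustrative:

package main

import "fmt"

// gcScore normalizes cumulative GC'able byte-age by the zone's TTL and a
// byte-count normalization constant (1 MB in the example above).
func gcScore(gcByteAge int64, ttlSeconds int32, byteCountNormalization int64) float64 {
	return float64(gcByteAge) / float64(ttlSeconds) / float64(byteCountNormalization)
}

func main() {
	// Hypothetical: 10 GB·s of byte-age, a 24h TTL, 1 MB normalization.
	score := gcScore(10<<30, 86400, 1<<20)
	fmt.Printf("gcScore = %.4f\n", score) // contributes to priority once it crosses considerThreshold
}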
Example 3: WriteStatusSummary
// WriteStatusSummary generates a summary and immediately writes it to the given
// client.
func (mr *MetricsRecorder) WriteStatusSummary(ctx context.Context, db *client.DB) error {
	mr.writeSummaryMu.Lock()
	defer mr.writeSummaryMu.Unlock()

	nodeStatus := mr.GetStatusSummary()
	if nodeStatus != nil {
		key := keys.NodeStatusKey(nodeStatus.Desc.NodeID)
		// We use PutInline to store only a single version of the node status.
		// There's not much point in keeping the historical versions as we keep
		// all of the constituent data as timeseries. Further, due to the size
		// of the build info in the node status, writing one of these every 10s
		// will generate more versions than will easily fit into a range over
		// the course of a day.
		if err := db.PutInline(ctx, key, nodeStatus); err != nil {
			return err
		}
		if log.V(2) {
			statusJSON, err := json.Marshal(nodeStatus)
			if err != nil {
				log.Errorf(ctx, "error marshaling nodeStatus to json: %s", err)
			}
			log.Infof(ctx, "node %d status: %s", nodeStatus.Desc.NodeID, statusJSON)
		}
	}
	return nil
}
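Gating the json.Marshal behind log.V(2) keeps the serialization cost off the common path when verbose logging is disabled. A stdlib-only sketch of the same gate (the boolean verbose stands in for log.V(2), which is specific to the cockroach log package):

package main

import (
	"encoding/json"
	"log"
)

var verbose = true // stands in for log.V(2)

type nodeStatus struct {
	NodeID  int                `json:"nodeId"`
	Metrics map[string]float64 `json:"metrics"`
}

func main() {
	status := nodeStatus{NodeID: 1, Metrics: map[string]float64{"sql.conns": 3}}
	if verbose {
		// Marshal only when the result will actually be logged.
		statusJSON, err := json.Marshal(status)
		if err != nil {
			log.Printf("error marshaling nodeStatus to json: %s", err)
		}
		log.Printf("node %d status: %s", status.NodeID, statusJSON)
	}
}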
Example 4: AddReplicas
// AddReplicas adds replicas for a range on a set of stores.
// It's illegal to have multiple replicas of the same range on stores of a single
// node.
// The method blocks until a snapshot of the range has been copied to all the
// new replicas and the new replicas become part of the Raft group.
func (tc *TestCluster) AddReplicas(
	startKey roachpb.Key, targets ...ReplicationTarget,
) (*roachpb.RangeDescriptor, error) {
	rKey := keys.MustAddr(startKey)
	rangeDesc, err := tc.changeReplicas(
		roachpb.ADD_REPLICA, rKey, targets...,
	)
	if err != nil {
		return nil, err
	}

	// Wait for the replication to complete on all destination nodes.
	if err := util.RetryForDuration(time.Second*5, func() error {
		for _, target := range targets {
			// Use LookupReplica(keys) instead of GetRange(rangeID) to ensure that the
			// snapshot has been transferred and the descriptor initialized.
			store, err := tc.findMemberStore(target.StoreID)
			if err != nil {
				log.Errorf(context.TODO(), "unexpected error: %s", err)
				return err
			}
			if store.LookupReplica(rKey, nil) == nil {
				return errors.Errorf("range not found on store %d", target)
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	return rangeDesc, nil
}
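util.RetryForDuration re-invokes the closure until it returns nil or the time budget is spent. A stdlib-only stand-in with the same shape (the sleep between attempts is an assumption; the real helper's backoff may differ):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryForDuration is a hypothetical stand-in for util.RetryForDuration.
func retryForDuration(d time.Duration, fn func() error) error {
	deadline := time.Now().Add(d)
	var err error
	for {
		if err = fn(); err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err // budget spent; surface the last error
		}
		time.Sleep(10 * time.Millisecond) // assumed pause between attempts
	}
}

func main() {
	attempts := 0
	err := retryForDuration(time.Second, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("range not found on store")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}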
Example 5: shouldQueue
// shouldQueue determines whether a replica should be queued for GC,
// and if so at what priority. To be considered for possible GC, a
// replica's range lease must not have been active for longer than
// ReplicaGCQueueInactivityThreshold. Further, the last replica GC
// check must have occurred more than ReplicaGCQueueInactivityThreshold
// in the past.
func (q *replicaGCQueue) shouldQueue(
	ctx context.Context, now hlc.Timestamp, rng *Replica, _ config.SystemConfig,
) (bool, float64) {
	lastCheck, err := rng.getLastReplicaGCTimestamp(ctx)
	if err != nil {
		log.Errorf(ctx, "could not read last replica GC timestamp: %s", err)
		return false, 0
	}

	lastActivity := hlc.ZeroTimestamp.Add(rng.store.startedAt, 0)

	lease, nextLease := rng.getLease()
	if lease != nil {
		lastActivity.Forward(lease.Expiration)
	}
	if nextLease != nil {
		lastActivity.Forward(nextLease.Expiration)
	}

	var isCandidate bool
	if raftStatus := rng.RaftStatus(); raftStatus != nil {
		isCandidate = (raftStatus.SoftState.RaftState == raft.StateCandidate)
	}
	return replicaGCShouldQueueImpl(now, lastCheck, lastActivity, isCandidate)
}
Example 6: flush
// flush sends the rows accumulated so far in a StreamMessage.
func (m *outbox) flush(last bool, err error) error {
	if !last && m.numRows == 0 {
		return nil
	}
	msg := m.encoder.FormMessage(last, err)

	if log.V(3) {
		log.Infof(m.flowCtx.Context, "flushing outbox")
	}
	var sendErr error
	if m.stream != nil {
		sendErr = m.stream.Send(msg)
	} else {
		sendErr = m.syncFlowStream.Send(msg)
	}
	if sendErr != nil {
		if log.V(1) {
			log.Errorf(m.flowCtx.Context, "outbox flush error: %s", sendErr)
		}
	} else if log.V(3) {
		log.Infof(m.flowCtx.Context, "outbox flushed")
	}
	if sendErr != nil {
		return sendErr
	}

	m.numRows = 0
	return nil
}
Example 7: writeSummaries
// writeSummaries retrieves status summaries from the supplied
// NodeStatusRecorder and persists them to the cockroach data store.
func (n *Node) writeSummaries(ctx context.Context) error {
	var err error
	if runErr := n.stopper.RunTask(func() {
		nodeStatus := n.recorder.GetStatusSummary()
		if nodeStatus != nil {
			key := keys.NodeStatusKey(nodeStatus.Desc.NodeID)
			// We use PutInline to store only a single version of the node
			// status. There's not much point in keeping the historical
			// versions as we keep all of the constituent data as
			// timeseries. Further, due to the size of the build info in the
			// node status, writing one of these every 10s will generate
			// more versions than will easily fit into a range over the
			// course of a day.
			if err = n.storeCfg.DB.PutInline(ctx, key, nodeStatus); err != nil {
				return
			}
			if log.V(2) {
				statusJSON, err := json.Marshal(nodeStatus)
				if err != nil {
					log.Errorf(ctx, "error marshaling nodeStatus to json: %s", err)
				}
				log.Infof(ctx, "node %d status: %s", nodeStatus.Desc.NodeID, statusJSON)
			}
		}
	}); runErr != nil {
		err = runErr
	}
	return err
}
Example 8: TestStoreRangeMergeNonCollocated
// TestStoreRangeMergeNonCollocated attempts to merge two ranges
// that are not on the same stores.
func TestStoreRangeMergeNonCollocated(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 4)
	defer mtc.Stop()

	store := mtc.stores[0]

	// Split into 3 ranges.
	argsSplit := adminSplitArgs(roachpb.KeyMin, []byte("d"))
	if _, pErr := client.SendWrapped(context.Background(), rg1(store), &argsSplit); pErr != nil {
		t.Fatalf("Can't split range %s", pErr)
	}
	argsSplit = adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, pErr := client.SendWrapped(context.Background(), rg1(store), &argsSplit); pErr != nil {
		t.Fatalf("Can't split range %s", pErr)
	}

	rangeA := store.LookupReplica([]byte("a"), nil)
	rangeADesc := rangeA.Desc()
	rangeB := store.LookupReplica([]byte("c"), nil)
	rangeBDesc := rangeB.Desc()
	rangeC := store.LookupReplica([]byte("e"), nil)
	rangeCDesc := rangeC.Desc()

	if bytes.Equal(rangeADesc.StartKey, rangeBDesc.StartKey) {
		log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeBDesc.StartKey)
	}
	if bytes.Equal(rangeBDesc.StartKey, rangeCDesc.StartKey) {
		log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeBDesc.StartKey, rangeCDesc.StartKey)
	}
	if bytes.Equal(rangeADesc.StartKey, rangeCDesc.StartKey) {
		log.Errorf(context.TODO(), "split ranges keys are equal %q!=%q", rangeADesc.StartKey, rangeCDesc.StartKey)
	}

	// Replicate the ranges to different sets of stores. Ranges A and C
	// are collocated, but B is different.
	mtc.replicateRange(rangeA.RangeID, 1, 2)
	mtc.replicateRange(rangeB.RangeID, 1, 3)
	mtc.replicateRange(rangeC.RangeID, 1, 2)

	// Attempt to merge.
	rangeADesc = rangeA.Desc()
	argsMerge := adminMergeArgs(roachpb.Key(rangeADesc.StartKey))
	if _, pErr := rangeA.AdminMerge(context.Background(), argsMerge, rangeADesc); !testutils.IsPError(pErr, "ranges not collocated") {
		t.Fatalf("did not get the expected error; got %s", pErr)
	}
}
Example 9: Start
// Start starts a node.
func (n *Node) Start() {
	n.Lock()
	defer n.Unlock()

	if n.cmd != nil {
		return
	}

	n.cmd = exec.Command(n.args[0], n.args[1:]...)
	n.cmd.Env = os.Environ()
	n.cmd.Env = append(n.cmd.Env, n.env...)

	stdoutPath := filepath.Join(n.logDir, "stdout")
	stdout, err := os.OpenFile(stdoutPath,
		os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf(context.Background(), "unable to open file %s: %s", stdoutPath, err)
	}
	n.cmd.Stdout = stdout

	stderrPath := filepath.Join(n.logDir, "stderr")
	stderr, err := os.OpenFile(stderrPath,
		os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf(context.Background(), "unable to open file %s: %s", stderrPath, err)
	}
	n.cmd.Stderr = stderr

	err = n.cmd.Start()
	if n.cmd.Process != nil {
		log.Infof(context.Background(), "process %d started: %s",
			n.cmd.Process.Pid, strings.Join(n.args, " "))
	}
	if err != nil {
		log.Infof(context.Background(), "%v", err)
		_ = stdout.Close()
		_ = stderr.Close()
		return
	}

	go func(cmd *exec.Cmd) {
		if err := cmd.Wait(); err != nil {
			log.Errorf(context.Background(), "waiting for command: %v", err)
		}
		_ = stdout.Close()
		_ = stderr.Close()

		ps := cmd.ProcessState
		sy := ps.Sys().(syscall.WaitStatus)

		log.Infof(context.Background(), "Process %d exited with status %d",
			ps.Pid(), sy.ExitStatus())
		log.Infof(context.Background(), ps.String())

		n.Lock()
		n.cmd = nil
		n.Unlock()
	}(n.cmd)
}
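Two details in Start are worth isolating: the child's output streams go to append-mode files, and the process is reaped by cmd.Wait in a goroutine so the node lock is not held while the process runs. A trimmed stdlib sketch of that wiring (the command and file name are placeholders):

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("echo", "hello")

	// Append-mode log file, as in the example above.
	stdout, err := os.OpenFile("stdout.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf("unable to open file: %s", err)
	}
	cmd.Stdout = stdout

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	done := make(chan struct{})
	go func() {
		// Reap the child off the caller's critical path.
		if err := cmd.Wait(); err != nil {
			log.Printf("waiting for command: %v", err)
		}
		_ = stdout.Close()
		close(done)
	}()
	<-done
}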
Example 10: CleanupOnError
// CleanupOnError cleans up the transaction as a result of an error.
func (txn *Txn) CleanupOnError(err error) {
	if err == nil {
		panic("no error")
	}
	if replyErr := txn.Rollback(); replyErr != nil {
		log.Errorf(txn.Context, "failure aborting transaction: %s; abort caused by: %s", replyErr, err)
	}
}
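The rollback-on-error shape is easy to lift out of its context. Below is a generic, self-contained version over a minimal interface; the real *Txn has a much richer API, and fakeTxn exists only so the sketch runs:

package main

import (
	"errors"
	"fmt"
)

// rollbacker captures the one method the cleanup pattern needs.
type rollbacker interface {
	Rollback() error
}

// cleanupOnError mirrors Txn.CleanupOnError: it must be handed the error
// that caused the abort, and it reports a rollback failure without masking it.
func cleanupOnError(txn rollbacker, err error) {
	if err == nil {
		panic("no error")
	}
	if replyErr := txn.Rollback(); replyErr != nil {
		fmt.Printf("failure aborting transaction: %s; abort caused by: %s\n", replyErr, err)
	}
}

type fakeTxn struct{}

func (fakeTxn) Rollback() error { return errors.New("already aborted") }

func main() {
	cleanupOnError(fakeTxn{}, errors.New("write conflict"))
}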
Example 11: GetStatusSummary
// GetStatusSummary returns a status summary message for the node. The summary
// includes the recent values of metrics for both the node and all of its
// component stores.
func (mr *MetricsRecorder) GetStatusSummary() *NodeStatus {
	mr.mu.Lock()
	defer mr.mu.Unlock()

	if mr.mu.nodeRegistry == nil {
		// We haven't yet processed initialization information; do nothing.
		if log.V(1) {
			log.Warning(context.TODO(), "attempt to generate status summary before NodeID allocation.")
		}
		return nil
	}

	now := mr.mu.clock.PhysicalNow()

	// Generate a node status with no store data.
	nodeStat := &NodeStatus{
		Desc:          mr.mu.desc,
		BuildInfo:     build.GetInfo(),
		UpdatedAt:     now,
		StartedAt:     mr.mu.startedAt,
		StoreStatuses: make([]StoreStatus, 0, mr.mu.lastSummaryCount),
		Metrics:       make(map[string]float64, mr.mu.lastNodeMetricCount),
	}

	eachRecordableValue(mr.mu.nodeRegistry, func(name string, val float64) {
		nodeStat.Metrics[name] = val
	})

	// Generate status summaries for stores.
	for storeID, r := range mr.mu.storeRegistries {
		storeMetrics := make(map[string]float64, mr.mu.lastStoreMetricCount)
		eachRecordableValue(r, func(name string, val float64) {
			storeMetrics[name] = val
		})

		// Gather descriptor from store.
		descriptor, err := mr.mu.stores[storeID].Descriptor()
		if err != nil {
			log.Errorf(context.TODO(), "Could not record status summaries: Store %d could not return descriptor, error: %s", storeID, err)
			continue
		}

		nodeStat.StoreStatuses = append(nodeStat.StoreStatuses, StoreStatus{
			Desc:    *descriptor,
			Metrics: storeMetrics,
		})
	}

	mr.mu.lastSummaryCount = len(nodeStat.StoreStatuses)
	mr.mu.lastNodeMetricCount = len(nodeStat.Metrics)
	if len(nodeStat.StoreStatuses) > 0 {
		mr.mu.lastStoreMetricCount = len(nodeStat.StoreStatuses[0].Metrics)
	}
	return nodeStat
}
Example 12: DrainQueue
// DrainQueue locks the queue and processes the remaining queued replicas. It
// processes the replicas in the order they're queued in, one at a time.
// Exposed for testing only.
func (bq *baseQueue) DrainQueue(clock *hlc.Clock) {
	ctx := bq.AnnotateCtx(context.TODO())
	repl := bq.pop()
	for repl != nil {
		if err := bq.processReplica(ctx, repl, clock); err != nil {
			bq.failures.Inc(1)
			log.Errorf(ctx, "failed processing replica %s: %s", repl, err)
		}
		repl = bq.pop()
	}
}
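The drain loop itself is a small reusable shape: pop until empty, log and count failures, keep going. A generic sketch over ints (everything here is hypothetical; the real queue pops *Replica values):

package main

import "fmt"

// drain pops and processes items until the queue is empty, counting
// failures rather than aborting, as DrainQueue does above.
func drain(pop func() (int, bool), process func(int) error) (failures int) {
	for item, ok := pop(); ok; item, ok = pop() {
		if err := process(item); err != nil {
			failures++
			fmt.Printf("failed processing %d: %s\n", item, err)
		}
	}
	return failures
}

func main() {
	queue := []int{1, 2, 3}
	pop := func() (int, bool) {
		if len(queue) == 0 {
			return 0, false
		}
		item := queue[0]
		queue = queue[1:]
		return item, true
	}
	fmt.Println("failures:", drain(pop, func(int) error { return nil })) // failures: 0
}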
Example 13: EnvOrDefaultBytes
// EnvOrDefaultBytes returns the value set by the specified environment
// variable, if any, otherwise the specified default value.
func EnvOrDefaultBytes(name string, value int64) int64 {
	if str, present := getEnv(name, 1); present {
		v, err := humanizeutil.ParseBytes(str)
		if err != nil {
			log.Errorf(context.Background(), "error parsing %s: %s", name, err)
			return value
		}
		return v
	}
	return value
}
Example 14: EnvOrDefaultDuration
// EnvOrDefaultDuration returns the value set by the specified environment
// variable, if any, otherwise the specified default value.
func EnvOrDefaultDuration(name string, value time.Duration) time.Duration {
	if str, present := getEnv(name, 1); present {
		v, err := time.ParseDuration(str)
		if err != nil {
			log.Errorf(context.Background(), "error parsing %s: %s", name, err)
			return value
		}
		return v
	}
	return value
}
Example 15: EnvOrDefaultInt
// EnvOrDefaultInt returns the value set by the specified environment
// variable, if any, otherwise the specified default value.
func EnvOrDefaultInt(name string, value int) int {
	if str, present := getEnv(name, 1); present {
		v, err := strconv.ParseInt(str, 0, 0)
		if err != nil {
			log.Errorf(context.Background(), "error parsing %s: %s", name, err)
			return value
		}
		return int(v)
	}
	return value
}
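Examples 13 through 15 are one pattern with three parsers: look up the variable, parse it, and fall back to the default with a logged error if parsing fails. A stdlib-only version of the integer case (the environment variable name is made up for the demo):

package main

import (
	"fmt"
	"log"
	"os"
	"strconv"
)

// envOrDefaultInt mirrors the pattern above using only the standard library.
func envOrDefaultInt(name string, value int) int {
	if str, present := os.LookupEnv(name); present {
		v, err := strconv.ParseInt(str, 0, 0) // base 0: honors 0x, 0o, 0b prefixes
		if err != nil {
			log.Printf("error parsing %s: %s", name, err)
			return value
		}
		return int(v)
	}
	return value
}

func main() {
	os.Setenv("COCKROACH_EXAMPLE_LIMIT", "0x10") // hypothetical variable name
	fmt.Println(envOrDefaultInt("COCKROACH_EXAMPLE_LIMIT", 8)) // 16
}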