This page collects typical usage examples of the Golang function log.Warning from github.com/cockroachdb/cockroach/util/log. If you are wondering exactly what Warning does, how to call it, or want to see it used in real code, the hand-picked examples below should help.
Fifteen code examples of the Warning function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Golang code samples.
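Two call shapes for Warning appear in the examples: older CockroachDB revisions call log.Warning(err) and log.Warningf(format, args...) directly, while newer revisions (see Examples 1 and 7) pass a context.Context as the first argument. The following is only a minimal sketch of the newer, context-aware form, not authoritative API documentation: the error values, node number, and verbosity level are made up for illustration, and the context import path (golang.org/x/net/context vs. the standard library context) depends on which CockroachDB revision you vendor.

package main

import (
    "errors"

    "github.com/cockroachdb/cockroach/util/log"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.TODO()

    // Warning formats its arguments in the manner of fmt.Print and logs
    // them at WARNING severity. (Illustrative error value only.)
    log.Warning(ctx, "disk space is running low: ", errors.New("only 5% free"))

    // Warningf formats its arguments in the manner of fmt.Printf.
    log.Warningf(ctx, "failed to contact node %d: %v", 7, errors.New("connection refused"))

    // A pattern used throughout the examples below: gate noisy warnings
    // behind a verbosity check so they only appear with verbose logging.
    if log.V(2) {
        log.Warning(ctx, "retrying after transient error")
    }
}

The log.V(n) guard is how most of the examples keep high-volume warnings out of the logs unless verbose logging is enabled.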
Example 1: reportUsage
func (s *Server) reportUsage() {
    b := new(bytes.Buffer)
    if err := json.NewEncoder(b).Encode(s.getReportingInfo()); err != nil {
        log.Warning(context.TODO(), err)
        return
    }

    q := reportingURL.Query()
    q.Set("version", build.GetInfo().Tag)
    q.Set("uuid", s.node.ClusterID.String())
    reportingURL.RawQuery = q.Encode()

    res, err := http.Post(reportingURL.String(), "application/json", b)
    if err != nil {
        if log.V(2) {
            // This is probably going to be relatively common in production
            // environments where network access is usually curtailed.
            log.Warning(context.TODO(), "Failed to report node usage metrics: ", err)
        }
        return
    }

    if res.StatusCode != http.StatusOK {
        b, err := ioutil.ReadAll(res.Body)
        log.Warningf(context.TODO(), "Failed to report node usage metrics: status: %s, body: %s, "+
            "error: %v", res.Status, b, err)
    }
}
Example 2: exec
func (txn *Txn) exec(retryable func(txn *Txn) *roachpb.Error) *roachpb.Error {
    // Run retryable in a retry loop until we encounter a success or
    // error condition this loop isn't capable of handling.
    var pErr *roachpb.Error
    for r := retry.Start(txn.db.txnRetryOptions); r.Next(); {
        pErr = retryable(txn)
        if pErr == nil && txn.Proto.Status == roachpb.PENDING {
            // retryable succeeded, but didn't commit.
            pErr = txn.commit(nil)
        }

        if pErr != nil {
            switch pErr.TransactionRestart {
            case roachpb.TransactionRestart_IMMEDIATE:
                if log.V(2) {
                    log.Warning(pErr)
                }
                r.Reset()
                continue
            case roachpb.TransactionRestart_BACKOFF:
                if log.V(2) {
                    log.Warning(pErr)
                }
                continue
            }
            // By default, fall through and break.
        }
        break
    }
    txn.Cleanup(pErr)
    return pErr
}
Example 3: releaseLeases
func (p *planner) releaseLeases(db client.DB) {
    if p.leases != nil {
        for _, lease := range p.leases {
            if err := p.leaseMgr.Release(lease); err != nil {
                log.Warning(err)
            }
        }
        p.leases = nil
    }

    // TODO(pmattis): This is a hack. Remove when schema change operations work
    // properly.
    if p.modifiedSchemas != nil {
        for _, d := range p.modifiedSchemas {
            var lease *LeaseState
            err := db.Txn(func(txn *client.Txn) error {
                var err error
                lease, err = p.leaseMgr.Acquire(txn, d.id, d.version)
                return err
            })
            if err != nil {
                log.Warning(err)
                continue
            }
            if err := p.leaseMgr.Release(lease); err != nil {
                log.Warning(err)
            }
        }
        p.modifiedSchemas = nil
    }
}
Example 4: exec
// Execute the entire schema change in steps.
func (sc SchemaChanger) exec() *roachpb.Error {
    // Acquire lease.
    lease, pErr := sc.AcquireLease()
    if pErr != nil {
        return pErr
    }
    // Always try to release lease.
    defer func(l *TableDescriptor_SchemaChangeLease) {
        if pErr := sc.ReleaseLease(*l); pErr != nil {
            log.Warning(pErr)
        }
    }(&lease)

    // Increment the version and unset tableDescriptor.UpVersion.
    if pErr := sc.MaybeIncrementVersion(); pErr != nil {
        return pErr
    }

    // Wait for the schema change to propagate to all nodes after this function
    // returns, so that the new schema is live everywhere. This is not needed for
    // correctness but is done to make the UI experience/tests predictable.
    defer func() {
        if pErr := sc.waitToUpdateLeases(); pErr != nil {
            log.Warning(pErr)
        }
    }()

    if sc.mutationID == invalidMutationID {
        // Nothing more to do.
        return nil
    }

    // Another transaction might set the up_version bit again,
    // but we're no longer responsible for taking care of that.

    // Run through mutation state machine before backfill.
    if pErr := sc.RunStateMachineBeforeBackfill(); pErr != nil {
        return pErr
    }

    // Apply backfill.
    if pErr := sc.applyMutations(&lease); pErr != nil {
        // Purge the mutations if the application of the mutations fails.
        if errPurge := sc.purgeMutations(&lease); errPurge != nil {
            return roachpb.NewErrorf("error purging mutation: %s, after error: %s", errPurge, pErr)
        }
        return pErr
    }

    // Mark the mutations as completed.
    return sc.done()
}
Example 5: runHeartbeat
// runHeartbeat sends periodic heartbeats to client, marking the client healthy
// or unhealthy and reconnecting appropriately until either the Client or the
// supplied channel is closed.
func (c *Client) runHeartbeat(retryOpts retry.Options, closer <-chan struct{}) {
    isHealthy := false
    setHealthy := func() {
        if isHealthy {
            return
        }
        isHealthy = true
        close(c.healthy.Load().(chan struct{}))
    }
    setUnhealthy := func() {
        if isHealthy {
            isHealthy = false
            c.healthy.Store(make(chan struct{}))
        }
    }

    var err = errUnstarted // initial condition
    for {
        for r := retry.Start(retryOpts); r.Next(); {
            // Reconnect on failure.
            if err != nil {
                if err = c.connect(); err != nil {
                    setUnhealthy()
                    log.Warning(err)
                    continue
                }
            }

            // Heartbeat regardless of failure.
            if err = c.heartbeat(); err != nil {
                setUnhealthy()
                log.Warning(err)
                continue
            }

            setHealthy()
            break
        }

        // Wait after the heartbeat so that the first iteration gets a wait-free
        // heartbeat attempt.
        select {
        case <-closer:
            c.Close()
            return
        case <-c.Closed:
            return
        case <-time.After(heartbeatInterval):
            // TODO(tamird): Perhaps retry more aggressively when the client is unhealthy.
        }
    }
}
Example 6: runHeartbeat
// runHeartbeat sends periodic heartbeats to client, marking the client healthy
// or unhealthy and reconnecting appropriately until either the Client or the
// supplied channel is closed.
func (c *Client) runHeartbeat(retryOpts retry.Options, closer <-chan struct{}) {
    isHealthy := false
    setHealthy := func() {
        if isHealthy {
            return
        }
        isHealthy = true
        close(c.healthy.Load().(chan struct{}))
    }
    setUnhealthy := func() {
        if isHealthy {
            isHealthy = false
            c.healthy.Store(make(chan struct{}))
        }
    }

    connErr := errUnstarted // initial condition
    var beatErr error
    for {
        for r := retry.Start(retryOpts); r.Next(); {
            // Reconnect if connection failed or heartbeat error is not
            // definitely temporary.
            if netErr, ok := beatErr.(net.Error); connErr != nil || beatErr != nil && !(ok && netErr.Temporary()) {
                if connErr = c.connect(); connErr != nil {
                    log.Warning(connErr)
                    setUnhealthy()
                    continue
                }
            }

            if beatErr = c.heartbeat(); beatErr == nil {
                setHealthy()
                break
            } else {
                log.Warning(beatErr)
                setUnhealthy()
            }
        }

        // Wait after the heartbeat so that the first iteration gets a wait-free
        // heartbeat attempt.
        select {
        case <-closer:
            c.Close()
            return
        case <-c.Closed:
            return
        case <-time.After(heartbeatInterval):
            // TODO(tamird): Perhaps retry more aggressively when the client is unhealthy.
        }
    }
}
Example 7: removeLeaseIfExpiring
// removeLeaseIfExpiring removes a lease and returns true if it is about to expire.
// The method also resets the transaction deadline.
func (p *planner) removeLeaseIfExpiring(lease *LeaseState) bool {
    if lease == nil || lease.hasSomeLifeLeft(p.leaseMgr.clock) {
        return false
    }

    // Remove the lease from p.leases.
    idx := -1
    for i, l := range p.leases {
        if l == lease {
            idx = i
            break
        }
    }
    if idx == -1 {
        log.Warningf(p.ctx(), "lease (%s) not found", lease)
        return false
    }
    p.leases[idx] = p.leases[len(p.leases)-1]
    p.leases[len(p.leases)-1] = nil
    p.leases = p.leases[:len(p.leases)-1]

    if err := p.leaseMgr.Release(lease); err != nil {
        log.Warning(p.ctx(), err)
    }

    // Reset the deadline so that a new deadline will be set after the lease is acquired.
    p.txn.ResetDeadline()
    for _, l := range p.leases {
        p.txn.UpdateDeadlineMaybe(hlc.Timestamp{WallTime: l.Expiration().UnixNano()})
    }
    return true
}
Example 8: Send
// Batch sends a request to Cockroach via RPC. Errors which are retryable are
// retried with backoff in a loop using the default retry options. Other errors
// sending the request are retried indefinitely using the same client command
// ID to avoid reporting failure when in fact the command may have gone through
// and been executed successfully. We retry here to eventually get through with
// the same client command ID and be given the cached response.
func (s *rpcSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
    var err error
    var br proto.BatchResponse
    for r := retry.Start(s.retryOpts); r.Next(); {
        select {
        case <-s.client.Healthy():
        default:
            err = fmt.Errorf("failed to send RPC request %s: client is unhealthy", method)
            log.Warning(err)
            continue
        }

        if err = s.client.Call(method, &ba, &br); err != nil {
            br.Reset() // don't trust anyone.
            // Assume all errors sending request are retryable. The actual
            // number of things that could go wrong is vast, but we don't
            // want to miss any which should in theory be retried with the
            // same client command ID. We log the error here as a warning so
            // there's visibility that this is happening. Some of the errors
            // we'll sweep up in this net shouldn't be retried, but we can't
            // really know for sure which.
            log.Warningf("failed to send RPC request %s: %s", method, err)
            continue
        }
        // On successful post, we're done with retry loop.
        break
    }
    if err != nil {
        return nil, proto.NewError(err)
    }

    pErr := br.Error
    br.Error = nil
    return &br, pErr
}
Example 9: exec
func (txn *Txn) exec(retryable func(txn *Txn) error) error {
    // Run retryable in a retry loop until we encounter a success or
    // error condition this loop isn't capable of handling.
    var err error
    for r := retry.Start(txn.db.txnRetryOptions); r.Next(); {
        err = retryable(txn)
        if err == nil && txn.Proto.Status == roachpb.PENDING {
            // retryable succeeded, but didn't commit.
            err = txn.commit(nil)
        }

        if restartErr, ok := err.(roachpb.TransactionRestartError); ok {
            if log.V(2) {
                log.Warning(err)
            }
            switch restartErr.CanRestartTransaction() {
            case roachpb.TransactionRestart_IMMEDIATE:
                r.Reset()
                continue
            case roachpb.TransactionRestart_BACKOFF:
                continue
            }
            // By default, fall through and break.
        }
        break
    }
    txn.Cleanup(err)
    return err
}
Example 10: Send
// Send implements the client.Sender interface.
func (rls *retryableLocalSender) Send(_ context.Context, call proto.Call) {
    // Instant retry to handle the case of a range split, which is
    // exposed here as a RangeKeyMismatchError.
    retryOpts := retry.Options{}
    // In local tests, the RPCs are not actually sent over the wire. We
    // need to clone the Txn in order to avoid unexpected sharing
    // between TxnCoordSender and client.Txn.
    if header := call.Args.Header(); header.Txn != nil {
        header.Txn = gogoproto.Clone(header.Txn).(*proto.Transaction)
    }

    var err error
    for r := retry.Start(retryOpts); r.Next(); {
        call.Reply.Header().Error = nil
        rls.LocalSender.Send(context.TODO(), call)
        // Check for range key mismatch error (this could happen if
        // range was split between lookup and execution). In this case,
        // reset header.Replica and engage retry loop.
        if err = call.Reply.Header().GoError(); err != nil {
            if _, ok := err.(*proto.RangeKeyMismatchError); ok {
                // Clear request replica.
                call.Args.Header().Replica = proto.Replica{}
                log.Warning(err)
                continue
            }
        }
        return
    }
    panic(fmt.Sprintf("local sender did not succeed: %s", err))
}
Example 11: exec
func (txn *Txn) exec(retryable func(txn *Txn) error) (err error) {
    // Run retryable in a retry loop until we encounter a success or
    // error condition this loop isn't capable of handling.
    for r := retry.Start(txn.db.txnRetryOptions); r.Next(); {
        txn.haveTxnWrite, txn.haveEndTxn = false, false // always reset before [re]starting txn
        if err = retryable(txn); err == nil {
            if !txn.haveEndTxn && txn.haveTxnWrite {
                // If there were no errors running retryable, commit the txn. This
                // may block waiting for outstanding writes to complete in case
                // retryable didn't -- we need the most recent of all response
                // timestamps in order to commit.
                err = txn.Commit()
            }
        }
        if restartErr, ok := err.(proto.TransactionRestartError); ok {
            if log.V(2) {
                log.Warning(err)
            }
            if restartErr.CanRestartTransaction() == proto.TransactionRestart_IMMEDIATE {
                r.Reset()
                continue
            } else if restartErr.CanRestartTransaction() == proto.TransactionRestart_BACKOFF {
                continue
            }
            // By default, fall through and break.
        }
        break
    }
    if err != nil && txn.haveTxnWrite {
        if replyErr := txn.Rollback(); replyErr != nil {
            log.Errorf("failure aborting transaction: %s; abort caused by: %s", replyErr, err)
        }
    }
    return
}
Example 12: Send
// Send sends call to Cockroach via an RPC request. Errors which are retryable
// are retried with backoff in a loop using the default retry options. Other
// errors sending the request are retried indefinitely using the same client
// command ID to avoid reporting failure when in fact the command may have gone
// through and been executed successfully. We retry here to eventually get
// through with the same client command ID and be given the cached response.
func (s *rpcSender) Send(_ context.Context, call proto.Call) {
    method := fmt.Sprintf("Server.%s", call.Args.Method())

    var err error
    for r := retry.Start(s.retryOpts); r.Next(); {
        select {
        case <-s.client.Healthy():
        default:
            err = fmt.Errorf("failed to send RPC request %s: client is unhealthy", method)
            log.Warning(err)
            continue
        }

        if err = s.client.Call(method, call.Args, call.Reply); err != nil {
            // Assume all errors sending request are retryable. The actual
            // number of things that could go wrong is vast, but we don't
            // want to miss any which should in theory be retried with the
            // same client command ID. We log the error here as a warning so
            // there's visibility that this is happening. Some of the errors
            // we'll sweep up in this net shouldn't be retried, but we can't
            // really know for sure which.
            log.Warningf("failed to send RPC request %s: %s", method, err)
            continue
        }
        // On successful post, we're done with retry loop.
        break
    }
    if err != nil {
        call.Reply.Header().SetGoError(err)
    }
}
Example 13: execStmts
// exec executes the request. Any error encountered is returned; it is
// the caller's responsibility to update the response.
func (e *Executor) execStmts(sql string, planMaker *planner) driver.Response {
    var resp driver.Response
    stmts, err := planMaker.parser.Parse(sql, parser.Syntax(planMaker.session.Syntax))
    if err != nil {
        // A parse error occurred: we can't determine if there were multiple
        // statements or only one, so just pretend there was one.
        resp.Results = append(resp.Results, makeResultFromError(planMaker, err))
        return resp
    }
    for _, stmt := range stmts {
        result, err := e.execStmt(stmt, planMaker)
        if err != nil {
            result = makeResultFromError(planMaker, err)
        }
        resp.Results = append(resp.Results, result)
        // Release the leases once a transaction is complete.
        if planMaker.txn == nil {
            planMaker.releaseLeases(e.db)
            // The previous transaction finished executing some schema changes. Wait for
            // the schema changes to propagate to all nodes, so that once the executor
            // returns the new schema is live everywhere. This is not needed for
            // correctness but is done to make the UI experience/tests predictable.
            if err := e.waitForCompletedSchemaChangesToPropagate(planMaker); err != nil {
                log.Warning(err)
            }
        }
    }
    return resp
}
Example 14: runBenchmarkBank
// runBenchmarkBank mirrors the SQL performed by examples/sql_bank, but
// structured as a benchmark for easier usage of the Go performance analysis
// tools like pprof, memprof and trace.
func runBenchmarkBank(b *testing.B, db *sql.DB) {
    if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bank`); err != nil {
        b.Fatal(err)
    }

    {
        // Initialize the "accounts" table.
        schema := `
CREATE TABLE IF NOT EXISTS bank.accounts (
  id INT PRIMARY KEY,
  balance INT NOT NULL
)`
        if _, err := db.Exec(schema); err != nil {
            b.Fatal(err)
        }
        if _, err := db.Exec("TRUNCATE TABLE bank.accounts"); err != nil {
            b.Fatal(err)
        }

        var placeholders bytes.Buffer
        var values []interface{}
        for i := 0; i < *numAccounts; i++ {
            if i > 0 {
                placeholders.WriteString(", ")
            }
            fmt.Fprintf(&placeholders, "($%d, 0)", i+1)
            values = append(values, i)
        }
        stmt := `INSERT INTO bank.accounts (id, balance) VALUES ` + placeholders.String()
        if _, err := db.Exec(stmt, values...); err != nil {
            b.Fatal(err)
        }
    }

    b.ResetTimer()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            from := rand.Intn(*numAccounts)
            to := rand.Intn(*numAccounts - 1)
            if from == to {
                to = *numAccounts - 1
            }
            amount := rand.Intn(*maxTransfer)

            update := `
UPDATE bank.accounts
  SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
  WHERE id IN ($1, $2) AND (SELECT balance >= $3 FROM bank.accounts WHERE id = $1)
`
            if _, err := db.Exec(update, from, to, amount); err != nil {
                if log.V(1) {
                    log.Warning(err)
                }
                continue
            }
        }
    })
    b.StopTimer()
}
Example 15: runCallbacks
func (is *infoStore) runCallbacks(key string, content roachpb.Value, callbacks ...Callback) {
    // Add the callbacks to the callback work list.
    f := func() {
        for _, method := range callbacks {
            method(key, content)
        }
    }
    is.callbackWorkMu.Lock()
    is.callbackWork = append(is.callbackWork, f)
    is.callbackWorkMu.Unlock()

    // Run callbacks in a goroutine to avoid mutex reentry. We also guarantee
    // callbacks are run in order such that if a key is updated twice in
    // succession, the second callback will never be run before the first.
    if err := is.stopper.RunAsyncTask(func() {
        // Grab the callback mutex to serialize execution of the callbacks.
        is.callbackMu.Lock()
        defer is.callbackMu.Unlock()

        // Grab and execute the list of work.
        is.callbackWorkMu.Lock()
        work := is.callbackWork
        is.callbackWork = nil
        is.callbackWorkMu.Unlock()

        for _, w := range work {
            w()
        }
    }); err != nil {
        log.Warning(err)
    }
}