This page collects typical usage examples of the Golang method github.com/youtube/vitess/go/vt/concurrency.AllErrorRecorder.RecordError. If you are wondering what AllErrorRecorder.RecordError does, how to call it, or how it is used in practice, the curated examples below should help. You can also browse further usage examples of the type that defines the method, github.com/youtube/vitess/go/vt/concurrency.AllErrorRecorder.
Below are 15 code examples of the AllErrorRecorder.RecordError method, sorted by popularity by default.
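Every example below follows the same pattern: create a concurrency.AllErrorRecorder up front, have each worker (usually a goroutine) call RecordError on any failure it hits, and collapse everything into a single error with rec.Error() after the sync.WaitGroup drains. The following sketch is a minimal, self-contained illustration of that pattern, not code from Vitess itself: doWork is a hypothetical stand-in for the real per-shard work, and only the RecordError, HasErrors, and Error methods that appear in the examples below are assumed.

package main

import (
	"fmt"
	"sync"

	"github.com/youtube/vitess/go/vt/concurrency"
)

// doWork is a hypothetical task that fails for even-numbered inputs.
func doWork(i int) error {
	if i%2 == 0 {
		return fmt.Errorf("task %d failed", i)
	}
	return nil
}

func main() {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Record the result directly; as in Examples 4 and 9 below,
			// a nil error is simply ignored by the recorder.
			rec.RecordError(doWork(i))
		}(i)
	}
	wg.Wait()
	// Error() aggregates everything recorded, or returns nil if
	// nothing failed.
	if rec.HasErrors() {
		fmt.Println(rec.Error())
	}
}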
Example 1: getMastersPosition
// getMastersPosition gathers the master replication position of every
// shard in parallel, recording any per-shard failure into the shared
// AllErrorRecorder.
func (wr *Wrangler) getMastersPosition(ctx context.Context, shards []*topo.ShardInfo) (map[*topo.ShardInfo]string, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]string)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			wr.Logger().Infof("Gathering master position for %v", topoproto.TabletAliasString(si.MasterAlias))
			ti, err := wr.ts.GetTablet(ctx, si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			pos, err := wr.tmc.MasterPosition(ctx, ti)
			if err != nil {
				rec.RecordError(err)
				return
			}

			wr.Logger().Infof("Got master position for %v", topoproto.TabletAliasString(si.MasterAlias))
			mu.Lock()
			result[si] = pos
			mu.Unlock()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
Example 2: CleanUp
// CleanUp will run the recorded actions.
// If an action on a target fails, it will not run the next action on
// the same target.
// We return the aggregate errors for all cleanups.
// TODO(alainjobart) Actions should run concurrently on a per target
// basis. They are then serialized on each target.
func (cleaner *Cleaner) CleanUp(wr *Wrangler) error {
	actionMap := make(map[string]*cleanUpHelper)
	rec := concurrency.AllErrorRecorder{}
	cleaner.mu.Lock()
	for i := len(cleaner.actions) - 1; i >= 0; i-- {
		actionReference := cleaner.actions[i]
		helper, ok := actionMap[actionReference.target]
		if !ok {
			helper = &cleanUpHelper{
				err: nil,
			}
			actionMap[actionReference.target] = helper
		}
		if helper.err != nil {
			log.Warningf("previous action failed on target %v, not running %v", actionReference.target, actionReference.name)
			continue
		}
		err := actionReference.action.CleanUp(wr)
		if err != nil {
			helper.err = err
			rec.RecordError(err)
			log.Errorf("action %v failed on %v: %v", actionReference.name, actionReference.target, err)
		} else {
			log.Infof("action %v successful on %v", actionReference.name, actionReference.target)
		}
	}
	cleaner.mu.Unlock()
	return rec.Error()
}
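Note that CleanUp, unlike most of the other examples, calls RecordError from a single sequential loop: the recorder is just as useful when you want to keep processing after a failure and report every error at the end, rather than aborting on the first one.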
Example 3: getMastersPosition
// getMastersPosition gathers the master replication position of every
// shard in parallel.
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]*mysqlctl.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]*mysqlctl.ReplicationPosition)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			log.Infof("Gathering master position for %v", si.MasterAlias)
			pos, err := wr.getMasterPosition(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
			} else {
				log.Infof("Got master position for %v", si.MasterAlias)
				mu.Lock()
				result[si] = pos
				mu.Unlock()
			}
			wg.Done()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
Example 4: restartSlavesExternal
// restartSlavesExternal tells every slave tablet, and then the old
// master, that the master was restarted externally.
func (wr *Wrangler) restartSlavesExternal(slaveTabletMap map[topo.TabletAlias]*topo.TabletInfo, masterTablet, masterElectTablet *topo.TabletInfo, scrapStragglers bool) error {
	recorder := concurrency.AllErrorRecorder{}
	wg := sync.WaitGroup{}

	swrd := tm.SlaveWasRestartedData{
		Parent:               masterElectTablet.Alias(),
		ExpectedMasterAddr:   masterElectTablet.MysqlAddr,
		ExpectedMasterIpAddr: masterElectTablet.MysqlIpAddr,
		ScrapStragglers:      scrapStragglers,
	}

	// do all the slaves
	for _, ti := range slaveTabletMap {
		wg.Add(1)
		go func(ti *topo.TabletInfo) {
			recorder.RecordError(wr.slaveWasRestarted(ti, &swrd))
			wg.Done()
		}(ti)
	}
	wg.Wait()

	// then do the master
	recorder.RecordError(wr.slaveWasRestarted(masterTablet, &swrd))
	return recorder.Error()
}
Example 5: waitForFilteredReplication
// waitForFilteredReplication waits for the master of each destination
// shard to catch up to the position recorded for its source shard.
func (wr *Wrangler) waitForFilteredReplication(sourcePositions map[*topo.ShardInfo]myproto.ReplicationPosition, destinationShards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range destinationShards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			for _, sourceShard := range si.SourceShards {
				// we're waiting on this guy
				blpPosition := blproto.BlpPosition{
					Uid: sourceShard.Uid,
				}

				// find the position it should be at
				for s, pos := range sourcePositions {
					if s.Keyspace() == sourceShard.Keyspace && s.ShardName() == sourceShard.Shard {
						blpPosition.Position = pos
					}
				}

				log.Infof("Waiting for %v to catch up", si.MasterAlias)
				if err := wr.ai.WaitBlpPosition(si.MasterAlias, blpPosition, wr.ActionTimeout()); err != nil {
					rec.RecordError(err)
				} else {
					log.Infof("%v caught up", si.MasterAlias)
				}
			}
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example 6: execShardAction
// execShardAction executes the action on a particular shard.
// If the action fails, it determines whether the keyspace/shard
// have moved, re-resolves the topology and tries again, if it is
// not executing a transaction.
func (stc *ScatterConn) execShardAction(
	context interface{},
	keyspace string,
	shard string,
	tabletType topo.TabletType,
	session *SafeSession,
	action shardActionFunc,
	allErrors *concurrency.AllErrorRecorder,
	results chan interface{},
) {
	for {
		sdc := stc.getConnection(keyspace, shard, tabletType)
		transactionId, err := stc.updateSession(context, sdc, keyspace, shard, tabletType, session)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		err = action(sdc, transactionId, results)

		// Determine whether keyspace can be re-resolved
		if shouldResolveKeyspace(err, transactionId) {
			newKeyspace, err := getKeyspaceAlias(stc.toposerv, stc.cell, keyspace, tabletType)
			if err == nil && newKeyspace != keyspace {
				sdc.Close()
				stc.cleanupShardConn(keyspace, shard, tabletType)
				keyspace = newKeyspace
				continue
			}
		}
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		break
	}
}
Example 7: CleanUp
// CleanUp will run the recorded actions.
// If an action on a target fails, it will not run the next action on
// the same target.
// We return the aggregate errors for all cleanups.
// CleanUp uses its own context, with a timeout of 5 minutes, so that
// cleanup actions will run even if the original context times out.
// TODO(alainjobart) Actions should run concurrently on a per target
// basis. They are then serialized on each target.
func (cleaner *Cleaner) CleanUp(wr *Wrangler) error {
	// we use a background context so we're not dependent on the original context timeout
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	actionMap := make(map[string]*cleanUpHelper)
	rec := concurrency.AllErrorRecorder{}
	cleaner.mu.Lock()
	for i := len(cleaner.actions) - 1; i >= 0; i-- {
		actionReference := cleaner.actions[i]
		helper, ok := actionMap[actionReference.target]
		if !ok {
			helper = &cleanUpHelper{
				err: nil,
			}
			actionMap[actionReference.target] = helper
		}
		if helper.err != nil {
			wr.Logger().Warningf("previous action failed on target %v, not running %v", actionReference.target, actionReference.name)
			continue
		}
		err := actionReference.action.CleanUp(ctx, wr)
		if err != nil {
			helper.err = err
			rec.RecordError(err)
			wr.Logger().Errorf("action %v failed on %v: %v", actionReference.name, actionReference.target, err)
		} else {
			wr.Logger().Infof("action %v successful on %v", actionReference.name, actionReference.target)
		}
	}
	cleaner.mu.Unlock()
	cancel()
	return rec.Error()
}
Example 8: execShardAction
// execShardAction executes the action on a particular shard.
// If the action fails, it determines whether the keyspace/shard
// have moved, re-resolves the topology and tries again, if it is
// not executing a transaction.
func (stc *ScatterConn) execShardAction(
	context context.Context,
	keyspace string,
	shard string,
	tabletType topo.TabletType,
	session *SafeSession,
	action shardActionFunc,
	allErrors *concurrency.AllErrorRecorder,
	results chan interface{},
) {
	for {
		sdc := stc.getConnection(context, keyspace, shard, tabletType)
		transactionId, err := stc.updateSession(context, sdc, keyspace, shard, tabletType, session)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		err = action(sdc, transactionId, results)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		break
	}
}
Example 9: WaitForDrain
// WaitForDrain blocks until the selected tablets (cells/keyspace/shard/tablet_type)
// have reported a QPS rate of 0.0.
// NOTE: This is just an observation of one point in time and no guarantee that
// the tablet was actually drained. At later times, a QPS rate > 0.0 could still
// be observed.
func (wr *Wrangler) WaitForDrain(ctx context.Context, cells []string, keyspace, shard string, servedType topodatapb.TabletType,
	retryDelay, healthCheckTopologyRefresh, healthcheckRetryDelay, healthCheckTimeout time.Duration) error {
	if len(cells) == 0 {
		// Retrieve list of cells for the shard from the topology.
		shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard)
		if err != nil {
			return fmt.Errorf("failed to retrieve list of all cells. GetShard() failed: %v", err)
		}
		cells = shardInfo.Cells
	}

	// Check all cells in parallel.
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			rec.RecordError(wr.waitForDrainInCell(ctx, cell, keyspace, shard, servedType,
				retryDelay, healthCheckTopologyRefresh, healthcheckRetryDelay, healthCheckTimeout))
		}(cell)
	}
	wg.Wait()

	return rec.Error()
}
Example 10: makeMastersReadWrite
// FIXME(alainjobart) no action to become read-write now, just use Ping,
// that forces the shard reload and will stop replication.
func (wr *Wrangler) makeMastersReadWrite(shards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			log.Infof("Pinging master %v", si.MasterAlias)

			actionPath, err := wr.ai.Ping(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			if err := wr.WaitForCompletion(actionPath); err != nil {
				rec.RecordError(err)
			} else {
				log.Infof("%v responded", si.MasterAlias)
			}
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example 11: getMastersPosition
// getMastersPosition fetches the tablet record and master replication
// position for every shard in parallel.
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]myproto.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]myproto.ReplicationPosition)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			log.Infof("Gathering master position for %v", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			pos, err := wr.ai.MasterPosition(ti, wr.ActionTimeout())
			if err != nil {
				rec.RecordError(err)
				return
			}

			log.Infof("Got master position for %v", si.MasterAlias)
			mu.Lock()
			result[si] = pos
			mu.Unlock()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
Example 12: RebuildShard
// RebuildShard updates the SrvShard objects and underlying serving graph.
//
// Re-read from TopologyServer to make sure we are using the side
// effects of all actions.
//
// This function will start each cell over from the beginning on ErrBadVersion,
// so it doesn't need a lock on the shard.
func RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace, shard string, cells []string, lockTimeout time.Duration) (*topo.ShardInfo, error) {
	log.Infof("RebuildShard %v/%v", keyspace, shard)

	span := trace.NewSpanFromContext(ctx)
	span.StartLocal("topotools.RebuildShard")
	defer span.Finish()
	ctx = trace.NewContext(ctx, span)

	// read the existing shard info. It has to exist.
	shardInfo, err := ts.GetShard(ctx, keyspace, shard)
	if err != nil {
		return nil, err
	}

	// rebuild all cells in parallel
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range shardInfo.Cells {
		// skip this cell if we shouldn't rebuild it
		if !topo.InCellList(cell, cells) {
			continue
		}
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			rec.RecordError(rebuildCellSrvShard(ctx, log, ts, shardInfo, cell))
		}(cell)
	}
	wg.Wait()

	return shardInfo, rec.Error()
}
Example 13: FindAllTabletAliasesInShardByCell
// FindAllTabletAliasesInShardByCell uses the replication graph to find all the
// tablet aliases in the given shard.
//
// It can return ErrPartialResult if some cells were not fetched,
// in which case the result only contains the cells that were fetched.
//
// The tablet aliases are sorted by cell, then by UID.
func FindAllTabletAliasesInShardByCell(ctx context.Context, ts Server, keyspace, shard string, cells []string) ([]TabletAlias, error) {
	span := trace.NewSpanFromContext(ctx)
	span.StartLocal("topo.FindAllTabletAliasesInShardbyCell")
	span.Annotate("keyspace", keyspace)
	span.Annotate("shard", shard)
	span.Annotate("num_cells", len(cells))
	defer span.Finish()
	ctx = trace.NewContext(ctx, span)

	// read the shard information to find the cells
	si, err := GetShard(ctx, ts, keyspace, shard)
	if err != nil {
		return nil, err
	}

	resultAsMap := make(map[TabletAlias]bool)
	if si.MasterAlias != nil && !TabletAliasIsZero(si.MasterAlias) {
		if InCellList(si.MasterAlias.Cell, cells) {
			resultAsMap[ProtoToTabletAlias(si.MasterAlias)] = true
		}
	}

	// read the replication graph in each cell and add all found tablets
	wg := sync.WaitGroup{}
	mutex := sync.Mutex{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range si.Cells {
		if !InCellList(cell, cells) {
			continue
		}
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err))
				return
			}

			mutex.Lock()
			for _, node := range sri.Nodes {
				resultAsMap[ProtoToTabletAlias(node.TabletAlias)] = true
			}
			mutex.Unlock()
		}(cell)
	}
	wg.Wait()
	err = nil
	if rec.HasErrors() {
		log.Warningf("FindAllTabletAliasesInShard(%v,%v): got partial result: %v", keyspace, shard, rec.Error())
		err = ErrPartialResult
	}

	result := make([]TabletAlias, 0, len(resultAsMap))
	for a := range resultAsMap {
		result = append(result, a)
	}
	sort.Sort(TabletAliasList(result))
	return result, err
}
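Example 13 also demonstrates rec.HasErrors(), which lets the caller inspect the aggregate without returning it directly: the per-cell failures are logged and downgraded to the ErrPartialResult sentinel, so callers still receive the tablets from the cells that did respond.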
Example 14: DeleteKeyspaceShards
// DeleteKeyspaceShards implements topo.Server.
func (s *Server) DeleteKeyspaceShards(ctx context.Context, keyspace string) error {
	shards, err := s.GetShardNames(ctx, keyspace)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	global := s.getGlobal()
	for _, shard := range shards {
		wg.Add(1)
		go func(shard string) {
			defer wg.Done()
			_, err := global.Delete(shardDirPath(keyspace, shard), true /* recursive */)
			rec.RecordError(convertError(err))
		}(shard)
	}
	wg.Wait()
	if err = rec.Error(); err != nil {
		return err
	}

	event.Dispatch(&events.KeyspaceChange{
		KeyspaceInfo: *topo.NewKeyspaceInfo(keyspace, nil, -1),
		Status:       "deleted all shards",
	})
	return nil
}
Example 15: CopyKeyspaces
// CopyKeyspaces will create the keyspaces in the destination topo
func CopyKeyspaces(fromTS, toTS topo.Server) {
	keyspaces, err := fromTS.GetKeyspaces()
	if err != nil {
		log.Fatalf("GetKeyspaces: %v", err)
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()

			k, err := fromTS.GetKeyspace(keyspace)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err))
				return
			}

			if err := toTS.CreateKeyspace(keyspace, k.Keyspace); err != nil {
				if err == topo.ErrNodeExists {
					log.Warningf("keyspace %v already exists", keyspace)
				} else {
					rec.RecordError(fmt.Errorf("CreateKeyspace(%v): %v", keyspace, err))
				}
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		log.Fatalf("copyKeyspaces failed: %v", rec.Error())
	}
}