This article collects typical usage examples of the Golang AllErrorRecorder class from github.com/youtube/vitess/go/vt/concurrency. If you are wondering what AllErrorRecorder is for and how to use it, the curated class code examples below should help.
15 code examples of the AllErrorRecorder class are shown below, sorted by popularity.
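Nearly every example below follows the same pattern: fan work out to one goroutine per item, record any failure into a shared AllErrorRecorder, and return the aggregate error once the WaitGroup drains. Here is a minimal, self-contained sketch of that pattern; the checkHost helper and the host list are made up for illustration, and the comments rely on the behavior the examples below also rely on (RecordError ignores nil and is safe for concurrent use):

package main

import (
	"fmt"
	"sync"

	"github.com/youtube/vitess/go/vt/concurrency"
)

// checkHost stands in for any per-item operation that can fail.
func checkHost(host string) error {
	if host == "bad-host" {
		return fmt.Errorf("cannot reach %v", host)
	}
	return nil
}

func main() {
	hosts := []string{"host-1", "bad-host", "host-2"}
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, host := range hosts {
		wg.Add(1)
		go func(host string) {
			defer wg.Done()
			// RecordError is safe for concurrent use and ignores nil.
			rec.RecordError(checkHost(host))
		}(host)
	}
	wg.Wait()
	// Error() aggregates everything recorded; it is nil if nothing failed.
	if err := rec.Error(); err != nil {
		fmt.Println("some hosts failed:", err)
	}
}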
Example 1: getMastersPosition
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]*mysqlctl.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]*mysqlctl.ReplicationPosition)

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			log.Infof("Gathering master position for %v", si.MasterAlias)
			pos, err := wr.getMasterPosition(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
			} else {
				log.Infof("Got master position for %v", si.MasterAlias)
				mu.Lock()
				result[si] = pos
				mu.Unlock()
			}
			wg.Done()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
Example 2: DeleteKeyspaceShards
// DeleteKeyspaceShards implements topo.Server.
func (s *Server) DeleteKeyspaceShards(ctx context.Context, keyspace string) error {
	shards, err := s.GetShardNames(ctx, keyspace)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	global := s.getGlobal()
	for _, shard := range shards {
		wg.Add(1)
		go func(shard string) {
			defer wg.Done()
			_, err := global.Delete(shardDirPath(keyspace, shard), true /* recursive */)
			rec.RecordError(convertError(err))
		}(shard)
	}
	wg.Wait()
	if err = rec.Error(); err != nil {
		return err
	}

	event.Dispatch(&events.KeyspaceChange{
		KeyspaceInfo: *topo.NewKeyspaceInfo(keyspace, nil, -1),
		Status:       "deleted all shards",
	})
	return nil
}
Example 3: CleanUp
// CleanUp will run the recorded actions.
// If an action on a target fails, it will not run the next action on
// the same target.
// We return the aggregate errors for all cleanups.
// TODO(alainjobart) Actions should run concurrently on a per target
// basis. They are then serialized on each target.
func (cleaner *Cleaner) CleanUp(wr *Wrangler) error {
	actionMap := make(map[string]*cleanUpHelper)
	rec := concurrency.AllErrorRecorder{}
	cleaner.mu.Lock()
	for i := len(cleaner.actions) - 1; i >= 0; i-- {
		actionReference := cleaner.actions[i]
		helper, ok := actionMap[actionReference.target]
		if !ok {
			helper = &cleanUpHelper{
				err: nil,
			}
			actionMap[actionReference.target] = helper
		}
		if helper.err != nil {
			log.Warningf("previous action failed on target %v, not running %v", actionReference.target, actionReference.name)
			continue
		}
		err := actionReference.action.CleanUp(wr)
		if err != nil {
			helper.err = err
			rec.RecordError(err)
			log.Errorf("action %v failed on %v: %v", actionReference.name, actionReference.target, err)
		} else {
			log.Infof("action %v successful on %v", actionReference.name, actionReference.target)
		}
	}
	cleaner.mu.Unlock()
	return rec.Error()
}
Example 4: FindAllTabletAliasesInShardByCell
// FindAllTabletAliasesInShardByCell uses the replication graph to find all the
// tablet aliases in the given shard.
//
// It can return ErrPartialResult if some cells were not fetched,
// in which case the result only contains the cells that were fetched.
//
// The tablet aliases are sorted by cell, then by UID.
func FindAllTabletAliasesInShardByCell(ctx context.Context, ts Server, keyspace, shard string, cells []string) ([]TabletAlias, error) {
	span := trace.NewSpanFromContext(ctx)
	span.StartLocal("topo.FindAllTabletAliasesInShardByCell")
	span.Annotate("keyspace", keyspace)
	span.Annotate("shard", shard)
	span.Annotate("num_cells", len(cells))
	defer span.Finish()
	ctx = trace.NewContext(ctx, span)

	// read the shard information to find the cells
	si, err := GetShard(ctx, ts, keyspace, shard)
	if err != nil {
		return nil, err
	}

	resultAsMap := make(map[TabletAlias]bool)
	if si.MasterAlias != nil && !TabletAliasIsZero(si.MasterAlias) {
		if InCellList(si.MasterAlias.Cell, cells) {
			resultAsMap[ProtoToTabletAlias(si.MasterAlias)] = true
		}
	}

	// read the replication graph in each cell and add all found tablets
	wg := sync.WaitGroup{}
	mutex := sync.Mutex{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range si.Cells {
		if !InCellList(cell, cells) {
			continue
		}
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err))
				return
			}

			mutex.Lock()
			for _, node := range sri.Nodes {
				resultAsMap[ProtoToTabletAlias(node.TabletAlias)] = true
			}
			mutex.Unlock()
		}(cell)
	}
	wg.Wait()
	err = nil
	if rec.HasErrors() {
		log.Warningf("FindAllTabletAliasesInShard(%v,%v): got partial result: %v", keyspace, shard, rec.Error())
		err = ErrPartialResult
	}

	result := make([]TabletAlias, 0, len(resultAsMap))
	for a := range resultAsMap {
		result = append(result, a)
	}
	sort.Sort(TabletAliasList(result))
	return result, err
}
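Callers can distinguish a complete result from a partial one by checking for ErrPartialResult. A short sketch of such a caller follows; listTablets is a hypothetical wrapper written for illustration, and ctx, ts, and the logging setup are assumed to exist as in the example above:

// listTablets is a hypothetical wrapper that tolerates unreachable cells.
func listTablets(ctx context.Context, ts topo.Server, keyspace, shard string, cells []string) ([]topo.TabletAlias, error) {
	aliases, err := topo.FindAllTabletAliasesInShardByCell(ctx, ts, keyspace, shard, cells)
	if err == topo.ErrPartialResult {
		// Some cells could not be fetched; aliases covers only the
		// cells that answered. Proceed with what we have.
		log.Warningf("using partial tablet list for %v/%v", keyspace, shard)
		return aliases, nil
	}
	return aliases, err
}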
Example 5: execShardAction
// execShardAction executes the action on a particular shard.
// If the action fails and no transaction is pending, it determines
// whether the keyspace/shard has moved, re-resolves the topology,
// and tries again.
func (stc *ScatterConn) execShardAction(
	context interface{},
	keyspace string,
	shard string,
	tabletType topo.TabletType,
	session *SafeSession,
	action shardActionFunc,
	allErrors *concurrency.AllErrorRecorder,
	results chan interface{},
) {
	for {
		sdc := stc.getConnection(keyspace, shard, tabletType)
		transactionId, err := stc.updateSession(context, sdc, keyspace, shard, tabletType, session)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		err = action(sdc, transactionId, results)
		// Determine whether keyspace can be re-resolved
		if shouldResolveKeyspace(err, transactionId) {
			newKeyspace, err := getKeyspaceAlias(stc.toposerv, stc.cell, keyspace, tabletType)
			if err == nil && newKeyspace != keyspace {
				sdc.Close()
				stc.cleanupShardConn(keyspace, shard, tabletType)
				keyspace = newKeyspace
				continue
			}
		}
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		break
	}
}
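The retry loop above is a general shape worth naming: try the action; if the failure suggests the routing information is stale (and no transaction is open), re-resolve the target and go around again; any other error is recorded and ends the attempt. A stripped-down sketch of that shape, with hypothetical try and resolve helpers standing in for the shard-specific logic:

// runWithReResolve is a hypothetical, generic version of the loop above.
func runWithReResolve(target string, try func(string) error,
	resolve func(string) (string, error), rec *concurrency.AllErrorRecorder) {
	for {
		err := try(target)
		if err == nil {
			return
		}
		// The target may be stale: re-resolve, and retry only if
		// resolution succeeded and actually produced a new target.
		if newTarget, rerr := resolve(target); rerr == nil && newTarget != target {
			target = newTarget
			continue
		}
		rec.RecordError(err)
		return
	}
}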
Example 6: WaitForDrain
// WaitForDrain blocks until the selected tablets (cells/keyspace/shard/tablet_type)
// have reported a QPS rate of 0.0.
// NOTE: This is just an observation of one point in time and no guarantee that
// the tablet was actually drained. At later times, a QPS rate > 0.0 could still
// be observed.
func (wr *Wrangler) WaitForDrain(ctx context.Context, cells []string, keyspace, shard string, servedType topodatapb.TabletType,
	retryDelay, healthCheckTopologyRefresh, healthcheckRetryDelay, healthCheckTimeout time.Duration) error {
	if len(cells) == 0 {
		// Retrieve list of cells for the shard from the topology.
		shardInfo, err := wr.ts.GetShard(ctx, keyspace, shard)
		if err != nil {
			return fmt.Errorf("failed to retrieve list of all cells. GetShard() failed: %v", err)
		}
		cells = shardInfo.Cells
	}

	// Check all cells in parallel.
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			rec.RecordError(wr.waitForDrainInCell(ctx, cell, keyspace, shard, servedType,
				retryDelay, healthCheckTopologyRefresh, healthcheckRetryDelay, healthCheckTimeout))
		}(cell)
	}
	wg.Wait()

	return rec.Error()
}
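Given the signature above, a call could look like the following. The keyspace, shard, and durations are illustrative, and passing nil for cells makes WaitForDrain read the cell list from the shard record, as the example shows:

// drainReplicas is a hypothetical caller; wr and ctx come from elsewhere.
func drainReplicas(ctx context.Context, wr *wrangler.Wrangler) error {
	return wr.WaitForDrain(ctx, nil /* cells: read from the shard record */, "test_keyspace", "0",
		topodatapb.TabletType_REPLICA,
		1*time.Second,  // retryDelay
		30*time.Second, // healthCheckTopologyRefresh
		5*time.Second,  // healthcheckRetryDelay
		1*time.Minute)  // healthCheckTimeout
}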
Example 7: RebuildShard
// RebuildShard updates the SrvShard objects and underlying serving graph.
//
// Re-read from TopologyServer to make sure we are using the side
// effects of all actions.
//
// This function will start each cell over from the beginning on ErrBadVersion,
// so it doesn't need a lock on the shard.
func RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace, shard string, cells []string, lockTimeout time.Duration) (*topo.ShardInfo, error) {
	log.Infof("RebuildShard %v/%v", keyspace, shard)

	span := trace.NewSpanFromContext(ctx)
	span.StartLocal("topotools.RebuildShard")
	defer span.Finish()
	ctx = trace.NewContext(ctx, span)

	// read the existing shard info. It has to exist.
	shardInfo, err := ts.GetShard(ctx, keyspace, shard)
	if err != nil {
		return nil, err
	}

	// rebuild all cells in parallel
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range shardInfo.Cells {
		// skip this cell if we shouldn't rebuild it
		if !topo.InCellList(cell, cells) {
			continue
		}
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			rec.RecordError(rebuildCellSrvShard(ctx, log, ts, shardInfo, cell))
		}(cell)
	}
	wg.Wait()

	return shardInfo, rec.Error()
}
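The doc comment notes that a cell starts over from the beginning on ErrBadVersion, i.e. when another writer updated the serving graph concurrently. That retry lives inside rebuildCellSrvShard, which is not shown here; a hypothetical sketch of its shape, where rebuildCellOnce is made up for illustration:

// rebuildCellWithRetry is a hypothetical sketch of the per-cell retry.
func rebuildCellWithRetry(ctx context.Context, log logutil.Logger, ts topo.Server, shardInfo *topo.ShardInfo, cell string) error {
	for {
		err := rebuildCellOnce(ctx, log, ts, shardInfo, cell)
		if err == topo.ErrBadVersion {
			// Lost a concurrent-update race: start this cell over.
			continue
		}
		return err
	}
}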
Example 8: waitForFilteredReplication
func (wr *Wrangler) waitForFilteredReplication(sourcePositions map[*topo.ShardInfo]myproto.ReplicationPosition, destinationShards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range destinationShards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			// deferred so Done runs exactly once per shard, however
			// many source shards the loop below visits
			defer wg.Done()
			for _, sourceShard := range si.SourceShards {
				// we're waiting on this source shard
				blpPosition := blproto.BlpPosition{
					Uid: sourceShard.Uid,
				}

				// find the position it should be at
				for s, pos := range sourcePositions {
					if s.Keyspace() == sourceShard.Keyspace && s.ShardName() == sourceShard.Shard {
						blpPosition.Position = pos
					}
				}

				log.Infof("Waiting for %v to catch up", si.MasterAlias)
				if err := wr.ai.WaitBlpPosition(si.MasterAlias, blpPosition, wr.ActionTimeout()); err != nil {
					rec.RecordError(err)
				} else {
					log.Infof("%v caught up", si.MasterAlias)
				}
			}
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example 9: execShardAction
// execShardAction executes the action on a particular shard.
// If the action fails and no transaction is pending, it determines
// whether the keyspace/shard has moved, re-resolves the topology,
// and tries again.
func (stc *ScatterConn) execShardAction(
	context context.Context,
	keyspace string,
	shard string,
	tabletType topo.TabletType,
	session *SafeSession,
	action shardActionFunc,
	allErrors *concurrency.AllErrorRecorder,
	results chan interface{},
) {
	for {
		sdc := stc.getConnection(context, keyspace, shard, tabletType)
		transactionId, err := stc.updateSession(context, sdc, keyspace, shard, tabletType, session)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		err = action(sdc, transactionId, results)
		if err != nil {
			allErrors.RecordError(err)
			return
		}
		break
	}
}
Example 10: CleanUp
// CleanUp will run the recorded actions.
// If an action on a target fails, it will not run the next action on
// the same target.
// We return the aggregate errors for all cleanups.
// CleanUp uses its own context, with a timeout of 5 minutes, so that cleanup actions will run even if the original context has timed out.
// TODO(alainjobart) Actions should run concurrently on a per target
// basis. They are then serialized on each target.
func (cleaner *Cleaner) CleanUp(wr *Wrangler) error {
	// we use a background context so we're not dependent on the original context timeout
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	actionMap := make(map[string]*cleanUpHelper)
	rec := concurrency.AllErrorRecorder{}
	cleaner.mu.Lock()
	for i := len(cleaner.actions) - 1; i >= 0; i-- {
		actionReference := cleaner.actions[i]
		helper, ok := actionMap[actionReference.target]
		if !ok {
			helper = &cleanUpHelper{
				err: nil,
			}
			actionMap[actionReference.target] = helper
		}
		if helper.err != nil {
			wr.Logger().Warningf("previous action failed on target %v, not running %v", actionReference.target, actionReference.name)
			continue
		}
		err := actionReference.action.CleanUp(ctx, wr)
		if err != nil {
			helper.err = err
			rec.RecordError(err)
			wr.Logger().Errorf("action %v failed on %v: %v", actionReference.name, actionReference.target, err)
		} else {
			wr.Logger().Infof("action %v successful on %v", actionReference.name, actionReference.target)
		}
	}
	cleaner.mu.Unlock()
	cancel()
	return rec.Error()
}
Example 11: DiffSchemaToArray
// DiffSchemaToArray diffs two schemas and returns the schema diffs, if any.
func DiffSchemaToArray(leftName string, left *SchemaDefinition, rightName string, right *SchemaDefinition) (result []string) {
	er := concurrency.AllErrorRecorder{}
	DiffSchema(leftName, left, rightName, right, &er)
	if er.HasErrors() {
		return er.ErrorStrings()
	}
	return nil
}
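A typical use is comparing the schemas of two tablets and reporting every mismatch. A short sketch, assuming the two SchemaDefinition values were fetched elsewhere (for example via GetSchema RPCs); the variable names are illustrative:

// masterSchema and replicaSchema are *SchemaDefinition values obtained
// elsewhere; the names are made up for this sketch.
diffs := DiffSchemaToArray("master", masterSchema, "replica", replicaSchema)
if len(diffs) == 0 {
	log.Infof("schemas match")
}
for _, d := range diffs {
	log.Warningf("schema mismatch: %v", d)
}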
Example 12: DiffPermissionsToArray
// DiffPermissionsToArray diffs two sets of permissions and returns the differences, if any.
func DiffPermissionsToArray(leftName string, left *tabletmanagerdatapb.Permissions, rightName string, right *tabletmanagerdatapb.Permissions) (result []string) {
	er := concurrency.AllErrorRecorder{}
	DiffPermissions(leftName, left, rightName, right, &er)
	if er.HasErrors() {
		return er.ErrorStrings()
	}
	return nil
}
Example 13: DiffPermissionsToArray
func DiffPermissionsToArray(leftName string, left *Permissions, rightName string, right *Permissions) (result []string) {
	er := concurrency.AllErrorRecorder{}
	DiffPermissions(leftName, left, rightName, right, &er)
	if er.HasErrors() {
		return er.Errors
	}
	return nil
}
Example 14: ShardMultiRestore
func (wr *Wrangler) ShardMultiRestore(keyspace, shard string, sources []topo.TabletAlias, tables []string, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount int, strategy string) error {
	// check parameters
	if len(tables) > 0 && len(sources) > 1 {
		return fmt.Errorf("ShardMultiRestore can only handle one source when tables are specified")
	}

	// lock the shard to perform the changes we need done
	actionNode := actionnode.ShardMultiRestore(&actionnode.MultiRestoreArgs{
		SrcTabletAliases:       sources,
		Concurrency:            concurrency,
		FetchConcurrency:       fetchConcurrency,
		InsertTableConcurrency: insertTableConcurrency,
		FetchRetryCount:        fetchRetryCount,
		Strategy:               strategy})
	lockPath, err := wr.lockShard(keyspace, shard, actionNode)
	if err != nil {
		return err
	}

	mrErr := wr.SetSourceShards(keyspace, shard, sources, tables)
	err = wr.unlockShard(keyspace, shard, actionNode, lockPath, mrErr)
	if err != nil {
		if mrErr != nil {
			log.Errorf("unlockShard got error back: %v", err)
			return mrErr
		}
		return err
	}
	if mrErr != nil {
		return mrErr
	}

	// find all tablets in the shard
	destTablets, err := topo.FindAllTabletAliasesInShard(wr.ts, keyspace, shard)
	if err != nil {
		return err
	}

	// now launch MultiRestore on all tablets we need to do
	// (cc aliases the vt/concurrency package, since the concurrency
	// parameter above shadows the usual import name)
	rec := cc.AllErrorRecorder{}
	wg := sync.WaitGroup{}
	for _, tabletAlias := range destTablets {
		wg.Add(1)
		go func(tabletAlias topo.TabletAlias) {
			log.Infof("Starting multirestore on tablet %v", tabletAlias)
			err := wr.MultiRestore(tabletAlias, sources, concurrency, fetchConcurrency, insertTableConcurrency, fetchRetryCount, strategy)
			log.Infof("Multirestore on tablet %v is done (err=%v)", tabletAlias, err)
			rec.RecordError(err)
			wg.Done()
		}(tabletAlias)
	}
	wg.Wait()

	return rec.Error()
}
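The unlock sequence in the middle is its own small pattern: always unlock, pass the work error into unlockShard so it is recorded, and let the original error take precedence over the unlock error. A generic sketch of that shape, where doWork and unlock are hypothetical stand-ins:

// lockedWork sketches the lock/unlock error precedence used above.
func lockedWork(doWork func() error, unlock func(error) error) error {
	workErr := doWork()
	if err := unlock(workErr); err != nil {
		if workErr != nil {
			// Both failed: report the unlock error, return the original.
			log.Errorf("unlock got error back: %v", err)
			return workErr
		}
		return err
	}
	return workErr
}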
Example 15: makeMastersReadOnly
func (wr *Wrangler) makeMastersReadOnly(shards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		if si.MasterAlias.IsZero() {
			rec.RecordError(fmt.Errorf("Shard %v/%v has no master?", si.Keyspace(), si.ShardName()))
			continue
		}
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			wr.Logger().Infof("Making master %v read-only", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}
			if err = wr.tmc.SetReadOnly(ti, wr.ActionTimeout()); err != nil {
				rec.RecordError(err)
				return
			}
			wr.Logger().Infof("Master %v is now read-only", si.MasterAlias)
		}(si)
	}
	wg.Wait()
	return rec.Error()
}