This article collects typical usage examples of the Golang method github.com/henryanand/vitess/go/vt/concurrency.AllErrorRecorder.RecordError. If you are wondering what AllErrorRecorder.RecordError does, how to call it, or what it looks like in practice, the curated code samples here may help. You can also explore further examples of the containing type, github.com/henryanand/vitess/go/vt/concurrency.AllErrorRecorder.
Fifteen code examples of the AllErrorRecorder.RecordError method are shown below, sorted by popularity by default.
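Before the examples, here is a minimal, self-contained sketch of the pattern they all share: fan work out to goroutines, have each goroutine call RecordError on a shared AllErrorRecorder, wait on a sync.WaitGroup, and then return rec.Error() (or check rec.HasErrors()). The checkAll helper and its check callback are hypothetical names used only for illustration; only RecordError, HasErrors, and Error are taken from the examples below, which all call RecordError concurrently without extra locking.

package main

import (
	"fmt"
	"sync"

	"github.com/henryanand/vitess/go/vt/concurrency"
)

// checkAll fans out one goroutine per item and collects every failure.
// The check callback stands in for any fallible per-item call.
func checkAll(items []string, check func(string) error) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, item := range items {
		wg.Add(1)
		go func(item string) {
			defer wg.Done()
			if err := check(item); err != nil {
				// recorded from multiple goroutines, as in the examples below
				rec.RecordError(err)
			}
		}(item)
	}
	wg.Wait()
	// Error() is nil when nothing was recorded, so it can be returned directly.
	return rec.Error()
}

func main() {
	err := checkAll([]string{"a", "b", "c"}, func(item string) error {
		if item == "b" {
			return fmt.Errorf("check failed for %v", item)
		}
		return nil
	})
	fmt.Println(err)
}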
Example 1: getMastersPosition
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]myproto.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]myproto.ReplicationPosition)
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			wr.Logger().Infof("Gathering master position for %v", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}
			pos, err := wr.tmc.MasterPosition(wr.ctx, ti)
			if err != nil {
				rec.RecordError(err)
				return
			}
			wr.Logger().Infof("Got master position for %v", si.MasterAlias)
			mu.Lock()
			result[si] = pos
			mu.Unlock()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
Example 2: CopyKeyspaces
// CopyKeyspaces will create the keyspaces in the destination topo
func CopyKeyspaces(fromTS, toTS topo.Server) {
	keyspaces, err := fromTS.GetKeyspaces()
	if err != nil {
		log.Fatalf("GetKeyspaces: %v", err)
	}
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			k, err := fromTS.GetKeyspace(keyspace)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err))
				return
			}
			if err := toTS.CreateKeyspace(keyspace, k.Keyspace); err != nil {
				if err == topo.ErrNodeExists {
					log.Warningf("keyspace %v already exists", keyspace)
				} else {
					rec.RecordError(fmt.Errorf("CreateKeyspace(%v): %v", keyspace, err))
				}
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		log.Fatalf("copyKeyspaces failed: %v", rec.Error())
	}
}
Example 3: CleanUp
// CleanUp will run the recorded actions.
// If an action on a target fails, it will not run the next action on
// the same target.
// We return the aggregate errors for all cleanups.
// TODO(alainjobart) Actions should run concurrently on a per target
// basis. They are then serialized on each target.
func (cleaner *Cleaner) CleanUp(wr *Wrangler) error {
	actionMap := make(map[string]*cleanUpHelper)
	rec := concurrency.AllErrorRecorder{}
	cleaner.mu.Lock()
	for i := len(cleaner.actions) - 1; i >= 0; i-- {
		actionReference := cleaner.actions[i]
		helper, ok := actionMap[actionReference.target]
		if !ok {
			helper = &cleanUpHelper{
				err: nil,
			}
			actionMap[actionReference.target] = helper
		}
		if helper.err != nil {
			wr.Logger().Warningf("previous action failed on target %v, not running %v", actionReference.target, actionReference.name)
			continue
		}
		err := actionReference.action.CleanUp(wr)
		if err != nil {
			helper.err = err
			rec.RecordError(err)
			wr.Logger().Errorf("action %v failed on %v: %v", actionReference.name, actionReference.target, err)
		} else {
			wr.Logger().Infof("action %v successful on %v", actionReference.name, actionReference.target)
		}
	}
	cleaner.mu.Unlock()
	return rec.Error()
}
Example 4: ValidateVersionKeyspace
func (wr *Wrangler) ValidateVersionKeyspace(keyspace string) error {
	// find all the shards
	shards, err := wr.ts.GetShardNames(keyspace)
	if err != nil {
		return err
	}
	// corner cases
	if len(shards) == 0 {
		return fmt.Errorf("No shards in keyspace %v", keyspace)
	}
	sort.Strings(shards)
	if len(shards) == 1 {
		return wr.ValidateVersionShard(keyspace, shards[0])
	}
	// find the reference version using the first shard's master
	si, err := wr.ts.GetShard(keyspace, shards[0])
	if err != nil {
		return err
	}
	if si.MasterAlias.Uid == topo.NO_TABLET {
		return fmt.Errorf("No master in shard %v/%v", keyspace, shards[0])
	}
	referenceAlias := si.MasterAlias
	log.Infof("Gathering version for reference master %v", referenceAlias)
	referenceVersion, err := wr.GetVersion(referenceAlias)
	if err != nil {
		return err
	}
	// then diff with all tablets but master 0
	er := concurrency.AllErrorRecorder{}
	wg := sync.WaitGroup{}
	for _, shard := range shards {
		aliases, err := topo.FindAllTabletAliasesInShard(context.TODO(), wr.ts, keyspace, shard)
		if err != nil {
			er.RecordError(err)
			continue
		}
		for _, alias := range aliases {
			if alias == si.MasterAlias {
				continue
			}
			wg.Add(1)
			go wr.diffVersion(referenceVersion, referenceAlias, alias, &wg, &er)
		}
	}
	wg.Wait()
	if er.HasErrors() {
		return fmt.Errorf("Version diffs:\n%v", er.Error().Error())
	}
	return nil
}
Example 5: shardsWithSources
// shardsWithSources returns all the shards that have SourceShards set
// with no Tables list.
func shardsWithSources(wr *wrangler.Wrangler) ([]map[string]string, error) {
	keyspaces, err := wr.TopoServer().GetKeyspaces()
	if err != nil {
		return nil, err
	}
	wg := sync.WaitGroup{}
	mu := sync.Mutex{} // protects result
	result := make([]map[string]string, 0, len(keyspaces))
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			shards, err := wr.TopoServer().GetShardNames(keyspace)
			if err != nil {
				rec.RecordError(err)
				return
			}
			for _, shard := range shards {
				wg.Add(1)
				go func(keyspace, shard string) {
					defer wg.Done()
					si, err := wr.TopoServer().GetShard(keyspace, shard)
					if err != nil {
						rec.RecordError(err)
						return
					}
					if len(si.SourceShards) > 0 && len(si.SourceShards[0].Tables) == 0 {
						mu.Lock()
						result = append(result, map[string]string{
							"Keyspace": keyspace,
							"Shard":    shard,
						})
						mu.Unlock()
					}
				}(keyspace, shard)
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		return nil, rec.Error()
	}
	if len(result) == 0 {
		return nil, fmt.Errorf("There are no shards with SourceShards")
	}
	return result, nil
}
Example 6: CopyShardReplications
// CopyShardReplications will create the ShardReplication objects in
// the destination topo
func CopyShardReplications(fromTS, toTS topo.Server) {
	keyspaces, err := fromTS.GetKeyspaces()
	if err != nil {
		log.Fatalf("fromTS.GetKeyspaces: %v", err)
	}
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			shards, err := fromTS.GetShardNames(keyspace)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetShardNames(%v): %v", keyspace, err))
				return
			}
			for _, shard := range shards {
				wg.Add(1)
				go func(keyspace, shard string) {
					defer wg.Done()
					// read the source shard to get the cells
					si, err := fromTS.GetShard(keyspace, shard)
					if err != nil {
						rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err))
						return
					}
					for _, cell := range si.Cells {
						sri, err := fromTS.GetShardReplication(cell, keyspace, shard)
						if err != nil {
							rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v): %v", cell, keyspace, shard, err))
							continue
						}
						if err := toTS.UpdateShardReplicationFields(cell, keyspace, shard, func(oldSR *topo.ShardReplication) error {
							*oldSR = *sri.ShardReplication
							return nil
						}); err != nil {
							rec.RecordError(fmt.Errorf("UpdateShardReplicationFields(%v, %v, %v): %v", cell, keyspace, shard, err))
						}
					}
				}(keyspace, shard)
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		log.Fatalf("copyShards failed: %v", rec.Error())
	}
}
Example 7: RunUntil
// RunUntil will run all the players until they reach the given position.
// Holds the map lock during that exercise, shouldn't take long at all.
func (blm *BinlogPlayerMap) RunUntil(blpPositionList *blproto.BlpPositionList, waitTimeout time.Duration) error {
	// lock and check state
	blm.mu.Lock()
	defer blm.mu.Unlock()
	if blm.state != BPM_STATE_STOPPED {
		return fmt.Errorf("RunUntil: player not stopped: %v", blm.state)
	}
	log.Infof("Starting map of binlog players until position")
	// find the exact stop position for all players, to be sure
	// we're not doing anything wrong
	posMap := make(map[uint32]myproto.ReplicationPosition)
	for _, bpc := range blm.players {
		blpPos, err := blpPositionList.FindBlpPositionById(bpc.sourceShard.Uid)
		if err != nil {
			return fmt.Errorf("No binlog position passed in for player Uid %v", bpc.sourceShard.Uid)
		}
		posMap[bpc.sourceShard.Uid] = blpPos.Position
	}
	// start all the players giving them where to stop
	for _, bpc := range blm.players {
		if err := bpc.StartUntil(posMap[bpc.sourceShard.Uid]); err != nil {
			return err
		}
	}
	// wait for all players to be stopped, or timeout
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, bpc := range blm.players {
		wg.Add(1)
		go func(bpc *BinlogPlayerController) {
			if err := bpc.WaitForStop(waitTimeout); err != nil {
				rec.RecordError(err)
			}
			wg.Done()
		}(bpc)
	}
	wg.Wait()
	return rec.Error()
}
Example 8: keyspacesWithOverlappingShards
func keyspacesWithOverlappingShards(wr *wrangler.Wrangler) ([]map[string]string, error) {
	keyspaces, err := wr.TopoServer().GetKeyspaces()
	if err != nil {
		return nil, err
	}
	wg := sync.WaitGroup{}
	mu := sync.Mutex{} // protects result
	result := make([]map[string]string, 0, len(keyspaces))
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			osList, err := topotools.FindOverlappingShards(wr.TopoServer(), keyspace)
			if err != nil {
				rec.RecordError(err)
				return
			}
			mu.Lock()
			for _, os := range osList {
				result = append(result, map[string]string{
					"Keyspace": os.Left[0].Keyspace(),
					"Shard":    os.Left[0].ShardName(),
				})
			}
			mu.Unlock()
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		return nil, rec.Error()
	}
	if len(result) == 0 {
		return nil, fmt.Errorf("There are no keyspaces with overlapping shards")
	}
	return result, nil
}
Example 9: Run
// Run aggregates health statuses from all the reporters. If any
// errors occur during the reporting, they will be logged, but only
// the first error will be returned.
// It may return an empty map if no health condition is detected. Note
// it will not return nil, but an empty map.
func (ag *Aggregator) Run(tabletType topo.TabletType, shouldQueryServiceBeRunning bool) (map[string]string, error) {
	var (
		wg  sync.WaitGroup
		rec concurrency.AllErrorRecorder
	)
	results := make(chan map[string]string, len(ag.reporters))
	ag.mu.Lock()
	for name, rep := range ag.reporters {
		wg.Add(1)
		go func(name string, rep Reporter) {
			defer wg.Done()
			status, err := rep.Report(tabletType, shouldQueryServiceBeRunning)
			if err != nil {
				rec.RecordError(fmt.Errorf("%v: %v", name, err))
				return
			}
			results <- status
		}(name, rep)
	}
	ag.mu.Unlock()
	wg.Wait()
	close(results)
	if err := rec.Error(); err != nil {
		return nil, err
	}
	// merge and return the results
	result := make(map[string]string)
	for part := range results {
		for k, v := range part {
			if _, ok := result[k]; ok {
				return nil, fmt.Errorf("duplicate key: %v", k)
			}
			result[k] = v
		}
	}
	return result, nil
}
Example 10: waitForFilteredReplication
func (wr *Wrangler) waitForFilteredReplication(sourcePositions map[*topo.ShardInfo]myproto.ReplicationPosition, destinationShards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range destinationShards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			for _, sourceShard := range si.SourceShards {
				// we're waiting on this guy
				blpPosition := blproto.BlpPosition{
					Uid: sourceShard.Uid,
				}
				// find the position it should be at
				for s, pos := range sourcePositions {
					if s.Keyspace() == sourceShard.Keyspace && s.ShardName() == sourceShard.Shard {
						blpPosition.Position = pos
					}
				}
				// and wait for it
				wr.Logger().Infof("Waiting for %v to catch up", si.MasterAlias)
				tablet, err := wr.ts.GetTablet(si.MasterAlias)
				if err != nil {
					rec.RecordError(err)
					return
				}
				if err := wr.tmc.WaitBlpPosition(context.TODO(), tablet, blpPosition, wr.ActionTimeout()); err != nil {
					rec.RecordError(err)
				} else {
					wr.Logger().Infof("%v caught up", si.MasterAlias)
				}
			}
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example 11: keyspacesWithServedFrom
// keyspacesWithServedFrom returns all the keyspaces that have ServedFrom set
// to one value.
func keyspacesWithServedFrom(wr *wrangler.Wrangler) ([]string, error) {
	keyspaces, err := wr.TopoServer().GetKeyspaces()
	if err != nil {
		return nil, err
	}
	wg := sync.WaitGroup{}
	mu := sync.Mutex{} // protects result
	result := make([]string, 0, len(keyspaces))
	rec := concurrency.AllErrorRecorder{}
	for _, keyspace := range keyspaces {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			ki, err := wr.TopoServer().GetKeyspace(keyspace)
			if err != nil {
				rec.RecordError(err)
				return
			}
			if len(ki.ServedFromMap) > 0 {
				mu.Lock()
				result = append(result, keyspace)
				mu.Unlock()
			}
		}(keyspace)
	}
	wg.Wait()
	if rec.HasErrors() {
		return nil, rec.Error()
	}
	if len(result) == 0 {
		return nil, fmt.Errorf("There are no keyspaces with ServedFrom")
	}
	return result, nil
}
Example 12: refreshMasters
// refreshMasters will just RPC-ping all the masters with RefreshState
func (wr *Wrangler) refreshMasters(shards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			wr.Logger().Infof("RefreshState master %v", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}
			if err := wr.tmc.RefreshState(wr.ctx, ti); err != nil {
				rec.RecordError(err)
			} else {
				wr.Logger().Infof("%v responded", si.MasterAlias)
			}
		}(si)
	}
	wg.Wait()
	return rec.Error()
}
Example 13: CopyTablets
// CopyTablets will create the tablets in the destination topo
func CopyTablets(fromTS, toTS topo.Server) {
	cells, err := fromTS.GetKnownCells()
	if err != nil {
		log.Fatalf("fromTS.GetKnownCells: %v", err)
	}
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, cell := range cells {
		wg.Add(1)
		go func(cell string) {
			defer wg.Done()
			tabletAliases, err := fromTS.GetTabletsByCell(cell)
			if err != nil {
				rec.RecordError(fmt.Errorf("GetTabletsByCell(%v): %v", cell, err))
			} else {
				for _, tabletAlias := range tabletAliases {
					wg.Add(1)
					go func(tabletAlias topo.TabletAlias) {
						defer wg.Done()
						// read the source tablet
						ti, err := fromTS.GetTablet(tabletAlias)
						if err != nil {
							rec.RecordError(fmt.Errorf("GetTablet(%v): %v", tabletAlias, err))
							return
						}
						// try to create the destination
						err = toTS.CreateTablet(ti.Tablet)
						if err == topo.ErrNodeExists {
							// update the destination tablet
							log.Warningf("tablet %v already exists, updating it", tabletAlias)
							err = toTS.UpdateTabletFields(ti.Alias, func(t *topo.Tablet) error {
								*t = *ti.Tablet
								return nil
							})
						}
						if err != nil {
							rec.RecordError(fmt.Errorf("CreateTablet(%v): %v", tabletAlias, err))
							return
						}
					}(tabletAlias)
				}
			}
		}(cell)
	}
	wg.Wait()
	if rec.HasErrors() {
		log.Fatalf("copyTablets failed: %v", rec.Error())
	}
}
Example 14: InitializeConnections
// InitializeConnections pre-initializes all ShardConn which create underlying connections.
// It also populates topology cache by accessing it.
// It is not necessary to call this function before serving queries,
// but it would reduce connection overhead when serving.
func (stc *ScatterConn) InitializeConnections(ctx context.Context) error {
	ksNames, err := stc.toposerv.GetSrvKeyspaceNames(ctx, stc.cell)
	if err != nil {
		return err
	}
	var wg sync.WaitGroup
	var errRecorder concurrency.AllErrorRecorder
	for _, ksName := range ksNames {
		wg.Add(1)
		go func(keyspace string) {
			defer wg.Done()
			// get SrvKeyspace for cell/keyspace
			ks, err := stc.toposerv.GetSrvKeyspace(ctx, stc.cell, keyspace)
			if err != nil {
				errRecorder.RecordError(err)
				return
			}
			// work on all shards of all serving tablet types
			for _, tabletType := range ks.TabletTypes {
				ksPartition, ok := ks.Partitions[tabletType]
				if !ok {
					errRecorder.RecordError(fmt.Errorf("%v.%v is not in SrvKeyspace.Partitions", keyspace, string(tabletType)))
					continue
				}
				for _, shard := range ksPartition.Shards {
					wg.Add(1)
					go func(shardName string, tabletType topo.TabletType) {
						defer wg.Done()
						shardConn := stc.getConnection(ctx, keyspace, shardName, tabletType)
						err = shardConn.Dial(ctx)
						if err != nil {
							errRecorder.RecordError(err)
							return
						}
					}(shard.ShardName(), tabletType)
				}
			}
		}(ksName)
	}
	wg.Wait()
	if errRecorder.HasErrors() {
		return errRecorder.Error()
	}
	return nil
}
Example 15: diff
func (sdw *SplitDiffWorker) diff() error {
	sdw.setState(stateSDDiff)
	sdw.wr.Logger().Infof("Gathering schema information...")
	sdw.sourceSchemaDefinitions = make([]*myproto.SchemaDefinition, len(sdw.sourceAliases))
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	wg.Add(1)
	go func() {
		var err error
		sdw.destinationSchemaDefinition, err = sdw.wr.GetSchema(sdw.destinationAlias, nil, nil, false)
		rec.RecordError(err)
		sdw.wr.Logger().Infof("Got schema from destination %v", sdw.destinationAlias)
		wg.Done()
	}()
	for i, sourceAlias := range sdw.sourceAliases {
		wg.Add(1)
		go func(i int, sourceAlias topo.TabletAlias) {
			var err error
			sdw.sourceSchemaDefinitions[i], err = sdw.wr.GetSchema(sourceAlias, nil, nil, false)
			rec.RecordError(err)
			sdw.wr.Logger().Infof("Got schema from source[%v] %v", i, sourceAlias)
			wg.Done()
		}(i, sourceAlias)
	}
	wg.Wait()
	if rec.HasErrors() {
		return rec.Error()
	}
	// TODO(alainjobart) Checking against each source may be
	// overkill, if all sources have the same schema?
	sdw.wr.Logger().Infof("Diffing the schema...")
	rec = concurrency.AllErrorRecorder{}
	for i, sourceSchemaDefinition := range sdw.sourceSchemaDefinitions {
		sourceName := fmt.Sprintf("source[%v]", i)
		myproto.DiffSchema("destination", sdw.destinationSchemaDefinition, sourceName, sourceSchemaDefinition, &rec)
	}
	if rec.HasErrors() {
		sdw.wr.Logger().Warningf("Different schemas: %v", rec.Error().Error())
	} else {
		sdw.wr.Logger().Infof("Schema match, good.")
	}
	// run the diffs, 8 at a time
	sdw.wr.Logger().Infof("Running the diffs...")
	sem := sync2.NewSemaphore(8, 0)
	for _, tableDefinition := range sdw.destinationSchemaDefinition.TableDefinitions {
		wg.Add(1)
		go func(tableDefinition *myproto.TableDefinition) {
			defer wg.Done()
			sem.Acquire()
			defer sem.Release()
			sdw.wr.Logger().Infof("Starting the diff on table %v", tableDefinition.Name)
			if len(sdw.sourceAliases) != 1 {
				sdw.wr.Logger().Errorf("Don't support more than one source for table yet: %v", tableDefinition.Name)
				return
			}
			overlap, err := key.KeyRangesOverlap(sdw.shardInfo.KeyRange, sdw.shardInfo.SourceShards[0].KeyRange)
			if err != nil {
				sdw.wr.Logger().Errorf("Source shard doesn't overlap with destination????: %v", err)
				return
			}
			sourceQueryResultReader, err := TableScanByKeyRange(sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.sourceAliases[0], tableDefinition, overlap, sdw.keyspaceInfo.ShardingColumnType)
			if err != nil {
				sdw.wr.Logger().Errorf("TableScanByKeyRange(source) failed: %v", err)
				return
			}
			defer sourceQueryResultReader.Close()
			destinationQueryResultReader, err := TableScanByKeyRange(sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.destinationAlias, tableDefinition, key.KeyRange{}, sdw.keyspaceInfo.ShardingColumnType)
			if err != nil {
				sdw.wr.Logger().Errorf("TableScanByKeyRange(destination) failed: %v", err)
				return
			}
			defer destinationQueryResultReader.Close()
			differ, err := NewRowDiffer(sourceQueryResultReader, destinationQueryResultReader, tableDefinition)
			if err != nil {
				sdw.wr.Logger().Errorf("NewRowDiffer() failed: %v", err)
				return
			}
			report, err := differ.Go(sdw.wr.Logger())
			if err != nil {
				sdw.wr.Logger().Errorf("Differ.Go failed: %v", err.Error())
			} else {
				if report.HasDifferences() {
					sdw.wr.Logger().Warningf("Table %v has differences: %v", tableDefinition.Name, report.String())
				} else {
					sdw.wr.Logger().Infof("Table %v checks out (%v rows processed, %v qps)", tableDefinition.Name, report.processedRows, report.processingQPS)
				}
			}
		}(tableDefinition)
	}
	wg.Wait()
	return nil
	//......... the rest of this code is omitted here .........