This article collects typical usage examples of the Golang method github.com/henryanand/vitess/go/vt/concurrency.AllErrorRecorder.HasErrors. If you are struggling with questions like: what exactly does AllErrorRecorder.HasErrors do? How is it used? What does real-world code that calls it look like? — the curated examples below should help. You can also explore the enclosing type, github.com/henryanand/vitess/go/vt/concurrency.AllErrorRecorder, for more context.
The sections below show 15 code examples of AllErrorRecorder.HasErrors, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Golang samples.
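All 15 examples share the same skeleton: fan the work out to goroutines, let each goroutine deposit any failure into a shared AllErrorRecorder, wait on a sync.WaitGroup, and finally branch on HasErrors. Here is a minimal, self-contained sketch of that skeleton; doWork and its failure rule are hypothetical placeholders, not part of the vitess API.

package main

import (
    "fmt"
    "sync"

    "github.com/henryanand/vitess/go/vt/concurrency"
)

// doWork is a hypothetical task that fails on odd inputs.
func doWork(n int) error {
    if n%2 == 1 {
        return fmt.Errorf("item %d failed", n)
    }
    return nil
}

func main() {
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            // RecordError may be called from many goroutines at once,
            // and a nil error is ignored, so the result of doWork can
            // be recorded unconditionally.
            rec.RecordError(doWork(i))
        }(i)
    }
    wg.Wait()
    // HasErrors reports whether any goroutine recorded a non-nil error;
    // Error() aggregates everything recorded into a single error.
    if rec.HasErrors() {
        fmt.Println(rec.Error())
    }
}

The nil-is-ignored behavior is what Examples 12 and 14 below rely on when they pass the result of GetSchema straight to rec.RecordError without a nil check.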
Example 1: CopyKeyspaces
// CopyKeyspaces will create the keyspaces in the destination topo
func CopyKeyspaces(fromTS, toTS topo.Server) {
    keyspaces, err := fromTS.GetKeyspaces()
    if err != nil {
        log.Fatalf("GetKeyspaces: %v", err)
    }
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            k, err := fromTS.GetKeyspace(keyspace)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetKeyspace(%v): %v", keyspace, err))
                return
            }
            if err := toTS.CreateKeyspace(keyspace, k.Keyspace); err != nil {
                if err == topo.ErrNodeExists {
                    log.Warningf("keyspace %v already exists", keyspace)
                } else {
                    rec.RecordError(fmt.Errorf("CreateKeyspace(%v): %v", keyspace, err))
                }
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyKeyspaces failed: %v", rec.Error())
    }
}
Example 2: DiffSchemaToArray
func DiffSchemaToArray(leftName string, left *SchemaDefinition, rightName string, right *SchemaDefinition) (result []string) {
    er := concurrency.AllErrorRecorder{}
    DiffSchema(leftName, left, rightName, right, &er)
    if er.HasErrors() {
        return er.ErrorStrings()
    } else {
        return nil
    }
}
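Unlike most of the examples here, which surface rec.Error() as a single aggregated value, this helper returns er.ErrorStrings(), keeping one schema difference per slice element; that makes it easy for callers to print or assert on individual diffs. Example 3 applies the identical pattern to permissions.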
Example 3: DiffPermissionsToArray
func DiffPermissionsToArray(leftName string, left *Permissions, rightName string, right *Permissions) (result []string) {
    er := concurrency.AllErrorRecorder{}
    DiffPermissions(leftName, left, rightName, right, &er)
    if er.HasErrors() {
        return er.ErrorStrings()
    } else {
        return nil
    }
}
Example 4: ValidateVersionKeyspace
func (wr *Wrangler) ValidateVersionKeyspace(keyspace string) error {
    // find all the shards
    shards, err := wr.ts.GetShardNames(keyspace)
    if err != nil {
        return err
    }
    // corner cases
    if len(shards) == 0 {
        return fmt.Errorf("No shards in keyspace %v", keyspace)
    }
    sort.Strings(shards)
    if len(shards) == 1 {
        return wr.ValidateVersionShard(keyspace, shards[0])
    }
    // find the reference version using the first shard's master
    si, err := wr.ts.GetShard(keyspace, shards[0])
    if err != nil {
        return err
    }
    if si.MasterAlias.Uid == topo.NO_TABLET {
        return fmt.Errorf("No master in shard %v/%v", keyspace, shards[0])
    }
    referenceAlias := si.MasterAlias
    log.Infof("Gathering version for reference master %v", referenceAlias)
    referenceVersion, err := wr.GetVersion(referenceAlias)
    if err != nil {
        return err
    }
    // then diff with all tablets but master 0
    er := concurrency.AllErrorRecorder{}
    wg := sync.WaitGroup{}
    for _, shard := range shards {
        aliases, err := topo.FindAllTabletAliasesInShard(context.TODO(), wr.ts, keyspace, shard)
        if err != nil {
            er.RecordError(err)
            continue
        }
        for _, alias := range aliases {
            if alias == si.MasterAlias {
                continue
            }
            wg.Add(1)
            go wr.diffVersion(referenceVersion, referenceAlias, alias, &wg, &er)
        }
    }
    wg.Wait()
    if er.HasErrors() {
        return fmt.Errorf("Version diffs:\n%v", er.Error().Error())
    }
    return nil
}
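Note how the per-tablet work is delegated with go wr.diffVersion(referenceVersion, referenceAlias, alias, &wg, &er): the helper receives the WaitGroup and the recorder by pointer and reports through them instead of returning an error. A minimal sketch of that convention, with a hypothetical checkVersion standing in for the unexported diffVersion:

// checkVersion is a hypothetical stand-in for wr.diffVersion. A helper
// launched with `go` signals completion via the WaitGroup and reports
// failures through the shared recorder rather than a return value.
func checkVersion(reference, actual string, alias topo.TabletAlias, wg *sync.WaitGroup, er *concurrency.AllErrorRecorder) {
    defer wg.Done()
    if actual != reference {
        er.RecordError(fmt.Errorf("%v is at version %v, expected %v", alias, actual, reference))
    }
}

Example 9 below uses the same helper for a single shard.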
Example 5: CopyShardReplications
// CopyShardReplications will create the ShardReplication objects in
// the destination topo
func CopyShardReplications(fromTS, toTS topo.Server) {
    keyspaces, err := fromTS.GetKeyspaces()
    if err != nil {
        log.Fatalf("fromTS.GetKeyspaces: %v", err)
    }
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            shards, err := fromTS.GetShardNames(keyspace)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetShardNames(%v): %v", keyspace, err))
                return
            }
            for _, shard := range shards {
                wg.Add(1)
                go func(keyspace, shard string) {
                    defer wg.Done()
                    // read the source shard to get the cells
                    si, err := fromTS.GetShard(keyspace, shard)
                    if err != nil {
                        rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err))
                        return
                    }
                    for _, cell := range si.Cells {
                        sri, err := fromTS.GetShardReplication(cell, keyspace, shard)
                        if err != nil {
                            rec.RecordError(fmt.Errorf("GetShardReplication(%v, %v, %v): %v", cell, keyspace, shard, err))
                            continue
                        }
                        if err := toTS.UpdateShardReplicationFields(cell, keyspace, shard, func(oldSR *topo.ShardReplication) error {
                            *oldSR = *sri.ShardReplication
                            return nil
                        }); err != nil {
                            rec.RecordError(fmt.Errorf("UpdateShardReplicationFields(%v, %v, %v): %v", cell, keyspace, shard, err))
                        }
                    }
                }(keyspace, shard)
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyShardReplications failed: %v", rec.Error())
    }
}
Example 6: CopyTablets
// CopyTablets will create the tablets in the destination topo
func CopyTablets(fromTS, toTS topo.Server) {
    cells, err := fromTS.GetKnownCells()
    if err != nil {
        log.Fatalf("fromTS.GetKnownCells: %v", err)
    }
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, cell := range cells {
        wg.Add(1)
        go func(cell string) {
            defer wg.Done()
            tabletAliases, err := fromTS.GetTabletsByCell(cell)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetTabletsByCell(%v): %v", cell, err))
            } else {
                for _, tabletAlias := range tabletAliases {
                    wg.Add(1)
                    go func(tabletAlias topo.TabletAlias) {
                        defer wg.Done()
                        // read the source tablet
                        ti, err := fromTS.GetTablet(tabletAlias)
                        if err != nil {
                            rec.RecordError(fmt.Errorf("GetTablet(%v): %v", tabletAlias, err))
                            return
                        }
                        // try to create the destination
                        err = toTS.CreateTablet(ti.Tablet)
                        if err == topo.ErrNodeExists {
                            // update the destination tablet
                            log.Warningf("tablet %v already exists, updating it", tabletAlias)
                            err = toTS.UpdateTabletFields(ti.Alias, func(t *topo.Tablet) error {
                                *t = *ti.Tablet
                                return nil
                            })
                        }
                        if err != nil {
                            rec.RecordError(fmt.Errorf("CreateTablet(%v): %v", tabletAlias, err))
                            return
                        }
                    }(tabletAlias)
                }
            }
        }(cell)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyTablets failed: %v", rec.Error())
    }
}
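The create-then-update fallback above is what makes the copy idempotent across reruns: a topo.ErrNodeExists from CreateTablet is downgraded to a warning rather than recorded, and the existing tablet is overwritten via UpdateTabletFields; only a failure of that fallback reaches the recorder and, through HasErrors, aborts the copy.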
Example 7: shardsWithSources
// shardsWithSources returns all the shards that have SourceShards set
// with no Tables list.
func shardsWithSources(wr *wrangler.Wrangler) ([]map[string]string, error) {
    keyspaces, err := wr.TopoServer().GetKeyspaces()
    if err != nil {
        return nil, err
    }
    wg := sync.WaitGroup{}
    mu := sync.Mutex{} // protects result
    result := make([]map[string]string, 0, len(keyspaces))
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            shards, err := wr.TopoServer().GetShardNames(keyspace)
            if err != nil {
                rec.RecordError(err)
                return
            }
            for _, shard := range shards {
                wg.Add(1)
                go func(keyspace, shard string) {
                    defer wg.Done()
                    si, err := wr.TopoServer().GetShard(keyspace, shard)
                    if err != nil {
                        rec.RecordError(err)
                        return
                    }
                    if len(si.SourceShards) > 0 && len(si.SourceShards[0].Tables) == 0 {
                        mu.Lock()
                        result = append(result, map[string]string{
                            "Keyspace": keyspace,
                            "Shard":    shard,
                        })
                        mu.Unlock()
                    }
                }(keyspace, shard)
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        return nil, rec.Error()
    }
    if len(result) == 0 {
        return nil, fmt.Errorf("There are no shards with SourceShards")
    }
    return result, nil
}
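Two synchronization tools are deliberately combined here: AllErrorRecorder is internally synchronized, so the goroutines call rec.RecordError without extra locking, while the plain result slice gets its own mutex (the mu commented as protecting result). Examples 10 and 11 use the same split.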
Example 8: InitializeConnections
// InitializeConnections pre-initializes all ShardConns, which creates the
// underlying connections. It also populates the topology cache by accessing it.
// It is not necessary to call this function before serving queries,
// but it reduces connection overhead when serving.
func (stc *ScatterConn) InitializeConnections(ctx context.Context) error {
    ksNames, err := stc.toposerv.GetSrvKeyspaceNames(ctx, stc.cell)
    if err != nil {
        return err
    }
    var wg sync.WaitGroup
    var errRecorder concurrency.AllErrorRecorder
    for _, ksName := range ksNames {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            // get SrvKeyspace for cell/keyspace
            ks, err := stc.toposerv.GetSrvKeyspace(ctx, stc.cell, keyspace)
            if err != nil {
                errRecorder.RecordError(err)
                return
            }
            // work on all shards of all serving tablet types
            for _, tabletType := range ks.TabletTypes {
                ksPartition, ok := ks.Partitions[tabletType]
                if !ok {
                    errRecorder.RecordError(fmt.Errorf("%v.%v is not in SrvKeyspace.Partitions", keyspace, string(tabletType)))
                    continue
                }
                for _, shard := range ksPartition.Shards {
                    wg.Add(1)
                    go func(shardName string, tabletType topo.TabletType) {
                        defer wg.Done()
                        shardConn := stc.getConnection(ctx, keyspace, shardName, tabletType)
                        err := shardConn.Dial(ctx)
                        if err != nil {
                            errRecorder.RecordError(err)
                            return
                        }
                    }(shard.ShardName(), tabletType)
                }
            }
        }(ksName)
    }
    wg.Wait()
    if errRecorder.HasErrors() {
        return errRecorder.Error()
    }
    return nil
}
Example 9: ValidateVersionShard
func (wr *Wrangler) ValidateVersionShard(keyspace, shard string) error {
    si, err := wr.ts.GetShard(keyspace, shard)
    if err != nil {
        return err
    }
    // get version from the master, or error
    if si.MasterAlias.Uid == topo.NO_TABLET {
        return fmt.Errorf("No master in shard %v/%v", keyspace, shard)
    }
    log.Infof("Gathering version for master %v", si.MasterAlias)
    masterVersion, err := wr.GetVersion(si.MasterAlias)
    if err != nil {
        return err
    }
    // read all the aliases in the shard, that is all tablets that are
    // replicating from the master
    aliases, err := topo.FindAllTabletAliasesInShard(context.TODO(), wr.ts, keyspace, shard)
    if err != nil {
        return err
    }
    // then diff with all slaves
    er := concurrency.AllErrorRecorder{}
    wg := sync.WaitGroup{}
    for _, alias := range aliases {
        if alias == si.MasterAlias {
            continue
        }
        wg.Add(1)
        go wr.diffVersion(masterVersion, si.MasterAlias, alias, &wg, &er)
    }
    wg.Wait()
    if er.HasErrors() {
        return fmt.Errorf("Version diffs:\n%v", er.Error().Error())
    }
    return nil
}
Example 10: keyspacesWithOverlappingShards
func keyspacesWithOverlappingShards(wr *wrangler.Wrangler) ([]map[string]string, error) {
    keyspaces, err := wr.TopoServer().GetKeyspaces()
    if err != nil {
        return nil, err
    }
    wg := sync.WaitGroup{}
    mu := sync.Mutex{} // protects result
    result := make([]map[string]string, 0, len(keyspaces))
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            osList, err := topotools.FindOverlappingShards(wr.TopoServer(), keyspace)
            if err != nil {
                rec.RecordError(err)
                return
            }
            mu.Lock()
            for _, os := range osList {
                result = append(result, map[string]string{
                    "Keyspace": os.Left[0].Keyspace(),
                    "Shard":    os.Left[0].ShardName(),
                })
            }
            mu.Unlock()
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        return nil, rec.Error()
    }
    if len(result) == 0 {
        return nil, fmt.Errorf("There are no keyspaces with overlapping shards")
    }
    return result, nil
}
Example 11: keyspacesWithServedFrom
// keyspacesWithServedFrom returns all the keyspaces that have at least
// one ServedFrom entry set.
func keyspacesWithServedFrom(wr *wrangler.Wrangler) ([]string, error) {
    keyspaces, err := wr.TopoServer().GetKeyspaces()
    if err != nil {
        return nil, err
    }
    wg := sync.WaitGroup{}
    mu := sync.Mutex{} // protects result
    result := make([]string, 0, len(keyspaces))
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            ki, err := wr.TopoServer().GetKeyspace(keyspace)
            if err != nil {
                rec.RecordError(err)
                return
            }
            if len(ki.ServedFromMap) > 0 {
                mu.Lock()
                result = append(result, keyspace)
                mu.Unlock()
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        return nil, rec.Error()
    }
    if len(result) == 0 {
        return nil, fmt.Errorf("There are no keyspaces with ServedFrom")
    }
    return result, nil
}
Example 12: diff
func (vsdw *VerticalSplitDiffWorker) diff() error {
    vsdw.setState(stateVSDDiff)
    vsdw.wr.Logger().Infof("Gathering schema information...")
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    wg.Add(1)
    go func() {
        var err error
        vsdw.destinationSchemaDefinition, err = vsdw.wr.GetSchema(vsdw.destinationAlias, nil, nil, false)
        rec.RecordError(err)
        vsdw.wr.Logger().Infof("Got schema from destination %v", vsdw.destinationAlias)
        wg.Done()
    }()
    wg.Add(1)
    go func() {
        var err error
        vsdw.sourceSchemaDefinition, err = vsdw.wr.GetSchema(vsdw.sourceAlias, nil, nil, false)
        rec.RecordError(err)
        vsdw.wr.Logger().Infof("Got schema from source %v", vsdw.sourceAlias)
        wg.Done()
    }()
    wg.Wait()
    if rec.HasErrors() {
        return rec.Error()
    }
    // Build a list of regexp to exclude tables from source schema
    tableRegexps := make([]*regexp.Regexp, len(vsdw.shardInfo.SourceShards[0].Tables))
    for i, table := range vsdw.shardInfo.SourceShards[0].Tables {
        var err error
        tableRegexps[i], err = regexp.Compile(table)
        if err != nil {
            return fmt.Errorf("cannot compile regexp %v for table: %v", table, err)
        }
    }
    // Remove the tables we don't need from the source schema
    newSourceTableDefinitions := make([]*myproto.TableDefinition, 0, len(vsdw.destinationSchemaDefinition.TableDefinitions))
    for _, tableDefinition := range vsdw.sourceSchemaDefinition.TableDefinitions {
        found := false
        for _, tableRegexp := range tableRegexps {
            if tableRegexp.MatchString(tableDefinition.Name) {
                found = true
                break
            }
        }
        if !found {
            vsdw.wr.Logger().Infof("Removing table %v from source schema", tableDefinition.Name)
            continue
        }
        newSourceTableDefinitions = append(newSourceTableDefinitions, tableDefinition)
    }
    vsdw.sourceSchemaDefinition.TableDefinitions = newSourceTableDefinitions
    // Check the schema
    vsdw.wr.Logger().Infof("Diffing the schema...")
    rec = concurrency.AllErrorRecorder{}
    myproto.DiffSchema("destination", vsdw.destinationSchemaDefinition, "source", vsdw.sourceSchemaDefinition, &rec)
    if rec.HasErrors() {
        vsdw.wr.Logger().Warningf("Different schemas: %v", rec.Error())
    } else {
        vsdw.wr.Logger().Infof("Schema match, good.")
    }
    // run the diffs, 8 at a time
    vsdw.wr.Logger().Infof("Running the diffs...")
    sem := sync2.NewSemaphore(8, 0)
    for _, tableDefinition := range vsdw.destinationSchemaDefinition.TableDefinitions {
        wg.Add(1)
        go func(tableDefinition *myproto.TableDefinition) {
            defer wg.Done()
            sem.Acquire()
            defer sem.Release()
            vsdw.wr.Logger().Infof("Starting the diff on table %v", tableDefinition.Name)
            sourceQueryResultReader, err := TableScan(vsdw.wr.Logger(), vsdw.wr.TopoServer(), vsdw.sourceAlias, tableDefinition)
            if err != nil {
                vsdw.wr.Logger().Errorf("TableScan(source) failed: %v", err)
                return
            }
            defer sourceQueryResultReader.Close()
            destinationQueryResultReader, err := TableScan(vsdw.wr.Logger(), vsdw.wr.TopoServer(), vsdw.destinationAlias, tableDefinition)
            if err != nil {
                vsdw.wr.Logger().Errorf("TableScan(destination) failed: %v", err)
                return
            }
            defer destinationQueryResultReader.Close()
            differ, err := NewRowDiffer(sourceQueryResultReader, destinationQueryResultReader, tableDefinition)
            if err != nil {
                vsdw.wr.Logger().Errorf("NewRowDiffer() failed: %v", err)
                return
            }
            report, err := differ.Go(vsdw.wr.Logger())
            if err != nil {
                vsdw.wr.Logger().Errorf("Differ.Go failed: %v", err)
            } else {
//......... remainder of the code omitted .........
Example 13: MigrateServedTypes
// MigrateServedTypes is used during horizontal splits to migrate a
// served type from a list of shards to another.
func (wr *Wrangler) MigrateServedTypes(keyspace, shard string, cells []string, servedType topo.TabletType, reverse, skipReFreshState bool) error {
    if servedType == topo.TYPE_MASTER {
        // we cannot migrate a master back, since when master migration
        // is done, the source shards are dead
        if reverse {
            return fmt.Errorf("Cannot migrate master back to %v/%v", keyspace, shard)
        }
        // we cannot skip refresh state for a master
        if skipReFreshState {
            return fmt.Errorf("Cannot skip refresh state for master migration on %v/%v", keyspace, shard)
        }
    }
    // find overlapping shards in this keyspace
    wr.Logger().Infof("Finding the overlapping shards in keyspace %v", keyspace)
    osList, err := topotools.FindOverlappingShards(wr.ts, keyspace)
    if err != nil {
        return fmt.Errorf("FindOverlappingShards failed: %v", err)
    }
    // find our shard in there
    os := topotools.OverlappingShardsForShard(osList, shard)
    if os == nil {
        return fmt.Errorf("Shard %v is not involved in any overlapping shards", shard)
    }
    // find which list is which: the sources have no source
    // shards, the destinations have source shards. We check the
    // first entry in the lists, then just check they're
    // consistent
    var sourceShards []*topo.ShardInfo
    var destinationShards []*topo.ShardInfo
    if len(os.Left[0].SourceShards) == 0 {
        sourceShards = os.Left
        destinationShards = os.Right
    } else {
        sourceShards = os.Right
        destinationShards = os.Left
    }
    // Verify the sources have the type we're migrating (or not if reverse)
    for _, si := range sourceShards {
        if err := si.CheckServedTypesMigration(servedType, cells, !reverse); err != nil {
            return err
        }
    }
    // Verify the destinations do not have the type we're
    // migrating (or do if reverse)
    for _, si := range destinationShards {
        if err := si.CheckServedTypesMigration(servedType, cells, reverse); err != nil {
            return err
        }
    }
    // lock the shards: sources, then destinations
    // (note they're all ordered by shard name)
    actionNode := actionnode.MigrateServedTypes(servedType)
    sourceLockPath := make([]string, len(sourceShards))
    for i, si := range sourceShards {
        sourceLockPath[i], err = wr.lockShard(si.Keyspace(), si.ShardName(), actionNode)
        if err != nil {
            wr.Logger().Errorf("Failed to lock source shard %v/%v, may need to unlock other shards manually", si.Keyspace(), si.ShardName())
            return err
        }
    }
    destinationLockPath := make([]string, len(destinationShards))
    for i, si := range destinationShards {
        destinationLockPath[i], err = wr.lockShard(si.Keyspace(), si.ShardName(), actionNode)
        if err != nil {
            wr.Logger().Errorf("Failed to lock destination shard %v/%v, may need to unlock other shards manually", si.Keyspace(), si.ShardName())
            return err
        }
    }
    // record the action error and all unlock errors
    rec := concurrency.AllErrorRecorder{}
    // execute the migration
    rec.RecordError(wr.migrateServedTypes(keyspace, sourceShards, destinationShards, cells, servedType, reverse))
    // unlock the shards, we're done
    for i := len(destinationShards) - 1; i >= 0; i-- {
        rec.RecordError(wr.unlockShard(destinationShards[i].Keyspace(), destinationShards[i].ShardName(), actionNode, destinationLockPath[i], nil))
    }
    for i := len(sourceShards) - 1; i >= 0; i-- {
        rec.RecordError(wr.unlockShard(sourceShards[i].Keyspace(), sourceShards[i].ShardName(), actionNode, sourceLockPath[i], nil))
    }
    // rebuild the keyspace serving graph if there was no error
    if !rec.HasErrors() {
        rec.RecordError(wr.RebuildKeyspaceGraph(keyspace, nil))
    }
    // Send a refresh to the source tablets we just disabled, iff:
    // - we're not migrating a master
    // - it is not a reverse migration
    // - we don't have any errors
//......... remainder of the code omitted .........
Example 14: diff
func (sdw *SplitDiffWorker) diff() error {
    sdw.setState(stateSDDiff)
    sdw.wr.Logger().Infof("Gathering schema information...")
    sdw.sourceSchemaDefinitions = make([]*myproto.SchemaDefinition, len(sdw.sourceAliases))
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    wg.Add(1)
    go func() {
        var err error
        sdw.destinationSchemaDefinition, err = sdw.wr.GetSchema(sdw.destinationAlias, nil, nil, false)
        rec.RecordError(err)
        sdw.wr.Logger().Infof("Got schema from destination %v", sdw.destinationAlias)
        wg.Done()
    }()
    for i, sourceAlias := range sdw.sourceAliases {
        wg.Add(1)
        go func(i int, sourceAlias topo.TabletAlias) {
            var err error
            sdw.sourceSchemaDefinitions[i], err = sdw.wr.GetSchema(sourceAlias, nil, nil, false)
            rec.RecordError(err)
            sdw.wr.Logger().Infof("Got schema from source[%v] %v", i, sourceAlias)
            wg.Done()
        }(i, sourceAlias)
    }
    wg.Wait()
    if rec.HasErrors() {
        return rec.Error()
    }
    // TODO(alainjobart) Checking against each source may be
    // overkill, if all sources have the same schema?
    sdw.wr.Logger().Infof("Diffing the schema...")
    rec = concurrency.AllErrorRecorder{}
    for i, sourceSchemaDefinition := range sdw.sourceSchemaDefinitions {
        sourceName := fmt.Sprintf("source[%v]", i)
        myproto.DiffSchema("destination", sdw.destinationSchemaDefinition, sourceName, sourceSchemaDefinition, &rec)
    }
    if rec.HasErrors() {
        sdw.wr.Logger().Warningf("Different schemas: %v", rec.Error().Error())
    } else {
        sdw.wr.Logger().Infof("Schema match, good.")
    }
    // run the diffs, 8 at a time
    sdw.wr.Logger().Infof("Running the diffs...")
    sem := sync2.NewSemaphore(8, 0)
    for _, tableDefinition := range sdw.destinationSchemaDefinition.TableDefinitions {
        wg.Add(1)
        go func(tableDefinition *myproto.TableDefinition) {
            defer wg.Done()
            sem.Acquire()
            defer sem.Release()
            sdw.wr.Logger().Infof("Starting the diff on table %v", tableDefinition.Name)
            if len(sdw.sourceAliases) != 1 {
                sdw.wr.Logger().Errorf("Don't support more than one source for table yet: %v", tableDefinition.Name)
                return
            }
            overlap, err := key.KeyRangesOverlap(sdw.shardInfo.KeyRange, sdw.shardInfo.SourceShards[0].KeyRange)
            if err != nil {
                sdw.wr.Logger().Errorf("Source shard doesn't overlap with destination: %v", err)
                return
            }
            sourceQueryResultReader, err := TableScanByKeyRange(sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.sourceAliases[0], tableDefinition, overlap, sdw.keyspaceInfo.ShardingColumnType)
            if err != nil {
                sdw.wr.Logger().Errorf("TableScanByKeyRange(source) failed: %v", err)
                return
            }
            defer sourceQueryResultReader.Close()
            destinationQueryResultReader, err := TableScanByKeyRange(sdw.wr.Logger(), sdw.wr.TopoServer(), sdw.destinationAlias, tableDefinition, key.KeyRange{}, sdw.keyspaceInfo.ShardingColumnType)
            if err != nil {
                sdw.wr.Logger().Errorf("TableScanByKeyRange(destination) failed: %v", err)
                return
            }
            defer destinationQueryResultReader.Close()
            differ, err := NewRowDiffer(sourceQueryResultReader, destinationQueryResultReader, tableDefinition)
            if err != nil {
                sdw.wr.Logger().Errorf("NewRowDiffer() failed: %v", err)
                return
            }
            report, err := differ.Go(sdw.wr.Logger())
            if err != nil {
                sdw.wr.Logger().Errorf("Differ.Go failed: %v", err.Error())
            } else {
                if report.HasDifferences() {
                    sdw.wr.Logger().Warningf("Table %v has differences: %v", tableDefinition.Name, report.String())
                } else {
                    sdw.wr.Logger().Infof("Table %v checks out (%v rows processed, %v qps)", tableDefinition.Name, report.processedRows, report.processingQPS)
                }
            }
        }(tableDefinition)
    }
    wg.Wait()
    return nil
//......... remainder of the code omitted .........
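Examples 12 and 14 throttle the per-table work with sync2.NewSemaphore(8, 0) so that at most eight diffs run concurrently, matching the "run the diffs, 8 at a time" comment. A compact sketch of that throttle, assuming this vitess fork's sync2 API (the second argument is an acquire timeout) and a hypothetical diffTable worker:

// throttledDiffs runs diffTable over every table, at most 8 at a time.
func throttledDiffs(tables []string, diffTable func(string)) {
    wg := sync.WaitGroup{}
    sem := sync2.NewSemaphore(8, 0)
    for _, table := range tables {
        wg.Add(1)
        go func(table string) {
            defer wg.Done()
            sem.Acquire()       // blocks until one of the 8 slots frees up
            defer sem.Release() // hand the slot to the next queued table
            diffTable(table)
        }(table)
    }
    wg.Wait()
}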
Example 15: CopyShards
// CopyShards will create the shards in the destination topo
func CopyShards(fromTS, toTS topo.Server, deleteKeyspaceShards bool) {
    keyspaces, err := fromTS.GetKeyspaces()
    if err != nil {
        log.Fatalf("fromTS.GetKeyspaces: %v", err)
    }
    wg := sync.WaitGroup{}
    rec := concurrency.AllErrorRecorder{}
    for _, keyspace := range keyspaces {
        wg.Add(1)
        go func(keyspace string) {
            defer wg.Done()
            shards, err := fromTS.GetShardNames(keyspace)
            if err != nil {
                rec.RecordError(fmt.Errorf("GetShardNames(%v): %v", keyspace, err))
                return
            }
            if deleteKeyspaceShards {
                if err := toTS.DeleteKeyspaceShards(keyspace); err != nil {
                    rec.RecordError(fmt.Errorf("DeleteKeyspaceShards(%v): %v", keyspace, err))
                    return
                }
            }
            for _, shard := range shards {
                wg.Add(1)
                go func(keyspace, shard string) {
                    defer wg.Done()
                    if err := topo.CreateShard(toTS, keyspace, shard); err != nil {
                        if err == topo.ErrNodeExists {
                            log.Warningf("shard %v/%v already exists", keyspace, shard)
                        } else {
                            rec.RecordError(fmt.Errorf("CreateShard(%v, %v): %v", keyspace, shard, err))
                            return
                        }
                    }
                    si, err := fromTS.GetShard(keyspace, shard)
                    if err != nil {
                        rec.RecordError(fmt.Errorf("GetShard(%v, %v): %v", keyspace, shard, err))
                        return
                    }
                    toSi, err := toTS.GetShard(keyspace, shard)
                    if err != nil {
                        rec.RecordError(fmt.Errorf("toTS.GetShard(%v, %v): %v", keyspace, shard, err))
                        return
                    }
                    if _, err := toTS.UpdateShard(si, toSi.Version()); err != nil {
                        rec.RecordError(fmt.Errorf("UpdateShard(%v, %v): %v", keyspace, shard, err))
                    }
                }(keyspace, shard)
            }
        }(keyspace)
    }
    wg.Wait()
    if rec.HasErrors() {
        log.Fatalf("copyShards failed: %v", rec.Error())
    }
}