本文整理汇总了Golang中github.com/youtube/vitess/go/vt/topo.Server.UpdateSrvShard方法的典型用法代码示例。如果您正苦于以下问题：Golang Server.UpdateSrvShard方法的具体用法？Golang Server.UpdateSrvShard怎么用？Golang Server.UpdateSrvShard使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/youtube/vitess/go/vt/topo.Server的用法示例。
在下文中一共展示了Server.UpdateSrvShard方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: UpdateSrvShard
// UpdateSrvShard builds a cell-local SrvShard record from the global
// ShardInfo si and writes it into the serving graph of the given cell.
// The record carries the shard name, key range, and the cell of the
// current master, exactly as recorded in the global topology.
func UpdateSrvShard(ctx context.Context, ts topo.Server, cell string, si *topo.ShardInfo) error {
	shardName := si.ShardName()
	record := &topo.SrvShard{
		Name:       shardName,
		KeyRange:   si.KeyRange,
		MasterCell: si.MasterAlias.Cell,
	}
	return ts.UpdateSrvShard(ctx, cell, si.Keyspace(), shardName, record)
}
示例2: CheckServingGraph
func CheckServingGraph(t *testing.T, ts topo.Server) {
cell := getLocalCell(t, ts)
// test individual cell/keyspace/shard/type entries
if _, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != topo.ErrNoNode {
t.Errorf("GetSrvTabletTypesPerShard(invalid): %v", err)
}
if _, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != topo.ErrNoNode {
t.Errorf("GetEndPoints(invalid): %v", err)
}
endPoints := topo.EndPoints{
Entries: []topo.EndPoint{
topo.EndPoint{
Uid: 1,
Host: "host1",
NamedPortMap: map[string]int{"_vt": 1234, "_mysql": 1235, "_vts": 1236},
},
},
}
if err := ts.UpdateEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &endPoints); err != nil {
t.Errorf("UpdateEndPoints(master): %v", err)
}
if types, err := ts.GetSrvTabletTypesPerShard(cell, "test_keyspace", "-10"); err != nil || len(types) != 1 || types[0] != topo.TYPE_MASTER {
t.Errorf("GetSrvTabletTypesPerShard(1): %v %v", err, types)
}
addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER)
if err != nil {
t.Errorf("GetEndPoints: %v", err)
}
if len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
t.Errorf("GetEndPoints(1): %v", addrs)
}
if pm := addrs.Entries[0].NamedPortMap; pm["_vt"] != 1234 || pm["_mysql"] != 1235 || pm["_vts"] != 1236 {
t.Errorf("GetSrcTabletType(1).NamedPortmap: want %v, got %v", endPoints.Entries[0].NamedPortMap, pm)
}
if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_REPLICA, &topo.EndPoint{Uid: 2, Host: "host2"}); err != nil {
t.Errorf("UpdateTabletEndpoint(invalid): %v", err)
}
if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 1, Host: "host2"}); err != nil {
t.Errorf("UpdateTabletEndpoint(master): %v", err)
}
if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
t.Errorf("GetEndPoints(2): %v %v", err, addrs)
}
if err := ts.UpdateTabletEndpoint(cell, "test_keyspace", "-10", topo.TYPE_MASTER, &topo.EndPoint{Uid: 3, Host: "host3"}); err != nil {
t.Errorf("UpdateTabletEndpoint(master): %v", err)
}
if addrs, err := ts.GetEndPoints(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil || len(addrs.Entries) != 2 {
t.Errorf("GetEndPoints(2): %v %v", err, addrs)
}
if err := ts.DeleteSrvTabletType(cell, "test_keyspace", "-10", topo.TYPE_REPLICA); err != topo.ErrNoNode {
t.Errorf("DeleteSrvTabletType(unknown): %v", err)
}
if err := ts.DeleteSrvTabletType(cell, "test_keyspace", "-10", topo.TYPE_MASTER); err != nil {
t.Errorf("DeleteSrvTabletType(master): %v", err)
}
// test cell/keyspace/shard entries (SrvShard)
srvShard := topo.SrvShard{
ServedTypes: []topo.TabletType{topo.TYPE_MASTER},
TabletTypes: []topo.TabletType{topo.TYPE_REPLICA, topo.TYPE_RDONLY},
}
if err := ts.UpdateSrvShard(cell, "test_keyspace", "-10", &srvShard); err != nil {
t.Errorf("UpdateSrvShard(1): %v", err)
}
if _, err := ts.GetSrvShard(cell, "test_keyspace", "666"); err != topo.ErrNoNode {
t.Errorf("GetSrvShard(invalid): %v", err)
}
if s, err := ts.GetSrvShard(cell, "test_keyspace", "-10"); err != nil ||
len(s.ServedTypes) != 1 ||
s.ServedTypes[0] != topo.TYPE_MASTER ||
len(s.TabletTypes) != 2 ||
s.TabletTypes[0] != topo.TYPE_REPLICA ||
s.TabletTypes[1] != topo.TYPE_RDONLY {
t.Errorf("GetSrvShard(valid): %v", err)
}
// test cell/keyspace entries (SrvKeyspace)
srvKeyspace := topo.SrvKeyspace{
Partitions: map[topo.TabletType]*topo.KeyspacePartition{
topo.TYPE_MASTER: &topo.KeyspacePartition{
Shards: []topo.SrvShard{
topo.SrvShard{
ServedTypes: []topo.TabletType{topo.TYPE_MASTER},
},
},
},
},
TabletTypes: []topo.TabletType{topo.TYPE_MASTER},
}
if err := ts.UpdateSrvKeyspace(cell, "test_keyspace", &srvKeyspace); err != nil {
t.Errorf("UpdateSrvKeyspace(1): %v", err)
}
if _, err := ts.GetSrvKeyspace(cell, "test_keyspace666"); err != topo.ErrNoNode {
t.Errorf("GetSrvKeyspace(invalid): %v", err)
//.........这里部分代码省略.........
示例3: rebuildCellSrvShard
//.........这里部分代码省略.........
// Update db type addresses in the serving graph
//
// locationAddrsMap is a map:
// key: tabletType
// value: EndPoints (list of server records)
locationAddrsMap := make(map[topo.TabletType]*topo.EndPoints)
for _, tablet := range tablets {
if !tablet.IsInReplicationGraph() {
// only valid case is a scrapped master in the
// catastrophic reparent case
if tablet.Parent.Uid != topo.NO_TABLET {
log.Warningf("Tablet %v should not be in the replication graph, please investigate (it is being ignored in the rebuild)", tablet.Alias)
}
continue
}
// Check IsInServingGraph, we don't want to add tablets that
// are not serving
if !tablet.IsInServingGraph() {
continue
}
// Check the Keyspace and Shard for the tablet are right
if tablet.Keyspace != shardInfo.Keyspace() || tablet.Shard != shardInfo.ShardName() {
return fmt.Errorf("CRITICAL: tablet %v is in replication graph for shard %v/%v but belongs to shard %v:%v", tablet.Alias, shardInfo.Keyspace(), shardInfo.ShardName(), tablet.Keyspace, tablet.Shard)
}
// Add the tablet to the list
addrs, ok := locationAddrsMap[tablet.Type]
if !ok {
addrs = topo.NewEndPoints()
locationAddrsMap[tablet.Type] = addrs
}
entry, err := tablet.Tablet.EndPoint()
if err != nil {
log.Warningf("EndPointForTablet failed for tablet %v: %v", tablet.Alias, err)
continue
}
addrs.Entries = append(addrs.Entries, *entry)
}
// we're gonna parallelize a lot here:
// - writing all the tabletTypes records
// - removing the unused records
// - writing SrvShard
rec := concurrency.AllErrorRecorder{}
wg := sync.WaitGroup{}
// write all the EndPoints nodes everywhere we want them
for tabletType, addrs := range locationAddrsMap {
wg.Add(1)
go func(tabletType topo.TabletType, addrs *topo.EndPoints) {
log.Infof("saving serving graph for cell %v shard %v/%v tabletType %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType)
if err := ts.UpdateEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, addrs); err != nil {
rec.RecordError(fmt.Errorf("writing endpoints for cell %v shard %v/%v tabletType %v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType, err))
}
wg.Done()
}(tabletType, addrs)
}
// Delete any pre-existing paths that were not updated by this process.
// That's the existingTabletTypes - locationAddrsMap
for _, tabletType := range existingTabletTypes {
if _, ok := locationAddrsMap[tabletType]; !ok {
wg.Add(1)
go func(tabletType topo.TabletType) {
log.Infof("removing stale db type from serving graph: %v", tabletType)
if err := ts.DeleteEndPoints(cell, shardInfo.Keyspace(), shardInfo.ShardName(), tabletType); err != nil {
log.Warningf("unable to remove stale db type %v from serving graph: %v", tabletType, err)
}
wg.Done()
}(tabletType)
}
}
// Update srvShard object
wg.Add(1)
go func() {
log.Infof("updating shard serving graph in cell %v for %v/%v", cell, shardInfo.Keyspace(), shardInfo.ShardName())
srvShard := &topo.SrvShard{
Name: shardInfo.ShardName(),
KeyRange: shardInfo.KeyRange,
ServedTypes: shardInfo.ServedTypes,
MasterCell: shardInfo.MasterAlias.Cell,
TabletTypes: make([]topo.TabletType, 0, len(locationAddrsMap)),
}
for tabletType := range locationAddrsMap {
srvShard.TabletTypes = append(srvShard.TabletTypes, tabletType)
}
if err := ts.UpdateSrvShard(cell, shardInfo.Keyspace(), shardInfo.ShardName(), srvShard); err != nil {
rec.RecordError(fmt.Errorf("writing serving data in cell %v for %v/%v failed: %v", cell, shardInfo.Keyspace(), shardInfo.ShardName(), err))
}
wg.Done()
}()
wg.Wait()
return rec.Error()
}
示例4: CheckServingGraph
//.........这里部分代码省略.........
if len(addrs.Entries) != 1 || addrs.Entries[0].Uid != 1 {
t.Errorf("GetEndPoints(1): %v", addrs)
}
if pm := addrs.Entries[0].PortMap; pm["vt"] != 1234 || pm["mysql"] != 1235 || pm["grpc"] != 1236 {
t.Errorf("GetSrcTabletType(1).PortMap: want %v, got %v", endPoints.Entries[0].PortMap, pm)
}
// Update with the wrong version.
if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, version+1); err != topo.ErrBadVersion {
t.Fatalf("UpdateEndPoints(master): err = %v, want topo.ErrBadVersion", err)
}
// Update with the right version.
if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, version); err != nil {
t.Fatalf("UpdateEndPoints(master): %v", err)
}
// Update existing EndPoints unconditionally.
if err := topo.UpdateEndPoints(ctx, ts, cell, "test_keyspace", "-10", topo.TYPE_MASTER, endPoints, -1); err != nil {
t.Fatalf("UpdateEndPoints(master): %v", err)
}
if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_REPLICA, -1); err != topo.ErrNoNode {
t.Errorf("DeleteEndPoints(unknown): %v", err)
}
if err := ts.DeleteEndPoints(ctx, cell, "test_keyspace", "-10", topo.TYPE_MASTER, -1); err != nil {
t.Errorf("DeleteEndPoints(master): %v", err)
}
// test cell/keyspace/shard entries (SrvShard)
srvShard := &pb.SrvShard{
Name: "-10",
KeyRange: newKeyRange3("-10"),
MasterCell: "test",
}
if err := ts.UpdateSrvShard(ctx, cell, "test_keyspace", "-10", srvShard); err != nil {
t.Fatalf("UpdateSrvShard(1): %v", err)
}
if _, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "666"); err != topo.ErrNoNode {
t.Errorf("GetSrvShard(invalid): %v", err)
}
if s, err := ts.GetSrvShard(ctx, cell, "test_keyspace", "-10"); err != nil ||
s.Name != "-10" ||
!key.KeyRangeEqual(s.KeyRange, newKeyRange3("-10")) ||
s.MasterCell != "test" {
t.Errorf("GetSrvShard(valid): %v", err)
}
// test cell/keyspace entries (SrvKeyspace)
srvKeyspace := topo.SrvKeyspace{
Partitions: map[topo.TabletType]*topo.KeyspacePartition{
topo.TYPE_MASTER: &topo.KeyspacePartition{
ShardReferences: []topo.ShardReference{
topo.ShardReference{
Name: "-80",
KeyRange: newKeyRange("-80"),
},
},
},
},
ShardingColumnName: "video_id",
ShardingColumnType: key.KIT_UINT64,
ServedFrom: map[topo.TabletType]string{
topo.TYPE_REPLICA: "other_keyspace",
},
}
if err := ts.UpdateSrvKeyspace(ctx, cell, "test_keyspace", &srvKeyspace); err != nil {
t.Errorf("UpdateSrvKeyspace(1): %v", err)
示例5: rebuildShardSrvGraph
//.........这里部分代码省略.........
locationAddrsMap[location] = addrs
}
entry, err := tablet.Tablet.EndPoint()
if err != nil {
log.Warningf("EndPointForTablet failed for tablet %v: %v", tablet.Alias, err)
continue
}
addrs.Entries = append(addrs.Entries, *entry)
}
// we're gonna parallelize a lot here
rec := concurrency.AllErrorRecorder{}
wg := sync.WaitGroup{}
// write all the {cell,keyspace,shard,type}
// nodes everywhere we want them
for location, addrs := range locationAddrsMap {
wg.Add(1)
go func(location cellKeyspaceShardType, addrs *topo.EndPoints) {
log.Infof("saving serving graph for cell %v shard %v/%v tabletType %v", location.cell, location.keyspace, location.shard, location.tabletType)
if err := ts.UpdateEndPoints(location.cell, location.keyspace, location.shard, location.tabletType, addrs); err != nil {
rec.RecordError(fmt.Errorf("writing endpoints for cell %v shard %v/%v tabletType %v failed: %v", location.cell, location.keyspace, location.shard, location.tabletType, err))
}
wg.Done()
}(location, addrs)
}
// Delete any pre-existing paths that were not updated by this process.
// That's the existingDbTypeLocations - locationAddrsMap
for dbTypeLocation := range existingDbTypeLocations {
if _, ok := locationAddrsMap[dbTypeLocation]; !ok {
cell := dbTypeLocation.cell
if !topo.InCellList(cell, cells) {
continue
}
wg.Add(1)
go func(dbTypeLocation cellKeyspaceShardType) {
log.Infof("removing stale db type from serving graph: %v", dbTypeLocation)
if err := ts.DeleteEndPoints(dbTypeLocation.cell, dbTypeLocation.keyspace, dbTypeLocation.shard, dbTypeLocation.tabletType); err != nil {
log.Warningf("unable to remove stale db type %v from serving graph: %v", dbTypeLocation, err)
}
wg.Done()
}(dbTypeLocation)
}
}
// wait until we're done with the background stuff to do the rest
// FIXME(alainjobart) this wouldn't be necessary if UpdateSrvShard
// below was creating the zookeeper nodes recursively.
wg.Wait()
if err := rec.Error(); err != nil {
return err
}
// Update per-shard information per cell-specific serving path.
//
// srvShardByPath is a map:
// key: {cell,keyspace,shard}
// value: SrvShard
// this will create all the SrvShard objects
srvShardByPath := make(map[cellKeyspaceShard]*topo.SrvShard)
for location := range locationAddrsMap {
// location will be {cell,keyspace,shard,type}
srvShardPath := cellKeyspaceShard{location.cell, location.keyspace, location.shard}
srvShard, ok := srvShardByPath[srvShardPath]
if !ok {
srvShard = &topo.SrvShard{
KeyRange: shardInfo.KeyRange,
ServedTypes: shardInfo.ServedTypes,
TabletTypes: make([]topo.TabletType, 0, 2),
}
srvShardByPath[srvShardPath] = srvShard
}
foundType := false
for _, t := range srvShard.TabletTypes {
if t == location.tabletType {
foundType = true
}
}
if !foundType {
srvShard.TabletTypes = append(srvShard.TabletTypes, location.tabletType)
}
}
// Save the shard entries
for cks, srvShard := range srvShardByPath {
wg.Add(1)
go func(cks cellKeyspaceShard, srvShard *topo.SrvShard) {
log.Infof("updating shard serving graph in cell %v for %v/%v", cks.cell, cks.keyspace, cks.shard)
if err := ts.UpdateSrvShard(cks.cell, cks.keyspace, cks.shard, srvShard); err != nil {
rec.RecordError(fmt.Errorf("writing serving data in cell %v for %v/%v failed: %v", cks.cell, cks.keyspace, cks.shard, err))
}
wg.Done()
}(cks, srvShard)
}
wg.Wait()
return rec.Error()
}