本文整理汇总了Golang中github.com/cockroachdb/cockroach/gossip.MakeStoreKey函数的典型用法代码示例。如果您正苦于以下问题:Golang MakeStoreKey函数的具体用法?Golang MakeStoreKey怎么用?Golang MakeStoreKey使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MakeStoreKey函数的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: gossipStore
// gossipStore broadcasts this store's descriptor on the gossip network.
func (s *Store) gossipStore(rangeCount int) error {
	storeDesc := s.getDesc(rangeCount)
	// Each store gossips under a unique key derived from its store ID.
	return s.gossip.AddInfoProto(gossip.MakeStoreKey(storeDesc.StoreID), &storeDesc, 0)
}
示例2: GossipStores
// GossipStores queues up a list of stores to gossip and blocks until each one
// is gossiped before returning.
func (sg *StoreGossiper) GossipStores(storeDescs []*roachpb.StoreDescriptor, t *testing.T) {
	// Collect the store IDs so GossipWithFunction knows which callbacks to
	// wait for.
	ids := make([]roachpb.StoreID, 0, len(storeDescs))
	for _, desc := range storeDescs {
		ids = append(ids, desc.StoreID)
	}
	sg.GossipWithFunction(ids, func() {
		// Gossip each descriptor under its store-specific key.
		for _, desc := range storeDescs {
			if err := sg.g.AddInfoProto(gossip.MakeStoreKey(desc.StoreID), desc, 0); err != nil {
				t.Fatal(err)
			}
		}
	})
}
示例3: GossipWithFunction
// GossipWithFunction calls gossipFn and blocks until gossip callbacks have
// fired on each of the stores specified by storeIDs.
func (sg *StoreGossiper) GossipWithFunction(storeIDs []roachpb.StoreID, gossipFn func()) {
	sg.mu.Lock()
	defer sg.mu.Unlock()
	// Record every store key we expect a gossip callback for; the callback
	// removes keys from this map as they arrive.
	pending := make(map[string]struct{}, len(storeIDs))
	for _, id := range storeIDs {
		pending[gossip.MakeStoreKey(id)] = struct{}{}
	}
	sg.storeKeyMap = pending
	gossipFn()
	// Block until the callback has cleared every expected key.
	for len(sg.storeKeyMap) > 0 {
		sg.cond.Wait()
	}
}
示例4: RemoveTarget
// RemoveTarget returns a suitable replica to remove from the provided replica
// set. It attempts to consider which of the provided replicas would be the best
// candidate for removal.
//
// TODO(mrtracy): RemoveTarget eventually needs to accept the attributes from
// the zone config associated with the provided replicas. This will allow it to
// make correct decisions in the case of ranges with heterogeneous replica
// requirements (i.e. multiple data centers).
func (a *allocator) RemoveTarget(existing []proto.Replica) (proto.Replica, error) {
	if len(existing) == 0 {
		return proto.Replica{}, util.Error("must supply at least one replica to allocator.RemoveTarget()")
	}
	a.Lock()
	defer a.Unlock()
	// Pair each replica with its store descriptor, looked up via gossip,
	// and accumulate capacity-usage statistics across all of them.
	type replStore struct {
		repl  proto.Replica
		store *proto.StoreDescriptor
	}
	candidates := make([]replStore, 0, len(existing))
	usedStat := stat{}
	for _, repl := range existing {
		desc, err := storeDescFromGossip(gossip.MakeStoreKey(repl.StoreID), a.gossip)
		if err != nil {
			return proto.Replica{}, err
		}
		candidates = append(candidates, replStore{repl: repl, store: desc})
		usedStat.Update(desc.Capacity.FractionUsed())
	}
	// Select the "worst" replica for removal. While mean usage is below the
	// threshold, the store holding the most ranges is worst; otherwise the
	// store with the highest fraction of used capacity is worst.
	worst := candidates[0]
	for _, rs := range candidates[1:] {
		if usedStat.mean < minFractionUsedThreshold {
			if rs.store.Capacity.RangeCount > worst.store.Capacity.RangeCount {
				worst = rs
			}
			continue
		}
		if rs.store.Capacity.FractionUsed() > worst.store.Capacity.FractionUsed() {
			worst = rs
		}
	}
	return worst.repl, nil
}
示例5: gossipStores
// gossipStores gossips each provided store descriptor and blocks until every
// corresponding gossip callback has fired.
func (sg *storeGossiper) gossipStores(stores []*proto.StoreDescriptor, t *testing.T) {
	sg.mu.Lock()
	defer sg.mu.Unlock()
	sg.wg.Add(len(stores))
	for _, desc := range stores {
		// Gossip the descriptor under its store-specific key.
		if err := sg.g.AddInfoProto(gossip.MakeStoreKey(desc.StoreID), desc, 0); err != nil {
			t.Fatal(err)
		}
	}
	// Block until every gossip callback has invoked wg.Done.
	sg.wg.Wait()
}
示例6: GossipWithFunction
// GossipWithFunction is similar to GossipStores but instead of gossiping the
// store descriptors directly, call the passed in function to do so.
func (sg *StoreGossiper) GossipWithFunction(stores []roachpb.StoreID, gossiper func()) {
	sg.mu.Lock()
	sg.storeKeyMap = map[string]struct{}{}
	sg.wg.Add(len(stores))
	// Record every store key whose gossip callback we will wait for.
	for _, id := range stores {
		sg.storeKeyMap[gossip.MakeStoreKey(id)] = struct{}{}
	}
	// Delegate the actual gossiping to the caller-supplied function.
	gossiper()
	sg.mu.Unlock()
	// Block until every gossip callback has invoked wg.Done.
	sg.wg.Wait()
}
示例7: gossipStores
// gossipStores gossips each store descriptor on g and blocks until one gossip
// callback has fired per store.
func gossipStores(g *gossip.Gossip, stores []*proto.StoreDescriptor, t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(len(stores))
	// Count down once per store-key gossip callback.
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ bool, _ interface{}) {
		wg.Done()
	})
	for _, desc := range stores {
		// Gossip the descriptor under its store-specific key.
		if err := g.AddInfo(gossip.MakeStoreKey(desc.StoreID), *desc, 0); err != nil {
			t.Fatal(err)
		}
	}
	// Block until every callback has fired.
	wg.Wait()
}
示例8: gossipStores
// gossipStores queues up a list of stores to gossip and blocks until each one
// is gossiped before returning.
func (sg *storeGossiper) gossipStores(stores []*proto.StoreDescriptor, t *testing.T) {
	sg.mu.Lock()
	sg.storeKeyMap = map[string]struct{}{}
	sg.wg.Add(len(stores))
	for _, desc := range stores {
		key := gossip.MakeStoreKey(desc.StoreID)
		// Track the key so the gossip callback knows it is expected.
		sg.storeKeyMap[key] = struct{}{}
		// Gossip the store descriptor under that key.
		if err := sg.g.AddInfoProto(key, desc, 0); err != nil {
			t.Fatal(err)
		}
	}
	sg.mu.Unlock()
	// Block until every gossip callback has invoked wg.Done.
	sg.wg.Wait()
}
示例9: GossipStores
// GossipStores queues up a list of stores to gossip and blocks until each one
// is gossiped before returning.
func (sg *StoreGossiper) GossipStores(stores []*roachpb.StoreDescriptor, t *testing.T) {
	sg.mu.Lock()
	defer sg.mu.Unlock()
	sg.storeKeyMap = map[string]struct{}{}
	for _, desc := range stores {
		key := gossip.MakeStoreKey(desc.StoreID)
		// Track the key so the gossip callback can mark it as seen.
		sg.storeKeyMap[key] = struct{}{}
		// Gossip the store descriptor under that key.
		if err := sg.g.AddInfoProto(key, desc, 0); err != nil {
			t.Fatal(err)
		}
	}
	// Block until the callback has removed every expected key.
	for len(sg.storeKeyMap) > 0 {
		sg.cond.Wait()
	}
}
示例10: Example_rebalancing
func Example_rebalancing() {
stopper := stop.NewStopper()
defer stopper.Stop()
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
g := gossip.New(nil, nil, stopper)
// Have to call g.SetNodeID before call g.AddInfo
g.SetNodeID(roachpb.NodeID(1))
sp := NewStorePool(
g,
hlc.NewClock(hlc.UnixNano),
nil,
/* reservationsEnabled */ true,
TestTimeUntilStoreDeadOff,
stopper,
)
alloc := MakeAllocator(sp, AllocatorOptions{AllowRebalance: true, Deterministic: true})
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = roachpb.StoreID(i)
testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].add(alloc.randGen.Int63n(1 << 20))
}
if err := g.AddInfoProto(gossip.MakeStoreKey(roachpb.StoreID(j)), &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.ShouldRebalance(ts.StoreID) {
target := alloc.RebalanceTarget(ts.StoreID, roachpb.Attributes{}, []roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as hexidecimal 2-character values.
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 045 140 000 000 000 000 000 105 000 000
// 999 014 143 000 000 000 000 039 017 000 112 071 000 088 009 000 097 134 000 151
// 999 196 213 000 000 000 143 098 210 039 262 260 077 139 078 087 237 316 281 267
//.........这里部分代码省略.........
示例11: Example_rebalancing
func Example_rebalancing() {
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
g := gossip.New(nil, 0, nil)
alloc := newAllocator(g)
alloc.randGen = rand.New(rand.NewSource(0))
alloc.deterministic = true
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = proto.StoreID(i)
testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].Add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].Add(alloc.randGen.Int63n(1 << 20))
}
key := gossip.MakeStoreKey(proto.StoreID(j))
if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as hexidecimal 2-character values.
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
// 999 107 000 000 204 000 000 375 000 000 000 000 000 000 000 000 000 000 536 000
// 999 310 000 262 872 000 000 208 000 705 000 526 000 000 439 000 000 607 933 000
// 812 258 000 220 999 673 402 480 000 430 516 374 000 431 318 000 551 714 917 000
// 582 625 185 334 720 589 647 619 000 300 483 352 279 502 208 665 816 684 999 374
// 751 617 771 542 738 676 665 525 309 435 612 449 457 616 306 837 993 754 999 445
// 759 659 828 478 693 622 594 591 349 458 630 538 526 613 462 827 879 787 999 550
// 861 658 828 559 801 660 681 560 487 529 652 686 642 716 575 999 989 875 989 581
// 775 647 724 557 779 662 670 494 535 502 681 676 624 695 561 961 999 772 888 592
// 856 712 753 661 767 658 717 606 529 615 755 699 672 700 576 955 999 755 861 671
// 882 735 776 685 844 643 740 578 610 688 787 741 661 767 587 999 955 809 803 731
// 958 716 789 719 861 689 821 608 634 724 800 782 694 799 619 994 999 851 812 818
// 949 726 788 664 873 633 749 599 680 714 790 728 663 842 628 999 978 816 823 791
// 923 698 792 712 816 605 774 651 661 728 802 718 670 819 714 999 966 801 829 791
// 962 779 847 737 900 675 811 691 745 778 835 812 680 894 790 999 989 872 923 799
// 967 812 826 772 891 685 828 683 761 808 864 820 643 873 783 969 999 873 910 781
// 923 813 837 739 867 672 792 664 773 772 879 803 610 845 740 957 999 867 912 732
// 952 803 866 759 881 655 765 668 803 772 929 762 601 844 751 973 999 892 864 731
//.........这里部分代码省略.........
示例12: Example_rebalancing
func Example_rebalancing() {
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
g := gossip.New(nil, 0, nil)
stopper := stop.NewStopper()
defer stopper.Stop()
sp := NewStorePool(g, TestTimeUntilStoreDeadOff, stopper)
alloc := makeAllocator(sp)
alloc.randGen = rand.New(rand.NewSource(0))
alloc.deterministic = true
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = proto.StoreID(i)
testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].add(alloc.randGen.Int63n(1 << 20))
}
key := gossip.MakeStoreKey(proto.StoreID(j))
if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.shouldRebalance(&testStores[j].StoreDescriptor) {
target := alloc.rebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as hexidecimal 2-character values.
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 739 000 000
// 999 107 000 000 000 000 000 000 000 000 177 000 000 000 204 000 000 734 000 000
// 929 288 000 168 000 057 623 000 114 272 471 000 000 565 385 000 000 999 000 284
// 683 367 133 087 000 527 381 607 379 380 502 000 188 824 490 295 420 999 000 490
// 540 443 380 319 000 438 382 534 599 579 602 000 268 859 601 374 450 999 000 532
// 412 428 539 429 170 332 424 696 505 439 503 691 327 752 427 437 451 999 076 441
// 496 583 662 586 280 431 499 714 564 578 540 661 431 784 548 516 547 999 329 589
// 502 563 646 541 430 428 576 693 633 578 537 577 455 803 573 596 528 999 402 639
// 603 641 764 638 764 521 650 764 713 683 648 652 579 860 610 731 665 999 463 749
// 615 642 779 688 813 459 650 791 728 702 743 614 526 829 600 767 760 999 497 700
// 677 677 879 787 867 518 700 852 775 801 793 666 526 820 601 843 767 999 544 772
// 723 696 866 838 853 589 730 882 800 768 782 695 567 776 656 836 832 999 613 832
// 830 764 936 879 976 673 824 974 864 825 835 761 703 874 700 909 888 999 635 957
// 832 766 949 842 995 730 839 965 870 843 790 765 693 931 706 936 936 999 683 948
// 866 787 990 851 999 780 867 968 892 847 783 787 708 912 768 963 951 954 681 942
//.........这里部分代码省略.........
示例13: Example_rebalancing
func Example_rebalancing() {
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
g := gossip.New(nil, 0, nil)
stopper := stop.NewStopper()
defer stopper.Stop()
sp := NewStorePool(g, TestTimeUntilStoreDeadOff, stopper)
alloc := MakeAllocator(sp, RebalancingOptions{AllowRebalance: true, Deterministic: true})
alloc.randGen = rand.New(rand.NewSource(0))
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = roachpb.StoreID(i)
testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].add(alloc.randGen.Int63n(1 << 20))
}
key := gossip.MakeStoreKey(roachpb.StoreID(j))
if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
if alloc.ShouldRebalance(ts.StoreID) {
target := alloc.RebalanceTarget(ts.StoreID, roachpb.Attributes{}, []roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
if target != nil {
testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
}
// Output store capacities as hexidecimal 2-character values.
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 138 000 000 000 000 000 000 999 000 000 000 000 000 000 000 000 000 000 000 000
// 922 319 000 000 000 239 000 999 000 000 000 000 000 214 073 000 000 000 190 000
// 999 505 480 000 634 352 421 644 212 331 396 144 000 242 419 275 000 000 727 028
// 999 678 908 705 350 558 549 714 651 824 895 694 000 373 610 490 372 106 492 796
// 932 701 763 999 660 706 571 702 787 945 848 678 062 692 762 413 603 252 513 882
// 937 656 875 984 734 717 676 685 910 895 847 841 349 754 864 463 722 377 655 999
// 885 701 805 999 647 744 802 659 778 834 830 725 569 761 922 587 684 458 693 935
// 813 650 709 931 583 733 843 619 793 881 768 658 565 713 956 598 733 594 656 999
// 873 727 721 999 544 812 848 666 817 943 831 658 556 769 927 554 799 733 670 869
// 937 765 827 999 543 875 907 670 929 997 913 768 621 853 922 618 878 832 733 937
// 902 819 744 988 547 904 922 688 879 999 812 710 554 789 890 591 808 865 658 932
// 870 873 846 997 596 937 899 765 864 969 855 751 577 824 951 579 858 908 653 999
// 880 833 856 999 640 918 932 774 920 930 869 739 686 784 853 553 885 941 685 986
// 874 797 808 999 645 925 928 781 920 956 859 762 678 761 819 627 899 941 725 959
// 886 801 835 999 638 984 927 825 968 958 860 760 813 716 800 638 908 908 798 945
// 860 840 836 973 634 999 944 834 977 923 848 769 846 728 836 605 865 915 781 896
//.........这里部分代码省略.........
示例14: Example_rebalancing
func Example_rebalancing() {
stopper := stop.NewStopper()
defer stopper.Stop()
// Model a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
rpcContext := rpc.NewContext(&base.Context{Insecure: true}, nil, stopper)
server := rpc.NewServer(rpcContext) // never started
g := gossip.New(context.Background(), rpcContext, server, nil, stopper, metric.NewRegistry())
// Have to call g.SetNodeID before call g.AddInfo
g.SetNodeID(roachpb.NodeID(1))
sp := NewStorePool(
g,
hlc.NewClock(hlc.UnixNano),
nil,
/* reservationsEnabled */ true,
TestTimeUntilStoreDeadOff,
stopper,
)
alloc := MakeAllocator(sp, AllocatorOptions{AllowRebalance: true, Deterministic: true})
var wg sync.WaitGroup
g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) { wg.Done() })
const generations = 100
const nodes = 20
// Initialize testStores.
var testStores [nodes]testStore
for i := 0; i < len(testStores); i++ {
testStores[i].StoreID = roachpb.StoreID(i)
testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
}
// Initialize the cluster with a single range.
testStores[0].add(alloc.randGen.Int63n(1 << 20))
for i := 0; i < generations; i++ {
// First loop through test stores and add data.
wg.Add(len(testStores))
for j := 0; j < len(testStores); j++ {
// Add a pretend range to the testStore if there's already one.
if testStores[j].Capacity.RangeCount > 0 {
testStores[j].add(alloc.randGen.Int63n(1 << 20))
}
if err := g.AddInfoProto(gossip.MakeStoreKey(roachpb.StoreID(j)), &testStores[j].StoreDescriptor, 0); err != nil {
panic(err)
}
}
wg.Wait()
// Next loop through test stores and maybe rebalance.
for j := 0; j < len(testStores); j++ {
ts := &testStores[j]
target := alloc.RebalanceTarget(
roachpb.Attributes{},
[]roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}},
-1)
if target != nil {
testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
}
}
// Output store capacities as hexadecimal 2-character values.
if i%(generations/50) == 0 {
var maxBytes int64
for j := 0; j < len(testStores); j++ {
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
if bytes > maxBytes {
maxBytes = bytes
}
}
if maxBytes > 0 {
for j := 0; j < len(testStores); j++ {
endStr := " "
if j == len(testStores)-1 {
endStr = ""
}
bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
}
fmt.Printf("\n")
}
}
}
var totBytes int64
var totRanges int32
for i := 0; i < len(testStores); i++ {
totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
totRanges += testStores[i].Capacity.RangeCount
}
fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
//.........这里部分代码省略.........