This article collects and summarizes typical usage examples of the Golang function MakePrefixPattern from github.com/cockroachdb/cockroach/gossip. If you are wondering what MakePrefixPattern does, how to use it, or what it looks like in real code, the hand-picked function examples below may help.
The following shows 15 code examples of the MakePrefixPattern function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
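Before the examples, here is the shared pattern in miniature: MakePrefixPattern builds a regular expression that matches every gossip key under a given prefix (for example gossip.KeyStorePrefix), and that pattern is passed to Gossip.RegisterCallback so the callback fires whenever a matching key is gossiped; RegisterCallback returns a function that removes the callback again. The sketch below is only an illustration of that flow: the watchStoreDescriptors helper is hypothetical, the import paths and the (key string, content roachpb.Value) callback signature are assumed from the newer examples on this page, and the older examples further down pass []byte or bool to the callback instead.

package example

import (
	"fmt"

	"github.com/cockroachdb/cockroach/gossip"
	"github.com/cockroachdb/cockroach/roachpb"
)

// watchStoreDescriptors is a hypothetical helper: it registers a gossip
// callback for every key under the store prefix and returns the unregister
// function that RegisterCallback hands back.
func watchStoreDescriptors(g *gossip.Gossip) func() {
	// MakePrefixPattern turns the store key prefix into a regular expression
	// matching all keys gossiped under that prefix.
	pattern := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	return g.RegisterCallback(pattern, func(key string, content roachpb.Value) {
		// Each matching gossip entry carries a protobuf-encoded StoreDescriptor.
		var desc roachpb.StoreDescriptor
		if err := content.GetProto(&desc); err != nil {
			fmt.Printf("could not decode store descriptor for %s: %v\n", key, err)
			return
		}
		fmt.Printf("store %d was gossiped under key %s\n", desc.StoreID, key)
	})
}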
Example 1: NewStorePool
// NewStorePool creates a StorePool and registers the store updating callback
// with gossip.
func NewStorePool(
	g *gossip.Gossip,
	clock *hlc.Clock,
	rpcContext *rpc.Context,
	reservationsEnabled bool,
	timeUntilStoreDead time.Duration,
	stopper *stop.Stopper,
) *StorePool {
	sp := &StorePool{
		clock:               clock,
		timeUntilStoreDead:  timeUntilStoreDead,
		rpcContext:          rpcContext,
		reservationsEnabled: reservationsEnabled,
		failedReservationsTimeout: envutil.EnvOrDefaultDuration("failed_reservation_timeout",
			defaultFailedReservationsTimeout),
		declinedReservationsTimeout: envutil.EnvOrDefaultDuration("declined_reservation_timeout",
			defaultDeclinedReservationsTimeout),
		reserveRPCTimeout: envutil.EnvOrDefaultDuration("reserve_rpc_timeout",
			defaultReserveRPCTimeout),
		resolver: GossipAddressResolver(g),
	}
	sp.mu.stores = make(map[roachpb.StoreID]*storeDetail)
	heap.Init(&sp.mu.queue)
	storeRegex := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	g.RegisterCallback(storeRegex, sp.storeGossipUpdate)
	deadReplicasRegex := gossip.MakePrefixPattern(gossip.KeyDeadReplicasPrefix)
	g.RegisterCallback(deadReplicasRegex, sp.deadReplicasGossipUpdate)
	sp.start(stopper)
	return sp
}
Example 2: waitForStores
// waitForStores waits for all of the store descriptors to be gossiped. Servers
// other than the first "bootstrap" their stores asynchronously, but we'd like
// to wait for all of the stores to be initialized before returning the
// TestCluster.
func (tc *TestCluster) waitForStores(t testing.TB) {
	// Register a gossip callback for the store descriptors.
	g := tc.Servers[0].Gossip()
	var storesMu sync.Mutex
	stores := map[roachpb.StoreID]struct{}{}
	storesDone := make(chan error)
	unregister := g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix),
		func(_ string, content roachpb.Value) {
			var desc roachpb.StoreDescriptor
			if err := content.GetProto(&desc); err != nil {
				storesDone <- err
				return
			}
			storesMu.Lock()
			stores[desc.StoreID] = struct{}{}
			if len(stores) == len(tc.Servers) {
				close(storesDone)
			}
			storesMu.Unlock()
		})
	defer unregister()
	// Wait for the store descriptors to be gossiped.
	if err := <-storesDone; err != nil {
		t.Fatal(err)
	}
}
Example 3: TestStoreRangeReplicate
// TestStoreRangeReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := multiTestContext{}
	mtc.Start(t, 3)
	defer mtc.Stop()
	// Initialize the gossip network.
	var wg sync.WaitGroup
	wg.Add(len(mtc.stores))
	key := gossip.MakePrefixPattern(gossip.KeyCapacityPrefix)
	mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ bool) { wg.Done() })
	for _, s := range mtc.stores {
		s.GossipCapacity()
	}
	wg.Wait()
	// Once we know our peers, trigger a scan.
	mtc.stores[0].ForceReplicationScan(t)
	// The range should become available on every node.
	if err := util.IsTrueWithin(func() bool {
		for _, s := range mtc.stores {
			r := s.LookupRange(proto.Key("a"), proto.Key("b"))
			if r == nil {
				return false
			}
		}
		return true
	}, 1*time.Second); err != nil {
		t.Fatal(err)
	}
}
Example 4: TestStoreRangeUpReplicate
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// Initialize the gossip network.
	var wg sync.WaitGroup
	wg.Add(len(mtc.stores))
	key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
	for _, s := range mtc.stores {
		s.GossipStore()
	}
	wg.Wait()
	// Once we know our peers, trigger a scan.
	mtc.stores[0].ForceReplicationScanAndProcess()
	// The range should become available on every node.
	util.SucceedsSoon(t, func() error {
		for _, s := range mtc.stores {
			r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
			if r == nil {
				return util.Errorf("expected replica for keys \"a\" - \"b\"")
			}
		}
		return nil
	})
}
Example 5: TestStoreRangeUpReplicate
// TestStoreRangeUpReplicate verifies that the replication queue will notice
// under-replicated ranges and replicate them.
func TestStoreRangeUpReplicate(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 3)
	defer mtc.Stop()
	// Initialize the gossip network.
	var wg sync.WaitGroup
	wg.Add(len(mtc.stores))
	key := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	mtc.stores[0].Gossip().RegisterCallback(key, func(_ string, _ roachpb.Value) { wg.Done() })
	for _, s := range mtc.stores {
		s.GossipStore()
	}
	wg.Wait()
	// Once we know our peers, trigger a scan.
	mtc.stores[0].ForceReplicationScanAndProcess()
	// The range should become available on every node.
	if err := util.IsTrueWithin(func() bool {
		for _, s := range mtc.stores {
			r := s.LookupReplica(roachpb.RKey("a"), roachpb.RKey("b"))
			if r == nil {
				return false
			}
		}
		return true
	}, replicationTimeout); err != nil {
		t.Fatal(err)
	}
}
Example 6: newStoreGossiper
func newStoreGossiper(g *gossip.Gossip) *storeGossiper {
	sg := &storeGossiper{
		g: g,
	}
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { sg.wg.Done() })
	return sg
}
Example 7: newReplicateQueue
// newReplicateQueue returns a new instance of replicateQueue.
func newReplicateQueue(store *Store, g *gossip.Gossip, allocator Allocator, clock *hlc.Clock,
	options AllocatorOptions) *replicateQueue {
	rq := &replicateQueue{
		allocator:  allocator,
		clock:      clock,
		updateChan: make(chan struct{}, 1),
	}
	rq.baseQueue = makeBaseQueue("replicate", rq, store, g, queueConfig{
		maxSize:              replicateQueueMaxSize,
		needsLease:           true,
		acceptsUnsplitRanges: false,
	})
	if g != nil { // gossip is nil for some unittests
		// Register a gossip callback to signal queue that replicas in
		// purgatory might be retried due to new store gossip.
		g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) {
			select {
			case rq.updateChan <- struct{}{}:
			default:
			}
		})
	}
	return rq
}
Example 8: newAllocator
// newAllocator creates a new allocator using the specified gossip.
func newAllocator(g *gossip.Gossip) *allocator {
	a := &allocator{
		gossip:  g,
		randGen: rand.New(rand.NewSource(rand.Int63())),
	}
	// Callback triggers on any capacity gossip updates.
	if a.gossip != nil {
		capacityRegex := gossip.MakePrefixPattern(gossip.KeyCapacityPrefix)
		a.gossip.RegisterCallback(capacityRegex, a.capacityGossipUpdate)
	}
	return a
}
Example 9: newStoreGossiper
// newStoreGossiper creates a store gossiper for use by tests. It adds the
// callback to gossip.
func newStoreGossiper(g *gossip.Gossip) *storeGossiper {
	sg := &storeGossiper{
		g:           g,
		storeKeyMap: make(map[string]struct{}),
	}
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(key string, _ []byte) {
		sg.mu.Lock()
		defer sg.mu.Unlock()
		if _, ok := sg.storeKeyMap[key]; ok {
			sg.wg.Done()
		}
	})
	return sg
}
Example 10: NewStorePool
// NewStorePool creates a StorePool and registers the store updating callback
// with gossip.
func NewStorePool(g *gossip.Gossip, timeUntilStoreDead time.Duration, stopper *stop.Stopper) *StorePool {
	sp := &StorePool{
		timeUntilStoreDead: timeUntilStoreDead,
		stores:             make(map[roachpb.StoreID]*storeDetail),
	}
	heap.Init(&sp.queue)
	storeRegex := gossip.MakePrefixPattern(gossip.KeyStorePrefix)
	g.RegisterCallback(storeRegex, sp.storeGossipUpdate)
	sp.start(stopper)
	return sp
}
Example 11: NewStoreGossiper
// NewStoreGossiper creates a store gossiper for use by tests. It adds the
// callback to gossip.
func NewStoreGossiper(g *gossip.Gossip) *StoreGossiper {
	sg := &StoreGossiper{
		g:           g,
		storeKeyMap: make(map[string]struct{}),
	}
	sg.cond = sync.NewCond(&sg.mu)
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(key string, _ roachpb.Value) {
		sg.mu.Lock()
		defer sg.mu.Unlock()
		delete(sg.storeKeyMap, key)
		sg.cond.Broadcast()
	})
	return sg
}
Example 12: gossipStores
func gossipStores(g *gossip.Gossip, stores []*proto.StoreDescriptor, t *testing.T) {
	var wg sync.WaitGroup
	wg.Add(len(stores))
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyCapacityPrefix), func(_ string, _ bool) { wg.Done() })
	for _, s := range stores {
		keyMaxCapacity := gossip.MakeCapacityKey(s.Node.NodeID, s.StoreID)
		// Gossip store descriptor.
		err := g.AddInfo(keyMaxCapacity, *s, 0)
		if err != nil {
			t.Fatal(err)
		}
	}
	// Wait for all gossip callbacks to be invoked.
	wg.Wait()
}
Example 13: Example_rebalancing
func Example_rebalancing() {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Model a set of stores in a cluster,
	// randomly adding / removing stores and adding bytes.
	g := gossip.New(nil, nil, stopper)
	// We have to call g.SetNodeID before calling g.AddInfo.
	g.SetNodeID(roachpb.NodeID(1))
	sp := NewStorePool(
		g,
		hlc.NewClock(hlc.UnixNano),
		nil,
		/* reservationsEnabled */ true,
		TestTimeUntilStoreDeadOff,
		stopper,
	)
	alloc := MakeAllocator(sp, AllocatorOptions{AllowRebalance: true, Deterministic: true})
	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ roachpb.Value) { wg.Done() })
	const generations = 100
	const nodes = 20
	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = roachpb.StoreID(i)
		testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
		testStores[i].Capacity = roachpb.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].add(alloc.randGen.Int63n(1 << 20))
	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if there's already one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].add(alloc.randGen.Int63n(1 << 20))
			}
			if err := g.AddInfoProto(gossip.MakeStoreKey(roachpb.StoreID(j)), &testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()
		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(ts.StoreID) {
				target := alloc.RebalanceTarget(ts.StoreID, roachpb.Attributes{}, []roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}
		// Output store capacities as 3-digit decimal values scaled to the 0-999 range.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}
	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 000 000 000 000 000 000 045 140 000 000 000 000 000 105 000 000
// 999 014 143 000 000 000 000 039 017 000 112 071 000 088 009 000 097 134 000 151
// 999 196 213 000 000 000 143 098 210 039 262 260 077 139 078 087 237 316 281 267
// ... (rest of the code omitted) ...
Example 14: Example_rebalancing
func Example_rebalancing() {
	// Model a set of stores in a cluster,
	// randomly adding / removing stores and adding bytes.
	g := gossip.New(nil, 0, nil)
	alloc := newAllocator(g)
	alloc.randGen = rand.New(rand.NewSource(0))
	alloc.deterministic = true
	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix), func(_ string, _ []byte) { wg.Done() })
	const generations = 100
	const nodes = 20
	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = proto.StoreID(i)
		testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
		testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].Add(alloc.randGen.Int63n(1 << 20))
	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if there's already one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].Add(alloc.randGen.Int63n(1 << 20))
			}
			key := gossip.MakeStoreKey(proto.StoreID(j))
			if err := g.AddInfoProto(key, &testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()
		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
				target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}
		// Output store capacities as 3-digit decimal values scaled to the 0-999 range.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}
	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
// 999 107 000 000 204 000 000 375 000 000 000 000 000 000 000 000 000 000 536 000
// 999 310 000 262 872 000 000 208 000 705 000 526 000 000 439 000 000 607 933 000
// 812 258 000 220 999 673 402 480 000 430 516 374 000 431 318 000 551 714 917 000
// 582 625 185 334 720 589 647 619 000 300 483 352 279 502 208 665 816 684 999 374
// 751 617 771 542 738 676 665 525 309 435 612 449 457 616 306 837 993 754 999 445
// 759 659 828 478 693 622 594 591 349 458 630 538 526 613 462 827 879 787 999 550
// 861 658 828 559 801 660 681 560 487 529 652 686 642 716 575 999 989 875 989 581
// 775 647 724 557 779 662 670 494 535 502 681 676 624 695 561 961 999 772 888 592
// 856 712 753 661 767 658 717 606 529 615 755 699 672 700 576 955 999 755 861 671
// 882 735 776 685 844 643 740 578 610 688 787 741 661 767 587 999 955 809 803 731
// 958 716 789 719 861 689 821 608 634 724 800 782 694 799 619 994 999 851 812 818
// 949 726 788 664 873 633 749 599 680 714 790 728 663 842 628 999 978 816 823 791
// 923 698 792 712 816 605 774 651 661 728 802 718 670 819 714 999 966 801 829 791
// 962 779 847 737 900 675 811 691 745 778 835 812 680 894 790 999 989 872 923 799
// 967 812 826 772 891 685 828 683 761 808 864 820 643 873 783 969 999 873 910 781
// 923 813 837 739 867 672 792 664 773 772 879 803 610 845 740 957 999 867 912 732
// 952 803 866 759 881 655 765 668 803 772 929 762 601 844 751 973 999 892 864 731
// ... (rest of the code omitted) ...
Example 15: ExampleAllocatorRebalancing
// ExampleAllocatorRebalancing models a set of stores in a cluster,
// randomly adding / removing stores and adding bytes.
func ExampleAllocatorRebalancing() {
	g := gossip.New(nil, 0, nil)
	alloc := newAllocator(g)
	alloc.randGen = rand.New(rand.NewSource(0))
	alloc.deterministic = true
	var wg sync.WaitGroup
	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyCapacityPrefix), func(_ string, _ bool) { wg.Done() })
	const generations = 100
	const nodes = 20
	// Initialize testStores.
	var testStores [nodes]testStore
	for i := 0; i < len(testStores); i++ {
		testStores[i].StoreID = proto.StoreID(i)
		testStores[i].Node = proto.NodeDescriptor{NodeID: proto.NodeID(i)}
		testStores[i].Capacity = proto.StoreCapacity{Capacity: 1 << 30, Available: 1 << 30}
	}
	// Initialize the cluster with a single range.
	testStores[0].Add(alloc.randGen.Int63n(1 << 20))
	for i := 0; i < generations; i++ {
		// First loop through test stores and add data.
		wg.Add(len(testStores))
		for j := 0; j < len(testStores); j++ {
			// Add a pretend range to the testStore if there's already one.
			if testStores[j].Capacity.RangeCount > 0 {
				testStores[j].Add(alloc.randGen.Int63n(1 << 20))
			}
			key := gossip.MakeCapacityKey(proto.NodeID(j), proto.StoreID(j))
			if err := g.AddInfo(key, testStores[j].StoreDescriptor, 0); err != nil {
				panic(err)
			}
		}
		wg.Wait()
		// Next loop through test stores and maybe rebalance.
		for j := 0; j < len(testStores); j++ {
			ts := &testStores[j]
			if alloc.ShouldRebalance(&testStores[j].StoreDescriptor) {
				target := alloc.RebalanceTarget(proto.Attributes{}, []proto.Replica{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}})
				if target != nil {
					testStores[j].Rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20))
				}
			}
		}
		// Output store capacities as 3-digit decimal values scaled to the 0-999 range.
		if i%(generations/50) == 0 {
			var maxBytes int64
			for j := 0; j < len(testStores); j++ {
				bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
				if bytes > maxBytes {
					maxBytes = bytes
				}
			}
			if maxBytes > 0 {
				for j := 0; j < len(testStores); j++ {
					endStr := " "
					if j == len(testStores)-1 {
						endStr = ""
					}
					bytes := testStores[j].Capacity.Capacity - testStores[j].Capacity.Available
					fmt.Printf("%03d%s", (999*bytes)/maxBytes, endStr)
				}
				fmt.Printf("\n")
			}
		}
	}
	var totBytes int64
	var totRanges int32
	for i := 0; i < len(testStores); i++ {
		totBytes += testStores[i].Capacity.Capacity - testStores[i].Capacity.Available
		totRanges += testStores[i].Capacity.RangeCount
	}
	fmt.Printf("Total bytes=%d, ranges=%d\n", totBytes, totRanges)
// Output:
// 999 000 000 000 000 000 000 739 000 000 000 000 000 000 000 000 000 000 000 000
// 999 000 000 000 204 000 000 375 000 000 107 000 000 000 000 000 000 000 000 536
// 942 000 000 463 140 000 000 646 000 288 288 000 442 000 058 647 000 000 316 999
// 880 000 412 630 365 745 445 565 122 407 380 570 276 000 271 709 000 718 299 999
// 925 000 667 600 555 975 704 552 272 491 773 890 584 000 407 974 000 930 476 999
// 990 967 793 579 493 999 698 453 616 608 777 755 709 425 455 984 483 698 267 931
// 965 999 869 606 635 908 630 585 567 577 818 870 740 621 550 868 805 790 411 913
// 953 995 990 624 617 947 562 609 670 658 909 952 835 851 641 958 924 999 526 987
// 999 923 901 571 687 915 636 636 674 685 831 881 847 820 702 905 897 983 509 981
// 999 884 809 585 691 826 640 572 748 641 754 887 758 848 643 927 865 897 541 956
// 999 856 891 594 691 745 602 615 766 663 814 834 719 886 733 925 882 911 593 926
// 999 890 900 653 707 759 642 697 771 732 851 858 748 869 842 953 903 928 655 923
// 999 924 909 696 748 797 693 689 806 766 841 902 705 897 874 914 913 916 730 892
// 999 948 892 704 740 821 685 656 859 772 893 911 690 878 824 935 928 941 741 860
// 999 948 931 697 770 782 697 666 893 761 944 869 658 902 816 925 923 983 742 831
// 999 878 901 736 750 737 677 647 869 731 930 825 631 880 775 947 949 930 687 810
// 999 890 910 764 778 757 709 663 849 777 964 837 672 891 814 978 944 946 721 868
// 985 895 968 806 791 791 720 694 883 819 999 847 652 888 790 995 950 947 692 843
// ... (rest of the code omitted) ...