This article collects typical usage examples of the NewStopper function from the Go package github.com/cockroachdb/cockroach/util. If you are wondering what NewStopper does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
The following 15 code examples of NewStopper are shown, ordered by popularity.
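Before the examples, a quick orientation: a Stopper coordinates the shutdown of background work. The sketch below is restricted to the Stopper methods that actually appear in the examples on this page (NewStopper, AddWorker, ShouldStop, SetStopped, Stop); doWork is a placeholder, and resource cleanup via AddCloser is shown in Examples 5, 9 and 10.
// Minimal sketch of the Stopper worker lifecycle, assuming only the
// methods used in the examples below; doWork is a placeholder.
func stopperSketch(doWork func()) {
	stopper := util.NewStopper()
	// A worker announces itself with AddWorker, runs until the
	// ShouldStop channel is closed, then acknowledges with SetStopped.
	stopper.AddWorker()
	go func() {
		for {
			select {
			case <-stopper.ShouldStop():
				stopper.SetStopped()
				return
			default:
				doWork()
			}
		}
	}()
	// Stop signals ShouldStop, waits for workers to call SetStopped,
	// and closes anything registered with AddCloser.
	stopper.Stop()
}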
Example 1: TestScannerAddToQueues
// TestScannerAddToQueues verifies that ranges are added to and
// removed from multiple queues.
func TestScannerAddToQueues(t *testing.T) {
const count = 3
iter := newTestIterator(count)
q1, q2 := &testQueue{}, &testQueue{}
s := newRangeScanner(1*time.Millisecond, iter)
s.AddQueues(q1, q2)
mc := hlc.NewManualClock(0)
clock := hlc.NewClock(mc.UnixNano)
stopper := util.NewStopper(0)
// Start the scanner and verify that all ranges are added to both queues.
s.Start(clock, stopper)
if err := util.IsTrueWithin(func() bool {
return q1.count() == count && q2.count() == count
}, 50*time.Millisecond); err != nil {
t.Error(err)
}
// Remove the first range and verify it no longer exists in either queue.
rng := iter.remove(0)
s.RemoveRange(rng)
if err := util.IsTrueWithin(func() bool {
return q1.count() == count-1 && q2.count() == count-1
}, 10*time.Millisecond); err != nil {
t.Error(err)
}
// Stop scanner and verify both queues are stopped.
stopper.Stop()
if !q1.isDone() || !q2.isDone() {
t.Errorf("expected all queues to stop; got %t, %t", q1.isDone(), q2.isDone())
}
}
Example 2: TestScannerTiming
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
func TestScannerTiming(t *testing.T) {
defer leaktest.AfterTest(t)
const count = 3
const runTime = 100 * time.Millisecond
const maxError = 7500 * time.Microsecond
durations := []time.Duration{
10 * time.Millisecond,
25 * time.Millisecond,
}
for i, duration := range durations {
ranges := newTestRangeSet(count, t)
q := &testQueue{}
s := newRangeScanner(duration, 0, ranges, nil)
s.AddQueues(q)
mc := hlc.NewManualClock(0)
clock := hlc.NewClock(mc.UnixNano)
stopper := util.NewStopper()
defer stopper.Stop()
s.Start(clock, stopper)
time.Sleep(runTime)
avg := s.avgScan()
log.Infof("%d: average scan: %s", i, avg)
if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
t.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
}
}
}
Example 3: TestUnretryableError
// TestUnretryableError verifies that Send returns an unretryable
// error when it hits a critical error.
func TestUnretryableError(t *testing.T) {
defer leaktest.AfterTest(t)
stopper := util.NewStopper()
defer stopper.Stop()
nodeContext := NewNodeTestContext(nil, stopper)
s := createAndStartNewServer(t, nodeContext)
opts := Options{
N: 1,
Ordering: OrderStable,
SendNextTimeout: 1 * time.Second,
Timeout: 5 * time.Second,
}
getArgs := func(addr net.Addr) interface{} {
return &proto.PingRequest{}
}
// Make getReply return a non-proto value so that the proto
// integrity check fails.
getReply := func() interface{} {
return 0
}
_, err := Send(opts, "Heartbeat.Ping", []net.Addr{s.Addr()}, getArgs, getReply, nodeContext)
if err == nil {
t.Fatalf("Unexpected success")
}
retryErr, ok := err.(util.Retryable)
if !ok {
t.Fatalf("Unexpected error type: %v", err)
}
if retryErr.CanRetry() {
t.Errorf("Unexpected retryable error: %v", retryErr)
}
}
Example 4: TestRetryableError
// TestRetryableError verifies that Send returns a retryable error
// when it hits an RPC error.
func TestRetryableError(t *testing.T) {
defer leaktest.AfterTest(t)
stopper := util.NewStopper()
defer stopper.Stop()
nodeContext := NewNodeTestContext(nil, stopper)
s := createAndStartNewServer(t, nodeContext)
// Wait until the connection becomes ready, then shut down the server.
c := NewClient(s.Addr(), nil, nodeContext)
<-c.Ready
// Directly call Close() to close the connection without
// removing the client from the cache.
c.Client.Close()
s.Close()
opts := Options{
N: 1,
Ordering: OrderStable,
SendNextTimeout: 1 * time.Second,
Timeout: 1 * time.Second,
}
if _, err := sendPing(opts, []net.Addr{s.Addr()}, nodeContext); err != nil {
retryErr, ok := err.(util.Retryable)
if !ok {
t.Fatalf("Unexpected error type: %v", err)
}
if !retryErr.CanRetry() {
t.Errorf("Expected retryable error: %v", retryErr)
}
} else {
t.Fatalf("Unexpected success")
}
}
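The two tests above only classify the error; a caller would normally use the same util.Retryable assertion to drive a retry loop. The following sketch is built solely on the err.(util.Retryable) / CanRetry() pattern shown above; sendOnce and maxAttempts are illustrative placeholders, not part of the package.
// Sketch of a caller-side retry loop over the util.Retryable interface
// exercised above; sendOnce stands in for a call such as sendPing.
func sendWithRetry(sendOnce func() error, maxAttempts int) error {
	var err error
	for i := 0; i < maxAttempts; i++ {
		if err = sendOnce(); err == nil {
			return nil
		}
		// Give up immediately on errors that cannot be retried.
		if retryErr, ok := err.(util.Retryable); !ok || !retryErr.CanRetry() {
			return err
		}
	}
	return err
}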
Example 5: startAdminServer
// startAdminServer launches a new admin server using minimal engine
// and local database setup. It returns the server's base URL and a
// stopper; stopping the stopper also closes the httptest server. The
// Cockroach KV client address is set to the address of the test server.
func startAdminServer() (string, *util.Stopper) {
stopper := util.NewStopper()
db, err := BootstrapCluster("cluster-1", []engine.Engine{engine.NewInMem(proto.Attributes{}, 1<<20)}, stopper)
if err != nil {
log.Fatal(err)
}
admin := newAdminServer(db, stopper)
mux := http.NewServeMux()
mux.Handle(adminEndpoint, admin)
mux.Handle(debugEndpoint, admin)
httpServer := httptest.NewUnstartedServer(mux)
tlsConfig, err := testContext.GetServerTLSConfig()
if err != nil {
log.Fatal(err)
}
httpServer.TLS = tlsConfig
httpServer.StartTLS()
stopper.AddCloser(httpServer)
if strings.HasPrefix(httpServer.URL, "http://") {
testContext.Addr = strings.TrimPrefix(httpServer.URL, "http://")
} else if strings.HasPrefix(httpServer.URL, "https://") {
testContext.Addr = strings.TrimPrefix(httpServer.URL, "https://")
}
return httpServer.URL, stopper
}
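A typical call site for this helper is sketched below; cleanup goes through the returned stopper rather than the httptest server directly. The test function name is illustrative.
// Sketch of using startAdminServer from a test; the stopper owns the
// shutdown of the embedded httptest server.
func TestAdminSketch(t *testing.T) {
	url, stopper := startAdminServer()
	defer stopper.Stop()
	_ = url // issue admin HTTP requests against url here
}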
Example 6: TestBaseQueueAddRemove
// TestBaseQueueAddRemove adds and then removes a range, ensuring the range is never processed.
func TestBaseQueueAddRemove(t *testing.T) {
defer leaktest.AfterTest(t)
r := &Range{}
if err := r.setDesc(&proto.RangeDescriptor{RaftID: 1}); err != nil {
t.Fatal(err)
}
testQueue := &testQueueImpl{
shouldQueueFn: func(now proto.Timestamp, r *Range) (shouldQueue bool, priority float64) {
shouldQueue = true
priority = 1.0
return
},
}
bq := newBaseQueue("test", testQueue, 2)
stopper := util.NewStopper()
mc := hlc.NewManualClock(0)
clock := hlc.NewClock(mc.UnixNano)
bq.Start(clock, stopper)
defer stopper.Stop()
bq.MaybeAdd(r, proto.ZeroTimestamp)
bq.MaybeRemove(r)
time.Sleep(5 * time.Millisecond)
if pc := atomic.LoadInt32(&testQueue.processed); pc > 0 {
t.Errorf("expected processed count of 0; got %d", pc)
}
}
Example 7: TestSendToMultipleClients
// TestSendToMultipleClients verifies that Send correctly sends
// multiple requests to multiple servers using the heartbeat RPC.
func TestSendToMultipleClients(t *testing.T) {
defer leaktest.AfterTest(t)
stopper := util.NewStopper()
defer stopper.Stop()
nodeContext := NewNodeTestContext(nil, stopper)
numServers := 4
var addrs []net.Addr
for i := 0; i < numServers; i++ {
s := createAndStartNewServer(t, nodeContext)
addrs = append(addrs, s.Addr())
}
for n := 1; n < numServers; n++ {
// Send n requests.
opts := Options{
N: n,
Ordering: OrderStable,
SendNextTimeout: 1 * time.Second,
Timeout: 1 * time.Second,
}
replies, err := sendPing(opts, addrs, nodeContext)
if err != nil {
t.Fatal(err)
}
if len(replies) != n {
t.Errorf("%v replies are expected, but got %v", n, len(replies))
}
}
}
Example 8: TestSlowStorage
func TestSlowStorage(t *testing.T) {
defer leaktest.AfterTest(t)
stopper := util.NewStopper()
cluster := newTestCluster(nil, 3, stopper, t)
defer stopper.Stop()
groupID := proto.RaftID(1)
cluster.createGroup(groupID, 0, 3)
cluster.triggerElection(0, groupID)
cluster.waitForElection(0)
// Block the storage on the last node.
// TODO(bdarnell): there appear to still be issues if the storage is blocked during
// the election.
cluster.storages[2].Block()
// Submit a command to the leader
cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))
// Even with the third node blocked, the other nodes can make progress.
for i := 0; i < 2; i++ {
events := cluster.events[i]
log.Infof("waiting for event to be commited on node %v", i)
commit := <-events.CommandCommitted
if string(commit.Command) != "command" {
t.Errorf("unexpected value in committed command: %v", commit.Command)
}
}
// Ensure that node 2 is in fact blocked.
time.Sleep(time.Millisecond)
select {
case commit := <-cluster.events[2].CommandCommitted:
t.Errorf("didn't expect commits on node 2 but got %v", commit)
default:
}
// After unblocking the third node, it will catch up.
cluster.storages[2].Unblock()
log.Infof("waiting for event to be commited on node 2")
// When we unblock, the backlog is not guaranteed to be processed in order,
// and in some cases the leader may need to retransmit some messages.
for i := 0; i < 3; i++ {
select {
case commit := <-cluster.events[2].CommandCommitted:
if string(commit.Command) != "command" {
t.Errorf("unexpected value in committed command: %v", commit.Command)
}
return
case <-time.After(5 * time.Millisecond):
// Tick both nodes' clocks. The ticks on the follower node don't
// really do anything, but they do ensure that that goroutine is
// getting scheduled (and the real-time delay allows rpc responses
// to pass between the nodes)
cluster.tickers[0].Tick()
cluster.tickers[2].Tick()
}
}
}
Example 9: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
ltc.Manual = hlc.NewManualClock(0)
ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
ltc.Stopper = util.NewStopper()
rpcContext := rpc.NewContext(testutils.NewTestBaseContext(), ltc.Clock, ltc.Stopper)
ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
ltc.lSender = newRetryableLocalSender(NewLocalSender())
ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, ltc.Stopper)
var err error
if ltc.DB, err = client.Open("//[email protected]", client.SenderOpt(ltc.Sender)); err != nil {
t.Fatal(err)
}
transport := multiraft.NewLocalRPCTransport()
ltc.Stopper.AddCloser(transport)
ctx := storage.TestStoreContext
ctx.Clock = ltc.Clock
ctx.DB = ltc.DB
ctx.Gossip = ltc.Gossip
ctx.Transport = transport
ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
ltc.lSender.AddStore(ltc.Store)
if err := ltc.Store.BootstrapRange(); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
if err := ltc.Store.Start(ltc.Stopper); err != nil {
t.Fatalf("unable to start local test cluster: %s", err)
}
}
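Per the doc comment on Start, a test would drive the cluster roughly as follows. This is a hedged sketch: it assumes *testing.T satisfies util.Tester and that LocalTestCluster exposes the Stop method the comment above refers to; the test body is a placeholder.
// Sketch of a test built on LocalTestCluster.
func TestWithLocalClusterSketch(t *testing.T) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	defer ltc.Stop()
	// Interact with ltc.DB, ltc.Store and ltc.Clock here.
}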
Example 10: TestBootstrapOfNonEmptyStore
// TestBootstrapOfNonEmptyStore verifies bootstrap failure if engine
// is not empty.
func TestBootstrapOfNonEmptyStore(t *testing.T) {
defer leaktest.AfterTest(t)
eng := engine.NewInMem(proto.Attributes{}, 1<<20)
// Put some random garbage into the engine.
if err := eng.Put(proto.EncodedKey("foo"), []byte("bar")); err != nil {
t.Errorf("failure putting key foo into engine: %s", err)
}
ctx := TestStoreContext
manual := hlc.NewManualClock(0)
ctx.Clock = hlc.NewClock(manual.UnixNano)
ctx.Transport = multiraft.NewLocalRPCTransport()
stopper := util.NewStopper()
stopper.AddCloser(ctx.Transport)
defer stopper.Stop()
store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
// Can't start the store, as it hasn't been bootstrapped yet.
if err := store.Start(stopper); err == nil {
t.Error("expected failure init'ing un-bootstrapped store")
}
// Bootstrap should fail on non-empty engine.
if err := store.Bootstrap(testIdent, stopper); err == nil {
t.Error("expected bootstrap error on non-empty store")
}
}
Example 11: TestClientHeartbeatBadServer
// TestClientHeartbeatBadServer verifies that the client is not marked
// as "ready" until a heartbeat request succeeds.
func TestClientHeartbeatBadServer(t *testing.T) {
defer leaktest.AfterTest(t)
stopper := util.NewStopper()
defer stopper.Stop()
// Create a server without registering a heartbeat service.
serverClock := hlc.NewClock(hlc.UnixNano)
s := createTestServer(serverClock, stopper, t)
// Create a client. It should attempt a heartbeat and fail.
c := NewClient(s.Addr(), nil, s.context)
// Register a heartbeat service.
heartbeat := &HeartbeatService{
clock: serverClock,
remoteClockMonitor: newRemoteClockMonitor(serverClock),
}
// The client should fail the heartbeat and be recycled.
<-c.Closed
if err := s.RegisterName("Heartbeat", heartbeat); err != nil {
t.Fatalf("Unable to register heartbeat service: %s", err)
}
// Same thing, but now the heartbeat service exists.
c = NewClient(s.Addr(), nil, s.context)
// A heartbeat should succeed and the client should become ready.
<-c.Ready
}
Example 12: createTestStoreWithoutStart
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *util.Stopper) {
stopper := util.NewStopper()
rpcContext := rpc.NewContext(testBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
ctx := TestStoreContext
ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
manual := hlc.NewManualClock(0)
ctx.Clock = hlc.NewClock(manual.UnixNano)
eng := engine.NewInMem(proto.Attributes{}, 10<<20)
ctx.Transport = multiraft.NewLocalRPCTransport()
stopper.AddCloser(ctx.Transport)
sender := &testSender{}
var err error
if ctx.DB, err = client.Open("//[email protected]", client.SenderOpt(sender)); err != nil {
t.Fatal(err)
}
store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
sender.store = store
if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
t.Fatal(err)
}
if err := store.BootstrapRange(); err != nil {
t.Fatal(err)
}
return store, manual, stopper
}
Example 13: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
*rpc.Server, *hlc.Clock, *Node, *util.Stopper) {
var err error
ctx := storage.StoreContext{}
stopper := util.NewStopper()
ctx.Clock = hlc.NewClock(hlc.UnixNano)
nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
ctx.ScanInterval = 10 * time.Hour
rpcServer := rpc.NewServer(addr, nodeRPCContext)
if err := rpcServer.Start(); err != nil {
t.Fatal(err)
}
g := gossip.New(nodeRPCContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS == addr {
gossipBS = rpcServer.Addr()
}
g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
g.Start(rpcServer, stopper)
}
ctx.Gossip = g
sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g)
if ctx.DB, err = client.Open("//[email protected]", client.SenderOpt(sender)); err != nil {
t.Fatal(err)
}
// TODO(bdarnell): arrange to have the transport closed.
ctx.Transport = multiraft.NewLocalRPCTransport()
ctx.EventFeed = &util.Feed{}
node := NewNode(ctx)
return rpcServer, ctx.Clock, node, stopper
}
Example 14: TestCorruptedClusterID
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
defer leaktest.AfterTest(t)
eagerStopper := util.NewStopper()
e := engine.NewInMem(proto.Attributes{}, 1<<20)
_, err := BootstrapCluster("cluster-1", []engine.Engine{e}, eagerStopper)
if err != nil {
t.Fatal(err)
}
eagerStopper.Stop()
// Set the cluster ID to an empty string.
sIdent := proto.StoreIdent{
ClusterID: "",
NodeID: 1,
StoreID: 1,
}
if err = engine.MVCCPutProto(e, nil, keys.StoreIdentKey(), proto.ZeroTimestamp, nil, &sIdent); err != nil {
t.Fatal(err)
}
engines := []engine.Engine{e}
server, _, node, stopper := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
if err := node.start(server, engines, proto.Attributes{}, stopper); err == nil {
t.Errorf("unexpected success")
}
stopper.Stop()
}
Example 15: runStart
// runStart starts the cockroach node using --stores as the list of
// storage devices ("stores") on this machine and --gossip as the list
// of "well-known" hosts used to join this node to the cockroach
// cluster via the gossip network.
func runStart(cmd *cobra.Command, args []string) {
info := util.GetBuildInfo()
log.Infof("build Vers: %s", info.Vers)
log.Infof("build Tag: %s", info.Tag)
log.Infof("build Time: %s", info.Time)
log.Infof("build Deps: %s", info.Deps)
// Default user for servers.
Context.User = security.NodeUser
// First initialize the Context as it is used in other places.
err := Context.Init("start")
if err != nil {
log.Errorf("failed to initialize context: %s", err)
return
}
log.Info("starting cockroach cluster")
stopper := util.NewStopper()
stopper.AddWorker()
s, err := server.NewServer(Context, stopper)
if err != nil {
log.Errorf("failed to start Cockroach server: %s", err)
return
}
err = s.Start(false)
if err != nil {
log.Errorf("cockroach server exited with error: %s", err)
return
}
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, os.Kill)
// TODO(spencer): move this behind a build tag.
signal.Notify(signalCh, syscall.SIGTERM)
// Block until one of the signals above is received or the stopper
// is stopped externally (for example, via the quit endpoint).
select {
case <-stopper.ShouldStop():
stopper.SetStopped()
case <-signalCh:
log.Infof("initiating graceful shutdown of server")
stopper.SetStopped()
go func() {
s.Stop()
}()
}
select {
case <-signalCh:
log.Warningf("second signal received, initiating hard shutdown")
case <-time.After(time.Minute):
log.Warningf("time limit reached, initiating hard shutdown")
return
case <-stopper.IsStopped():
log.Infof("server drained and shutdown completed")
}
log.Flush()
}