This article collects typical usage examples of the MakeTxnMetrics function from the Golang package github.com/cockroachdb/cockroach/pkg/kv. If you are wondering what Golang MakeTxnMetrics does, how to use it, or where to find examples of it in practice, the hand-picked function code examples below may help.
The sections below show 9 code examples of the MakeTxnMetrics function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
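Before the examples, here is a minimal sketch of the pattern most of the snippets below share: construct the transaction metrics once with MakeTxnMetrics, register them on a metric.Registry, and hand them to kv.NewTxnCoordSender (or to NewNode). The helper name newRegisteredTxnMetrics, the return type kv.TxnMetrics, and the util/metric import path are assumptions inferred from the examples rather than taken from a specific CockroachDB release.

package main

import (
	"time"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/util/metric"
)

// newRegisteredTxnMetrics is a hypothetical helper: it builds the
// transaction metrics sampled over the given interval and exposes them on
// the registry, mirroring what NewServer does in Example 7 below.
func newRegisteredTxnMetrics(registry *metric.Registry, sampleInterval time.Duration) kv.TxnMetrics {
	txnMetrics := kv.MakeTxnMetrics(sampleInterval)
	registry.AddMetricStruct(txnMetrics)
	return txnMetrics
}

In tests, the same value is typically produced inline with kv.MakeTxnMetrics(metric.TestSampleInterval) and passed directly to the component under test, as the examples below show.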
Example 1: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
cfg := storage.StoreConfig{}
stopper := stop.NewStopper()
cfg.Clock = hlc.NewClock(hlc.UnixNano)
nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
cfg.ScanInterval = 10 * time.Hour
cfg.ConsistencyCheckInterval = 10 * time.Hour
grpcServer := rpc.NewServer(nodeRPCContext)
serverCfg := makeTestConfig()
cfg.Gossip = gossip.NewTest(
0,
nodeRPCContext,
grpcServer,
serverCfg.GossipBootstrapResolvers,
stopper,
metric.NewRegistry(),
)
ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
if err != nil {
t.Fatal(err)
}
if gossipBS != nil {
// Handle possibility of a :0 port specification.
if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
gossipBS = ln.Addr()
}
r, err := resolver.NewResolverFromAddress(gossipBS)
if err != nil {
t.Fatalf("bad gossip address %s: %s", gossipBS, err)
}
cfg.Gossip.SetResolvers([]resolver.Resolver{r})
cfg.Gossip.Start(ln.Addr())
}
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = stopper.ShouldQuiesce()
distSender := kv.NewDistSender(kv.DistSenderConfig{
Clock: cfg.Clock,
RPCContext: nodeRPCContext,
RPCRetryOptions: &retryOpts,
}, cfg.Gossip)
cfg.AmbientCtx.Tracer = tracing.NewTracer()
sender := kv.NewTxnCoordSender(
cfg.AmbientCtx,
distSender,
cfg.Clock,
false,
stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval),
)
cfg.DB = client.NewDB(sender)
cfg.Transport = storage.NewDummyRaftTransport()
cfg.MetricsSampleInterval = metric.TestSampleInterval
node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
roachpb.RegisterInternalServer(grpcServer, node)
return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
Example 2: TestCorruptedClusterID
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
defer leaktest.AfterTest(t)()
e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
defer e.Close()
if _, err := bootstrapCluster(
storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
); err != nil {
t.Fatal(err)
}
// Set the cluster ID to the empty UUID.
sIdent := roachpb.StoreIdent{
ClusterID: uuid.UUID{},
NodeID: 1,
StoreID: 1,
}
if err := engine.MVCCPutProto(context.Background(), e, nil, keys.StoreIdentKey(), hlc.ZeroTimestamp, nil, &sIdent); err != nil {
t.Fatal(err)
}
engines := []engine.Engine{e}
_, serverAddr, _, node, stopper := createTestNode(util.TestAddr, engines, nil, t)
stopper.Stop()
if err := node.start(context.Background(), serverAddr, engines, roachpb.Attributes{}, roachpb.Locality{}); !testutils.IsError(err, "unidentified store") {
t.Errorf("unexpected error %v", err)
}
}
Example 3: TestBootstrapNewStore
// TestBootstrapNewStore starts a cluster with two unbootstrapped
// stores and verifies both stores are added and started.
func TestBootstrapNewStore(t *testing.T) {
defer leaktest.AfterTest(t)()
e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
if _, err := bootstrapCluster(
storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
); err != nil {
t.Fatal(err)
}
// Start a new node with two new stores which will require bootstrapping.
engines := Engines([]engine.Engine{
e,
engine.NewInMem(roachpb.Attributes{}, 1<<20),
engine.NewInMem(roachpb.Attributes{}, 1<<20),
})
defer engines.Close()
_, _, node, stopper := createAndStartTestNode(
util.TestAddr,
engines,
util.TestAddr,
roachpb.Locality{},
t,
)
defer stopper.Stop()
// Non-initialized stores (in this case the new in-memory-based
// stores) will be bootstrapped by the node upon start. This happens
// in a goroutine, so we'll have to wait a bit until we can find the
// new node.
util.SucceedsSoon(t, func() error {
if n := node.stores.GetStoreCount(); n != 3 {
return errors.Errorf("expected 3 stores but got %d", n)
}
return nil
})
// Check whether all stores are started properly.
if err := node.stores.VisitStores(func(s *storage.Store) error {
if !s.IsStarted() {
return errors.Errorf("failed to start store: %s", s)
}
return nil
}); err != nil {
t.Error(err)
}
}
Example 4: TestBootstrapCluster
// TestBootstrapCluster verifies the results of bootstrapping a
// cluster. Uses an in-memory engine.
func TestBootstrapCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
stopper := stop.NewStopper()
defer stopper.Stop()
e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
stopper.AddCloser(e)
if _, err := bootstrapCluster(
storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
); err != nil {
t.Fatal(err)
}
// Scan the complete contents of the local database directly from the engine.
rows, _, _, err := engine.MVCCScan(context.Background(), e, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, true, nil)
if err != nil {
t.Fatal(err)
}
var foundKeys keySlice
for _, kv := range rows {
foundKeys = append(foundKeys, kv.Key)
}
var expectedKeys = keySlice{
testutils.MakeKey(roachpb.Key("\x02"), roachpb.KeyMax),
testutils.MakeKey(roachpb.Key("\x03"), roachpb.KeyMax),
roachpb.Key("\x04node-idgen"),
roachpb.Key("\x04store-idgen"),
}
// Add the initial keys for sql.
for _, kv := range GetBootstrapSchema().GetInitialValues() {
expectedKeys = append(expectedKeys, kv.Key)
}
// Re-sort the list. The SQL values are not sorted.
sort.Sort(expectedKeys)
if !reflect.DeepEqual(foundKeys, expectedKeys) {
t.Errorf("expected keys mismatch:\n%s\n -- vs. -- \n\n%s",
formatKeys(foundKeys), formatKeys(expectedKeys))
}
// TODO(spencer): check values.
}
Example 5: TestMultiRangeScanWithMaxResults
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
defer leaktest.AfterTest(t)()
testCases := []struct {
splitKeys []roachpb.Key
keys []roachpb.Key
}{
{[]roachpb.Key{roachpb.Key("m")},
[]roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}},
{[]roachpb.Key{roachpb.Key("h"), roachpb.Key("q")},
[]roachpb.Key{roachpb.Key("b"), roachpb.Key("f"), roachpb.Key("k"),
roachpb.Key("r"), roachpb.Key("w"), roachpb.Key("y")}},
}
for i, tc := range testCases {
s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop()
ts := s.(*TestServer)
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = ts.stopper.ShouldQuiesce()
ds := kv.NewDistSender(kv.DistSenderConfig{
Clock: s.Clock(),
RPCContext: s.RPCContext(),
RPCRetryOptions: &retryOpts,
}, ts.Gossip())
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
tds := kv.NewTxnCoordSender(
ambient,
ds,
ts.Clock(),
ts.Cfg.Linearizable,
ts.stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval),
)
for _, sk := range tc.splitKeys {
if err := ts.node.storeCfg.DB.AdminSplit(context.TODO(), sk); err != nil {
t.Fatal(err)
}
}
for _, k := range tc.keys {
put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
if _, err := client.SendWrapped(context.Background(), tds, put); err != nil {
t.Fatal(err)
}
}
// Try every possible ScanRequest startKey.
for start := 0; start < len(tc.keys); start++ {
// Try every possible maxResults, from 1 to beyond the size of key array.
for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
scan := roachpb.NewScan(tc.keys[start], tc.keys[len(tc.keys)-1].Next())
reply, err := client.SendWrappedWith(
context.Background(), tds, roachpb.Header{MaxSpanRequestKeys: int64(maxResults)}, scan,
)
if err != nil {
t.Fatal(err)
}
rows := reply.(*roachpb.ScanResponse).Rows
if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
t.Errorf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
t.Errorf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
}
}
}
}
}
Example 6: TestMultiRangeScanDeleteRange
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
defer leaktest.AfterTest(t)()
s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
defer s.Stopper().Stop()
ts := s.(*TestServer)
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = ts.stopper.ShouldQuiesce()
ds := kv.NewDistSender(kv.DistSenderConfig{
Clock: s.Clock(),
RPCContext: s.RPCContext(),
RPCRetryOptions: &retryOpts,
}, ts.Gossip())
ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
tds := kv.NewTxnCoordSender(
ambient,
ds,
s.Clock(),
ts.Cfg.Linearizable,
ts.stopper,
kv.MakeTxnMetrics(metric.TestSampleInterval),
)
if err := ts.node.storeCfg.DB.AdminSplit(context.TODO(), "m"); err != nil {
t.Fatal(err)
}
writes := []roachpb.Key{roachpb.Key("a"), roachpb.Key("z")}
get := &roachpb.GetRequest{
Span: roachpb.Span{Key: writes[0]},
}
get.EndKey = writes[len(writes)-1]
if _, err := client.SendWrapped(context.Background(), tds, get); err == nil {
t.Errorf("able to call Get with a key range: %v", get)
}
var delTS hlc.Timestamp
for i, k := range writes {
put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k))
if _, err := client.SendWrapped(context.Background(), tds, put); err != nil {
t.Fatal(err)
}
scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next())
reply, err := client.SendWrapped(context.Background(), tds, scan)
if err != nil {
t.Fatal(err)
}
sr := reply.(*roachpb.ScanResponse)
if sr.Txn != nil {
// This was the other way around at some point in the past.
// Same below for Delete, etc.
t.Errorf("expected no transaction in response header")
}
if rows := sr.Rows; len(rows) != i+1 {
t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
}
}
del := &roachpb.DeleteRangeRequest{
Span: roachpb.Span{
Key: writes[0],
EndKey: roachpb.Key(writes[len(writes)-1]).Next(),
},
ReturnKeys: true,
}
reply, err := client.SendWrappedWith(context.Background(), tds, roachpb.Header{Timestamp: delTS}, del)
if err != nil {
t.Fatal(err)
}
dr := reply.(*roachpb.DeleteRangeResponse)
if dr.Txn != nil {
t.Errorf("expected no transaction in response header")
}
if !reflect.DeepEqual(dr.Keys, writes) {
t.Errorf("expected keys %v to be deleted, but got %v instead", writes, dr.Keys)
}
scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next())
txn := &roachpb.Transaction{Name: "MyTxn"}
reply, err = client.SendWrappedWith(context.Background(), tds, roachpb.Header{Txn: txn}, scan)
if err != nil {
t.Fatal(err)
}
sr := reply.(*roachpb.ScanResponse)
if txn := sr.Txn; txn == nil || txn.Name != "MyTxn" {
t.Errorf("wanted Txn to persist, but it changed to %v", txn)
}
if rows := sr.Rows; len(rows) > 0 {
t.Fatalf("scan after delete returned rows: %v", rows)
}
}
Example 7: NewServer
// NewServer creates a Server from a server Config.
func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) {
if _, err := net.ResolveTCPAddr("tcp", cfg.AdvertiseAddr); err != nil {
return nil, errors.Errorf("unable to resolve RPC address %q: %v", cfg.AdvertiseAddr, err)
}
if cfg.AmbientCtx.Tracer == nil {
cfg.AmbientCtx.Tracer = tracing.NewTracer()
}
// Try loading the TLS configs before anything else.
if _, err := cfg.GetServerTLSConfig(); err != nil {
return nil, err
}
if _, err := cfg.GetClientTLSConfig(); err != nil {
return nil, err
}
s := &Server{
mux: http.NewServeMux(),
clock: hlc.NewClock(hlc.UnixNano, cfg.MaxOffset),
stopper: stopper,
cfg: cfg,
}
// Add a dynamic log tag value for the node ID.
//
// We need to pass an ambient context to the various server components, but we
// won't know the node ID until we Start(). At that point it's too late to
// change the ambient contexts in the components (various background processes
// will have already started using them).
//
// NodeIDContainer allows us to add the log tag to the context now and update
// the value asynchronously. It's not significantly more expensive than a
// regular tag since it's just doing an (atomic) load when a log/trace message
// is constructed. The node ID is set by the Store if this host was
// bootstrapped; otherwise a new one is allocated in Node.
s.cfg.AmbientCtx.AddLogTag("n", &s.nodeIDContainer)
ctx := s.AnnotateCtx(context.Background())
if s.cfg.Insecure {
log.Warning(ctx, "running in insecure mode, this is strongly discouraged. See --insecure.")
}
s.rpcContext = rpc.NewContext(s.cfg.AmbientCtx, s.cfg.Config, s.clock, s.stopper)
s.rpcContext.HeartbeatCB = func() {
if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
log.Fatal(ctx, err)
}
}
s.grpc = rpc.NewServer(s.rpcContext)
s.registry = metric.NewRegistry()
s.gossip = gossip.New(
s.cfg.AmbientCtx,
&s.nodeIDContainer,
s.rpcContext,
s.grpc,
s.cfg.GossipBootstrapResolvers,
s.stopper,
s.registry,
)
s.storePool = storage.NewStorePool(
s.cfg.AmbientCtx,
s.gossip,
s.clock,
s.rpcContext,
s.cfg.TimeUntilStoreDead,
s.stopper,
/* deterministic */ false,
)
// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
// the Closer. This prevents infinite retry loops from occurring during
// graceful server shutdown.
//
// Such a loop occurs when the DistSender attempts a connection to the
// local server during shutdown, and receives an internal server error (HTTP
// Code 5xx). This is the correct error for a server to return when it is
// shutting down, and is normally retryable in a cluster environment.
// However, on a single-node setup (such as a test), retries will never
// succeed because the only server has been shut down; thus, the
// DistSender needs to know that it should not retry in this situation.
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = s.stopper.ShouldQuiesce()
distSenderCfg := kv.DistSenderConfig{
AmbientCtx: s.cfg.AmbientCtx,
Clock: s.clock,
RPCContext: s.rpcContext,
RPCRetryOptions: &retryOpts,
}
s.distSender = kv.NewDistSender(distSenderCfg, s.gossip)
txnMetrics := kv.MakeTxnMetrics(s.cfg.MetricsSampleInterval)
s.registry.AddMetricStruct(txnMetrics)
s.txnCoordSender = kv.NewTxnCoordSender(
s.cfg.AmbientCtx,
s.distSender,
s.clock,
s.cfg.Linearizable,
s.stopper,
//......... remainder of this code omitted .........
Example 8: TestStartNodeWithLocality
// TestStartNodeWithLocality creates a new node and store and starts them with a
// collection of different localities.
func TestStartNodeWithLocality(t *testing.T) {
defer leaktest.AfterTest(t)()
testLocalityWitNewNode := func(locality roachpb.Locality) {
e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
defer e.Close()
if _, err := bootstrapCluster(
storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
); err != nil {
t.Fatal(err)
}
_, _, node, stopper := createAndStartTestNode(
util.TestAddr,
[]engine.Engine{e},
util.TestAddr,
locality,
t,
)
defer stopper.Stop()
// Check the node to make sure the locality was propagated to its
// nodeDescriptor.
if !reflect.DeepEqual(node.Descriptor.Locality, locality) {
t.Fatalf("expected node locality to be %s, but it was %s", locality, node.Descriptor.Locality)
}
// Check the store to make sure the locality was propagated to its
// nodeDescriptor.
if err := node.stores.VisitStores(func(store *storage.Store) error {
desc, err := store.Descriptor()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(desc.Node.Locality, locality) {
t.Fatalf("expected store's node locality to be %s, but it was %s", locality, desc.Node.Locality)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
testCases := []roachpb.Locality{
{},
{
Tiers: []roachpb.Tier{
{Key: "a", Value: "b"},
},
},
{
Tiers: []roachpb.Tier{
{Key: "a", Value: "b"},
{Key: "c", Value: "d"},
{Key: "e", Value: "f"},
},
},
}
for _, testCase := range testCases {
testLocalityWitNewNode(testCase)
}
}
Example 9: TestNodeJoin
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
defer leaktest.AfterTest(t)()
engineStopper := stop.NewStopper()
defer engineStopper.Stop()
e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
engineStopper.AddCloser(e)
if _, err := bootstrapCluster(
storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
); err != nil {
t.Fatal(err)
}
// Start the bootstrap node.
engines1 := []engine.Engine{e}
_, server1Addr, node1, stopper1 := createAndStartTestNode(
util.TestAddr,
engines1,
util.TestAddr,
roachpb.Locality{},
t,
)
defer stopper1.Stop()
// Create a new node.
e2 := engine.NewInMem(roachpb.Attributes{}, 1<<20)
engineStopper.AddCloser(e2)
engines2 := []engine.Engine{e2}
_, server2Addr, node2, stopper2 := createAndStartTestNode(
util.TestAddr,
engines2,
server1Addr,
roachpb.Locality{},
t,
)
defer stopper2.Stop()
// Verify new node is able to bootstrap its store.
util.SucceedsSoon(t, func() error {
if sc := node2.stores.GetStoreCount(); sc != 1 {
return errors.Errorf("GetStoreCount() expected 1; got %d", sc)
}
return nil
})
// Verify node1 sees node2 via gossip and vice versa.
node1Key := gossip.MakeNodeIDKey(node1.Descriptor.NodeID)
node2Key := gossip.MakeNodeIDKey(node2.Descriptor.NodeID)
util.SucceedsSoon(t, func() error {
var nodeDesc1 roachpb.NodeDescriptor
if err := node1.storeCfg.Gossip.GetInfoProto(node2Key, &nodeDesc1); err != nil {
return err
}
if addr2Str, server2AddrStr := nodeDesc1.Address.String(), server2Addr.String(); addr2Str != server2AddrStr {
return errors.Errorf("addr2 gossip %s doesn't match addr2 address %s", addr2Str, server2AddrStr)
}
var nodeDesc2 roachpb.NodeDescriptor
if err := node2.storeCfg.Gossip.GetInfoProto(node1Key, &nodeDesc2); err != nil {
return err
}
if addr1Str, server1AddrStr := nodeDesc2.Address.String(), server1Addr.String(); addr1Str != server1AddrStr {
return errors.Errorf("addr1 gossip %s doesn't match addr1 address %s", addr1Str, server1AddrStr)
}
return nil
})
}