This article collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/sql.NewLeaseManager. If you are wondering what NewLeaseManager does, how it is used, or what it looks like in practice, the curated code examples below may help.
Eight NewLeaseManager code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang examples.
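Before the examples, a minimal sketch of the call itself. This is not taken from CockroachDB source; it only mirrors the newer five-argument signature used in Examples 2, 6, 7 and 8 (node ID, a client.DB value, an HLC clock, testing knobs, a stopper). Examples 1, 3, 4 and 5 come from an earlier revision that drops the last two arguments, so the two forms will not compile against the same library version.

package example

import (
	"github.com/cockroachdb/cockroach/client"
	"github.com/cockroachdb/cockroach/sql"
	"github.com/cockroachdb/cockroach/util/hlc"
	"github.com/cockroachdb/cockroach/util/stop"
)

// newLeaseManager constructs a LeaseManager the way the servers below do:
// node ID 0, a dereferenced *client.DB, a wall-clock HLC, empty testing
// knobs, and a stopper that bounds background work.
func newLeaseManager(db *client.DB, stopper *stop.Stopper) *sql.LeaseManager {
	clock := hlc.NewClock(hlc.UnixNano)
	return sql.NewLeaseManager(0, *db, clock, sql.LeaseManagerTestingKnobs{}, stopper)
}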
Example 1: node
// node returns the lease manager for the given node ID, creating and
// caching one on first use.
func (t *leaseTest) node(nodeID uint32) *csql.LeaseManager {
	mgr := t.nodes[nodeID]
	if mgr == nil {
		mgr = csql.NewLeaseManager(nodeID, *t.server.DB(), t.server.Clock())
		t.nodes[nodeID] = mgr
	}
	return mgr
}
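The helper above is a plain lazy-initialization cache. Here is a self-contained sketch of the same pattern with stand-in types (these are illustrative, not the real csql types):

package main

import "fmt"

type manager struct{ nodeID uint32 }

type cluster struct {
	nodes map[uint32]*manager
}

// node constructs an entry on first access, then reuses it, exactly as the
// leaseTest helper does with csql.NewLeaseManager.
func (c *cluster) node(nodeID uint32) *manager {
	mgr := c.nodes[nodeID]
	if mgr == nil {
		mgr = &manager{nodeID: nodeID}
		c.nodes[nodeID] = mgr
	}
	return mgr
}

func main() {
	c := &cluster{nodes: map[uint32]*manager{}}
	fmt.Println(c.node(1) == c.node(1)) // true: the same instance is reused
}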
Example 2: node
// node returns the lease manager for the given node ID, creating and
// caching one on first use. This variant passes testing knobs and a
// stopper, matching the newer NewLeaseManager signature.
func (t *leaseTest) node(nodeID uint32) *csql.LeaseManager {
	mgr := t.nodes[nodeID]
	if mgr == nil {
		mgr = csql.NewLeaseManager(
			nodeID, *t.kvDB,
			t.server.Clock(),
			t.leaseManagerTestingKnobs,
			t.server.Stopper(),
		)
		t.nodes[nodeID] = mgr
	}
	return mgr
}
Example 3: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}
	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)
	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})
	s.rpc = crpc.NewServer(s.rpcContext)
	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)
	feed := util.NewFeed(stopper)
	tracer := tracer.NewTracer(feed, ctx.Addr)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.clock, RPCContext: s.rpcContext}, s.gossip)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, tracer, s.stopper)
	s.db = client.NewDB(sender)
	var err error
	s.raftTransport, err = newRPCTransport(s.gossip, s.rpc, s.rpcContext)
	if err != nil {
		return nil, err
	}
	s.stopper.AddCloser(s.raftTransport)
	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}
	leaseMgr := sql.NewLeaseManager(0, *s.db, s.clock)
	leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	s.sqlServer = sql.MakeServer(&s.ctx.Context, *s.db, s.gossip, leaseMgr)
	if err := s.sqlServer.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}
	s.pgServer = pgwire.NewServer(&pgwire.Context{
		Context:  &s.ctx.Context,
		Executor: s.sqlServer.Executor,
		Stopper:  stopper,
	})
	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          tracer,
		StorePool:       s.storePool,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}
	s.node = NewNode(nCtx)
	s.admin = newAdminServer(s.db, s.stopper)
	s.status = newStatusServer(s.db, s.gossip, ctx)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	return s, nil
}
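A note on the lease-manager wiring above: the server always passes node ID 0 for its own manager, then immediately calls RefreshLeases so gossip-delivered descriptor updates invalidate cached leases. A sketch of that two-step idiom in isolation, assuming the older three-argument signature this example uses:

package example

import (
	"github.com/cockroachdb/cockroach/client"
	"github.com/cockroachdb/cockroach/gossip"
	"github.com/cockroachdb/cockroach/sql"
	"github.com/cockroachdb/cockroach/util/hlc"
	"github.com/cockroachdb/cockroach/util/stop"
)

// setupLeaseManager isolates the idiom from the server code above: build the
// manager for the server's own node (ID 0), then subscribe it to gossip so
// that schema changes elsewhere in the cluster refresh cached leases.
func setupLeaseManager(db *client.DB, clock *hlc.Clock, stopper *stop.Stopper, g *gossip.Gossip) *sql.LeaseManager {
	leaseMgr := sql.NewLeaseManager(0, *db, clock)
	leaseMgr.RefreshLeases(stopper, db, g)
	return leaseMgr
}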
Example 4: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}
	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)
	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})
	s.rpc = crpc.NewServer(s.rpcContext)
	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers, stopper)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)
	feed := util.NewFeed(stopper)
	// A custom RetryOptions is created which uses stopper.ShouldDrain() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error
	// (HTTP Code 5xx). This is the correct error for a server to return when
	// it is shutting down, and is normally retryable in a cluster
	// environment. However, on a single-node setup (such as a test), retries
	// will never succeed because the only server has been shut down; thus,
	// the DistSender needs to know that it should not retry in this
	// situation.
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	ds := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnRegistry := metric.NewRegistry()
	txnMetrics := kv.NewTxnMetrics(txnRegistry)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, s.Tracer, s.stopper, txnMetrics)
	s.db = client.NewDB(sender)
	s.grpc = grpc.NewServer()
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender, stopper)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	sqlRegistry := metric.NewRegistry()
	s.sqlExecutor = sql.NewExecutor(*s.db, s.gossip, s.leaseMgr, s.stopper, sqlRegistry)
	s.pgServer = pgwire.MakeServer(&s.ctx.Context, s.sqlExecutor, sqlRegistry)
	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          s.Tracer,
		StorePool:       s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}
	//......... (the rest of this example is omitted) .........
Example 5: TestSchemaChangeProcess
func TestSchemaChangeProcess(t *testing.T) {
	defer leaktest.AfterTest(t)
	// The descriptor changes made must have an immediate effect,
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations.
	defer csql.TestDisableAsyncSchemaChangeExec()()
	server, sqlDB, kvDB := setup(t)
	defer cleanup(server, sqlDB)

	var id = csql.ID(keys.MaxReservedDescID + 2)
	var node = roachpb.NodeID(2)
	db := server.DB()
	leaseMgr := csql.NewLeaseManager(0, *db, hlc.NewClock(hlc.UnixNano))
	changer := csql.NewSchemaChangerForTesting(id, 0, node, *db, leaseMgr)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo(v));
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	nameKey := csql.MakeNameMetadataKey(keys.MaxReservedDescID+1, "test")
	gr, err := kvDB.Get(nameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !gr.Exists() {
		t.Fatalf("Name entry %q does not exist", nameKey)
	}
	descKey := csql.MakeDescMetadataKey(csql.ID(gr.ValueInt()))
	desc := &csql.Descriptor{}

	// Check that MaybeIncrementVersion doesn't increment the version
	// when the up_version bit is not set.
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	expectedVersion := desc.GetTable().Version
	if err := changer.MaybeIncrementVersion(); err != nil {
		t.Fatal(err)
	}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	newVersion := desc.GetTable().Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err := changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", desc.GetTable())
	}

	// Check that MaybeIncrementVersion increments the version correctly.
	expectedVersion++
	desc.GetTable().UpVersion = true
	if err := kvDB.Put(descKey, desc); err != nil {
		t.Fatal(err)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", desc.GetTable())
	}
	if err := changer.MaybeIncrementVersion(); err != nil {
		t.Fatal(err)
	}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	newVersion = desc.GetTable().Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", desc.GetTable())
	}

	// Check that RunStateMachineBeforeBackfill doesn't do anything
	// if there are no mutations queued.
	if err := changer.RunStateMachineBeforeBackfill(); err != nil {
		t.Fatal(err)
	}
	if err := kvDB.GetProto(descKey, desc); err != nil {
		t.Fatal(err)
	}
	//......... (the rest of this example is omitted) .........
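The next example is the same test against a later revision, where MaybeIncrementVersion returns the updated descriptor instead of forcing the caller to re-read it with kvDB.GetProto. A runnable stand-in (toy types, not the real API) showing just that shape change:

package main

import "fmt"

type descriptor struct{ version int }

// oldMaybeIncrementVersion mirrors the Example 5 shape: mutate storage and
// return only an error; the caller must re-read the stored descriptor.
func oldMaybeIncrementVersion(stored *descriptor, upVersion bool) error {
	if upVersion {
		stored.version++
	}
	return nil
}

// newMaybeIncrementVersion mirrors the Example 6 shape: also hand the
// updated descriptor back to the caller.
func newMaybeIncrementVersion(stored *descriptor, upVersion bool) (*descriptor, error) {
	if upVersion {
		stored.version++
	}
	return stored, nil
}

func main() {
	d := &descriptor{version: 1}
	_ = oldMaybeIncrementVersion(d, true)
	fmt.Println(d.version) // 2: caller re-reads the stored descriptor
	d2, _ := newMaybeIncrementVersion(d, true)
	fmt.Println(d2.version) // 3: updated descriptor returned directly
}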
Example 6: TestSchemaChangeProcess
func TestSchemaChangeProcess(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect,
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	params, _ := createTestServerParams()
	// Disable external processing of mutations.
	params.Knobs.SQLSchemaChangeManager = &csql.SchemaChangeManagerTestingKnobs{
		AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	var id = sqlbase.ID(keys.MaxReservedDescID + 2)
	var node = roachpb.NodeID(2)
	stopper := stop.NewStopper()
	leaseMgr := csql.NewLeaseManager(0, *kvDB, hlc.NewClock(hlc.UnixNano), csql.LeaseManagerTestingKnobs{}, stopper)
	defer stopper.Stop()
	changer := csql.NewSchemaChangerForTesting(id, 0, node, *kvDB, leaseMgr)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo(v));
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	expectedVersion := tableDesc.Version

	desc, err := changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	newVersion := tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err := changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that MaybeIncrementVersion increments the version correctly.
	expectedVersion++
	tableDesc.UpVersion = true
	if err := kvDB.Put(
		sqlbase.MakeDescMetadataKey(tableDesc.ID),
		sqlbase.WrapDescriptor(tableDesc),
	); err != nil {
		t.Fatal(err)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", desc.GetTable())
	}
	desc, err = changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	savedTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in returned desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	newVersion = savedTableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in saved desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that RunStateMachineBeforeBackfill doesn't do anything
	// if there are no mutations queued.
	if err := changer.RunStateMachineBeforeBackfill(); err != nil {
		t.Fatal(err)
	}
	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	//......... (the rest of this example is omitted) .........
Example 7: NewServer
	//......... (the beginning of this example is omitted) .........
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error
	// (HTTP Code 5xx). This is the correct error for a server to return when
	// it is shutting down, and is normally retryable in a cluster
	// environment. However, on a single-node setup (such as a test), retries
	// will never succeed because the only server has been shut down; thus,
	// the DistSender needs to know that it should not retry in this
	// situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	distSenderCfg := kv.DistSenderConfig{
		Ctx:             s.Ctx(),
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}
	s.distSender = kv.NewDistSender(&distSenderCfg, s.gossip)
	txnMetrics := kv.MakeTxnMetrics()
	s.registry.AddMetricStruct(txnMetrics)
	s.txnCoordSender = kv.NewTxnCoordSender(s.Ctx(), s.distSender, s.clock, srvCtx.Linearizable,
		s.stopper, txnMetrics)
	s.db = client.NewDB(s.txnCoordSender)
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	s.kvDB = kv.NewDBServer(s.ctx.Context, s.txnCoordSender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)
	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if srvCtx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *srvCtx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	// Set up the DistSQL server
	distSQLCfg := distsql.ServerConfig{
		Context:    s.Ctx(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCfg)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)
	// Set up Executor
	execCfg := sql.ExecutorConfig{
		Context:      s.Ctx(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if srvCtx.TestingKnobs.SQLExecutor != nil {
		execCfg.TestingKnobs = srvCtx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		execCfg.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}
	s.sqlExecutor = sql.NewExecutor(execCfg, s.stopper)
	s.registry.AddMetricStruct(s.sqlExecutor)
	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor)
	s.registry.AddMetricStruct(s.pgServer.Metrics())
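The knobs plumbing above (srvCtx.TestingKnobs.SQLLeaseManager type-asserted to *sql.LeaseManagerTestingKnobs) is a generic injection slot: an interface{} field that tests may fill with a concrete knobs struct. A self-contained sketch of the pattern with stand-in types (not the real cockroach ones):

package main

import "fmt"

type LeaseManagerTestingKnobs struct{ DisableAsync bool }

type TestingKnobs struct {
	// SQLLeaseManager holds an arbitrary knobs payload, as in the example.
	SQLLeaseManager interface{}
}

func main() {
	tk := TestingKnobs{SQLLeaseManager: &LeaseManagerTestingKnobs{DisableAsync: true}}
	var lmKnobs LeaseManagerTestingKnobs
	if tk.SQLLeaseManager != nil {
		// The type assertion recovers the concrete knobs struct; a nil check
		// guards the default (no knobs injected) case.
		lmKnobs = *tk.SQLLeaseManager.(*LeaseManagerTestingKnobs)
	}
	fmt.Println(lmKnobs.DisableAsync) // true
}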
Example 8: NewServer
// NewServer creates a Server from a server.Context.
func NewServer(ctx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, errors.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}
	if ctx.Insecure {
		log.Warning(context.TODO(), "running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}
	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)
	s.rpcContext = rpc.NewContext(ctx.Context, s.clock, s.stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(context.TODO(), err)
		}
	}
	s.grpc = rpc.NewServer(s.rpcContext)
	s.registry = metric.NewRegistry()
	s.gossip = gossip.New(s.rpcContext, s.grpc, s.ctx.GossipBootstrapResolvers, s.stopper, s.registry)
	s.storePool = storage.NewStorePool(
		s.gossip,
		s.clock,
		s.rpcContext,
		ctx.ReservationsEnabled,
		ctx.TimeUntilStoreDead,
		s.stopper,
	)
	// A custom RetryOptions is created which uses stopper.ShouldQuiesce() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error
	// (HTTP Code 5xx). This is the correct error for a server to return when
	// it is shutting down, and is normally retryable in a cluster
	// environment. However, on a single-node setup (such as a test), retries
	// will never succeed because the only server has been shut down; thus,
	// the DistSender needs to know that it should not retry in this
	// situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = s.stopper.ShouldQuiesce()
	s.distSender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnMetrics := kv.NewTxnMetrics(s.registry)
	sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
		s.stopper, txnMetrics)
	s.db = client.NewDB(sender)
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)
	s.kvDB = kv.NewDBServer(s.ctx.Context, sender, s.stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)
	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if ctx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs, s.stopper)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	// Set up the DistSQL server
	distSQLCtx := distsql.ServerContext{
		Context:    context.Background(),
		DB:         s.db,
		RPCContext: s.rpcContext,
	}
	s.distSQLServer = distsql.NewServer(distSQLCtx)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)
	// Set up Executor
	eCtx := sql.ExecutorContext{
		Context:      context.Background(),
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
		DistSQLSrv:   s.distSQLServer,
	}
	if ctx.TestingKnobs.SQLExecutor != nil {
	//......... (the rest of this example is omitted) .........