This article collects typical usage examples of the Go method github.com/cockroachdb/cockroach/pkg/util/syncutil.Mutex.Lock. If you have been wondering what Mutex.Lock does in Go and how to use it, the curated code examples below may help. You can also explore the type the method belongs to, github.com/cockroachdb/cockroach/pkg/util/syncutil.Mutex, for further usage examples.
The following presents 10 code examples of the Mutex.Lock method, ordered by popularity.
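Before the full examples, here is a minimal sketch of the basic pattern they all share: acquire the mutex, mutate shared state, release it (usually via defer). The sketch uses the standard library's sync.Mutex, which exposes the same Lock/Unlock interface the examples call on syncutil.Mutex; the counter type and its names are invented for illustration.

package main

import (
	"fmt"
	"sync"
)

// counter is a hypothetical type whose state is guarded by mu.
type counter struct {
	mu sync.Mutex // in CockroachDB code this would be syncutil.Mutex
	n  int
}

// Inc safely increments the counter from any goroutine.
func (c *counter) Inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

// Value returns a consistent snapshot of the counter.
func (c *counter) Value() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.n
}

func main() {
	var c counter
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Inc()
		}()
	}
	wg.Wait()
	fmt.Println(c.Value()) // always 10
}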
Example 1: WaitForStores
// WaitForStores waits for all of the store descriptors to be gossiped. Servers
// other than the first "bootstrap" their stores asynchronously, but we'd like
// to wait for all of the stores to be initialized before returning the
// TestCluster.
func (tc *TestCluster) WaitForStores(t testing.TB, g *gossip.Gossip) {
// Register a gossip callback for the store descriptors.
var storesMu syncutil.Mutex
stores := map[roachpb.StoreID]struct{}{}
storesDone := make(chan error)
storesDoneOnce := storesDone
unregister := g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStorePrefix),
func(_ string, content roachpb.Value) {
storesMu.Lock()
defer storesMu.Unlock()
if storesDoneOnce == nil {
return
}
var desc roachpb.StoreDescriptor
if err := content.GetProto(&desc); err != nil {
storesDoneOnce <- err
return
}
stores[desc.StoreID] = struct{}{}
if len(stores) == len(tc.Servers) {
close(storesDoneOnce)
storesDoneOnce = nil
}
})
defer unregister()
// Wait for the store descriptors to be gossiped.
for err := range storesDone {
if err != nil {
t.Fatal(err)
}
}
}
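The example above combines two ideas: the gossip callback mutates a map under the mutex, and it signals completion exactly once by closing a channel and then nil-ing the local reference while still holding the lock, so late callbacks are ignored. A stripped-down sketch of that signalling pattern, with invented names (collector, Add, expected), might look like this:

package main

import (
	"fmt"
	"sync"
)

// collector gathers distinct keys and closes done exactly once when the
// expected number of keys has been seen.
type collector struct {
	mu       sync.Mutex
	seen     map[string]struct{}
	done     chan struct{} // set to nil after it has been closed
	expected int
}

func newCollector(expected int) *collector {
	return &collector{
		seen:     map[string]struct{}{},
		done:     make(chan struct{}),
		expected: expected,
	}
}

// Add records a key; it is safe to call from concurrent callbacks.
func (c *collector) Add(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.done == nil {
		return // already signalled; late callbacks are ignored
	}
	c.seen[key] = struct{}{}
	if len(c.seen) == c.expected {
		close(c.done)
		c.done = nil // guarantees the close happens only once
	}
}

func main() {
	c := newCollector(2)
	done := c.done // capture before Add may nil out the field
	go c.Add("store-1")
	go c.Add("store-2")
	<-done
	fmt.Println("all stores seen")
}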
Example 2: TestStopperRunLimitedAsyncTask
func TestStopperRunLimitedAsyncTask(t *testing.T) {
defer leaktest.AfterTest(t)()
s := stop.NewStopper()
defer s.Stop()
const maxConcurrency = 5
const duration = 10 * time.Millisecond
sem := make(chan struct{}, maxConcurrency)
var mu syncutil.Mutex
concurrency := 0
peakConcurrency := 0
var wg sync.WaitGroup
f := func(_ context.Context) {
mu.Lock()
concurrency++
if concurrency > peakConcurrency {
peakConcurrency = concurrency
}
mu.Unlock()
time.Sleep(duration)
mu.Lock()
concurrency--
mu.Unlock()
wg.Done()
}
for i := 0; i < maxConcurrency*3; i++ {
wg.Add(1)
if err := s.RunLimitedAsyncTask(
context.TODO(), sem, true /* wait */, f,
); err != nil {
t.Fatal(err)
}
}
wg.Wait()
if concurrency != 0 {
t.Fatalf("expected 0 concurrency at end of test but got %d", concurrency)
}
if peakConcurrency != maxConcurrency {
t.Fatalf("expected peak concurrency %d to equal max concurrency %d",
peakConcurrency, maxConcurrency)
}
sem = make(chan struct{}, 1)
sem <- struct{}{}
err := s.RunLimitedAsyncTask(
context.TODO(), sem, false /* wait */, func(_ context.Context) {
},
)
if err != stop.ErrThrottled {
t.Fatalf("expected %v; got %v", stop.ErrThrottled, err)
}
}
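The mutex here protects two plain ints, concurrency and peakConcurrency, so that concurrently running tasks can record how many of them overlap. The same bookkeeping can be extracted into a small helper; the gauge type and its method names below are invented for illustration.

package main

import (
	"fmt"
	"sync"
)

// gauge counts how many callers are currently "inside" and remembers the peak.
type gauge struct {
	mu      sync.Mutex
	current int
	peak    int
}

// Enter registers one more concurrent caller.
func (g *gauge) Enter() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.current++
	if g.current > g.peak {
		g.peak = g.current
	}
}

// Exit unregisters a caller.
func (g *gauge) Exit() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.current--
}

// Peak reports the highest concurrency observed so far.
func (g *gauge) Peak() int {
	g.mu.Lock()
	defer g.mu.Unlock()
	return g.peak
}

func main() {
	var g gauge
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			g.Enter()
			defer g.Exit()
			// ... do some work ...
		}()
	}
	wg.Wait()
	fmt.Printf("peak concurrency: %d\n", g.Peak())
}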
Example 3: TestCantLeaseDeletedTable
// Test that we fail to lease a table that was marked for deletion.
func TestCantLeaseDeletedTable(testingT *testing.T) {
defer leaktest.AfterTest(testingT)()
var mu syncutil.Mutex
clearSchemaChangers := false
params, _ := createTestServerParams()
params.Knobs = base.TestingKnobs{
SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
SyncFilter: func(tscc csql.TestingSchemaChangerCollection) {
mu.Lock()
defer mu.Unlock()
if clearSchemaChangers {
tscc.ClearSchemaChangers()
}
},
AsyncExecNotification: asyncSchemaChangerDisabled,
},
}
t := newLeaseTest(testingT, params)
defer t.cleanup()
sql := `
CREATE DATABASE test;
CREATE TABLE test.t(a INT PRIMARY KEY);
`
_, err := t.db.Exec(sql)
if err != nil {
t.Fatal(err)
}
// Block schema changers so that the table we're about to DROP is not actually
// dropped; it will be left in a "deleted" state.
mu.Lock()
clearSchemaChangers = true
mu.Unlock()
// DROP the table
_, err = t.db.Exec(`DROP TABLE test.t`)
if err != nil {
t.Fatal(err)
}
// Make sure we can't get a lease on the descriptor.
tableDesc := sqlbase.GetTableDescriptor(t.kvDB, "test", "t")
// Try to acquire at a bogus version to make sure we don't get back a lease we
// already had.
_, err = t.acquire(1, tableDesc.ID, tableDesc.Version+1)
if !testutils.IsError(err, "table is being dropped") {
t.Fatalf("got a different error than expected: %v", err)
}
}
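The pattern to notice here is that clearSchemaChangers is written by the test goroutine and read by the SyncFilter callback, and both sides take the same mutex, so the callback always sees a consistent value. A minimal sketch of that flag-plus-callback arrangement (the switchable type and its names are hypothetical):

package main

import (
	"fmt"
	"sync"
)

// switchable lets a test flip behavior on and off for a callback that may
// fire on another goroutine.
type switchable struct {
	mu      sync.Mutex
	enabled bool
}

// Set flips the flag; called from the test goroutine.
func (s *switchable) Set(v bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.enabled = v
}

// hook is the callback installed in some testing knob; it reads the flag
// under the same mutex that Set uses to write it.
func (s *switchable) hook() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.enabled {
		fmt.Println("intercepting")
	}
}

func main() {
	var s switchable
	s.hook()    // prints nothing: flag is off
	s.Set(true) // the test decides to start intercepting
	s.hook()    // prints "intercepting"
}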
Example 4: MakeServer
// MakeServer constructs a Server that tracks active connections, closing them
// when signalled by stopper.
func MakeServer(stopper *stop.Stopper, tlsConfig *tls.Config, handler http.Handler) Server {
var mu syncutil.Mutex
activeConns := make(map[net.Conn]struct{})
server := Server{
Server: &http.Server{
Handler: handler,
TLSConfig: tlsConfig,
ConnState: func(conn net.Conn, state http.ConnState) {
mu.Lock()
switch state {
case http.StateNew:
activeConns[conn] = struct{}{}
case http.StateClosed:
delete(activeConns, conn)
}
mu.Unlock()
},
ErrorLog: httpLogger,
},
}
// net/http.(*Server).Serve/http2.ConfigureServer are not thread safe with
// respect to net/http.(*Server).TLSConfig, so we call it synchronously here.
if err := http2.ConfigureServer(server.Server, nil); err != nil {
log.Fatal(context.TODO(), err)
}
stopper.RunWorker(func() {
<-stopper.ShouldStop()
mu.Lock()
for conn := range activeConns {
conn.Close()
}
mu.Unlock()
})
return server
}
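MakeServer closes over mu and activeConns from two places: the ConnState hook, which runs on the connections' goroutines, and the stopper worker, which runs once at shutdown. A self-contained sketch of the same idea using only the standard library, where a returned closeAll function stands in for the stopper worker and all names are illustrative:

package main

import (
	"net"
	"net/http"
	"sync"
)

// newTrackedServer returns an *http.Server that records active connections
// and a function that force-closes all of them, e.g. at shutdown.
func newTrackedServer(handler http.Handler) (*http.Server, func()) {
	var mu sync.Mutex
	activeConns := map[net.Conn]struct{}{}

	srv := &http.Server{
		Handler: handler,
		ConnState: func(conn net.Conn, state http.ConnState) {
			mu.Lock()
			switch state {
			case http.StateNew:
				activeConns[conn] = struct{}{}
			case http.StateClosed:
				delete(activeConns, conn)
			}
			mu.Unlock()
		},
	}
	closeAll := func() {
		mu.Lock()
		for conn := range activeConns {
			conn.Close() // ignore the error; the connection is going away
		}
		mu.Unlock()
	}
	return srv, closeAll
}

func main() {
	srv, closeAll := newTrackedServer(http.NotFoundHandler())
	defer closeAll()
	_ = srv // srv.ListenAndServe() would be called here in a real program
}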
Example 5: TestRangeTransferLease
func TestRangeTransferLease(t *testing.T) {
defer leaktest.AfterTest(t)()
cfg := storage.TestStoreConfig(nil)
var filterMu syncutil.Mutex
var filter func(filterArgs storagebase.FilterArgs) *roachpb.Error
cfg.TestingKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
filterMu.Lock()
filterCopy := filter
filterMu.Unlock()
if filterCopy != nil {
return filterCopy(filterArgs)
}
return nil
}
var waitForTransferBlocked atomic.Value
waitForTransferBlocked.Store(false)
transferBlocked := make(chan struct{})
cfg.TestingKnobs.LeaseTransferBlockedOnExtensionEvent = func(
_ roachpb.ReplicaDescriptor) {
if waitForTransferBlocked.Load().(bool) {
transferBlocked <- struct{}{}
waitForTransferBlocked.Store(false)
}
}
mtc := &multiTestContext{}
mtc.storeConfig = &cfg
mtc.Start(t, 2)
defer mtc.Stop()
// First, do a write; we'll use it to determine when the dust has settled.
leftKey := roachpb.Key("a")
incArgs := incrementArgs(leftKey, 1)
if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], &incArgs); pErr != nil {
t.Fatal(pErr)
}
// Get the left range's ID.
rangeID := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil).RangeID
// Replicate the left range onto node 1.
mtc.replicateRange(rangeID, 1)
replica0 := mtc.stores[0].LookupReplica(roachpb.RKey("a"), nil)
replica1 := mtc.stores[1].LookupReplica(roachpb.RKey("a"), nil)
gArgs := getArgs(leftKey)
replica0Desc, err := replica0.GetReplicaDescriptor()
if err != nil {
t.Fatal(err)
}
// Check that replica0 can serve reads OK.
if _, pErr := client.SendWrappedWith(
context.Background(),
mtc.senders[0],
roachpb.Header{Replica: replica0Desc},
&gArgs,
); pErr != nil {
t.Fatal(pErr)
}
{
// Transferring the lease to ourself should be a no-op.
origLeasePtr, _ := replica0.GetLease()
origLease := *origLeasePtr
if err := replica0.AdminTransferLease(replica0Desc.StoreID); err != nil {
t.Fatal(err)
}
newLeasePtr, _ := replica0.GetLease()
if origLeasePtr != newLeasePtr || origLease != *newLeasePtr {
t.Fatalf("expected %+v, but found %+v", origLeasePtr, newLeasePtr)
}
}
{
// An invalid target should result in an error.
const expected = "unable to find store .* in range"
if err := replica0.AdminTransferLease(1000); !testutils.IsError(err, expected) {
t.Fatalf("expected %s, but found %v", expected, err)
}
}
// Move the lease to store 1.
var newHolderDesc roachpb.ReplicaDescriptor
util.SucceedsSoon(t, func() error {
var err error
newHolderDesc, err = replica1.GetReplicaDescriptor()
return err
})
if err := replica0.AdminTransferLease(newHolderDesc.StoreID); err != nil {
t.Fatal(err)
}
// Check that replica0 doesn't serve reads any more.
replica0Desc, err = replica0.GetReplicaDescriptor()
if err != nil {
t.Fatal(err)
}
_, pErr := client.SendWrappedWith(
context.Background(),
//......... part of this code has been omitted .........
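The interesting mutex use in this (truncated) example is around filter: the command filter copies the current function value while holding filterMu and then invokes the copy after releasing the lock, so a slow filter never blocks whoever wants to swap it. A reduced sketch of that pattern, with invented names (filterHolder, Set, Run):

package main

import (
	"fmt"
	"sync"
)

// filterHolder stores a swappable callback. Callers copy the callback under
// the lock and invoke it outside the lock, so Set never waits on a running
// callback and the callback never runs while the lock is held.
type filterHolder struct {
	mu     sync.Mutex
	filter func(arg string) error
}

// Set installs (or clears, with nil) the current filter.
func (h *filterHolder) Set(f func(string) error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.filter = f
}

// Run invokes the currently installed filter, if any.
func (h *filterHolder) Run(arg string) error {
	h.mu.Lock()
	f := h.filter // copy under the lock
	h.mu.Unlock()
	if f == nil {
		return nil
	}
	return f(arg) // call outside the lock
}

func main() {
	var h filterHolder
	fmt.Println(h.Run("req-1")) // <nil>: no filter installed
	h.Set(func(arg string) error { return fmt.Errorf("rejecting %s", arg) })
	fmt.Println(h.Run("req-2")) // rejecting req-2
}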
Example 6: TestTxnCanStillResolveOldName
// Test that a SQL txn that resolved a name can keep resolving that name during
// its lifetime even after the table has been renamed.
// Also tests that the name of a renamed table cannot be reused until everybody
// has stopped using it. Otherwise, we'd have different transactions in the
// system using a single name for different tables.
// Also tests that the old name cannot be used by a node that doesn't have a lease
// on the old version even while the name mapping still exists.
func TestTxnCanStillResolveOldName(t *testing.T) {
defer leaktest.AfterTest(t)()
var lmKnobs LeaseManagerTestingKnobs
// renameUnblocked is used to block the rename schema change until the test
// no longer needs the old name->id mapping to exist.
renameUnblocked := make(chan interface{})
serverParams := base.TestServerArgs{
Knobs: base.TestingKnobs{
SQLSchemaChanger: &SchemaChangerTestingKnobs{
RenameOldNameNotInUseNotification: func() {
<-renameUnblocked
},
},
SQLLeaseManager: &lmKnobs,
}}
var mu syncutil.Mutex
var waitTableID sqlbase.ID
// renamed is used to block until the node cannot get leases with the original
// table name. It will be signaled once the table has been renamed and the update
// about the new name has been processed. Moreover, not only does an update to
// the name need to have been received, but the version of the descriptor also
// needs to have been incremented in order to guarantee that the node cannot get
// leases using the old name (an update with the new name but the original
// version is ignored by the leasing refresh mechanism).
renamed := make(chan interface{})
lmKnobs.TestingLeasesRefreshedEvent =
func(cfg config.SystemConfig) {
mu.Lock()
defer mu.Unlock()
if waitTableID != 0 {
if isRenamed(waitTableID, "t2", 2, cfg) {
close(renamed)
waitTableID = 0
}
}
}
s, db, kvDB := serverutils.StartServer(t, serverParams)
defer s.Stopper().Stop()
sql := `
CREATE DATABASE test;
CREATE TABLE test.t (a INT PRIMARY KEY);
`
_, err := db.Exec(sql)
if err != nil {
t.Fatal(err)
}
tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
mu.Lock()
waitTableID = tableDesc.ID
mu.Unlock()
txn, err := db.Begin()
if err != nil {
t.Fatal(err)
}
// Run a command to make the transaction resolve the table name.
if _, err := txn.Exec("SELECT * FROM test.t"); err != nil {
t.Fatal(err)
}
// Concurrently, rename the table.
threadDone := make(chan interface{})
go func() {
// The ALTER will commit and signal the main thread through `renamed`, but
// the schema changer will remain blocked by the lease on the "t" version
// held by the txn started above.
if _, err := db.Exec("ALTER TABLE test.t RENAME TO test.t2"); err != nil {
panic(err)
}
close(threadDone)
}()
// Block until the LeaseManager has processed the gossip update.
<-renamed
// Run another command in the transaction and make sure that we can still
// resolve the table name.
if _, err := txn.Exec("SELECT * FROM test.t"); err != nil {
t.Fatal(err)
}
// Check that the name cannot be reused while somebody still has a lease on
// the old one (the mechanism for ensuring this is that the entry for the old
// name is not deleted from the database until the async schema changer checks
// that there are no more leases on the old version).
if _, err := db.Exec("CREATE TABLE test.t (a INT PRIMARY KEY)"); !testutils.IsError(
err, `relation "t" already exists`) {
t.Fatal(err)
}
//......... part of this code has been omitted .........
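Here the mutex ties together waitTableID (written by the test, read by the refresh callback) and the renamed channel, which must be closed at most once; setting waitTableID back to 0 under the lock is what prevents a second close. A distilled sketch of that one-shot watcher, where watcher, Watch, and Notify are invented names:

package main

import (
	"fmt"
	"sync"
)

// watcher lets a test arm itself with an ID and then block until an event
// for that ID is observed by some callback. The mutex guards both the armed
// ID and the channel bookkeeping so the channel is closed at most once.
type watcher struct {
	mu     sync.Mutex
	waitID int
	seen   chan struct{}
}

// Watch arms the watcher and returns the channel to block on.
func (w *watcher) Watch(id int) <-chan struct{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.waitID = id
	w.seen = make(chan struct{})
	return w.seen
}

// Notify is called from event callbacks; it fires the channel once when the
// armed ID shows up and then disarms the watcher.
func (w *watcher) Notify(id int) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.waitID != 0 && id == w.waitID {
		close(w.seen)
		w.waitID = 0 // disarm so a duplicate event cannot close(seen) twice
	}
}

func main() {
	var w watcher
	ch := w.Watch(42)
	go w.Notify(7)  // ignored: not the ID we are waiting for
	go w.Notify(42) // fires the channel
	<-ch
	fmt.Println("table 42 event observed")
}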
Example 7: TestLeasesOnDeletedTableAreReleasedImmediately
// Test that once a table is marked as deleted, a lease's refcount dropping to 0
// means the lease is released immediately, as opposed to being released only
// when it expires.
func TestLeasesOnDeletedTableAreReleasedImmediately(t *testing.T) {
defer leaktest.AfterTest(t)()
var mu syncutil.Mutex
clearSchemaChangers := false
var waitTableID sqlbase.ID
deleted := make(chan bool)
params, _ := createTestServerParams()
params.Knobs = base.TestingKnobs{
SQLLeaseManager: &csql.LeaseManagerTestingKnobs{
TestingLeasesRefreshedEvent: func(cfg config.SystemConfig) {
mu.Lock()
defer mu.Unlock()
if waitTableID != 0 {
if isDeleted(waitTableID, cfg) {
close(deleted)
waitTableID = 0
}
}
},
},
SQLSchemaChanger: &csql.SchemaChangerTestingKnobs{
SyncFilter: func(tscc csql.TestingSchemaChangerCollection) {
mu.Lock()
defer mu.Unlock()
if clearSchemaChangers {
tscc.ClearSchemaChangers()
}
},
AsyncExecNotification: asyncSchemaChangerDisabled,
},
}
s, db, kvDB := serverutils.StartServer(t, params)
defer s.Stopper().Stop()
sql := `
CREATE DATABASE test;
CREATE TABLE test.t(a INT PRIMARY KEY);
`
_, err := db.Exec(sql)
if err != nil {
t.Fatal(err)
}
tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
lease1, err := acquire(s.(*server.TestServer), tableDesc.ID, 0)
if err != nil {
t.Fatal(err)
}
lease2, err := acquire(s.(*server.TestServer), tableDesc.ID, 0)
if err != nil {
t.Fatal(err)
}
// Block schema changers so that the table we're about to DROP is not actually
// dropped; it will be left in a "deleted" state.
// Also install a way to wait for the config update to be processed.
mu.Lock()
clearSchemaChangers = true
waitTableID = tableDesc.ID
mu.Unlock()
// DROP the table
_, err = db.Exec(`DROP TABLE test.t`)
if err != nil {
t.Fatal(err)
}
// Block until the LeaseManager has processed the gossip update.
<-deleted
// We should still be able to acquire, because we have an active lease.
lease3, err := acquire(s.(*server.TestServer), tableDesc.ID, 0)
if err != nil {
t.Fatal(err)
}
// Release everything.
if err := s.LeaseManager().(*csql.LeaseManager).Release(lease1); err != nil {
t.Fatal(err)
}
if err := s.LeaseManager().(*csql.LeaseManager).Release(lease2); err != nil {
t.Fatal(err)
}
if err := s.LeaseManager().(*csql.LeaseManager).Release(lease3); err != nil {
t.Fatal(err)
}
// Now we shouldn't be able to acquire any more.
_, err = acquire(s.(*server.TestServer), tableDesc.ID, 0)
if !testutils.IsError(err, "table is being dropped") {
t.Fatalf("got a different error than expected: %v", err)
}
}
Example 8: TestStopperRunLimitedAsyncTask
func TestStopperRunLimitedAsyncTask(t *testing.T) {
defer leaktest.AfterTest(t)()
s := stop.NewStopper()
defer s.Stop()
const maxConcurrency = 5
const numTasks = maxConcurrency * 3
sem := make(chan struct{}, maxConcurrency)
taskSignal := make(chan struct{}, maxConcurrency)
var mu syncutil.Mutex
concurrency := 0
peakConcurrency := 0
var wg sync.WaitGroup
f := func(_ context.Context) {
mu.Lock()
concurrency++
if concurrency > peakConcurrency {
peakConcurrency = concurrency
}
mu.Unlock()
<-taskSignal
mu.Lock()
concurrency--
mu.Unlock()
wg.Done()
}
go func() {
// Loop until the desired peak concurrency has been reached.
for {
mu.Lock()
c := concurrency
mu.Unlock()
if c >= maxConcurrency {
break
}
time.Sleep(time.Millisecond)
}
// Then let the rest of the async tasks finish quickly.
for i := 0; i < numTasks; i++ {
taskSignal <- struct{}{}
}
}()
for i := 0; i < numTasks; i++ {
wg.Add(1)
if err := s.RunLimitedAsyncTask(
context.TODO(), sem, true /* wait */, f,
); err != nil {
t.Fatal(err)
}
}
wg.Wait()
if concurrency != 0 {
t.Fatalf("expected 0 concurrency at end of test but got %d", concurrency)
}
if peakConcurrency != maxConcurrency {
t.Fatalf("expected peak concurrency %d to equal max concurrency %d",
peakConcurrency, maxConcurrency)
}
sem = make(chan struct{}, 1)
sem <- struct{}{}
err := s.RunLimitedAsyncTask(
context.TODO(), sem, false /* wait */, func(_ context.Context) {
},
)
if err != stop.ErrThrottled {
t.Fatalf("expected %v; got %v", stop.ErrThrottled, err)
}
}
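Compared with Example 2, this version replaces the fixed sleep with a monitor goroutine that polls the mutex-protected concurrency counter until the expected peak is reached, then releases the tasks. The lock-copy-unlock polling loop on its own (waitForAtLeast is an invented helper):

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitForAtLeast polls the value guarded by mu until it reaches want.
// The lock is held only long enough to copy the value; the comparison and
// the sleep happen outside the critical section.
func waitForAtLeast(mu *sync.Mutex, counter *int, want int) {
	for {
		mu.Lock()
		c := *counter
		mu.Unlock()
		if c >= want {
			return
		}
		time.Sleep(time.Millisecond)
	}
}

func main() {
	var mu sync.Mutex
	counter := 0
	go func() {
		for i := 0; i < 5; i++ {
			mu.Lock()
			counter++
			mu.Unlock()
			time.Sleep(2 * time.Millisecond)
		}
	}()
	waitForAtLeast(&mu, &counter, 5)
	fmt.Println("reached the expected count")
}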
Example 9: testRandomSyntax
// testRandomSyntax performs all of the RSG setup and teardown for common
// random syntax testing operations. It takes f, a closure where the random
// expression should be generated and executed; f returns a boolean indicating
// whether the statement executed successfully. The success count is used to
// verify that at least 1 success occurs (otherwise it is likely a bad test).
func testRandomSyntax(
t *testing.T, setup func(db *gosql.DB) error, f func(db *gosql.DB, r *rsg.RSG) (success bool),
) {
if *flagRSGTime == 0 {
t.Skip("enable with '-rsg <duration>'")
}
params, _ := createTestServerParams()
params.UseDatabase = "ident"
s, db, _ := serverutils.StartServer(t, params)
defer s.Stopper().Stop()
if setup != nil {
err := setup(db)
if err != nil {
t.Fatal(err)
}
}
y, err := ioutil.ReadFile(filepath.Join("parser", "sql.y"))
if err != nil {
t.Fatal(err)
}
r, err := rsg.NewRSG(timeutil.Now().UnixNano(), string(y))
if err != nil {
t.Fatal(err)
}
// Broadcast channel for all workers.
done := make(chan bool)
var wg sync.WaitGroup
var lock syncutil.Mutex
var total, success int
worker := func() {
defer wg.Done()
for {
select {
case <-done:
return
default:
}
s := f(db, r)
lock.Lock()
total++
if s {
success++
}
lock.Unlock()
}
}
for i := 0; i < *flagRSGGoRoutines; i++ {
go worker()
wg.Add(1)
}
time.Sleep(*flagRSGTime)
close(done)
wg.Wait()
t.Logf("%d executions, %d successful", total, success)
if success == 0 {
t.Fatal("0 successful executions")
}
}
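testRandomSyntax uses the mutex only for the two result counters, total and success, which every worker goroutine increments; the workers are stopped by closing the shared done channel. Roughly the same accounting in isolation, with runWorkers and its parameters made up for the sketch:

package main

import (
	"fmt"
	"sync"
	"time"
)

// runWorkers runs n goroutines that repeatedly call work until d elapses,
// tallying attempts and successes under a shared mutex.
func runWorkers(n int, d time.Duration, work func() bool) (total, success int) {
	done := make(chan struct{}) // closed to broadcast "stop" to all workers
	var wg sync.WaitGroup
	var mu sync.Mutex

	worker := func() {
		defer wg.Done()
		for {
			select {
			case <-done:
				return
			default:
			}
			ok := work()
			mu.Lock()
			total++
			if ok {
				success++
			}
			mu.Unlock()
		}
	}
	for i := 0; i < n; i++ {
		wg.Add(1)
		go worker()
	}
	time.Sleep(d)
	close(done)
	wg.Wait()
	return total, success
}

func main() {
	total, success := runWorkers(4, 50*time.Millisecond, func() bool { return true })
	fmt.Printf("%d executions, %d successful\n", total, success)
}

One small difference: the sketch calls wg.Add(1) before launching each goroutine, the conventional ordering, whereas the original adds after go worker(); that still works there because no worker can return before the done channel is closed, which happens only after all the Add calls.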
Example 10: TestIntentResolution
func TestIntentResolution(t *testing.T) {
defer leaktest.AfterTest(t)()
testCases := []struct {
keys []string
ranges [][2]string
exp []string
}{
// Note that the first key (or, range, if no keys present) determines
// the base key of the Txn. In these examples, it's always the first
// range, so "a"-"s" is local. Any examples added must stick to that
// convention and write the first key into "a"-"s".
{
keys: []string{"a", "x", "b", "c", "s"},
ranges: [][2]string{{"d", "e"}},
exp: []string{"s", "x"},
},
{
keys: []string{"h", "y", "z"},
ranges: [][2]string{{"g", "z"}},
exp: []string{`"s"-"z\x00"`},
},
{
keys: []string{"q", "s"},
ranges: [][2]string{{"a", "w"}, {"b", "x"}, {"t", "u"}},
exp: []string{`"s"-"x"`},
},
{
keys: []string{"q", "s", "y", "v"},
ranges: [][2]string{{"a", "s"}, {"r", "t"}, {"u", "w"}},
exp: []string{`"s"-"t"`, `"u"-"w"`, "y"},
},
}
splitKey := []byte("s")
for i, tc := range testCases {
// Use deterministic randomness to randomly put the writes in separate
// batches or commit them with EndTransaction.
rnd, seed := randutil.NewPseudoRand()
log.Infof(context.Background(), "%d: using intent test seed %d", i, seed)
results := map[string]struct{}{}
func() {
var storeKnobs storage.StoreTestingKnobs
var mu syncutil.Mutex
closer := make(chan struct{}, 2)
var done bool
storeKnobs.TestingCommandFilter =
func(filterArgs storagebase.FilterArgs) *roachpb.Error {
mu.Lock()
defer mu.Unlock()
header := filterArgs.Req.Header()
// Ignore anything outside of the intent key range of "a" - "z"
if header.Key.Compare(roachpb.Key("a")) < 0 || header.Key.Compare(roachpb.Key("z")) > 0 {
return nil
}
var entry string
switch arg := filterArgs.Req.(type) {
case *roachpb.ResolveIntentRequest:
if arg.Status == roachpb.COMMITTED {
entry = string(header.Key)
}
case *roachpb.ResolveIntentRangeRequest:
if arg.Status == roachpb.COMMITTED {
entry = fmt.Sprintf("%s-%s", header.Key, header.EndKey)
}
}
if entry != "" {
log.Infof(context.Background(), "got %s", entry)
results[entry] = struct{}{}
}
if len(results) >= len(tc.exp) && !done {
done = true
close(closer)
}
return nil
}
s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{
Knobs: base.TestingKnobs{Store: &storeKnobs}})
defer s.Stopper().Stop()
// Split the Range. This should not have any asynchronous intents.
if err := kvDB.AdminSplit(context.TODO(), splitKey); err != nil {
t.Fatal(err)
}
if err := kvDB.Txn(context.TODO(), func(txn *client.Txn) error {
b := txn.NewBatch()
if tc.keys[0] >= string(splitKey) {
t.Fatalf("first key %s must be < split key %s", tc.keys[0], splitKey)
}
for i, key := range tc.keys {
// The first write must not go into the batch; it anchors the
// transaction to the correct range.
local := i != 0 && rnd.Intn(2) == 0
log.Infof(context.Background(), "%d: %s: local: %t", i, key, local)
if local {
b.Put(key, "test")
} else if err := txn.Put(key, "test"); err != nil {
//......... part of this code has been omitted .........