This article collects typical usage examples of the Locker.Unlock method from Go's sync package. If you have been wondering what Locker.Unlock does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also follow up with the usage examples for the sync.Locker interface itself.
The following 15 code examples of Locker.Unlock are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Go code examples.
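Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic Lock/Unlock contract of the sync.Locker interface:

package main

import (
	"fmt"
	"sync"
)

// increment accepts any sync.Locker (*sync.Mutex, *sync.RWMutex, a custom
// lock, ...) and uses the Lock/Unlock pair to guard a shared counter.
func increment(l sync.Locker, counter *int) {
	l.Lock()
	defer l.Unlock() // the deferred Unlock runs even if the body panics
	*counter++
}

func main() {
	var mu sync.Mutex
	var n int
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			increment(&mu, &n)
		}()
	}
	wg.Wait()
	fmt.Println(n) // always 100
}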
Example 1: TestRLocker
func TestRLocker(t *testing.T) {
	var wl RWMutex
	var rl sync.Locker
	wlocked := make(chan bool, 1)
	rlocked := make(chan bool, 1)
	rl = wl.RLocker()
	n := 10
	go func() {
		for i := 0; i < n; i++ {
			// take two read locks: RLock may be held by many readers at once
			rl.Lock()
			rl.Lock()
			rlocked <- true
			wl.Lock()
			wlocked <- true
		}
	}()
	for i := 0; i < n; i++ {
		<-rlocked
		rl.Unlock()
		select {
		case <-wlocked:
			t.Fatal("RLocker() didn't read-lock it")
		default:
		}
		rl.Unlock()
		<-wlocked
		select {
		case <-rlocked:
			t.Fatal("RLocker() didn't respect the write lock")
		default:
		}
		wl.Unlock()
	}
}
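This test verifies the contract of RLocker: the returned sync.Locker forwards Lock/Unlock to RLock/RUnlock. A common use of that adapter (a hypothetical snippet, not part of the test above) is feeding the read side of an RWMutex to an API that only accepts a sync.Locker, such as sync.NewCond:

var rw sync.RWMutex

// waiters on the condition variable hold only the read lock
cond := sync.NewCond(rw.RLocker())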
Example 2: Watch
func (d *deadlockDetector) Watch(name string, mut sync.Locker) {
	d.lockers[name] = mut
	go func() {
		for {
			time.Sleep(d.timeout / 4)
			ok := make(chan bool, 2)
			go func() {
				mut.Lock()
				_ = 1 // empty critical section
				mut.Unlock()
				ok <- true
			}()
			go func() {
				time.Sleep(d.timeout)
				ok <- false
			}()
			if r := <-ok; !r {
				msg := fmt.Sprintf("deadlock detected at %s", name)
				for otherName, otherMut := range d.lockers {
					if otherHolder, ok := otherMut.(Holdable); ok {
						msg += "\n===" + otherName + "===\n" + otherHolder.Holders()
					}
				}
				panic(msg)
			}
		}
	}()
}
Example 3: Watch
func (d *deadlockDetector) Watch(name string, mut sync.Locker) {
	d.lockers[name] = mut
	go func() {
		for {
			time.Sleep(d.timeout / 4)
			ok := make(chan bool, 2)
			go func() {
				mut.Lock()
				mut.Unlock()
				ok <- true
			}()
			go func() {
				time.Sleep(d.timeout)
				ok <- false
			}()
			if r := <-ok; !r {
				msg := fmt.Sprintf("deadlock detected at %s", name)
				for otherName, otherMut := range d.lockers {
					if otherHolder, ok := otherMut.(Holder); ok {
						holder, goid := otherHolder.Holder()
						msg += fmt.Sprintf("\n %s = current holder: %s at routine %d", otherName, holder, goid)
					}
				}
				panic(msg)
			}
		}
	}()
}
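Examples 2 and 3 are two revisions of the same idea: a watchdog that periodically probes each registered lock and panics if a probe cannot acquire the lock within the timeout. Both assume a deadlockDetector type defined elsewhere in the originating project; a minimal sketch of what it plausibly looks like (the field names here are assumptions, not the project's actual code):

type deadlockDetector struct {
	timeout time.Duration
	lockers map[string]sync.Locker
}

func newDeadlockDetector(timeout time.Duration) *deadlockDetector {
	return &deadlockDetector{
		timeout: timeout,
		lockers: make(map[string]sync.Locker),
	}
}

// Usage sketch: probe every timeout/4, panic on a stuck lock.
//	d := newDeadlockDetector(20 * time.Second)
//	d.Watch("stateMutex", &stateMutex)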
Example 4: ChromeProcessesCleaner
// Running benchmarks in parallel brings up multiple Chrome instances at the
// same time, and when there are crashes, Chrome processes stick around and
// can severely impact the machine's performance. To prevent this, zombie
// Chrome processes are periodically killed.
func ChromeProcessesCleaner(locker sync.Locker, chromeCleanerTimer time.Duration) {
	for range time.Tick(chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		locker.Lock()
		util.LogErr(ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, PKILL_TIMEOUT, nil, nil))
		locker.Unlock()
	}
}
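One plausible way to wire the cleaner up (hypothetical; the real call sites live in the originating project) is to share a single mutex between task execution and the cleaner, so that pkill can never fire while a task holds the lock:

var taskMutex sync.Mutex

// start the cleaner once, at process startup
go ChromeProcessesCleaner(&taskMutex, 30*time.Minute)

// every task holds the same lock for its duration
taskMutex.Lock()
// ... run the benchmark task ...
taskMutex.Unlock()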
Example 5: GetE
func (l *LockedOrca) GetE(req common.GetRequest) error {
	// Lock for each read key, complete the read, and then move on.
	// The last key sent through should have a noop at the end to complete the
	// whole interaction between the client and this server.
	var ret error
	var lock sync.Locker

	// Guarantee that an operation that failed with a panic will unlock its lock.
	defer func() {
		if r := recover(); r != nil {
			if lock != nil {
				lock.Unlock()
			}
			panic(r)
		}
	}()

	for idx, key := range req.Keys {
		// Acquire read lock (true == read)
		lock = l.getlock(key, true)
		lock.Lock()

		// The last request will have these set to complete the interaction
		noopOpaque := uint32(0)
		noopEnd := false
		if idx == len(req.Keys)-1 {
			noopOpaque = req.NoopOpaque
			noopEnd = req.NoopEnd
		}

		subreq := common.GetRequest{
			Keys:       [][]byte{key},
			Opaques:    []uint32{req.Opaques[idx]},
			Quiet:      []bool{req.Quiet[idx]},
			NoopOpaque: noopOpaque,
			NoopEnd:    noopEnd,
		}

		// Make the actual request
		ret = l.wrapped.GetE(subreq)

		// Release the read lock
		lock.Unlock()

		// Bail out early if there was an error (misses are not errors in this sense).
		// This will probably end up breaking the connection anyway, so no worries
		// about leaving the gets half-done.
		if ret != nil {
			break
		}
	}

	return ret
}
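The getlock method is defined elsewhere in the project. A common way to implement per-key locking is lock striping, sketched below under assumptions of my own (the stripe count, FNV hashing, and a package-level stripes array instead of a struct field are all illustrative choices, not the project's actual code):

const numStripes = 256

var stripes [numStripes]sync.RWMutex

// getlock hashes the key onto one of a fixed set of RWMutexes so that
// unrelated keys rarely contend; read == true returns the read side of
// the stripe via RLocker, letting concurrent readers proceed in parallel.
func getlock(key []byte, read bool) sync.Locker {
	h := fnv.New32a() // import "hash/fnv"
	h.Write(key)
	mu := &stripes[h.Sum32()%numStripes]
	if read {
		return mu.RLocker()
	}
	return mu
}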
Example 6: sleepWhile
func sleepWhile(l sync.Locker, cond func() bool) {
	for {
		l.Lock()
		val := cond()
		l.Unlock()
		if !val {
			break
		}
		time.Sleep(time.Millisecond)
	}
}
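A hypothetical call site: poll a guarded counter until a background worker has drained it. Note that sleepWhile always evaluates cond with the lock held, so the closure may touch guarded state freely:

var mu sync.Mutex
pending := 3

go func() {
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Millisecond)
		mu.Lock()
		pending--
		mu.Unlock()
	}
}()

sleepWhile(&mu, func() bool { return pending > 0 }) // returns once pending == 0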
Example 7: WaitEvents
func WaitEvents(l sync.Locker, evs ...*Event) {
	cases := make([]reflect.SelectCase, 0, len(evs))
	for _, ev := range evs {
		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(ev.C()),
		})
	}
	l.Unlock()
	reflect.Select(cases)
	l.Lock()
}
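The unlock/wait/relock shape mirrors sync.Cond.Wait, with one caveat: here the unlock and the wait are not atomic, so the pattern is only safe if Event delivers notifications through buffered or closed channels rather than unbuffered sends. For a single event source, the stdlib equivalent would be (mu and condition() are hypothetical stand-ins for the guarded state):

c := sync.NewCond(&mu)

mu.Lock()
for !condition() {
	c.Wait() // atomically unlocks mu, blocks, then relocks mu before returning
}
mu.Unlock()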
Example 8: process
// run kernel on inputs, produce outputs
func (b *Block) process() Interrupt {
	b.Monitor <- MonitorMessage{
		BI_KERNEL,
		nil,
	}

	if b.state.Processed {
		return nil
	}

	// block until connected to source if necessary
	if b.sourceType != NONE && b.routing.Source == nil {
		select {
		case f := <-b.routing.InterruptChan:
			return f
		}
	}

	// we should only be able to get here if
	// - we don't need a shared state
	// - we have an external shared state and it has been attached

	// if we have a store, lock it
	var store sync.Locker
	var ok bool
	if store, ok = b.routing.Source.(sync.Locker); ok {
		store.Lock()
	}

	// run the kernel
	interrupt := b.kernel(b.state.inputValues,
		b.state.outputValues,
		b.state.internalValues,
		b.routing.Source,
		b.routing.InterruptChan)

	// unlock the store if necessary
	if store != nil {
		store.Unlock()
	}

	// if an interrupt was received, return it
	if interrupt != nil {
		return interrupt
	}

	b.state.Processed = true
	return nil
}
Example 9: RunHostBenchmark
func RunHostBenchmark(
	ctx *common.Context,
	inputManager *common.InputManager,
	sandbox Sandbox,
	ioLock sync.Locker,
) (BenchmarkResults, error) {
	ioLock.Lock()
	defer ioLock.Unlock()

	ctx.Log.Info("Running benchmark")

	benchmarkResults := make(BenchmarkResults)
	for idx, benchmarkCase := range cases {
		input, err := inputManager.Add(
			benchmarkCase.hash,
			NewRunnerTarInputFactory(
				&ctx.Config,
				benchmarkCase.hash,
				&benchmarkCase,
			),
		)
		if err != nil {
			return nil, err
		}
		defer input.Release(input)

		run := common.Run{
			AttemptID: uint64(idx),
			Source:    benchmarkCase.source,
			Language:  benchmarkCase.language,
			InputHash: benchmarkCase.hash,
			MaxScore:  1.0,
			Debug:     false,
		}
		results, err := Grade(ctx, nil, &run, input, sandbox)
		if err != nil {
			return nil, err
		}
		benchmarkResults[benchmarkCase.name] = BenchmarkResult{
			Time:     results.Time,
			WallTime: results.WallTime,
			Memory:   results.Memory,
		}
	}

	return benchmarkResults, nil
}
Example 10: heartbeatStart
func heartbeatStart(job *Job, done chan bool, heartbeat int, l sync.Locker) {
	tick := time.NewTicker(time.Duration(heartbeat) * time.Second)
	for {
		select {
		case <-done:
			tick.Stop()
			return
		case <-tick.C:
			l.Lock()
			success, err := job.HeartbeatWithNoData()
			l.Unlock()
			if err != nil {
				log.Printf("failed HeartbeatWithNoData jid:%v, queue:%v, success:%v, error:%v",
					job.Jid, job.Queue, success, err)
			} else {
				log.Printf("warning, slow, HeartbeatWithNoData jid:%v, queue:%v, success:%v",
					job.Jid, job.Queue, success)
			}
		}
	}
}
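A hypothetical wiring of this loop: the worker shares one mutex with the heartbeat goroutine so heartbeats never race with its own calls on the same job, and signals shutdown through the done channel:

var mu sync.Mutex
done := make(chan bool)

go heartbeatStart(job, done, 60, &mu) // heartbeat every 60 seconds

// ... process the job, taking mu around any other job method calls ...

done <- true // stop the heartbeat loop once the job completes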
Example 11: TestTryMutexLocker
func TestTryMutexLocker(t *testing.T) {
	var mu syncx.TryMutex
	var l sync.Locker = &mu

	l.Lock()
	ch := make(chan struct{})
	go func() {
		l.Lock()
		ch <- struct{}{}
	}()

	runtime.Gosched()
	if mu.TryLock() {
		t.Fatal("mu should be locked")
	}

	l.Unlock()
	<-ch

	l.Unlock()
	if !mu.TryLock() {
		t.Fatal("mu should be unlocked")
	}
}
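syncx.TryMutex is a third-party type, not part of the standard library. A minimal sketch of the same idea using only the stdlib, built on a 1-buffered channel (an assumption about how such a type can work, not the actual syncx implementation):

// TryMutex is a mutex that additionally supports non-blocking acquisition.
type TryMutex struct {
	ch chan struct{}
}

func NewTryMutex() *TryMutex {
	return &TryMutex{ch: make(chan struct{}, 1)}
}

func (m *TryMutex) Lock()   { m.ch <- struct{}{} } // blocks while the token is taken
func (m *TryMutex) Unlock() { <-m.ch }             // returns the token

// TryLock acquires the lock only if it is immediately available.
func (m *TryMutex) TryLock() bool {
	select {
	case m.ch <- struct{}{}:
		return true
	default:
		return false
	}
}

Since Go 1.18, sync.Mutex ships a TryLock method of its own, so new code rarely needs a separate type.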
Example 12: deadlockDetect
func deadlockDetect(mut sync.Locker, timeout time.Duration) {
	go func() {
		for {
			time.Sleep(timeout / 4)

			ok := make(chan bool, 2)
			go func() {
				mut.Lock()
				mut.Unlock()
				ok <- true
			}()
			go func() {
				time.Sleep(timeout)
				ok <- false
			}()

			if r := <-ok; !r {
				panic("deadlock detected")
			}
		}
	}()
}
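Usage is a single call at setup time; afterwards the probe goroutine panics if the lock ever stays held (or contended) longer than the timeout. A small hypothetical example:

var mu sync.Mutex
deadlockDetect(&mu, 30*time.Second)

mu.Lock()
// ... if this critical section (plus any time spent waiting for the lock)
// ever exceeds roughly 30 seconds, the probe times out and the process panics ...
mu.Unlock()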
Example 13: PreloadInputs
// PreloadInputs reads all files in path, runs them through the specified
// filter, and tries to add them into the InputManager. PreloadInputs acquires
// the ioLock just before doing I/O in order to guarantee that the system will
// not be doing expensive I/O operations in the middle of a
// performance-sensitive operation (like running contestants' code).
func (mgr *InputManager) PreloadInputs(
	rootdir string,
	factory CachedInputFactory,
	ioLock sync.Locker,
) error {
	// Since all the filenames in the cache directory are (or contain) the hash,
	// it is useful to introduce 256 intermediate directories with the first two
	// nibbles of the hash, to avoid the cache directory growing too large and
	// becoming inefficient.
	for i := 0; i < 256; i++ {
		dirname := path.Join(rootdir, fmt.Sprintf("%02x", i))
		contents, err := ioutil.ReadDir(dirname)
		if err != nil {
			continue
		}
		for _, info := range contents {
			hash, ok := factory.GetInputHash(dirname, info)
			if !ok {
				continue
			}

			// Make sure no other I/O is being done while we pre-fetch this input.
			ioLock.Lock()
			input, err := mgr.Add(hash, factory)
			if err != nil {
				os.RemoveAll(path.Join(dirname, info.Name()))
				mgr.ctx.Log.Error("Cached input corrupted", "hash", hash)
			} else {
				input.Release(input)
			}
			ioLock.Unlock()
		}
	}
	mgr.ctx.Log.Info("Finished preloading cached inputs",
		"cache_size", mgr.Size())
	return nil
}
Example 14: With
func With(mu sync.Locker, f func()) {
	mu.Lock()
	defer mu.Unlock()
	f()
}
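With scopes a critical section to a closure; because Unlock is deferred, the lock is released even if f panics. A small usage example:

var mu sync.Mutex
counter := 0

With(&mu, func() {
	counter++ // runs with mu held
})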
Example 15: RunProc
// RunProc runs an event handling loop on component ports.
// It returns true on success; on error it panics with an error message.
func RunProc(c interface{}) bool {
	// Check that the passed interface is a valid pointer to a struct
	name := reflect.TypeOf(c)
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		panic("Argument of flow.Run() is not a valid pointer")
	}
	vp := v
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		panic("Argument of flow.Run() is not a valid pointer to structure. Got type: " + vp.Type().Name())
	}
	t := v.Type()

	// Get the internal state lock if available
	hasLock := false
	var locker sync.Locker
	if lockField := v.FieldByName("StateLock"); lockField.IsValid() && lockField.Elem().IsValid() {
		locker, hasLock = lockField.Interface().(sync.Locker)
	}

	// Call the user init function if it exists
	if initable, ok := c.(Initializable); ok {
		initable.Init()
	}

	// A group to wait for all inputs to be closed
	inputsClose := new(sync.WaitGroup)
	// A group to wait for all recv handlers to finish
	handlersDone := new(sync.WaitGroup)

	// Get the embedded flow.Component
	vCom := v.FieldByName("Component")
	isComponent := vCom.IsValid() && vCom.Type().Name() == "Component"
	if !isComponent {
		panic("Argument of flow.Run() is not a flow.Component")
	}

	// Get the component mode
	componentMode := DefaultComponentMode
	var poolSize uint8
	if vComMode := vCom.FieldByName("Mode"); vComMode.IsValid() {
		componentMode = int(vComMode.Int())
	}
	if vComPoolSize := vCom.FieldByName("PoolSize"); vComPoolSize.IsValid() {
		poolSize = uint8(vComPoolSize.Uint())
	}

	// Create a slice of select cases and port handlers
	cases := make([]reflect.SelectCase, 0, t.NumField())
	handlers := make([]portHandler, 0, t.NumField())

	// Make and listen on the termination channel
	vCom.FieldByName("Term").Set(reflect.MakeChan(vCom.FieldByName("Term").Type(), 0))
	cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: vCom.FieldByName("Term")})
	handlers = append(handlers, portHandler{})

	// Detect active components
	looper, isLooper := c.(Looper)

	// Iterate over struct fields and bind handlers
	inputCount := 0
	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ff := t.Field(i)
		ft := fv.Type()
		// Detect control channels
		if fv.IsValid() && fv.Kind() == reflect.Chan && !fv.IsNil() && (ft.ChanDir()&reflect.RecvDir) != 0 {
			// Bind handlers for an input channel
			cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: fv})
			h := portHandler{onRecv: vp.MethodByName("On" + ff.Name), onClose: vp.MethodByName("On" + ff.Name + "Close")}
			handlers = append(handlers, h)
			if h.onClose.IsValid() || h.onRecv.IsValid() {
				// Add the input to the wait group
				inputsClose.Add(1)
				inputCount++
			}
		}
	}
	if inputCount == 0 && !isLooper {
		panic(fmt.Sprintf("Components with no input ports are not supported: %s", name))
	}

	// Prepare handler closures: each receive handler takes the state lock
	// (when present) around the user callback
	recvHandler := func(onRecv, value reflect.Value) {
		if hasLock {
			locker.Lock()
		}
		valArr := [1]reflect.Value{value}
		onRecv.Call(valArr[:])
		if hasLock {
			locker.Unlock()
//......... remainder of this code omitted .........