This article collects typical usage examples of the Mutex.Unlock method from Golang's sync package. If you have been wondering what exactly Golang's Mutex.Unlock does, how to use it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples of sync.Mutex, the type this method belongs to.
The following shows 15 code examples of the Mutex.Unlock method, sorted by popularity by default.
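Before diving into the examples, here is a minimal sketch of the pattern almost all of them build on: a goroutine calls Lock to enter a critical section and Unlock to leave it, usually via defer so the mutex is released on every return path. The Counter type below is hypothetical and exists only to illustrate the pattern.

package main

import (
	"fmt"
	"sync"
)

// Counter is a hypothetical type that guards a count with a sync.Mutex.
type Counter struct {
	mu sync.Mutex
	n  int
}

// Inc locks the mutex, updates the count, and unlocks via defer,
// so the mutex is released even if the function returns early or panics.
func (c *Counter) Inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

func main() {
	var c Counter
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Inc()
		}()
	}
	wg.Wait()
	fmt.Println(c.n) // always 10: Unlock makes the increments mutually exclusive
}

Several of the examples below call Unlock explicitly rather than deferring it; that works too, as long as every path between Lock and Unlock returns through the Unlock call.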
Example 1: run
func (cmd *serveEmbedCmd) run(ctx scope.Context, args []string) error {
	listener, err := net.Listen("tcp", cmd.addr)
	if err != nil {
		return err
	}
	closed := false
	m := sync.Mutex{}
	closeListener := func() {
		m.Lock()
		if !closed {
			listener.Close()
			closed = true
		}
		m.Unlock()
	}
	// Spin off goroutine to watch ctx and close listener if shutdown requested.
	go func() {
		<-ctx.Done()
		closeListener()
	}()
	if err := http.Serve(listener, cmd); err != nil {
		fmt.Printf("http[%s]: %s\n", cmd.addr, err)
		return err
	}
	closeListener()
	ctx.WaitGroup().Done()
	return ctx.Err()
}
Example 2: Run
// Run runs the query concurrently, and returns the results.
func (q *Query) Run() []interface{} {
	rand.Seed(time.Now().UnixNano())
	var w sync.WaitGroup
	var l sync.Mutex
	places := make([]interface{}, len(q.Journey))
	for i, r := range q.Journey {
		w.Add(1)
		go func(types string, i int) {
			defer w.Done()
			response, err := q.find(types)
			if err != nil {
				log.Println("Failed to find places:", err)
				return
			}
			if len(response.Results) == 0 {
				log.Println("No places found for", types)
				return
			}
			for _, result := range response.Results {
				for _, photo := range result.Photos {
					photo.URL = "https://maps.googleapis.com/maps/api/place/photo?" +
						"maxwidth=1000&photoreference=" + photo.PhotoRef + "&key=" + APIKey
				}
			}
			randI := rand.Intn(len(response.Results))
			l.Lock()
			places[i] = response.Results[randI]
			l.Unlock()
		}(r, i)
	}
	w.Wait() // wait for everything to finish
	return places
}
Example 3: GetLookupdTopicChannels
// GetLookupdTopicChannels returns a []string containing a union of the channels
// from all the given lookupd for the given topic
func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string, error) {
	success := false
	allChannels := make([]string, 0)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, addr := range lookupdHTTPAddrs {
		wg.Add(1)
		endpoint := fmt.Sprintf("http://%s/channels?topic=%s", addr, url.QueryEscape(topic))
		log.Printf("LOOKUPD: querying %s", endpoint)
		go func(endpoint string) {
			data, err := util.ApiRequest(endpoint)
			lock.Lock()
			defer lock.Unlock()
			defer wg.Done()
			if err != nil {
				log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
				return
			}
			success = true
			// {"data":{"channels":["test"]}}
			channels, _ := data.Get("channels").StringArray()
			allChannels = util.StringUnion(allChannels, channels)
		}(endpoint)
	}
	wg.Wait()
	sort.Strings(allChannels)
	if success == false {
		return nil, errors.New("unable to query any lookupd")
	}
	return allChannels, nil
}
Example 4: Put
// Put implements the Putter interface.
func (mp *MultiPutter) Put(username string, creds map[string]interface{}) error {
	var (
		err error
		mu  sync.Mutex
		wg  sync.WaitGroup
	)
	for _, p := range mp.Putters {
		wg.Add(1)
		go func(p Putter) {
			defer wg.Done()
			if e := p.Put(username, creds); e != nil {
				mu.Lock()
				err = multierror.Append(err, e)
				mu.Unlock()
			}
		}(p)
	}
	wg.Wait()
	return err
}
Example 5: runPipeline
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
	ctx, cancel := context.WithCancel(context.Background())
	a.lock.Lock()
	a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
	a.lock.Unlock()
	var loopErr error
	//TODO this gets really weird with branching... we need to figure out what that looks like.
	mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, inputRepo := range pipelineInfo.InputRepo {
		inputRepo := inputRepo
		wg.Add(1)
		go func() {
			defer wg.Done()
			var lastCommit *pfs.Commit
			listCommitRequest := &pfs.ListCommitRequest{
				Repo:       inputRepo,
				CommitType: pfs.CommitType_COMMIT_TYPE_READ,
				From:       lastCommit,
				Block:      true,
			}
			commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
			if err != nil && loopErr == nil {
				loopErr = err
				return
			}
			for _, commitInfo := range commitInfos.CommitInfo {
				lock.Lock()
				mostRecentCommit[*inputRepo] = commitInfo.Commit
				var commits []*pfs.Commit
				for _, commit := range mostRecentCommit {
					commits = append(commits, commit)
				}
				lock.Unlock()
				if len(commits) < len(pipelineInfo.InputRepo) {
					// we don't yet have a commit for every input repo so there's no way to run the job
					continue
				}
				outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
				if err != nil && loopErr == nil {
					loopErr = err
					return
				}
				_, err = a.jobAPIClient.CreateJob(
					ctx,
					&pps.CreateJobRequest{
						Spec: &pps.CreateJobRequest_Pipeline{
							Pipeline: pipelineInfo.Pipeline,
						},
						InputCommit:  []*pfs.Commit{commitInfo.Commit},
						OutputParent: outParentCommit,
					},
				)
			}
		}()
	}
	wg.Wait()
	return loopErr
}
Example 6: handleResponse
func handleResponse(conn net.Conn, request *protocol.VMessRequest, output chan<- *alloc.Buffer, finish *sync.Mutex, isUDP bool) {
	defer finish.Unlock()
	defer close(output)
	responseKey := md5.Sum(request.RequestKey[:])
	responseIV := md5.Sum(request.RequestIV[:])
	decryptResponseReader, err := v2io.NewAesDecryptReader(responseKey[:], responseIV[:], conn)
	if err != nil {
		log.Error("VMessOut: Failed to create decrypt reader: %v", err)
		return
	}
	buffer, err := v2net.ReadFrom(decryptResponseReader, nil)
	if err != nil {
		log.Error("VMessOut: Failed to read VMess response (%d bytes): %v", buffer.Len(), err)
		return
	}
	if buffer.Len() < 4 || !bytes.Equal(buffer.Value[:4], request.ResponseHeader[:]) {
		log.Warning("VMessOut: unexpected response header. The connection is probably hijacked.")
		return
	}
	log.Info("VMessOut received %d bytes from %s", buffer.Len()-4, conn.RemoteAddr().String())
	buffer.SliceFrom(4)
	output <- buffer
	if !isUDP {
		v2net.ReaderToChan(output, decryptResponseReader)
	}
	return
}
Example 7: TestTransportDoubleCloseOnWriteError
// golang.org/issue/13924
// This used to fail after many iterations, especially with -race:
// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race
func TestTransportDoubleCloseOnWriteError(t *testing.T) {
	var (
		mu   sync.Mutex
		conn net.Conn // to close if set
	)
	st := newServerTester(t,
		func(w http.ResponseWriter, r *http.Request) {
			mu.Lock()
			defer mu.Unlock()
			if conn != nil {
				conn.Close()
			}
		},
		optOnlyServer,
	)
	defer st.Close()
	tr := &Transport{
		TLSClientConfig: tlsConfigInsecure,
		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
			tc, err := tls.Dial(network, addr, cfg)
			if err != nil {
				return nil, err
			}
			mu.Lock()
			defer mu.Unlock()
			conn = tc
			return tc, nil
		},
	}
	defer tr.CloseIdleConnections()
	c := &http.Client{Transport: tr}
	c.Get(st.ts.URL)
}
Example 8: memoizeBytes
func memoizeBytes() byteDescription {
	cache := map[int64]string{}
	suffixes := []string{"B", "KB", "MB", "GB", "TB", "PB"}
	maxLen := len(suffixes) - 1
	var cacheMu sync.Mutex
	return func(b int64) string {
		cacheMu.Lock()
		defer cacheMu.Unlock()
		description, ok := cache[b]
		if ok {
			return description
		}
		bf := float64(b)
		i := 0
		description = ""
		for {
			if bf/BytesPerKB < 1 || i >= maxLen {
				description = fmt.Sprintf("%.2f%s", bf, suffixes[i])
				break
			}
			bf /= BytesPerKB
			i += 1
		}
		cache[b] = description
		return description
	}
}
Example 9: TestPseudoRandomSend
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		// m acts as a completion signal: it starts out locked here, and the
		// receiver goroutine unlocks it once all n values have been read.
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait for the receiver goroutine to finish
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}
Example 10: getNodesInfo
// Returns the host containers, non-Kubernetes containers, and an error (if any).
func (self *kubeNodeMetrics) getNodesInfo(nodeList *nodes.NodeList, start, end time.Time) ([]api.Container, []api.Container, error) {
	var (
		lock sync.Mutex
		wg   sync.WaitGroup
	)
	hostContainers := make([]api.Container, 0, len(nodeList.Items))
	rawContainers := make([]api.Container, 0, len(nodeList.Items))
	for host, info := range nodeList.Items {
		wg.Add(1)
		go func(host nodes.Host, info nodes.Info) {
			defer wg.Done()
			if hostContainer, containers, err := self.updateStats(host, info, start, end); err == nil {
				lock.Lock()
				defer lock.Unlock()
				// guard the dereference below against a nil host container
				if hostContainer != nil {
					hostContainers = append(hostContainers, *hostContainer)
				}
				rawContainers = append(rawContainers, containers...)
			}
		}(host, info)
	}
	wg.Wait()
	return hostContainers, rawContainers, nil
}
Example 11: handleRequest
func (this *VMessOutboundHandler) handleRequest(session *encoding.ClientSession, conn internet.Connection, request *protocol.RequestHeader, payload *alloc.Buffer, input v2io.Reader, finish *sync.Mutex) {
	defer finish.Unlock()
	writer := v2io.NewBufferedWriter(conn)
	defer writer.Release()
	session.EncodeRequestHeader(request, writer)
	bodyWriter := session.EncodeRequestBody(writer)
	var streamWriter v2io.Writer = v2io.NewAdaptiveWriter(bodyWriter)
	if request.Option.Has(protocol.RequestOptionChunkStream) {
		streamWriter = vmessio.NewAuthChunkWriter(streamWriter)
	}
	if err := streamWriter.Write(payload); err != nil {
		conn.SetReusable(false)
	}
	writer.SetCached(false)
	err := v2io.Pipe(input, streamWriter)
	if err != io.EOF {
		conn.SetReusable(false)
	}
	if request.Option.Has(protocol.RequestOptionChunkStream) {
		err := streamWriter.Write(alloc.NewSmallBuffer().Clear())
		if err != nil {
			conn.SetReusable(false)
		}
	}
	streamWriter.Release()
	return
}
Example 12: diameter
func diameter(digests []string, diffStore diff.DiffStore) int {
	// TODO Parallelize.
	lock := sync.Mutex{}
	max := 0
	wg := sync.WaitGroup{}
	for {
		if len(digests) <= 2 {
			break
		}
		wg.Add(1)
		go func(d1 string, d2 []string) {
			defer wg.Done()
			dms, err := diffStore.Get(d1, d2)
			if err != nil {
				glog.Errorf("Unable to get diff: %s", err)
				return
			}
			localMax := 0
			for _, dm := range dms {
				if dm.NumDiffPixels > localMax {
					localMax = dm.NumDiffPixels
				}
			}
			lock.Lock()
			defer lock.Unlock()
			if localMax > max {
				max = localMax
			}
		}(digests[0], digests[1:2])
		digests = digests[1:]
	}
	wg.Wait()
	return max
}
Example 13: getMastersPosition
func (wr *Wrangler) getMastersPosition(shards []*topo.ShardInfo) (map[*topo.ShardInfo]myproto.ReplicationPosition, error) {
	mu := sync.Mutex{}
	result := make(map[*topo.ShardInfo]myproto.ReplicationPosition)
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			log.Infof("Gathering master position for %v", si.MasterAlias)
			ti, err := wr.ts.GetTablet(si.MasterAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}
			pos, err := wr.ai.MasterPosition(ti, wr.ActionTimeout())
			if err != nil {
				rec.RecordError(err)
				return
			}
			log.Infof("Got master position for %v", si.MasterAlias)
			mu.Lock()
			result[si] = pos
			mu.Unlock()
		}(si)
	}
	wg.Wait()
	return result, rec.Error()
}
Example 14: main
func main() {
	flag.Parse()
	if *zookeeper == "" {
		printUsageErrorAndExit("You have to provide a zookeeper connection string using -zookeeper, or the ZOOKEEPER_PEERS environment variable")
	}
	conf := kazoo.NewConfig()
	conf.Timeout = time.Duration(*zookeeperTimeout) * time.Millisecond
	kz, err := kazoo.NewKazooFromConnectionString(*zookeeper, conf)
	if err != nil {
		printErrorAndExit(69, "Failed to connect to Zookeeper: %v", err)
	}
	defer func() { _ = kz.Close() }()
	topics, err := kz.Topics()
	if err != nil {
		printErrorAndExit(69, "Failed to get Kafka topics from Zookeeper: %v", err)
	}
	sort.Sort(topics)
	var (
		wg     sync.WaitGroup
		l      sync.Mutex
		stdout = make([]string, len(topics))
	)
	for i, topic := range topics {
		wg.Add(1)
		go func(i int, topic *kazoo.Topic) {
			defer wg.Done()
			buffer := bytes.NewBuffer(make([]byte, 0))
			partitions, err := topic.Partitions()
			if err != nil {
				printErrorAndExit(69, "Failed to get Kafka topic partitions from Zookeeper: %v", err)
			}
			fmt.Fprintf(buffer, "Topic: %s\tPartitions: %d\n", topic.Name, len(partitions))
			for _, partition := range partitions {
				leader, _ := partition.Leader()
				isr, _ := partition.ISR()
				fmt.Fprintf(buffer, "\tPartition: %d\tReplicas: %v\tLeader: %d\tISR: %v\n", partition.ID, partition.Replicas, leader, isr)
			}
			l.Lock()
			stdout[i] = buffer.String()
			l.Unlock()
		}(i, topic)
	}
	wg.Wait()
	for _, msg := range stdout {
		fmt.Print(msg)
	}
}
Example 15: pqWorker
// generic loop (executed in a goroutine) that periodically wakes up to walk
// the priority queue and call the callback
func (c *Channel) pqWorker(pq *pqueue.PriorityQueue, mutex *sync.Mutex, callback func(item *pqueue.Item)) {
	ticker := time.NewTicker(defaultWorkerWait)
	for {
		select {
		case <-ticker.C:
		case <-c.exitChan:
			goto exit
		}
		now := time.Now().UnixNano()
		for {
			mutex.Lock()
			item, _ := pq.PeekAndShift(now)
			mutex.Unlock()
			if item == nil {
				break
			}
			callback(item)
		}
	}
exit:
	log.Printf("CHANNEL(%s): closing ... pqueue worker", c.name)
	ticker.Stop()
}