本文整理匯總了Golang中sync.Mutex類的典型用法代碼示例。如果您正苦於以下問題:Golang Mutex類的具體用法?Golang Mutex怎麽用?Golang Mutex使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了Mutex類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: Serve
// Serve exposes the Prometheus metrics handler on addr and blocks until the
// listener is closed, either by a serve error or by ctx shutdown.
func Serve(ctx scope.Context, addr string) {
	http.Handle("/metrics", prometheus.Handler())
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		ctx.Terminate(err)
		// BUG FIX: without this return, listener is nil and the http.Serve
		// call below would panic. NOTE(review): mirroring serveEmbedCmd.run,
		// WaitGroup().Done() is not called on this path — confirm the caller's
		// Add/Done accounting expects that.
		return
	}
	closed := false
	m := sync.Mutex{}
	// closeListener is idempotent: the mutex plus the closed flag guarantee
	// the listener is closed exactly once, whichever of the shutdown watcher
	// or the post-serve path gets here first.
	closeListener := func() {
		m.Lock()
		if !closed {
			listener.Close()
			closed = true
		}
		m.Unlock()
	}
	// Spin off goroutine to watch ctx and close listener if shutdown requested.
	go func() {
		<-ctx.Done()
		closeListener()
	}()
	if err := http.Serve(listener, nil); err != nil {
		fmt.Printf("http[%s]: %s\n", addr, err)
		ctx.Terminate(err)
	}
	closeListener()
	ctx.WaitGroup().Done()
}
示例2: Run
// Run runs the query concurrently, and returns the results.
func (q *Query) Run() []interface{} {
rand.Seed(time.Now().UnixNano())
var w sync.WaitGroup
var l sync.Mutex
places := make([]interface{}, len(q.Journey))
for i, r := range q.Journey {
w.Add(1)
go func(types string, i int) {
defer w.Done()
response, err := q.find(types)
if err != nil {
log.Println("Failed to find places:", err)
return
}
if len(response.Results) == 0 {
log.Println("No places found for", types)
return
}
for _, result := range response.Results {
for _, photo := range result.Photos {
photo.URL = "https://maps.googleapis.com/maps/api/place/photo?" +
"maxwidth=1000&photoreference=" + photo.PhotoRef + "&key=" + APIKey
}
}
randI := rand.Intn(len(response.Results))
l.Lock()
places[i] = response.Results[randI]
l.Unlock()
}(r, i)
}
w.Wait() // wait for everything to finish
return places
}
示例3: runPipeline
// runPipeline watches every input repo of the pipeline for new commits and
// creates a job whenever all inputs have at least one commit. It blocks until
// all watcher goroutines exit and returns the first error any of them hit.
func (a *apiServer) runPipeline(pipelineInfo *pps.PipelineInfo) error {
	ctx, cancel := context.WithCancel(context.Background())
	a.lock.Lock()
	a.cancelFuncs[*pipelineInfo.Pipeline] = cancel
	a.lock.Unlock()
	var loopErr error
	//TODO this gets really weird with branching... we need to figure out what that looks like.
	mostRecentCommit := make(map[pfs.Repo]*pfs.Commit)
	var lock sync.Mutex
	var wg sync.WaitGroup
	// setErr records the first error observed by any watcher goroutine.
	// BUG FIX: loopErr was previously read and written by multiple goroutines
	// without synchronization (a data race); all access now goes through lock.
	setErr := func(err error) {
		lock.Lock()
		defer lock.Unlock()
		if loopErr == nil {
			loopErr = err
		}
	}
	for _, inputRepo := range pipelineInfo.InputRepo {
		inputRepo := inputRepo // per-iteration copy for the goroutine below
		wg.Add(1)
		go func() {
			defer wg.Done()
			var lastCommit *pfs.Commit
			listCommitRequest := &pfs.ListCommitRequest{
				Repo:       inputRepo,
				CommitType: pfs.CommitType_COMMIT_TYPE_READ,
				From:       lastCommit,
				Block:      true,
			}
			commitInfos, err := a.pfsAPIClient.ListCommit(ctx, listCommitRequest)
			if err != nil {
				setErr(err)
				return
			}
			for _, commitInfo := range commitInfos.CommitInfo {
				lock.Lock()
				mostRecentCommit[*inputRepo] = commitInfo.Commit
				var commits []*pfs.Commit
				for _, commit := range mostRecentCommit {
					commits = append(commits, commit)
				}
				lock.Unlock()
				if len(commits) < len(pipelineInfo.InputRepo) {
					// we don't yet have a commit for every input repo so there's no way to run the job
					continue
				}
				outParentCommit, err := a.bestParent(pipelineInfo, commitInfo)
				if err != nil {
					setErr(err)
					return
				}
				_, err = a.jobAPIClient.CreateJob(
					ctx,
					&pps.CreateJobRequest{
						Spec: &pps.CreateJobRequest_Pipeline{
							Pipeline: pipelineInfo.Pipeline,
						},
						InputCommit:  []*pfs.Commit{commitInfo.Commit},
						OutputParent: outParentCommit,
					},
				)
				// BUG FIX: the CreateJob error was previously assigned and
				// silently discarded.
				if err != nil {
					setErr(err)
					return
				}
			}
		}()
	}
	wg.Wait()
	// Safe to read without the lock: all writers have exited.
	return loopErr
}
示例4: GetLookupdTopicChannels
// GetLookupdTopicChannels returns a []string containing a union of the channels
// from all the given lookupd for the given topic
// GetLookupdTopicChannels returns a []string containing a union of the channels
// from all the given lookupd for the given topic.
//
// Each lookupd is queried concurrently; as long as at least one query succeeds
// the merged, sorted channel list is returned. If every query fails an error
// is returned instead.
func GetLookupdTopicChannels(topic string, lookupdHTTPAddrs []string) ([]string, error) {
	success := false
	allChannels := make([]string, 0)
	var lock sync.Mutex
	var wg sync.WaitGroup
	for _, addr := range lookupdHTTPAddrs {
		wg.Add(1)
		endpoint := fmt.Sprintf("http://%s/channels?topic=%s", addr, url.QueryEscape(topic))
		log.Printf("LOOKUPD: querying %s", endpoint)
		go func(endpoint string) {
			// BUG FIX: defers run LIFO, and the original declared lock.Unlock
			// before wg.Done, so Done fired while the lock was still held —
			// wg.Wait could return and the caller read allChannels while this
			// goroutine had not yet released the lock. Declaring Done first
			// makes it run last, after the unlock.
			defer wg.Done()
			data, err := util.ApiRequest(endpoint)
			lock.Lock()
			defer lock.Unlock()
			if err != nil {
				log.Printf("ERROR: lookupd %s - %s", endpoint, err.Error())
				return
			}
			success = true
			// {"data":{"channels":["test"]}}
			channels, _ := data.Get("channels").StringArray()
			allChannels = util.StringUnion(allChannels, channels)
		}(endpoint)
	}
	wg.Wait()
	sort.Strings(allChannels)
	if !success {
		return nil, errors.New("unable to query any lookupd")
	}
	return allChannels, nil
}
示例5: run
// run serves cmd over HTTP on cmd.addr until the listener is closed, either
// by a serve error or by ctx shutdown.
func (cmd *serveEmbedCmd) run(ctx scope.Context, args []string) error {
	listener, err := net.Listen("tcp", cmd.addr)
	if err != nil {
		return err
	}

	var (
		mu       sync.Mutex
		shutDown bool
	)
	// Close the listener at most once, no matter how many paths race to here.
	closeListener := func() {
		mu.Lock()
		defer mu.Unlock()
		if !shutDown {
			listener.Close()
			shutDown = true
		}
	}

	// Watch for context shutdown and tear the listener down when it arrives.
	go func() {
		<-ctx.Done()
		closeListener()
	}()

	if err := http.Serve(listener, cmd); err != nil {
		fmt.Printf("http[%s]: %s\n", cmd.addr, err)
		return err
	}
	closeListener()
	ctx.WaitGroup().Done()
	return ctx.Err()
}
示例6: Put
// Put implements the Putter interface.
func (mp *MultiPutter) Put(username string, creds map[string]interface{}) error {
var (
err error
mu sync.Mutex
wg sync.WaitGroup
)
for _, p := range mp.Putters {
wg.Add(1)
go func(p Putter) {
defer wg.Done()
if e := p.Put(username, creds); e != nil {
mu.Lock()
err = multierror.Append(err, e)
mu.Unlock()
}
}(p)
}
wg.Wait()
return err
}
示例7: startCommunicate
// startCommunicate dials the destination and relays traffic between the
// outbound ray and the remote connection, returning once both the request
// and response directions have finished.
func startCommunicate(request *protocol.VMessRequest, dest v2net.Destination, ray core.OutboundRay, firstPacket v2net.Packet) error {
	conn, err := net.Dial(dest.Network(), dest.Address().String())
	if err != nil {
		log.Error("Failed to open %s: %v", dest.String(), err)
		// Signal downstream consumers that no output will ever arrive.
		if ray != nil {
			close(ray.OutboundOutput())
		}
		return err
	}
	log.Info("VMessOut: Tunneling request to %s via %s", request.Address.String(), dest.String())
	defer conn.Close()
	input := ray.OutboundInput()
	output := ray.OutboundOutput()
	// The two mutexes act as one-shot completion signals: each is locked here,
	// and presumably unlocked by its handler goroutine when that direction is
	// done (NOTE(review): confirm handleRequest/handleResponse unlock them),
	// so the second Lock on each below blocks until the handler finishes.
	var requestFinish, responseFinish sync.Mutex
	requestFinish.Lock()
	responseFinish.Lock()
	go handleRequest(conn, request, firstPacket, input, &requestFinish)
	go handleResponse(conn, request, output, &responseFinish, dest.IsUDP())
	// Wait for the request direction to drain, then half-close the write side
	// so the remote sees EOF while responses can still flow back.
	requestFinish.Lock()
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		tcpConn.CloseWrite()
	}
	// Wait for the response direction before the deferred conn.Close() runs.
	responseFinish.Lock()
	return nil
}
示例8: cacher
func cacher(regMap map[*regexp.Regexp]string) func(string) string {
var cache = make(map[string]string)
var cacheMu sync.Mutex
return func(ext string) string {
cacheMu.Lock()
defer cacheMu.Unlock()
memoized, ok := cache[ext]
if ok {
return memoized
}
bExt := []byte(ext)
for regEx, mimeType := range regMap {
if regEx != nil && regEx.Match(bExt) {
memoized = mimeType
break
}
}
cache[ext] = memoized
return memoized
}
}
示例9: pqWorker
// generic loop (executed in a goroutine) that periodically wakes up to walk
// the priority queue and call the callback
func (c *Channel) pqWorker(pq *pqueue.PriorityQueue, mutex *sync.Mutex, callback func(item *pqueue.Item)) {
ticker := time.NewTicker(defaultWorkerWait)
for {
select {
case <-ticker.C:
case <-c.exitChan:
goto exit
}
now := time.Now().UnixNano()
for {
mutex.Lock()
item, _ := pq.PeekAndShift(now)
mutex.Unlock()
if item == nil {
break
}
callback(item)
}
}
exit:
log.Printf("CHANNEL(%s): closing ... pqueue worker", c.name)
ticker.Stop()
}
示例10: benchmarkMutexLock
// benchmarkMutexLock measures the cost of an uncontended Lock/Unlock pair.
//
// NOTE(review): the lowercase name means `go test` will not discover this as
// a benchmark directly; presumably it is invoked from an exported Benchmark*
// wrapper elsewhere — confirm before renaming.
func benchmarkMutexLock(b *testing.B) {
	var mu sync.Mutex
	for n := 0; n < b.N; n++ {
		mu.Lock()
		mu.Unlock()
	}
}
示例11: memoizeBytes
// memoizeBytes returns a byteDescription function that renders a byte count
// as a human-readable string with two decimals and a unit suffix (B through
// PB), caching each distinct input so the formatting work happens at most
// once per value. The returned function is safe for concurrent use.
func memoizeBytes() byteDescription {
	cache := map[int64]string{}
	suffixes := []string{"B", "KB", "MB", "GB", "TB", "PB"}
	maxLen := len(suffixes) - 1
	var mu sync.Mutex
	return func(b int64) string {
		mu.Lock()
		defer mu.Unlock()
		if text, hit := cache[b]; hit {
			return text
		}
		// Scale down by BytesPerKB until below one unit or out of suffixes.
		value := float64(b)
		idx := 0
		for value/BytesPerKB >= 1 && idx < maxLen {
			value /= BytesPerKB
			idx++
		}
		text := fmt.Sprintf("%.2f%s", value, suffixes[idx])
		cache[b] = text
		return text
	}
}
示例12: createPodWorkers
// createPodWorkers builds a podWorkers instance backed by fake runtime/cache/
// recorder objects for tests, plus the shared map in which its sync function
// records every pod sync it performs (keyed by pod UID, in call order).
//
// The returned map is written to by worker goroutines; the mutex below guards
// those writes, so callers should stop the workers (or otherwise synchronize)
// before reading it.
func createPodWorkers() (*podWorkers, map[types.UID][]syncPodRecord) {
	lock := sync.Mutex{}
	processed := make(map[types.UID][]syncPodRecord)
	fakeRecorder := &record.FakeRecorder{}
	fakeRuntime := &containertest.FakeRuntime{}
	fakeCache := containertest.NewFakeCache(fakeRuntime)
	podWorkers := newPodWorkers(
		// The sync function: instead of syncing a real pod it just appends a
		// record of the call under the lock and reports success.
		func(options syncPodOptions) error {
			func() {
				lock.Lock()
				defer lock.Unlock()
				pod := options.pod
				processed[pod.UID] = append(processed[pod.UID], syncPodRecord{
					name:       pod.Name,
					updateType: options.updateType,
				})
			}()
			return nil
		},
		fakeRecorder,
		queue.NewBasicWorkQueue(&util.RealClock{}),
		time.Second,
		time.Second,
		fakeCache,
	)
	return podWorkers, processed
}
示例13: artificialSeed
// artificialSeed expands the input chain into a set of mutated seed chains,
// running `power` concurrent mutations per word. An empty input is replaced
// by the first element of a random chain; processing stops at the stop token.
func artificialSeed(input []string, power int) [][]string {
	var result [][]string
	if isChainEmpty(input) {
		input = randomChain()[:1]
	}
	var (
		wg sync.WaitGroup
		mu sync.Mutex
	)
	for _, word := range input {
		if word == stop {
			break // stop token terminates the chain
		}
		for n := 0; n < power; n++ {
			wg.Add(1)
			go func(word string, n int) {
				defer wg.Done()
				seeds := createSeeds(mutateChain(word, randomChain()))
				for _, mutation := range seeds {
					mu.Lock()
					result = append(result, mutation)
					mu.Unlock()
					// Yield so other mutation goroutines make progress.
					runtime.Gosched()
				}
			}(word, n)
		}
	}
	wg.Wait()
	/*if config.Debug {
		log.Println("artificialSeed(", dump(input)+", "+fmt.Sprint(power)+")="+fmt.Sprint(result))
	}*/
	return result
}
示例14: goroutineWork
// goroutineWork fetches page i of stargazers for the repository at url and
// appends each star's Unix timestamp into *timestamps, advancing *arrayCursor
// under mutex so multiple workers can share the slice.
//
// It returns the first fetch or parse error encountered, nil otherwise.
func goroutineWork(timestamps *[]int64, mutex *sync.Mutex, i int64, arrayCursor *int, url, token string) error {
	// Append the page parameter with "?" or "&" depending on whether the URL
	// already carries a query string.
	getParam := "?page="
	if strings.Contains(url, "?") {
		getParam = "&page="
	}
	pageUrl := url + getParam + strconv.Itoa(int(i))
	stargazers, _, err := getStargazers(pageUrl, token)
	if err != nil {
		return err
	}
	for _, star := range stargazers {
		var t time.Time
		t, err = time.Parse(time.RFC3339, star.Timestamp)
		if err != nil {
			// BUG FIX: spelling in the user-facing message ("occured" -> "occurred").
			return fmt.Errorf("An error occurred while parsing the timestamp: %v", err)
		}
		timestamp := t.Unix()
		mutex.Lock()
		(*timestamps)[*arrayCursor] = timestamp
		(*arrayCursor) = (*arrayCursor) + 1
		mutex.Unlock()
	}
	return nil
}
示例15: GenMessages
// GenMessages produces keys[key] messages per key to the given topic using a
// graceful producer and returns the produced messages grouped by key, each
// group sorted in ascending order of offset. Message bodies have the form
// "prefix:key:i". Any produce failure fails the gocheck context c.
func GenMessages(c *C, prefix, topic string, keys map[string]int) map[string][]*sarama.ProducerMessage {
	config := NewConfig()
	config.ClientID = "producer"
	config.Kafka.SeedPeers = testKafkaPeers
	producer, err := SpawnGracefulProducer(config)
	c.Assert(err, IsNil)

	produced := make(map[string][]*sarama.ProducerMessage)
	var (
		wg sync.WaitGroup
		mu sync.Mutex
	)
	for key, count := range keys {
		for i := 0; i < count; i++ {
			key := key // shadow for the closure (pre-Go 1.22 loop capture)
			message := fmt.Sprintf("%s:%s:%d", prefix, key, i)
			spawn(&wg, func() {
				prodMsg, err := producer.Produce(
					topic, sarama.StringEncoder(key), sarama.StringEncoder(message))
				c.Assert(err, IsNil)
				log.Infof("*** produced: topic=%s, partition=%d, offset=%d, message=%s",
					topic, prodMsg.Partition, prodMsg.Offset, message)
				mu.Lock()
				produced[key] = append(produced[key], prodMsg)
				mu.Unlock()
			})
		}
	}
	wg.Wait()

	// Sort the produced messages in ascending order of their offsets.
	for _, msgs := range produced {
		sort.Sort(MessageSlice(msgs))
	}
	return produced
}