This page collects typical usage examples of the Golang NewSemaphore function from github.com/toolkits/concurrent/semaphore. If you are unsure what NewSemaphore does, how to call it, or what real code using it looks like, the curated examples below should help.
14 code examples of the NewSemaphore function are shown below, ordered by popularity.
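All of the examples share one bounded-concurrency pattern: NewSemaphore(n) creates a counting semaphore with n slots, Acquire() blocks while all slots are taken, and Release() frees one. Here is a minimal, self-contained sketch of that pattern; the send function, the item count, and the final drain loop are illustrative stand-ins, not part of the library.

package main

import (
    "fmt"
    "time"

    nsema "github.com/toolkits/concurrent/semaphore"
)

// send is a hypothetical stand-in for an RPC or HTTP call.
func send(item int) {
    time.Sleep(50 * time.Millisecond)
    fmt.Println("sent", item)
}

func main() {
    concurrent := 3
    sema := nsema.NewSemaphore(concurrent) // at most 3 sends in flight

    for i := 0; i < 10; i++ {
        sema.Acquire() // blocks while all slots are in use
        go func(item int) {
            defer sema.Release() // free the slot when done
            send(item)
        }(i)
    }

    // Drain: reacquire every slot so all in-flight sends finish before exit.
    // The long-running tasks in the examples below never need this, because they loop forever.
    for i := 0; i < concurrent; i++ {
        sema.Acquire()
    }
}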
Example 1: startSendTasks
// TODO: add control over the send tasks, e.g. stop
func startSendTasks() {
    cfg := g.Config()

    // init semaphores
    judgeConcurrent := cfg.Judge.MaxIdle / 2
    graphConcurrent := cfg.Graph.MaxIdle / 2
    if judgeConcurrent < 1 {
        judgeConcurrent = 1
    }
    if graphConcurrent < 1 {
        graphConcurrent = 1
    }
    semaSendToJudge = nsema.NewSemaphore(judgeConcurrent)
    semaSendToGraph = nsema.NewSemaphore(graphConcurrent)
    semaSendToGraphMigrating = nsema.NewSemaphore(graphConcurrent)

    // init send goroutines
    for node := range cfg.Judge.Cluster {
        queue := JudgeQueues[node]
        go forward2JudgeTask(queue, node, judgeConcurrent)
    }
    for node := range cfg.Graph.Cluster {
        queue := GraphQueues[node]
        go forward2GraphTask(queue, node, graphConcurrent)
    }
    if cfg.Graph.Migrating {
        for node := range cfg.Graph.ClusterMigrating {
            queue := GraphMigratingQueues[node]
            go forward2GraphMigratingTask(queue, node, graphConcurrent)
        }
    }
}
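Note how the semaphore sizes are derived from the RPC connection pools: judgeConcurrent is half of Judge.MaxIdle, floored at 1, presumably so that in-flight sends can never exhaust the shared pool's idle connections. The code does not state the rationale, so treat this reading as a guess.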
Example 2: SWifMetricToTransfer
func SWifMetricToTransfer() {
    log.Println("start SWifMetricToTransfer")
    sema := nsema.NewSemaphore(10)

    for {
        items := IfstatsQueue.PopBackBy(5000)
        count := len(items)
        if count == 0 {
            time.Sleep(DefaultSendTaskSleepInterval)
            continue
        }

        mvsSend := make([]*model.MetricValue, count)
        for i := 0; i < count; i++ {
            mvsSend[i] = items[i].(*model.MetricValue)
        }

        // synchronous call + bounded concurrency
        sema.Acquire()
        go func(mvsend []*model.MetricValue) {
            defer sema.Release()
            g.SendToTransfer(mvsend)
        }(mvsSend)
    }
}
Example 3: collectDataOnce
func collectDataOnce() int {
    keys := config.Keys()
    keysLen := len(keys)

    // concurrency + synchronization control
    cfg := g.Config().Collector
    concurrent := int(cfg.Concurrent)
    if concurrent < 1 || concurrent > 50 {
        concurrent = 10
    }
    sema := tsema.NewSemaphore(concurrent)

    batch := int(cfg.Batch)
    if batch < 100 || batch > 1000 {
        batch = 200 // batch must not be too small, or the result channel would have to be very large
    }
    batchCnt := (keysLen + batch - 1) / batch
    rch := make(chan int, batchCnt+1)

    i := 0
    for i < keysLen {
        leftLen := keysLen - i
        fetchSize := batch // process up to batch keys per round
        if leftLen < fetchSize {
            fetchSize = leftLen
        }
        fetchKeys := keys[i : i+fetchSize]

        // collect data concurrently
        sema.Acquire()
        go func(keys []string, keySize int) {
            defer sema.Release()
            size, _ := fetchItemsAndStore(keys, keySize)
            rch <- size
        }(fetchKeys, fetchSize)

        i += fetchSize
    }

    collectCnt := 0
    for i := 0; i < batchCnt; i++ {
        collectCnt += <-rch
    }
    return collectCnt
}
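The expression (keysLen + batch - 1) / batch is the usual integer ceiling: with keysLen = 1050 and batch = 200 it yields batchCnt = 6 (five batches of 200 and one of 50). The receive loop at the end drains exactly batchCnt results, so the function returns only after every worker goroutine has reported its count.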
Example 4: forward2JudgeTask
// Periodic Judge task: pushes data from the Judge send buffer to Judge through the RPC connection pool.
func forward2JudgeTask(Q *list.SafeLinkedListLimited, node string, concurrent int) {
    batch := g.Config().Judge.Batch // at most batch items per send
    addr := g.Config().Judge.Cluster[node]
    sema := nsema.NewSemaphore(concurrent)

    for {
        items := Q.PopBack(batch)
        count := len(items)
        if count == 0 {
            time.Sleep(DefaultSendTaskSleepInterval)
            continue
        }

        judgeItems := make([]*cmodel.JudgeItem, count)
        for i := 0; i < count; i++ {
            judgeItems[i] = items[i].(*cmodel.JudgeItem)
        }

        // synchronous call + bounded concurrency
        sema.Acquire()
        go func(addr string, judgeItems []*cmodel.JudgeItem, count int) {
            defer sema.Release()
            resp := &cmodel.SimpleRpcResponse{}
            var err error
            sendOk := false
            for i := 0; i < 3; i++ { // retry at most 3 times
                err = JudgeConnPools.Call(addr, "Judge.Send", judgeItems, resp)
                if err == nil {
                    sendOk = true
                    break
                }
                time.Sleep(time.Millisecond * 10)
            }

            if !sendOk {
                log.Printf("send judge %s fail: %v", addr, err)
                // statistics
                proc.SendToJudgeFailCnt.IncrBy(int64(count))
            } else {
                // statistics
                proc.SendToJudgeCnt.IncrBy(int64(count))
            }
        }(addr, judgeItems, count)
    }
}
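Note the failure accounting: a batch is recorded as failed only after all three attempts error out, and the whole batch of count items goes to either the success or the failure counter; there is no partial accounting, since Judge.Send is treated as all-or-nothing by the sender.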
Example 5: forward2GraphMigratingTask
// Periodic Graph task: redundant sends performed while data is being migrated.
func forward2GraphMigratingTask(Q *list.SafeLinkedListLimited, node string, concurrent int) {
    batch := g.Config().Graph.Batch // at most batch items per send
    addr := g.Config().Graph.ClusterMigrating[node]
    sema := nsema.NewSemaphore(concurrent)

    for {
        items := Q.PopBack(batch)
        count := len(items)
        if count == 0 {
            time.Sleep(DefaultSendTaskSleepInterval)
            continue
        }

        graphItems := make([]*cmodel.GraphItem, count)
        for i := 0; i < count; i++ {
            graphItems[i] = items[i].(*cmodel.GraphItem)
        }

        sema.Acquire()
        go func(addr string, graphItems []*cmodel.GraphItem, count int) {
            defer sema.Release()
            resp := &cmodel.SimpleRpcResponse{}
            var err error
            sendOk := false
            for i := 0; i < 3; i++ { // retry at most 3 times
                err = GraphMigratingConnPools.Call(addr, "Graph.Send", graphItems, resp)
                if err == nil {
                    sendOk = true
                    break
                }
                time.Sleep(time.Millisecond * 10) // send failed, sleep 10ms
            }

            if !sendOk {
                log.Printf("send to graph migrating %s fail: %v", addr, err)
                // statistics
                proc.SendToGraphMigratingFailCnt.IncrBy(int64(count))
            } else {
                // statistics
                proc.SendToGraphMigratingCnt.IncrBy(int64(count))
            }
        }(addr, graphItems, count)
    }
}
Example 6: forward2TsdbTask
// Periodic Tsdb task: sends data to tsdb via its API.
func forward2TsdbTask(concurrent int) {
    batch := g.Config().Tsdb.Batch // at most batch items per send
    retry := g.Config().Tsdb.MaxRetry
    sema := nsema.NewSemaphore(concurrent)

    for {
        items := TsdbQueue.PopBackBy(batch)
        if len(items) == 0 {
            time.Sleep(DefaultSendTaskSleepInterval)
            continue
        }

        // synchronous call + bounded concurrency
        sema.Acquire()
        go func(itemList []interface{}) {
            defer sema.Release()
            var tsdbBuffer bytes.Buffer
            for i := 0; i < len(itemList); i++ {
                tsdbItem := itemList[i].(*cmodel.TsdbItem)
                tsdbBuffer.WriteString(tsdbItem.TsdbString())
                tsdbBuffer.WriteString("\n")
            }

            var err error
            for i := 0; i < retry; i++ {
                err = TsdbConnPoolHelper.Send(tsdbBuffer.Bytes())
                if err == nil {
                    proc.SendToTsdbCnt.IncrBy(int64(len(itemList)))
                    break
                }
                time.Sleep(100 * time.Millisecond)
            }
            if err != nil {
                proc.SendToTsdbFailCnt.IncrBy(int64(len(itemList)))
                log.Println(err)
                return
            }
        }(items)
    }
}
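The buffer here joins one line per item with "\n", which matches OpenTSDB's line-oriented ingest format; TsdbString presumably renders a single put line per metric, though the rendering is not shown in this snippet.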
Example 7: forward2InfluxdbTask
// Periodic Influxdb task: pushes data from the send buffer to Influxdb through the API connection pool.
// A single cluster node may be configured with multiple influxdb addresses; sends now go to all of them concurrently.
func forward2InfluxdbTask(Q *list.SafeListLimited, node string, concurrent int) {
    cfg := g.Config()
    batch := cfg.Influxdb.Batch // at most batch items per send
    sema := nsema.NewSemaphore(concurrent * len(cfg.Influxdb.Cluster2[node].Addrs))
    retry := cfg.Influxdb.MaxRetry

    for {
        items := Q.PopBackBy(batch)
        count := len(items)
        if count == 0 {
            time.Sleep(DefaultSendTaskSleepInterval)
            continue
        }

        pts := make([]*client.Point, count)
        for i := 0; i < count; i++ {
            pts[i] = items[i].(*client.Point)
        }

        for _, addr := range cfg.Influxdb.Cluster2[node].Addrs {
            sema.Acquire()
            go coreSend2Influxdb(addr, sema, retry, pts)
        }
    }
}
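The semaphore is sized concurrent * len(Addrs) because each batch is fanned out to every address in the node's cluster: each address still gets up to concurrent sends in flight, and one slow address cannot starve the others of slots.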
Example 8: Start
import (
    "log"
    "net/http"
    "time"

    cmodel "github.com/open-falcon/common/model"
    tsema "github.com/toolkits/concurrent/semaphore"
    "github.com/toolkits/container/nmap"
    thttpclient "github.com/toolkits/http/httpclient"
    ttime "github.com/toolkits/time"

    "github.com/open-falcon/nodata/config"
    "github.com/open-falcon/nodata/g"
)

var (
    MockMap = nmap.NewSafeMap()
    sema    = tsema.NewSemaphore(1)
)

func Start() {
    if !g.Config().Sender.Enabled {
        log.Println("sender.Start warning, not enabled")
        return
    }
    startGaussCron()
    log.Println("sender.Start ok")
}

func AddMock(key string, endpoint string, metric string, tags string, ts int64, dstype string, step int64, value interface{}) {
    item := &cmodel.JsonMetaData{metric, endpoint, ts, step, value, dstype, tags}
    MockMap.Put(key, item)
}
Example 9: UpdateIndexOne
import (
    "log"
    "time"

    cmodel "github.com/open-falcon/common/model"
    cutils "github.com/open-falcon/common/utils"
    "github.com/open-falcon/graph/g"
    proc "github.com/open-falcon/graph/proc"
    nsema "github.com/toolkits/concurrent/semaphore"
    ntime "github.com/toolkits/time"
)

const (
    DefaultUpdateStepInSec     = 2 * 24 * 3600 // update window; must never exceed the delete window. Data from the last two days can be used to build the index
    ConcurrentOfUpdateIndexAll = 1
)

var (
    semaIndexUpdateAllTask = nsema.NewSemaphore(ConcurrentOfUpdateIndexAll) // concurrency controller for the full-sync task
    semaIndexUpdateAll     = nsema.NewSemaphore(4)                          // concurrency control for mysql operations during a full index update
)

// Updates the index for a single monitoring item. Used to add an index manually; normally not needed.
func UpdateIndexOne(endpoint string, metric string, tags map[string]string, dstype string, step int) error {
    log.Println("1")
    itemDemo := &cmodel.GraphItem{
        Endpoint: endpoint,
        Metric:   metric,
        Tags:     tags,
        DsType:   dstype,
        Step:     step,
    }
    md5 := itemDemo.Checksum()
    uuid := itemDemo.UUID()
Example 10: StartIndexUpdateIncrTask
import (
    "time"

    nsema "github.com/toolkits/concurrent/semaphore"
    ntime "github.com/toolkits/time"

    cmodel "github.com/open-falcon/common/model"
    cutils "github.com/open-falcon/common/utils"
    "github.com/open-falcon/graph/g"
    proc "github.com/open-falcon/graph/proc"
)

const (
    IndexUpdateIncrTaskSleepInterval = time.Duration(1) * time.Second // interval between incremental index updates
)

var (
    semaUpdateIndexIncr = nsema.NewSemaphore(2) // concurrency control for mysql operations during incremental index updates
)

// Starts the asynchronous, incremental index-update task.
func StartIndexUpdateIncrTask() {
    for {
        time.Sleep(IndexUpdateIncrTaskSleepInterval)
        startTs := time.Now().Unix()
        cnt := updateIndexIncr()
        endTs := time.Now().Unix()
        // statistics
        proc.IndexUpdateIncrCnt.SetCnt(int64(cnt))
        proc.IndexUpdateIncr.Incr()
        proc.IndexUpdateIncr.PutOther("lastStartTs", ntime.FormatTs(startTs))
        proc.IndexUpdateIncr.PutOther("lastTimeConsumingInSec", endTs-startTs)
    }
}
Example 11: StartIndexDeleteTask
import (
    "log"
    "time"

    Mdb "github.com/open-falcon/common/db"
    "github.com/open-falcon/task/proc"
    TSemaphore "github.com/toolkits/concurrent/semaphore"
    cron "github.com/toolkits/cron"
)

const (
    indexDeleteCronSpec = "0 0 2 ? * 6" // runs once a week, at 02:00 on day-of-week 6 (Saturday)
    deteleStepInSec     = 7 * 24 * 3600 // maximum lifetime of an index entry, in seconds
)

var (
    semaIndexDelete = TSemaphore.NewSemaphore(1)
    indexDeleteCron = cron.New()
)

// Starts the periodic index-deletion task.
func StartIndexDeleteTask() {
    indexDeleteCron.AddFunc(indexDeleteCronSpec, func() {
        DeleteIndex()
    })
    indexDeleteCron.Start()
}

func StopIndexDeleteTask() {
    indexDeleteCron.Stop()
}
Example 12: Start
import (
    "io/ioutil"
    "log"
    "net/http"
    "strings"
    "sync"
    "time"

    nsema "github.com/toolkits/concurrent/semaphore"
    nmap "github.com/toolkits/container/nmap"
    ncron "github.com/toolkits/cron"
    nhttpclient "github.com/toolkits/http/httpclient"
    ntime "github.com/toolkits/time"
)

var (
    monitorCron = ncron.New()
    sema        = nsema.NewSemaphore(1)
    statusCache = nmap.NewSafeMap()
    alarmCache  = nmap.NewSafeMap()
    cronSpec    = "30 * * * * ?"
)

func Start() {
    if !g.Config().Monitor.Enabled {
        log.Println("monitor.Start, not enable")
        return
    }
    monitorCron.AddFunc(cronSpec, func() {
        monitor()
    })
    monitorCron.Start()
Example 13: forward2TransferTask
func forward2TransferTask(Q *nlist.SafeListLimited, concurrent int32) {
    cfg := g.Config()
    batch := int(cfg.Transfer.Batch)
    maxConns := int64(cfg.Transfer.MaxConns)
    retry := int(cfg.Transfer.Retry)
    if retry < 1 {
        retry = 1
    }
    sema := nsema.NewSemaphore(int(concurrent))
    transNum := len(TransferHostnames)

    for {
        items := Q.PopBackBy(batch)
        count := len(items)
        if count == 0 {
            time.Sleep(time.Millisecond * 50)
            continue
        }

        transItems := make([]*cmodel.MetricValue, count)
        for i := 0; i < count; i++ {
            transItems[i] = convert(items[i].(*cmodel.MetaData))
        }

        sema.Acquire()
        go func(transItems []*cmodel.MetricValue, count int) {
            defer sema.Release()
            var err error

            // Walk the transfer list starting at a random index until the send succeeds
            // or the list is exhausted; the random start helps spread load away from slow transfers.
            resp := &g.TransferResp{}
            sendOk := false
            for j := 0; j < retry && !sendOk; j++ {
                rint := rand.Int()
                for i := 0; i < transNum && !sendOk; i++ {
                    idx := (i + rint) % transNum
                    host := TransferHostnames[idx]
                    addr := TransferMap[host]

                    // Skip hosts that are slow to connect, otherwise they badly hurt the send rate.
                    cc := pfc.GetCounterCount(host)
                    if cc >= maxConns {
                        continue
                    }

                    pfc.Counter(host, 1)
                    err = SenderConnPools.Call(addr, "Transfer.Update", transItems, resp)
                    pfc.Counter(host, -1)

                    if err == nil {
                        sendOk = true
                        // statistics
                        TransferSendCnt[host].IncrBy(int64(count))
                    } else {
                        // statistics
                        TransferSendFailCnt[host].IncrBy(int64(count))
                    }
                }
            }

            // statistics
            if !sendOk {
                if cfg.Debug {
                    log.Printf("send to transfer fail, connpool:%v", SenderConnPools.Proc())
                }
                pfc.Meter("SendFail", int64(count))
            } else {
                pfc.Meter("Send", int64(count))
            }
        }(transItems, count)
    }
}
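Picking a fresh random offset rint on every retry round means each batch walks the transfer list in a different rotation via (i + rint) % transNum, so a single slow or saturated transfer only delays the batches that happen to start on it. Combined with the maxConns connection-count check, this approximates client-side load balancing without any shared scheduling state.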
Example 14: Start
import (
    "io/ioutil"
    "log"
    "net/http"
    "strings"
    "time"

    "github.com/open-falcon/task/g"
    "github.com/open-falcon/task/proc"
    sema "github.com/toolkits/concurrent/semaphore"
    cron "github.com/toolkits/cron"
    nhttpclient "github.com/toolkits/http/httpclient"
)

var (
    collectorCron     = cron.New()
    collectorCronSpec = "0 * * * * ?"
    collectorSema     = sema.NewSemaphore(1)
    srcUrlFmt         = "http://%s/statistics/all"
    destUrl           = "http://127.0.0.1:1988/v1/push"
)

func Start() {
    if !g.Config().Collector.Enabled {
        log.Println("collector.Start, not enable")
        return
    }
    // init urls
    if g.Config().Collector.DestUrl != "" {
        destUrl = g.Config().Collector.DestUrl
    }
    if g.Config().Collector.SrcUrlFmt != "" {