This article collects typical usage examples of the StoreInt64 function from Go's sync/atomic package. If you have been wondering what exactly Golang's StoreInt64 does, how to use it, or what real-world code that calls it looks like, the hand-picked examples below may help.
A total of 15 code examples of the StoreInt64 function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
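Before the project-level examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the usual pattern: one goroutine publishes an int64 with atomic.StoreInt64, and other goroutines read it with atomic.LoadInt64, never with a plain assignment or read.

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

// lastSeen is a hypothetical shared statistic: a Unix-nano timestamp
// written by one goroutine and read by others without a mutex.
var lastSeen int64

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        // Writer side: StoreInt64 publishes the value atomically.
        atomic.StoreInt64(&lastSeen, time.Now().UnixNano())
    }()
    wg.Wait()

    // Reader side: always pair StoreInt64 with LoadInt64.
    fmt.Println("lastSeen:", atomic.LoadInt64(&lastSeen))
}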
Example 1: commit
func (t *batchTx) commit(stop bool) {
    var err error
    // commit the last tx
    if t.tx != nil {
        if t.pending == 0 && !stop {
            t.backend.mu.RLock()
            defer t.backend.mu.RUnlock()
            atomic.StoreInt64(&t.backend.size, t.tx.Size())
            return
        }
        err = t.tx.Commit()
        atomic.AddInt64(&t.backend.commits, 1)
        t.pending = 0
        if err != nil {
            log.Fatalf("mvcc: cannot commit tx (%s)", err)
        }
    }
    if stop {
        return
    }
    t.backend.mu.RLock()
    defer t.backend.mu.RUnlock()
    // begin a new tx
    t.tx, err = t.backend.db.Begin(true)
    if err != nil {
        log.Fatalf("mvcc: cannot begin tx (%s)", err)
    }
    atomic.StoreInt64(&t.backend.size, t.tx.Size())
}
Example 2: SetLimits
// SetLimits modifies the rate limits of an existing
// ThrottledConn. It is safe to call SetLimits while
// other goroutines are calling Read/Write. This function
// will not block, and the new rate limits will be
// applied within Read/Write, but not necessarily until
// some further I/O at previous rates.
func (conn *ThrottledConn) SetLimits(limits RateLimits) {
    // Using atomic instead of mutex to avoid blocking
    // this function on throttled I/O in an ongoing
    // read or write. Precise synchronized application
    // of the rate limit values is not required.
    // Negative rates are invalid and -1 is a special
    // value used to signal the throttling-initialized
    // state. Silently normalize negative values to 0.
    rate := limits.ReadBytesPerSecond
    if rate < 0 {
        rate = 0
    }
    atomic.StoreInt64(&conn.readBytesPerSecond, rate)
    atomic.StoreInt64(&conn.readUnthrottledBytes, limits.ReadUnthrottledBytes)
    rate = limits.WriteBytesPerSecond
    if rate < 0 {
        rate = 0
    }
    atomic.StoreInt64(&conn.writeBytesPerSecond, rate)
    atomic.StoreInt64(&conn.writeUnthrottledBytes, limits.WriteUnthrottledBytes)
    closeAfterExhausted := int32(0)
    if limits.CloseAfterExhausted {
        closeAfterExhausted = 1
    }
    atomic.StoreInt32(&conn.closeAfterExhausted, closeAfterExhausted)
}
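Example 2 shows only the Store side of the pattern; the matching reads happen inside the connection's Read/Write methods, which are not shown on this page. The following self-contained sketch uses hypothetical names (not the project's actual types) to show how a StoreInt64-based setter pairs with LoadInt64 in the hot path, so limits can be changed without blocking in-flight I/O.

package main

import (
    "fmt"
    "sync/atomic"
)

// throttled is a hypothetical stand-in for a rate-limited connection.
type throttled struct {
    readBytesPerSecond int64
}

// SetReadLimit mirrors the SetLimits idea: normalize, then publish atomically.
func (t *throttled) SetReadLimit(rate int64) {
    if rate < 0 {
        rate = 0
    }
    atomic.StoreInt64(&t.readBytesPerSecond, rate)
}

// read is the hot path: it loads the current limit atomically instead of
// holding a mutex, so SetReadLimit never blocks on an in-flight read.
func (t *throttled) read(n int64) {
    limit := atomic.LoadInt64(&t.readBytesPerSecond)
    fmt.Printf("reading %d bytes at limit %d B/s\n", n, limit)
}

func main() {
    t := &throttled{}
    t.SetReadLimit(64 * 1024)
    go t.SetReadLimit(128 * 1024) // safe to call concurrently with read
    t.read(4096)
}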
Example 3: cronPrintCurrentStatus
// cronPrintCurrentStatus logs the regular status check banner
func cronPrintCurrentStatus() {
    // Grab server status
    stat := common.GetServerStatus()
    if stat == (common.ServerStatus{}) {
        log.Println("Could not print current status")
        return
    }
    // Regular status banner
    log.Printf("status - [goroutines: %d] [memory: %02.3f MB]", stat.NumGoroutine, stat.MemoryMB)
    // HTTP stats
    if common.Static.Config.HTTP {
        log.Printf(" http - [current: %d] [total: %d]", stat.HTTP.Current, stat.HTTP.Total)
        // Reset current HTTP counter
        atomic.StoreInt64(&common.Static.HTTP.Current, 0)
    }
    // UDP stats
    if common.Static.Config.UDP {
        log.Printf(" udp - [current: %d] [total: %d]", stat.UDP.Current, stat.UDP.Total)
        // Reset current UDP counter
        atomic.StoreInt64(&common.Static.UDP.Current, 0)
    }
}
Example 4: newSegmentFile
// newSegmentFile will close the current segment file and open a new one, updating bookkeeping info on the log
func (l *WAL) newSegmentFile() error {
    l.currentSegmentID++
    if l.currentSegmentWriter != nil {
        if err := l.currentSegmentWriter.close(); err != nil {
            return err
        }
        atomic.StoreInt64(&l.stats.OldBytes, int64(l.currentSegmentWriter.size))
    }
    fileName := filepath.Join(l.path, fmt.Sprintf("%s%05d.%s", WALFilePrefix, l.currentSegmentID, WALFileExtension))
    fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)
    if err != nil {
        return err
    }
    l.currentSegmentWriter = NewWALSegmentWriter(fd)
    if stat, err := fd.Stat(); err == nil {
        l.lastWriteTime = stat.ModTime()
    }
    // Reset the current segment size stat
    atomic.StoreInt64(&l.stats.CurrentBytes, 0)
    return nil
}
Example 5: initOffset
func (pc *KafkaPartitionConsumer) initOffset() bool {
    log.Infof("Initializing offset for topic %s, partition %d", pc.topic, pc.partition)
    for {
        offset, err := pc.client.GetOffset(pc.config.Group, pc.topic, pc.partition)
        if err != nil {
            if err == siesta.ErrUnknownTopicOrPartition {
                return pc.resetOffset()
            }
            log.Warning("Cannot get offset for group %s, topic %s, partition %d: %s\n", pc.config.Group, pc.topic, pc.partition, err)
            select {
            case <-pc.stop:
                {
                    log.Warning("PartitionConsumer told to stop trying to get offset, returning")
                    return false
                }
            default:
            }
        } else {
            validOffset := offset + 1
            log.Infof("Initialized offset to %d", validOffset)
            atomic.StoreInt64(&pc.offset, validOffset)
            atomic.StoreInt64(&pc.highwaterMarkOffset, validOffset)
            return true
        }
        time.Sleep(pc.config.InitOffsetBackoff)
    }
}
Example 6: stamp
func (d *Datum) stamp(timestamp time.Time) {
    if timestamp.IsZero() {
        atomic.StoreInt64(&d.Time, time.Now().UTC().UnixNano())
    } else {
        atomic.StoreInt64(&d.Time, timestamp.UnixNano())
    }
}
Example 7: HealthCheck
func (c *Client) HealthCheck(url string) (err error) {
    r, err := defaults.Client(c.Client).Get(url)
    if err != nil {
        atomic.StoreInt64(&c.count, 0)
        //trace.Error(ctx, "Errors.Fail", err)
        log.Println("forensiq:", err)
        return
    }
    body, err := ioutil.ReadAll(r.Body)
    if err != nil {
        atomic.StoreInt64(&c.count, 0)
        //trace.Error(ctx, "Errors.Down", err)
        log.Println("forensiq: down")
        return
    }
    if result := string(body); r.StatusCode != http.StatusOK || result[0] != '1' {
        atomic.StoreInt64(&c.count, 0)
        err = fmt.Errorf("%s: %s", r.Status, result)
        //trace.Error(ctx, "Errors.Status", err)
        log.Println("forensiq:", err)
        return
    }
    atomic.StoreInt64(&c.count, defaults.Int64(c.FailCount, 10))
    return
}
Example 8: backoff
func (r *Consumer) backoff() {
    if atomic.LoadInt32(&r.stopFlag) == 1 {
        atomic.StoreInt64(&r.backoffDuration, 0)
        return
    }
    // pick a random connection to test the waters
    conns := r.conns()
    if len(conns) == 0 {
        // backoff again
        backoffDuration := 1 * time.Second
        atomic.StoreInt64(&r.backoffDuration, backoffDuration.Nanoseconds())
        time.AfterFunc(backoffDuration, r.backoff)
        return
    }
    idx := r.rng.Intn(len(conns))
    choice := conns[idx]
    r.log(LogLevelWarning,
        "(%s) backoff timeout expired, sending RDY 1",
        choice.String())
    // while in backoff only ever let 1 message at a time through
    err := r.updateRDY(choice, 1)
    if err != nil {
        r.log(LogLevelWarning, "(%s) error updating RDY - %s", choice.String(), err)
        backoffDuration := 1 * time.Second
        atomic.StoreInt64(&r.backoffDuration, backoffDuration.Nanoseconds())
        time.AfterFunc(backoffDuration, r.backoff)
        return
    }
    atomic.StoreInt64(&r.backoffDuration, 0)
}
Example 9: Clear
func (s *intSampler) Clear() {
    atomic.StoreInt64(&s.count, 0)
    atomic.StoreInt64(&s.sum, 0)
    for i := range s.cache {
        atomic.StoreInt64(&s.cache[i], 0)
    }
}
Example 10: printLog
func (sp *Speed) printLog(now time.Time) {
    now_used := now.Sub(sp.LastTime).Seconds()
    sp.LastTime = now
    if now_used == 0 || sp.TotalSec == 0 {
        return
    }
    log_format := "total=%d,qps=%d,total_%ds=%d,speed=%.2fMps,total_size=%s,total_suc=%d," + fmt.Sprintf("total_%ds_suc", int64(now_used)) + "=%d"
    size_speed := float64(sp.TotalSecSize) / now_used / (1024 * 1024)
    TotalSize_str := ""
    if sp.TotalSize > gsize {
        TotalSize_str = fmt.Sprintf("%.2fG", float64(sp.TotalSize)/float64(gsize))
    } else if sp.TotalSize > msize {
        TotalSize_str = fmt.Sprintf("%.2fM", float64(sp.TotalSize)/float64(msize))
    } else {
        TotalSize_str = fmt.Sprintf("%.2fK", float64(sp.TotalSize)/float64(1024))
    }
    logMsg := fmt.Sprintf(log_format, sp.Total, int64(float64(sp.TotalSec)/now_used), int64(now_used), sp.TotalSec, size_speed, TotalSize_str, sp.TotalSuccess, sp.TotalSecSuccess)
    sp.PrintFn(logMsg, sp)
    atomic.StoreInt64(&sp.TotalSec, 0)
    atomic.StoreInt64(&sp.TotalSecSize, 0)
    atomic.StoreInt64(&sp.TotalSecSuccess, 0)
}
Example 11: commit
func (t *batchTx) commit(stop bool) {
    var err error
    // commit the last tx
    if t.tx != nil {
        if t.pending == 0 && !stop {
            t.backend.mu.RLock()
            defer t.backend.mu.RUnlock()
            atomic.StoreInt64(&t.backend.size, t.tx.Size())
            return
        }
        start := time.Now()
        err = t.tx.Commit()
        commitDurations.Observe(time.Since(start).Seconds())
        atomic.AddInt64(&t.backend.commits, 1)
        t.pending = 0
        if err != nil {
            plog.Fatalf("cannot commit tx (%s)", err)
        }
    }
    if stop {
        return
    }
    t.backend.mu.RLock()
    defer t.backend.mu.RUnlock()
    // begin a new tx
    t.tx, err = t.backend.db.Begin(true)
    if err != nil {
        plog.Fatalf("cannot begin tx (%s)", err)
    }
    atomic.StoreInt64(&t.backend.size, t.tx.Size())
}
Example 12: midiLoop
func midiLoop(s *portmidi.Stream) {
    noteOn := make([]int64, 0, 128)
    for e := range s.Listen() {
        switch e.Status {
        case 144: // note on
            on := false
            for _, n := range noteOn {
                if n == e.Data1 {
                    on = true
                }
            }
            if !on {
                noteOn = append(noteOn, e.Data1)
            }
            atomic.StoreInt64(&midiNote, e.Data1)
            atomic.StoreInt64(&midiGate, 1)
        case 128: // note off
            for i, n := range noteOn {
                if n == e.Data1 {
                    copy(noteOn[i:], noteOn[i+1:])
                    noteOn = noteOn[:len(noteOn)-1]
                }
            }
            if len(noteOn) > 0 {
                n := noteOn[len(noteOn)-1]
                atomic.StoreInt64(&midiNote, n)
            } else {
                atomic.StoreInt64(&midiGate, 0)
            }
        }
    }
}
Example 13: resume
func (r *Consumer) resume() {
    if atomic.LoadInt32(&r.stopFlag) == 1 {
        atomic.StoreInt64(&r.backoffDuration, 0)
        return
    }
    // pick a random connection to test the waters
    conns := r.conns()
    if len(conns) == 0 {
        r.log(LogLevelWarning, "no connection available to resume")
        r.log(LogLevelWarning, "backing off for %.04f seconds", 1.0)
        r.backoff(time.Second)
        return
    }
    idx := r.rng.Intn(len(conns))
    choice := conns[idx]
    r.log(LogLevelWarning,
        "(%s) backoff timeout expired, sending RDY 1",
        choice.String())
    // while in backoff only ever let 1 message at a time through
    err := r.updateRDY(choice, 1)
    if err != nil {
        r.log(LogLevelWarning, "(%s) error resuming RDY 1 - %s", choice.String(), err)
        r.log(LogLevelWarning, "backing off for %.04f seconds", 1.0)
        r.backoff(time.Second)
        return
    }
    atomic.StoreInt64(&r.backoffDuration, 0)
}
Example 14: Run
func (t *Task) Run() {
    var tag int64
    t.wg.Add(1)
    defer t.wg.Done()
LOOP:
    for {
        select {
        case <-t.pauseCh:
            atomic.StoreInt64(&tag, 1)
        case <-t.continueCh:
            atomic.StoreInt64(&tag, 0)
        case <-t.stopCh:
            break LOOP
        default:
            if atomic.LoadInt64(&tag) == 0 {
                t.do(t.index + 1)
                t.index++
                if t.index >= t.cnt {
                    atomic.StoreInt64(&t.state, FINISH)
                    break LOOP
                }
            } else {
                time.Sleep(time.Second * 1)
            }
        }
    }
}
Example 15: work
func (w *worker) work(messages chan *Msg) {
    for {
        select {
        case message := <-messages:
            atomic.StoreInt64(&w.startedAt, time.Now().UTC().Unix())
            w.currentMsg = message
            if w.process(message) {
                w.manager.confirm <- message
            }
            atomic.StoreInt64(&w.startedAt, 0)
            w.currentMsg = nil
            // Attempt to tell fetcher we're finished.
            // Can be used when the fetcher has slept due
            // to detecting an empty queue to requery the
            // queue immediately if we finish work.
            select {
            case w.manager.fetch.FinishedWork() <- true:
            default:
            }
        case w.manager.fetch.Ready() <- true:
            // Signaled to fetcher that we're
            // ready to accept a message
        case <-w.stop:
            w.exit <- true
            return
        }
    }
}