本文整理汇总了Golang中time.Nanoseconds函数的典型用法代码示例。如果您正苦于以下问题:Golang time.Nanoseconds函数的具体用法?Golang time.Nanoseconds怎么用?Golang time.Nanoseconds使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Nanoseconds函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: time_exec
// time_exec measures the wall-clock time taken by run() for the given
// node and message counts and prints the elapsed nanoseconds.
func time_exec(num_nodes int, num_msgs int) {
	start := time.Nanoseconds()
	run(num_nodes, num_msgs)
	fmt.Println("Took:", time.Nanoseconds()-start)
}
示例2: LockOrTimeout
// LockOrTimeout proceeds as Lock, except that it returns an os.EAGAIN
// error, if a lock cannot be obtained within ns nanoseconds.
// Each pass of the loop first tries to claim a slot under fdl.lk; if
// none is free it waits for either a release signal on fdl.ch or the
// expiry of the remaining budget (ns - waitsofar), then retries.
func (fdl *FDLimiter) LockOrTimeout(ns int64) os.Error {
waitsofar := int64(0) // total nanoseconds spent waiting so far
for {
// Try to get an fd
fdl.lk.Lock()
if fdl.count < fdl.limit {
fdl.count++
fdl.lk.Unlock()
return nil
}
fdl.lk.Unlock()
// Or, wait for an fd or timeout
if waitsofar >= ns {
return os.EAGAIN
}
t0 := time.Nanoseconds()
// alarmOnce presumably delivers on its channel after the given
// number of nanoseconds — TODO confirm against its definition.
alrm := alarmOnce(ns - waitsofar)
select {
case <-alrm:
case <-fdl.ch:
}
// Charge only the time actually spent blocked in the select.
waitsofar += time.Nanoseconds() - t0
}
panic("FDLimiter, unreachable")
}
示例3: summarize
//Read back the workSumary of each worker.
//Calculates the average response time and total time for the
//whole request.
//Blocks reading self.channel until every running task has reported,
//then signals completion on self.ctrlChan.
func (self *Master) summarize() {
log.Print("Tasks distributed. Waiting for summaries...")
self.summary.Start = time.Nanoseconds()
workers := self.runningTasks // snapshot before it is decremented below
var avgs float64
for tSummary := range self.channel {
//remove the worker from master
self.runningTasks -= 1
avgs += float64(tSummary.Avg)
self.summary.TotalSuc += tSummary.SucCount
self.summary.TotalErr += tSummary.ErrCount
self.summary.Max = Max(self.summary.Max, tSummary.Max)
self.summary.Min = Min(self.summary.Min, tSummary.Min)
//if no workers left
if self.runningTasks == 0 {
// Min == -1 appears to be a "no sample recorded" sentinel —
// NOTE(review): confirm against how summary.Min is initialized.
if self.summary.Min == -1 {
self.summary.Min = 0
}
self.summary.End = time.Nanoseconds()
self.summary.Elapsed = (self.summary.End - self.summary.Start)
self.summary.Avg = float64(avgs / float64(workers))
// Elapsed is ns; /1e6 gives ms, so this is successes per second.
self.summary.RequestsPerSecond = int64(self.summary.TotalSuc*1000) / (self.summary.Elapsed / 1000000)
break
}
}
self.ctrlChan <- true
}
示例4: main
// main builds three affine transforms, fits and rescales them so the
// rendered figure fills a fixed-size float table, times the Render
// call, and writes the result to testFile.png.
func main() {
//runtime.GOMAXPROCS(1)
const tableRez = 1000
// Base transform: uniform scale of .58 with a .2 shear term.
mat := Eye(2)
mat.Scale(.58)
mat.Set(1, 0, .2)
const transCount = 3
trans := make([]*affine.Affine, transCount)
trans[0] = affine.FromOrigin2(mat, 0, 0)
trans[1] = affine.FromOrigin2(mat, .5, 1)
trans[2] = affine.FromOrigin2(mat, 1, 0)
// Fit the bounding box of the transforms, then compute a shift and
// scale that map it into the table with a small margin.
x1, y1, x2, y2 := fit(trans)
//print (x1,x2,y1,y2,"\n")
shift := Zeros(2, 1)
shift.Set(0, 0, -x1)
shift.Set(1, 0, -y1)
scale := (tableRez - 4) / math.Fmax(x2-x1, y2-y1)
shift.Scale(scale)
shift.AddDense(Scaled(Ones(2, 1), 2))
//print (scale," "+shift.String(),"\n")
// Rebuild each transform with its origin scaled and shifted.
for i, t := range trans {
origin := Scaled(t.GetOrigin(), scale)
origin.AddDense(shift)
trans[i] = affine.FromOrigin(t.GetMat(), origin)
}
x1, y1, x2, y2 = fit(trans)
ix1 := int(x1)
ix2 := int(x2)
iy1 := int(y1)
iy2 := int(y2)
//print (int(x1)," ",int(x2)," ",int(y1)," ",int(y2),"\n")
rezx := ix2 + 2
rezy := iy2 + 2
ft := floatTable.NewFloatTable(rezx, rezy, channelCount)
ft2 := floatTable.NewFloatTable(rezx, rezy, channelCount)
ft.Fill(fill)
// Time the render; printed below in milliseconds.
t := time.Nanoseconds()
Render(ix1, ix2, iy1, iy2, ft, ft2, trans)
t = time.Nanoseconds() - t
print("Time", "\n")
print(t/1000000, "\n")
println("Saving image")
// NOTE(review): err is printed even when nil and the file is never
// closed — consider checking err and closing f after MakeImage.
f, err := os.Open("testFile.png", os.O_WRONLY|os.O_CREAT, 0666)
println(err)
MakeImage(f, ft, MakeColorizer(ft))
}
示例5: main
// main runs the binary-trees benchmark: a stretch tree, a long-lived
// tree, and batches of short-lived trees, printing itemCheck sums and
// finally standard gotest benchmark output.
func main() {
	flag.Parse()
	start := time.Nanoseconds()

	maxDepth := *n
	if minDepth+2 > *n {
		maxDepth = minDepth + 2
	}
	stretchDepth := maxDepth + 1
	fmt.Printf("stretch tree of depth %d\t check: %d\n",
		stretchDepth, bottomUpTree(0, stretchDepth).itemCheck())

	longLivedTree := bottomUpTree(0, maxDepth)

	for depth := minDepth; depth <= maxDepth; depth += 2 {
		iterations := 1 << uint(maxDepth-depth+minDepth)
		sum := 0
		for i := 1; i <= iterations; i++ {
			sum += bottomUpTree(i, depth).itemCheck()
			sum += bottomUpTree(-i, depth).itemCheck()
		}
		fmt.Printf("%d\t trees of depth %d\t check: %d\n", iterations*2, depth, sum)
	}
	fmt.Printf("long lived tree of depth %d\t check: %d\n", maxDepth, longLivedTree.itemCheck())

	// Standard gotest benchmark output, collected by build dashboard.
	gcstats("BenchmarkTree", *n, time.Nanoseconds()-start)
}
示例6: Log
// Log formats message with v and dispatches it to the logger for the
// given level, prefixed with a millisecond timestamp. Levels above
// MaxLevel are dropped. With Differential set, times are relative to
// the first Log call (StartTime == -1 means "uninitialized");
// otherwise the absolute time.Nanoseconds() value is used.
// NOTE(review): message is later used as a fmt format string for v,
// so callers must not pass untrusted text containing % verbs.
func Log(level int, message string, v ...interface{}) {
if level > MaxLevel {
return
}
var curtime int64
if Differential {
if StartTime == -1 {
StartTime = time.Nanoseconds()
}
curtime = time.Nanoseconds() - StartTime
} else {
curtime = time.Nanoseconds()
}
// Milliseconds
message = fmt.Sprintf("%dms - %s", curtime/1000000, message)
switch level {
case L_Fatal:
// Fatal logs the formatted message, then panics with it.
s := fmt.Sprintf(message, v...)
fatal.Output(2, s)
panic(s)
case L_Error:
error.Output(2, fmt.Sprintf(message, v...))
case L_Warning:
warning.Output(2, fmt.Sprintf(message, v...))
case L_Info:
info.Output(2, fmt.Sprintf(message, v...))
case L_Debug:
debug.Output(2, fmt.Sprintf(message, v...))
}
}
示例7: benchTask
// Use a single redis.AsyncClient with specified number
// of workers to bench concurrent load on the async client
//
// benchTask returns the elapsed wall-clock nanoseconds for all workers
// to finish; on client-creation failure it returns (-1, error).
func benchTask(taskspec taskSpec, iterations int, workers int, printReport bool) (delta int64, err os.Error) {
signal := make(chan int, workers) // Buffering optional but sensible.
spec := redis.DefaultSpec().Db(13).Password("go-redis")
client, e := redis.NewAsynchClientWithSpec(spec)
if e != nil {
log.Println("Error creating client for worker: ", e)
return -1, e
}
// defer client.Quit() // will be deprecated soon
defer client.RedisClient().Quit()
t0 := time.Nanoseconds()
// Every worker shares the single async client.
for i := 0; i < workers; i++ {
id := fmt.Sprintf("%d", i)
go taskspec.task(id, signal, client, iterations)
}
// Wait for each worker to signal completion.
for i := 0; i < workers; i++ {
<-signal
}
delta = time.Nanoseconds() - t0
// for i := 0; i < workers; i++ {
// clients[i].Quit()
// }
//
if printReport {
report("concurrent "+taskspec.name, delta, iterations*workers)
}
return
}
示例8: benchTask
// benchTask runs taskspec concurrently on one dedicated client per
// worker, returning the wall-clock nanoseconds until all workers are
// done and optionally printing a report.
func benchTask(taskspec taskSpec, iterations int, workers int, printReport bool) (delta int64, err os.Error) {
	done := make(chan int, workers) // Buffering optional but sensible.
	clients, cerr := makeConcurrentClients(workers)
	if cerr != nil {
		return 0, cerr
	}
	start := time.Nanoseconds()
	for w := 0; w < workers; w++ {
		go taskspec.task(fmt.Sprintf("%d", w), done, clients[w], iterations)
	}
	for w := 0; w < workers; w++ {
		<-done
	}
	delta = time.Nanoseconds() - start
	for w := 0; w < workers; w++ {
		clients[w].Quit()
	}
	if printReport {
		report("concurrent "+taskspec.name, delta, iterations*workers)
	}
	return
}
示例9: buildExternal
// buildExternal downloads and builds external packages, and
// reports their build status to the dashboard.
// It will re-build all packages after pkgBuildInterval nanoseconds or
// a new release tag is found.
func (b *Builder) buildExternal() {
var prevTag string
var nextBuild int64 // earliest time (ns) the next periodic rebuild is due
for {
time.Sleep(waitInterval)
// Sync the Go tree; on failure just retry on the next tick.
err := run(nil, goroot, "hg", "pull", "-u")
if err != nil {
log.Println("hg pull failed:", err)
continue
}
hash, tag, err := firstTag(releaseRe)
if err != nil {
log.Println(err)
continue
}
if *verbose {
log.Println("latest release:", tag)
}
// don't rebuild if there's no new release
// and it's been less than pkgBuildInterval
// nanoseconds since the last build.
if tag == prevTag && time.Nanoseconds() < nextBuild {
continue
}
// build will also build the packages
if err := b.buildHash(hash); err != nil {
log.Println(err)
continue
}
// Only record success: a failed build is retried next tick.
prevTag = tag
nextBuild = time.Nanoseconds() + pkgBuildInterval
}
}
示例10: DoTurn
//DoTurn is where you should do your bot's actual work.
// It runs each strategy in priority order, stopping early if the turn
// deadline expires, records the name and duration of any strategy that
// takes more than 100ms, then issues all queued moves.
func (m *Map) DoTurn() {
	strategies := []struct {
		fn   func()
		name string
	}{
		{func() { m.closeCombat() }, "closeCombat"},
		{func() { m.defend() }, "defend"},
		{func() { m.reinforce() }, "reinforce"},
		{func() { m.forage() }, "forage"},
		{func() { m.attackEnemyHill() }, "enemyHill"},
		{func() { m.scout() }, "scout"},
	}
	times := make([]string, 0, len(strategies))
	for _, s := range strategies {
		if m.deadlineExpired() {
			break
		}
		start := time.Nanoseconds()
		s.fn()
		delta_ms := float64(time.Nanoseconds()-start) / 1e6
		if delta_ms > 100 {
			times = append(times, fmt.Sprintf("%s %.2f", s.name, delta_ms))
		}
	}
	m.moveAll()
	if len(times) > 0 {
		// Fix: log.Print does not interpret format verbs; the original
		// printed the literal "%s". Use Printf to interpolate timings.
		log.Printf("timings: %s", strings.Join(times, ", "))
	}
}
示例11: receiver
// receiver drains ints from pipe, timing every batch of 100,000
// messages, feeding each batch duration into a moving average, and
// printing the batch index and duration. A value of -1 ends the loop;
// 1 is written to done_pipe on exit.
func receiver(pipe chan int, done_pipe chan int) {
	batchStart := time.Nanoseconds()
	var now int64
	received := 0
	stats := NewMovingAverage(10)
	batch := 0
	running := true
	for running {
		if <-pipe == -1 {
			running = false
		}
		received++
		if received == 100000 {
			now = time.Nanoseconds()
			stats.SetNextT(now - batchStart)
			//fmt.Printf("%d %d\n", batch, stats.CurrentAverage())
			fmt.Printf("%d %d\n", batch, now-batchStart)
			batch++
			received = 0
			batchStart = now
		}
	}
	done_pipe <- 1
}
示例12: send
// dispatch input from channel as \r\n terminated line to peer
// flood controlled using hybrid's algorithm if conn.Flood is true
//
// Each line is charged 2s plus 1/120s per character; if the running
// "badness" tally exceeds 10s of credit, sending pauses for the
// current line's cost to avoid the server's excess-flood limit.
func (conn *Conn) send() {
lastsent := time.Nanoseconds()
var badness, linetime, second int64 = 0, 0, 1000000000
for line := range conn.out {
// Hybrid's algorithm allows for 2 seconds per line and an additional
// 1/120 of a second per character on that line.
linetime = 2*second + int64(len(line))*second/120
if !conn.Flood && conn.connected {
// No point in tallying up flood protection stuff until connected
if badness += linetime + lastsent - time.Nanoseconds(); badness < 0 {
// negative badness times are badness...
badness = int64(0)
}
}
lastsent = time.Nanoseconds()
// If we've sent more than 10 second's worth of lines according to the
// calculation above, then we're at risk of "Excess Flood".
if badness > 10*second && !conn.Flood {
// so sleep for the current line's time value before sending it
time.Sleep(linetime)
}
// A write error tears down the connection and ends this goroutine.
if _, err := conn.io.WriteString(line + "\r\n"); err != nil {
conn.error("irc.send(): %s", err.String())
conn.shutdown()
break
}
conn.io.Flush()
if conn.Debug {
fmt.Println(conn.Timestamp().Format(conn.TSFormat) + " -> " + line)
}
}
}
示例13: FilterRequest
// FilterRequest proxies the request to the upstream server and returns
// its response. A transport timeout synthesizes a 504; any other
// transport error a 502; both mark the current stage status 2 (fail).
// The round-trip time is logged at debug level.
func (u *Upstream) FilterRequest(request *falcore.Request) (res *http.Response) {
var err os.Error
req := request.HttpRequest
// Force the upstream to use http
if u.ForceHttp || req.URL.Scheme == "" {
req.URL.Scheme = "http"
req.URL.Host = req.Host
}
before := time.Nanoseconds()
req.Header.Set("Connection", "Keep-Alive")
res, err = u.transport.RoundTrip(req)
diff := falcore.TimeDiff(before, time.Nanoseconds())
if err != nil {
if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
falcore.Error("%s Upstream Timeout error: %v", request.ID, err)
res = falcore.SimpleResponse(req, 504, nil, "Gateway Timeout\n")
request.CurrentStage.Status = 2 // Fail
} else {
falcore.Error("%s Upstream error: %v", request.ID, err)
res = falcore.SimpleResponse(req, 502, nil, "Bad Gateway\n")
request.CurrentStage.Status = 2 // Fail
}
}
// res is non-nil here: either the round trip succeeded or one of the
// error branches above synthesized a response.
falcore.Debug("%s [%s] [%s%s] s=%d Time=%.4f", request.ID, req.Method, u.host, req.RawURL, res.StatusCode, diff)
return
}
示例14: testTimeout
// testTimeout dials network/addr, sets a 100ms read timeout, performs
// one Read (or ReadFrom when readFrom is true), and verifies that it
// returns zero bytes with a timeout error after roughly 0.05–0.15s.
func testTimeout(t *testing.T, network, addr string, readFrom bool) {
fd, err := Dial(network, addr)
if err != nil {
t.Errorf("dial %s %s failed: %v", network, addr, err)
return
}
defer fd.Close()
t0 := time.Nanoseconds()
fd.SetReadTimeout(1e8) // 100ms
var b [100]byte
var n int
var err1 os.Error
if readFrom {
n, _, err1 = fd.(PacketConn).ReadFrom(b[0:])
} else {
n, err1 = fd.Read(b[0:])
}
t1 := time.Nanoseconds()
what := "Read"
if readFrom {
what = "ReadFrom"
}
// The read must return nothing, and the error must be a timeout.
if n != 0 || err1 == nil || !err1.(Error).Timeout() {
t.Errorf("fd.%s on %s %s did not return 0, timeout: %v, %v", what, network, addr, n, err1)
}
// Allow 50% slack either side of the 100ms timeout.
if t1-t0 < 0.5e8 || t1-t0 > 1.5e8 {
t.Errorf("fd.%s on %s %s took %f seconds, expected 0.1", what, network, addr, float64(t1-t0)/1e9)
}
}
示例15: drain
// drain blocks until no client has queued or unacknowledged messages,
// polling every 100 milliseconds. If timeout (in nanoseconds) is
// positive and expires first, a "timeout" error is returned.
// See the comment for Exporter.Drain.
func (cs *clientSet) drain(timeout int64) error {
	start := time.Nanoseconds()
	for {
		busy := false
		cs.mu.Lock()
		// Any messages waiting for a client?
		for _, chDir := range cs.names {
			if chDir.ch.Len() > 0 {
				busy = true
			}
		}
		// Any unacknowledged messages?
		for client := range cs.clients {
			// Check for > rather than != just to be safe.
			if client.unackedCount() > 0 {
				busy = true
				break
			}
		}
		cs.mu.Unlock()
		if !busy {
			return nil
		}
		if timeout > 0 && time.Nanoseconds()-start >= timeout {
			return errors.New("timeout")
		}
		time.Sleep(100 * 1e6) // 100 milliseconds
	}
}