This page collects typical usage examples of the Golang ObserveHist function from github.com/netflix/rend/metrics. If you are wondering what ObserveHist does and how to use it, the curated examples below may help.
Fifteen code examples of the ObserveHist function are shown below, sorted by popularity by default.
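Before the examples, a note on the recurring pattern: each handler takes a timestamp with timer.Now immediately before a backend call, then passes timer.Since(start) to metrics.ObserveHist together with a pre-registered histogram ID, alongside counter increments for the outcome. The minimal sketch below illustrates just that call sequence; HistExampleOp, MetricExampleOps and MetricExampleErrors are hypothetical metric IDs assumed to be registered elsewhere with the rend metrics package.

// Minimal sketch of the timing pattern used throughout the examples below.
// HistExampleOp, MetricExampleOps and MetricExampleErrors are hypothetical
// metric IDs assumed to be registered elsewhere via the rend metrics package.
func timedOp(op func() error) error {
	// Count the attempt, then time the wrapped backend call.
	metrics.IncCounter(MetricExampleOps)
	start := timer.Now()
	err := op()
	// Record the elapsed time for this operation in the histogram.
	metrics.ObserveHist(HistExampleOp, timer.Since(start))
	if err != nil {
		metrics.IncCounter(MetricExampleErrors)
	}
	return err
}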
Example 1: Prepend
func (l *L1L2BatchOrca) Prepend(req common.SetRequest) error {
//log.Println("prepend", string(req.Key))
metrics.IncCounter(MetricCmdPrependL2)
start := timer.Now()
err := l.l2.Prepend(req)
metrics.ObserveHist(HistPrependL2, timer.Since(start))
if err != nil {
// Prepending in L2 did not succeed. Don't try in L1 since this means L2
// may not have succeeded.
if err == common.ErrItemNotStored {
metrics.IncCounter(MetricCmdPrependNotStoredL2)
metrics.IncCounter(MetricCmdPrependNotStored)
return err
}
metrics.IncCounter(MetricCmdPrependErrorsL2)
metrics.IncCounter(MetricCmdPrependErrors)
return err
}
// L2 succeeded, so it's time to try L1. If L1 fails with a not found, we're
// still good since L1 is allowed to not have the data when L2 does. If
// there's an error, we need to fail because we're now in an unknown state
// where L1 possibly doesn't have the Prepend when L2 does. We don't recover
// from this but instead fail the request and let the client retry.
metrics.IncCounter(MetricCmdPrependL1)
start = timer.Now()
err = l.l1.Prepend(req)
metrics.ObserveHist(HistPrependL1, timer.Since(start))
if err != nil {
// Not stored in L1 is still fine. There's a possibility that a
// concurrent delete happened or that the data has just been pushed out
// of L1. Prepend will not bring data back into L1 as it's not necessarily
// going to be immediately read.
if err == common.ErrItemNotStored || err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdPrependNotStoredL1)
metrics.IncCounter(MetricCmdPrependStored)
return l.res.Prepend(req.Opaque, req.Quiet)
}
metrics.IncCounter(MetricCmdPrependErrorsL1)
metrics.IncCounter(MetricCmdPrependErrors)
return err
}
metrics.IncCounter(MetricCmdPrependStoredL1)
metrics.IncCounter(MetricCmdPrependStored)
return l.res.Prepend(req.Opaque, req.Quiet)
}
Example 2: Add
func (l *L1L2BatchOrca) Add(req common.SetRequest) error {
//log.Println("add", string(req.Key))
// Add in L2 first, since it has the larger state
metrics.IncCounter(MetricCmdAddL2)
start := timer.Now()
err := l.l2.Add(req)
metrics.ObserveHist(HistAddL2, timer.Since(start))
if err != nil {
// A key already existing is not an error per se; it's part of the add
// command's functionality to respond with a "not stored" in the form of
// an ErrKeyExists. Hence no error metrics.
if err == common.ErrKeyExists {
metrics.IncCounter(MetricCmdAddNotStoredL2)
metrics.IncCounter(MetricCmdAddNotStored)
return err
}
// otherwise we have a real error on our hands
metrics.IncCounter(MetricCmdAddErrorsL2)
metrics.IncCounter(MetricCmdAddErrors)
return err
}
metrics.IncCounter(MetricCmdAddStoredL2)
// Replace the entry in L1.
metrics.IncCounter(MetricCmdAddReplaceL1)
start = timer.Now()
err = l.l1.Replace(req)
metrics.ObserveHist(HistReplaceL1, timer.Since(start))
if err != nil {
if err == common.ErrKeyNotFound {
// For a replace not stored in L1, there's no problem.
// There is no hot data to replace
metrics.IncCounter(MetricCmdAddReplaceNotStoredL1)
} else {
metrics.IncCounter(MetricCmdAddReplaceErrorsL1)
metrics.IncCounter(MetricCmdAddErrors)
return err
}
} else {
metrics.IncCounter(MetricCmdAddReplaceStoredL1)
}
metrics.IncCounter(MetricCmdAddStored)
return l.res.Add(req.Opaque, req.Quiet)
}
Example 3: Set
func (l *L1L2Orca) Set(req common.SetRequest) error {
//log.Println("set", string(req.Key))
// Try L2 first
metrics.IncCounter(MetricCmdSetL2)
start := timer.Now()
err := l.l2.Set(req)
metrics.ObserveHist(HistSetL2, timer.Since(start))
// If we fail to set in L2, don't set in L1
if err != nil {
metrics.IncCounter(MetricCmdSetErrorsL2)
metrics.IncCounter(MetricCmdSetErrors)
return err
}
metrics.IncCounter(MetricCmdSetSuccessL2)
// Now set in L1. If L1 fails, we log the error and fail the request.
// If a user was writing a new piece of information, the error would be OK,
// since the next GET would be able to put the L2 information back into L1.
// In the case that the user was overwriting information, a failed set in L1
// and successful one in L2 would leave us inconsistent. If the response was
// positive in this situation, it would look like the server successfully
// processed the request but didn't store the information. Clients will
// retry failed writes. In this case L2 will get two writes for the same key
// but this is better because it is more correct overall, though less
// efficient. Note there are no retries at this level.
//
// It should be noted that errors on a straight set are nearly always fatal
// for the connection. It's likely that if this branch is taken, the
// connections to everyone will be severed (for this one client connection)
// and that the client will reconnect to try again.
metrics.IncCounter(MetricCmdSetL1)
start = timer.Now()
err = l.l1.Set(req)
metrics.ObserveHist(HistSetL1, timer.Since(start))
if err != nil {
metrics.IncCounter(MetricCmdSetErrorsL1)
metrics.IncCounter(MetricCmdSetErrors)
return err
}
metrics.IncCounter(MetricCmdSetSuccessL1)
metrics.IncCounter(MetricCmdSetSuccess)
return l.res.Set(req.Opaque, req.Quiet)
}
Example 4: Delete
func (l *L1OnlyOrca) Delete(req common.DeleteRequest) error {
//log.Println("delete", string(req.Key))
metrics.IncCounter(MetricCmdDeleteL1)
start := timer.Now()
err := l.l1.Delete(req)
metrics.ObserveHist(HistDeleteL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdDeleteHits)
metrics.IncCounter(MetricCmdDeleteHitsL1)
l.res.Delete(req.Opaque)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdDeleteMissesL1)
metrics.IncCounter(MetricCmdDeleteMisses)
} else {
metrics.IncCounter(MetricCmdDeleteErrorsL1)
metrics.IncCounter(MetricCmdDeleteErrors)
}
return err
}
Example 5: Touch
func (l *L1OnlyOrca) Touch(req common.TouchRequest) error {
//log.Println("touch", string(req.Key))
metrics.IncCounter(MetricCmdTouchL1)
start := timer.Now()
err := l.l1.Touch(req)
metrics.ObserveHist(HistTouchL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdTouchHitsL1)
metrics.IncCounter(MetricCmdTouchHits)
l.res.Touch(req.Opaque)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdTouchMissesL1)
metrics.IncCounter(MetricCmdTouchMisses)
} else {
metrics.IncCounter(MetricCmdTouchErrorsL1)
metrics.IncCounter(MetricCmdTouchErrors)
}
return err
}
Example 6: Gat
func (l *L1OnlyOrca) Gat(req common.GATRequest) error {
//log.Println("gat", string(req.Key))
metrics.IncCounter(MetricCmdGatL1)
start := timer.Now()
res, err := l.l1.GAT(req)
metrics.ObserveHist(HistGatL1, timer.Since(start))
if err == nil {
if res.Miss {
metrics.IncCounter(MetricCmdGatMissesL1)
// TODO: Account for L2
metrics.IncCounter(MetricCmdGatMisses)
} else {
metrics.IncCounter(MetricCmdGatHits)
metrics.IncCounter(MetricCmdGatHitsL1)
}
l.res.GAT(res)
// There is no GetEnd call required here since this is only ever
// done in the binary protocol, where there's no END marker.
// Calling l.res.GetEnd here would be a no-op, so it is omitted.
//l.res.GetEnd(0, false)
} else {
metrics.IncCounter(MetricCmdGatErrors)
metrics.IncCounter(MetricCmdGatErrorsL1)
}
return err
}
Example 7: Replace
func (l *L1OnlyOrca) Replace(req common.SetRequest) error {
//log.Println("replace", string(req.Key))
metrics.IncCounter(MetricCmdReplaceL1)
start := timer.Now()
err := l.l1.Replace(req)
metrics.ObserveHist(HistReplaceL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdReplaceStoredL1)
metrics.IncCounter(MetricCmdReplaceStored)
err = l.res.Replace(req.Opaque, req.Quiet)
} else if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdReplaceNotStoredL1)
metrics.IncCounter(MetricCmdReplaceNotStored)
} else {
metrics.IncCounter(MetricCmdReplaceErrorsL1)
metrics.IncCounter(MetricCmdReplaceErrors)
}
return err
}
Example 8: Add
func (l *L1OnlyOrca) Add(req common.SetRequest) error {
//log.Println("add", string(req.Key))
metrics.IncCounter(MetricCmdAddL1)
start := timer.Now()
err := l.l1.Add(req)
metrics.ObserveHist(HistAddL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdAddStoredL1)
metrics.IncCounter(MetricCmdAddStored)
err = l.res.Add(req.Opaque, req.Quiet)
} else if err == common.ErrKeyExists {
metrics.IncCounter(MetricCmdAddNotStoredL1)
metrics.IncCounter(MetricCmdAddNotStored)
} else {
metrics.IncCounter(MetricCmdAddErrorsL1)
metrics.IncCounter(MetricCmdAddErrors)
}
return err
}
Example 9: Set
func (l *L1L2BatchOrca) Set(req common.SetRequest) error {
//log.Println("set", string(req.Key))
// Try L2 first
metrics.IncCounter(MetricCmdSetL2)
start := timer.Now()
err := l.l2.Set(req)
metrics.ObserveHist(HistSetL2, timer.Since(start))
// If we fail to set in L2, don't do anything in L1
if err != nil {
metrics.IncCounter(MetricCmdSetErrorsL2)
metrics.IncCounter(MetricCmdSetErrors)
return err
}
metrics.IncCounter(MetricCmdSetSuccessL2)
// Replace the entry in L1.
metrics.IncCounter(MetricCmdSetReplaceL1)
start = timer.Now()
err = l.l1.Replace(req)
metrics.ObserveHist(HistReplaceL1, timer.Since(start))
if err != nil {
if err == common.ErrKeyNotFound {
// For a replace not stored in L1, there's no problem.
// There is no hot data to replace
metrics.IncCounter(MetricCmdSetReplaceNotStoredL1)
} else {
metrics.IncCounter(MetricCmdSetReplaceErrorsL1)
metrics.IncCounter(MetricCmdSetErrors)
return err
}
} else {
metrics.IncCounter(MetricCmdSetReplaceStoredL1)
}
metrics.IncCounter(MetricCmdSetSuccess)
return l.res.Set(req.Opaque, req.Quiet)
}
Example 10: Set
func (l *L1OnlyOrca) Set(req common.SetRequest) error {
//log.Println("set", string(req.Key))
metrics.IncCounter(MetricCmdSetL1)
start := timer.Now()
err := l.l1.Set(req)
metrics.ObserveHist(HistSetL1, timer.Since(start))
if err == nil {
metrics.IncCounter(MetricCmdSetSuccessL1)
metrics.IncCounter(MetricCmdSetSuccess)
err = l.res.Set(req.Opaque, req.Quiet)
} else {
metrics.IncCounter(MetricCmdSetErrorsL1)
metrics.IncCounter(MetricCmdSetErrors)
}
return err
}
Example 11: Get
func (l *L1L2BatchOrca) Get(req common.GetRequest) error {
metrics.IncCounterBy(MetricCmdGetKeys, uint64(len(req.Keys)))
//debugString := "get"
//for _, k := range req.Keys {
// debugString += " "
// debugString += string(k)
//}
//println(debugString)
metrics.IncCounter(MetricCmdGetL1)
metrics.IncCounterBy(MetricCmdGetKeysL1, uint64(len(req.Keys)))
start := timer.Now()
resChan, errChan := l.l1.Get(req)
var err error
//var lastres common.GetResponse
var l2keys [][]byte
var l2opaques []uint32
var l2quiets []bool
// Read all the responses back from L1.
// The contract is that the resChan will have GetResponse's for get hits and misses,
// and the errChan will have any other errors, such as an out of memory error from
// memcached. If any receive happens from errChan, there will be no more responses
// from resChan.
for {
select {
case res, ok := <-resChan:
if !ok {
resChan = nil
} else {
if res.Miss {
metrics.IncCounter(MetricCmdGetMissesL1)
l2keys = append(l2keys, res.Key)
l2opaques = append(l2opaques, res.Opaque)
l2quiets = append(l2quiets, res.Quiet)
} else {
metrics.IncCounter(MetricCmdGetHits)
metrics.IncCounter(MetricCmdGetHitsL1)
l.res.Get(res)
}
}
case getErr, ok := <-errChan:
if !ok {
errChan = nil
} else {
metrics.IncCounter(MetricCmdGetErrors)
metrics.IncCounter(MetricCmdGetErrorsL1)
err = getErr
}
}
if resChan == nil && errChan == nil {
break
}
}
// record metrics before going to L2
metrics.ObserveHist(HistGetL1, timer.Since(start))
// leave early on all hits
if len(l2keys) == 0 {
if err != nil {
return err
}
return l.res.GetEnd(req.NoopOpaque, req.NoopEnd)
}
// Time for the same dance with L2
req = common.GetRequest{
Keys: l2keys,
NoopEnd: req.NoopEnd,
NoopOpaque: req.NoopOpaque,
Opaques: l2opaques,
Quiet: l2quiets,
}
metrics.IncCounter(MetricCmdGetL2)
metrics.IncCounterBy(MetricCmdGetKeysL2, uint64(len(l2keys)))
start = timer.Now()
resChan, errChan = l.l2.Get(req)
for {
select {
case res, ok := <-resChan:
if !ok {
resChan = nil
} else {
if res.Miss {
metrics.IncCounter(MetricCmdGetMissesL2)
// Missing L2 means a true miss
metrics.IncCounter(MetricCmdGetMisses)
} else {
metrics.IncCounter(MetricCmdGetHitsL2)
// For batch, don't set in l1. Typically batch users will read
// data once and not again, so setting in L1 will not be valuable.
//......... remainder of this example omitted .........
Example 12: GetE
func (l *L1OnlyOrca) GetE(req common.GetRequest) error {
// For an L1 only orchestrator, this will fail if the backend is memcached.
// It should be talking to another rend-based server, such as the L2 for the
// EVCache server project.
metrics.IncCounterBy(MetricCmdGetEKeys, uint64(len(req.Keys)))
//debugString := "gete"
//for _, k := range req.Keys {
// debugString += " "
// debugString += string(k)
//}
//println(debugString)
metrics.IncCounter(MetricCmdGetEL1)
metrics.IncCounterBy(MetricCmdGetEKeysL1, uint64(len(req.Keys)))
start := timer.Now()
resChan, errChan := l.l1.GetE(req)
var err error
// Read all the responses back from l.l1.
// The contract is that the resChan will have GetEResponse's for get hits and misses,
// and the errChan will have any other errors, such as an out of memory error from
// memcached. If any receive happens from errChan, there will be no more responses
// from resChan.
for {
select {
case res, ok := <-resChan:
if !ok {
resChan = nil
} else {
if res.Miss {
metrics.IncCounter(MetricCmdGetEMissesL1)
metrics.IncCounter(MetricCmdGetEMisses)
} else {
metrics.IncCounter(MetricCmdGetEHits)
metrics.IncCounter(MetricCmdGetEHitsL1)
}
l.res.GetE(res)
}
case getErr, ok := <-errChan:
if !ok {
errChan = nil
} else {
metrics.IncCounter(MetricCmdGetEErrors)
metrics.IncCounter(MetricCmdGetEErrorsL1)
err = getErr
}
}
if resChan == nil && errChan == nil {
break
}
}
metrics.ObserveHist(HistGetEL1, timer.Since(start))
if err == nil {
l.res.GetEnd(req.NoopOpaque, req.NoopEnd)
}
return err
}
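The Get and GetE examples all drain a response channel and an error channel with the same Go idiom: once a channel is closed, its variable is set to nil so that its select case can never fire again (a receive on a nil channel blocks forever), and the loop exits when both variables are nil. A standalone sketch of just that idiom, with hypothetical channel element types, is shown here.

// Sketch of the drain idiom used in the Get/GetE examples above: read
// responses and errors until both channels are closed, remembering the
// last error. Nil-ing a closed channel disables its case in the select.
func drain(resChan <-chan string, errChan <-chan error) error {
	var err error
	for {
		select {
		case res, ok := <-resChan:
			if !ok {
				resChan = nil // closed: stop selecting on it
			} else {
				_ = res // handle the response here
			}
		case e, ok := <-errChan:
			if !ok {
				errChan = nil
			} else {
				err = e // no more responses will arrive after an error
			}
		}
		if resChan == nil && errChan == nil {
			return err
		}
	}
}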
Example 13: Touch
func (l *L1L2Orca) Touch(req common.TouchRequest) error {
//log.Println("touch", string(req.Key))
// Try L2 first
metrics.IncCounter(MetricCmdTouchL2)
start := timer.Now()
err := l.l2.Touch(req)
metrics.ObserveHist(HistTouchL2, timer.Since(start))
if err != nil {
// On a touch miss in L2, don't bother touching in L1. The data should be
// TTL'd out within a second. This is yet another place where it's
// possible to be inconsistent, but only for a short time. Any
// concurrent requests will see the same behavior as this one. If the
// touch misses here, any other request will see the same view.
if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdTouchMissesL2)
metrics.IncCounter(MetricCmdTouchMisses)
return err
}
// If we fail to touch in L2, don't touch in L1. If the touch succeeded
// but for some reason the communication failed, then this is still OK
// since L1 can TTL out while L2 still has the data. On the next get
// request the data would still be retrievable, albeit more slowly.
metrics.IncCounter(MetricCmdTouchErrorsL2)
metrics.IncCounter(MetricCmdTouchErrors)
return err
}
metrics.IncCounter(MetricCmdTouchHitsL2)
// In the case of concurrent touches with different values, it's possible
// that the touches for L1 and L2 interleave and produce an inconsistent
// state. The L2 could be touched long, then L2 and L1 touched short on
// another request, then L1 touched long. In this case the data in L1 would
// outlive L2. This situation is uncommon and is therefore discounted.
metrics.IncCounter(MetricCmdTouchL1)
start = timer.Now()
err = l.l1.Touch(req)
metrics.ObserveHist(HistTouchL1, timer.Since(start))
if err != nil {
// Touch misses in L1 after a hit in L2 are not a big deal. The
// touch operation here explicitly does *not* act as a pre-warm putting
// data into L1. A miss here after a hit is the same as a hit.
if err == common.ErrKeyNotFound {
metrics.IncCounter(MetricCmdTouchMissesL1)
// Note that we increment the overall hits here (not misses) on
// purpose because L2 hit.
metrics.IncCounter(MetricCmdTouchHits)
return l.res.Touch(req.Opaque)
}
metrics.IncCounter(MetricCmdTouchErrorsL1)
metrics.IncCounter(MetricCmdTouchErrors)
return err
}
metrics.IncCounter(MetricCmdTouchHitsL1)
metrics.IncCounter(MetricCmdTouchHits)
return l.res.Touch(req.Opaque)
}
Example 14: Gat
func (l *L1L2Orca) Gat(req common.GATRequest) error {
//log.Println("gat", string(req.Key))
// Try L1 first
metrics.IncCounter(MetricCmdGatL1)
start := timer.Now()
res, err := l.l1.GAT(req)
metrics.ObserveHist(HistGatL1, timer.Since(start))
// Errors here are generally fatal to the connection, as something has gone
// seriously wrong. Bail out early.
// I should note that this is different than the other commands, where there
// are some benevolent "errors" that include KeyNotFound or KeyExists. In
// both Get and GAT the mini-internal-protocol is different because the Get
// command uses a channel to send results back and an error channel to signal
// some kind of fatal problem. The result signals non-fatal "errors"; in this
// case it's ErrKeyNotFound --> res.Miss is true.
if err != nil {
metrics.IncCounter(MetricCmdGatErrorsL1)
metrics.IncCounter(MetricCmdGatErrors)
return err
}
if res.Miss {
// If we miss here, we have to GAT L2 to get the data, then put it back
// into L1 with the new TTL.
metrics.IncCounter(MetricCmdGatMissesL1)
metrics.IncCounter(MetricCmdGatL2)
start = timer.Now()
res, err = l.l2.GAT(req)
metrics.ObserveHist(HistGatL2, timer.Since(start))
// fatal error
if err != nil {
metrics.IncCounter(MetricCmdGatErrorsL2)
metrics.IncCounter(MetricCmdGatErrors)
return err
}
// A miss on L2 after L1 is a true miss
if res.Miss {
metrics.IncCounter(MetricCmdGatMissesL2)
metrics.IncCounter(MetricCmdGatMisses)
return l.res.GAT(res)
}
// Take the data from the L2 GAT and set into L1 with the new TTL.
// There are several problems that could arise from the interleaving of other
// operations. Another GAT isn't a problem.
//
// Intermediate sets might get clobbered in L1 but remain in L2 if we
// used Set, but since we use Add we should not overwrite a Set that
// happens between the L2 GAT hit and subsequent L1 reconciliation.
//
// Deletes would be a possible problem since a delete hit in L2 and miss
// in L1 would interleave to have data in L1 not in L2. This is a risk
// that is understood and accepted. The typical use cases at Netflix
// will not use deletes concurrently with GATs.
setreq := common.SetRequest{
Key: req.Key,
Exptime: req.Exptime,
Flags: res.Flags,
Data: res.Data,
}
metrics.IncCounter(MetricCmdGatAddL1)
start2 := timer.Now()
err = l.l1.Add(setreq)
metrics.ObserveHist(HistAddL1, timer.Since(start2))
if err != nil {
// we were trampled in the middle of performing the GAT operation
// In this case, it's fine; no error for the overall op. We still
// want to track this with a metric, though, and return success.
if err == common.ErrKeyExists {
metrics.IncCounter(MetricCmdGatAddNotStoredL1)
} else {
metrics.IncCounter(MetricCmdGatAddErrorsL1)
// We count Gat errors here, not Add errors. The metrics for L1/L2 correspond
// to direct interaction with the two. The overall metrics correspond
// to the more abstract orchestrator operation.
metrics.IncCounter(MetricCmdGatErrors)
return err
}
} else {
metrics.IncCounter(MetricCmdGatAddStoredL1)
}
// the overall operation succeeded
metrics.IncCounter(MetricCmdGatHits)
} else {
metrics.IncCounter(MetricCmdGatHitsL1)
//......... remainder of this example omitted .........
Example 15: Get
func (l *L1L2Orca) Get(req common.GetRequest) error {
metrics.IncCounterBy(MetricCmdGetKeys, uint64(len(req.Keys)))
//debugString := "get"
//for _, k := range req.Keys {
// debugString += " "
// debugString += string(k)
//}
//println(debugString)
metrics.IncCounter(MetricCmdGetL1)
metrics.IncCounterBy(MetricCmdGetKeysL1, uint64(len(req.Keys)))
start := timer.Now()
resChan, errChan := l.l1.Get(req)
var err error
//var lastres common.GetResponse
var l2keys [][]byte
var l2opaques []uint32
var l2quiets []bool
// Read all the responses back from L1.
// The contract is that the resChan will have GetResponse's for get hits and misses,
// and the errChan will have any other errors, such as an out of memory error from
// memcached. If any receive happens from errChan, there will be no more responses
// from resChan.
for {
select {
case res, ok := <-resChan:
if !ok {
resChan = nil
} else {
if res.Miss {
metrics.IncCounter(MetricCmdGetMissesL1)
l2keys = append(l2keys, res.Key)
l2opaques = append(l2opaques, res.Opaque)
l2quiets = append(l2quiets, res.Quiet)
} else {
metrics.IncCounter(MetricCmdGetHits)
metrics.IncCounter(MetricCmdGetHitsL1)
l.res.Get(res)
}
}
case getErr, ok := <-errChan:
if !ok {
errChan = nil
} else {
metrics.IncCounter(MetricCmdGetErrors)
metrics.IncCounter(MetricCmdGetErrorsL1)
err = getErr
}
}
if resChan == nil && errChan == nil {
break
}
}
// finish up metrics for overall L1 (batch) get operation
metrics.ObserveHist(HistGetL1, timer.Since(start))
// leave early on all hits
if len(l2keys) == 0 {
if err != nil {
return err
}
return l.res.GetEnd(req.NoopOpaque, req.NoopEnd)
}
// Time for the same dance with L2
req = common.GetRequest{
Keys: l2keys,
NoopEnd: req.NoopEnd,
NoopOpaque: req.NoopOpaque,
Opaques: l2opaques,
Quiet: l2quiets,
}
metrics.IncCounter(MetricCmdGetEL2)
metrics.IncCounterBy(MetricCmdGetEKeysL2, uint64(len(l2keys)))
start = timer.Now()
resChanE, errChan := l.l2.GetE(req)
for {
select {
case res, ok := <-resChanE:
if !ok {
resChanE = nil
} else {
if res.Miss {
metrics.IncCounter(MetricCmdGetEMissesL2)
// Missing L2 means a true miss
metrics.IncCounter(MetricCmdGetMisses)
} else {
metrics.IncCounter(MetricCmdGetEHitsL2)
//set in l1
setreq := common.SetRequest{
//......... remainder of this example omitted .........
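The example is cut off above, but based on the Gat example (Example 14), the omitted part presumably builds a SetRequest from the L2 GetE hit and writes it back into L1 so later gets are served from the faster cache. The sketch below is a hypothetical illustration of that backfill pattern, not the original code; treating common.GetEResponse as carrying Key, Flags, Exptime and Data is an assumption based on how the response is used elsewhere on this page.

// Hypothetical sketch (not the omitted original): backfill an L2 GetE hit
// into L1. The anonymous interface only assumes the Set signature shown in
// the examples; the GetEResponse field names are assumptions.
func backfillL1(l1 interface{ Set(common.SetRequest) error }, res common.GetEResponse) error {
	setreq := common.SetRequest{
		Key:     res.Key,
		Flags:   res.Flags,
		Exptime: res.Exptime,
		Data:    res.Data,
	}
	start := timer.Now()
	err := l1.Set(setreq)
	metrics.ObserveHist(HistSetL1, timer.Since(start))
	return err
}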