This page collects typical usage examples of the Golang Context.Err method from github.com/heems/bssim/Godeps/_workspace/src/golang.org/x/net/context. If you are wondering what Context.Err does, how to use it, or want to see it in practice, the curated code samples below may help. You can also explore further usage of the containing type, github.com/heems/bssim/Godeps/_workspace/src/golang.org/x/net/context.Context.
Twelve code examples of Context.Err are shown below, ordered by popularity.
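Every example below shares one contract: once a Context's Done channel closes, Context.Err reports why, returning context.Canceled after an explicit cancel or context.DeadlineExceeded after a timeout, and the idiom is to return that error from a select. Here is a minimal, self-contained sketch of that idiom using the standard library's context package (API-compatible with the vendored golang.org/x/net/context used in the examples); doWork and its work channel are illustrative names, not part of bssim:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// doWork waits for a result or for the context to end, whichever comes
// first. On cancellation or timeout it surfaces the reason via ctx.Err().
func doWork(ctx context.Context, work <-chan int) (int, error) {
	select {
	case v := <-work:
		return v, nil
	case <-ctx.Done():
		return 0, ctx.Err() // context.Canceled or context.DeadlineExceeded
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	// No sender on the channel, so the deadline fires first.
	_, err := doWork(ctx, make(chan int))
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}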
Example 1: GetBlocks
// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (ie. not one
// that lasts throughout the lifetime of the server)
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) {
	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}
	promise := bs.notifications.Subscribe(ctx, keys...)
	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	}
	bs.wm.WantBlocks(keys)
	req := &blockRequest{
		keys: keys,
		ctx:  ctx,
	}
	select {
	case bs.findKeys <- req:
		return promise, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
Example 2: readMsgCtx
func readMsgCtx(ctx context.Context, r msgio.Reader, p proto.Message) ([]byte, error) {
	var msg []byte
	// read in a goroutine so we can exit when our context is cancelled.
	done := make(chan error)
	go func() {
		var err error
		msg, err = r.ReadMsg()
		select {
		case done <- err:
		case <-ctx.Done():
		}
	}()
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case e := <-done:
		if e != nil {
			return nil, e
		}
	}
	return msg, proto.Unmarshal(msg, p)
}
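Example 2's shape recurs throughout this page: the blocking call (here msgio's ReadMsg) cannot itself be interrupted, so it runs in a goroutine that reports back on a done channel, and both the goroutine and the caller select on ctx.Done() so neither leaks. Below is a standalone sketch of the same shape over a plain io.Reader; readCtx and its result struct are illustrative names, not library API. Note the trade-off: on cancellation the caller returns immediately, but the underlying Read keeps running until it completes on its own.

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"
)

// readCtx runs a blocking Read in a goroutine so the caller can give up
// when ctx ends. The goroutine also selects on ctx.Done() when handing
// back its result, so it exits even if the caller has already left.
func readCtx(ctx context.Context, r io.Reader, buf []byte) (int, error) {
	type result struct {
		n   int
		err error
	}
	done := make(chan result)
	go func() {
		n, err := r.Read(buf)
		select {
		case done <- result{n, err}:
		case <-ctx.Done():
		}
	}()
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case res := <-done:
		return res.n, res.err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	buf := make([]byte, 8)
	n, err := readCtx(ctx, strings.NewReader("hello"), buf)
	fmt.Println(n, err, string(buf[:n])) // 5 <nil> hello
}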
Example 3: gatedDialAttempt
// gatedDialAttempt is an attempt to dial a node. It is gated by the swarm's
// dial synchronization systems: dialsync and dialbackoff.
func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error) {
	var logdial = lgbl.Dial("swarm", s.LocalPeer(), p, nil, nil)
	defer log.EventBegin(ctx, "swarmDialAttemptSync", logdial).Done()
	// check if we already have an open connection first
	conn := s.bestConnectionToPeer(p)
	if conn != nil {
		return conn, nil
	}
	// check if there's an ongoing dial to this peer
	if ok, wait := s.dsync.Lock(p); ok {
		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.
		defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err := s.dial(ctxT, p)
		cancel()
		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			log.Event(ctx, "swarmDialBackoffAdd", logdial)
			s.backf.AddBackoff(p) // let others know to backoff
			// ok, we failed. try again. (if loop is done, our error is output)
			return nil, fmt.Errorf("dial attempt failed: %s", err)
		}
		log.Event(ctx, "swarmDialBackoffClear", logdial)
		s.backf.Clear(p) // okay, no longer need to backoff
		return conn, nil
	} else {
		// we did not dial. we must wait for someone else to dial.
		// check whether we should backoff first...
		if s.backf.Backoff(p) {
			log.Event(ctx, "swarmDialBackoff", logdial)
			return nil, ErrDialBackoff
		}
		defer log.EventBegin(ctx, "swarmDialWait", logdial).Done()
		select {
		case <-wait: // wait for that other dial to finish.
			// see if it worked, OR we got an incoming dial in the meantime...
			conn := s.bestConnectionToPeer(p)
			if conn != nil {
				return conn, nil
			}
			return nil, ErrDialFailed
		case <-ctx.Done(): // or we may have to bail...
			return nil, ctx.Err()
		}
	}
}
Example 4: GetBlock
// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context.
func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) {
	// Any async work initiated by this function must end when this function
	// returns. To ensure this, derive a new context. Note that it is okay to
	// listen on parent in this scope, but NOT okay to pass |parent| to
	// functions called by this one. Otherwise those functions won't return
	// when this context's cancel func is executed. This is difficult to
	// enforce. May this comment keep you safe.
	ctx, cancelFunc := context.WithCancel(parent)
	ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("GetBlockRequest"))
	log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k)
	defer func() {
		cancelFunc()
	}()
	promise, err := bs.GetBlocks(ctx, []key.Key{k})
	if err != nil {
		return nil, err
	}
	select {
	case block, ok := <-promise:
		if !ok {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			default:
				return nil, errors.New("promise channel was closed")
			}
		}
		return block, nil
	case <-parent.Done():
		return nil, parent.Err()
	}
}
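The comment inside GetBlock carries the rule that makes this example interesting: any asynchronous work started here must stop when the function returns, which the derived WithCancel context plus the deferred cancel guarantee, while the final select still honors the parent context directly. A small sketch of the same discipline; fetchFirst and its source functions are hypothetical stand-ins, not part of bssim:

package main

import (
	"context"
	"fmt"
	"time"
)

// fetchFirst derives a child context so every helper goroutine it starts
// is told to stop as soon as this function returns, for any reason.
func fetchFirst(parent context.Context, sources []func(context.Context) string) string {
	ctx, cancel := context.WithCancel(parent)
	defer cancel() // stops the losers once a winner (or the parent) finishes

	results := make(chan string, len(sources)) // buffered: losers never block
	for _, src := range sources {
		go func(f func(context.Context) string) {
			results <- f(ctx)
		}(src)
	}
	select {
	case r := <-results:
		return r
	case <-parent.Done():
		return "aborted: " + parent.Err().Error()
	}
}

func main() {
	slow := func(ctx context.Context) string {
		select {
		case <-time.After(time.Second):
			return "slow"
		case <-ctx.Done():
			return "cancelled" // released by fetchFirst's deferred cancel
		}
	}
	fast := func(ctx context.Context) string { return "fast" }
	fmt.Println(fetchFirst(context.Background(), []func(context.Context) string{slow, fast}))
}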
Example 5: FindProvidersAsync
func (c *Client) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.PeerInfo {
	ctx = eventlog.ContextWithLoggable(ctx, eventlog.Uuid("findProviders"))
	defer log.EventBegin(ctx, "findProviders", &k).Done()
	ch := make(chan peer.PeerInfo)
	go func() {
		defer close(ch)
		request := pb.NewMessage(pb.Message_GET_PROVIDERS, string(k), 0)
		response, err := c.proxy.SendRequest(ctx, request)
		if err != nil {
			log.Debug(err)
			return
		}
		for _, p := range pb.PBPeersToPeerInfos(response.GetProviderPeers()) {
			select {
			case <-ctx.Done():
				log.Debug(ctx.Err())
				return
			case ch <- p:
			}
		}
	}()
	return ch
}
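FindProvidersAsync is the canonical asynchronous-producer shape: the function returns a channel immediately, a single goroutine owns the channel (closing it via defer), and every send competes with ctx.Done() so an abandoned consumer can never wedge the producer. The same shape, reduced to the standard library; streamInts is an illustrative name:

package main

import (
	"context"
	"fmt"
	"time"
)

// streamInts delivers results asynchronously on a channel. The producer
// goroutine owns the channel: it closes it when done and checks
// ctx.Done() on every send so a departed consumer cannot block it.
func streamInts(ctx context.Context, n int) <-chan int {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; i < n; i++ {
			select {
			case <-ctx.Done():
				fmt.Println("producer stopping:", ctx.Err())
				return
			case ch <- i:
			}
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	for v := range streamInts(ctx, 1000) {
		time.Sleep(10 * time.Millisecond) // slow consumer; the deadline fires mid-stream
		_ = v
	}
}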
Example 6: HasBlock
// HasBlock announces the existence of a block to this bitswap service. The
// service will potentially notify its peers.
func (bs *Bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error {
	select {
	case <-bs.process.Closing():
		return errors.New("bitswap is closed")
	default:
	}
	err := bs.tryPutBlock(blk, 4) // attempt to store block up to four times
	if err != nil {
		log.Errorf("Error writing block to datastore: %s", err)
		return err
	}
	bs.notifications.Publish(blk)
	select {
	case bs.newBlocks <- blk:
		// send block off to be reprovided
	case <-ctx.Done():
		return ctx.Err()
	}
	return nil
}
Example 7: writeMsgCtx
// writeMsgCtx marshals msg, writes it to w, and returns the encoded bytes;
// it gives up and returns ctx.Err() if the context ends first.
func writeMsgCtx(ctx context.Context, w msgio.Writer, msg proto.Message) ([]byte, error) {
	enc, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	// write in a goroutine so we can exit when our context is cancelled.
	done := make(chan error)
	go func(m []byte) {
		err := w.WriteMsg(m)
		select {
		case done <- err:
		case <-ctx.Done():
		}
	}(enc)
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case e := <-done:
		return enc, e
	}
}
Example 8: nextEnvelope
// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
	for {
		nextTask := e.peerRequestQueue.Pop()
		for nextTask == nil {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-e.workSignal:
				nextTask = e.peerRequestQueue.Pop()
			}
		}
		// with a task in hand, we're ready to prepare the envelope...
		block, err := e.bs.Get(nextTask.Entry.Key)
		if err != nil {
			// If we don't have the block, don't hold that against the peer
			// make sure to update that the task has been 'completed'
			nextTask.Done()
			continue
		}
		return &Envelope{
			Peer:  nextTask.Target,
			Block: block,
			Sent: func() {
				nextTask.Done()
				select {
				case e.workSignal <- struct{}{}:
					// work completing may mean that our queue will provide new
					// work to be done.
				default:
				}
			},
		}, nil
	}
}
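nextEnvelope's outer loop is a worker idiom worth isolating: pop from a queue, and when the queue is empty, block on a wake-up signal or on ctx.Done(), then re-check. A self-contained sketch under assumed names (the queue type below stands in for peerRequestQueue and workSignal; it is not the bssim API):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// queue is a minimal thread-safe task queue with a wake-up signal.
type queue struct {
	mu         sync.Mutex
	items      []string
	workSignal chan struct{}
}

func (q *queue) push(s string) {
	q.mu.Lock()
	q.items = append(q.items, s)
	q.mu.Unlock()
	select {
	case q.workSignal <- struct{}{}: // wake a sleeping worker, if any
	default:
	}
}

func (q *queue) pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	s := q.items[0]
	q.items = q.items[1:]
	return s, true
}

// next blocks until a task is available or ctx ends, mirroring the
// pop / select / re-pop loop in nextEnvelope above.
func (q *queue) next(ctx context.Context) (string, error) {
	for {
		if s, ok := q.pop(); ok {
			return s, nil
		}
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-q.workSignal: // new work may be available; re-check
		}
	}
}

func main() {
	q := &queue{workSignal: make(chan struct{}, 1)}
	go func() { time.Sleep(20 * time.Millisecond); q.push("job-1") }()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(q.next(ctx)) // job-1 <nil>
}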
Example 9: FindPeersConnectedToPeer
// FindPeersConnectedToPeer searches for peers directly connected to a given peer.
func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<-chan peer.PeerInfo, error) {
	peerchan := make(chan peer.PeerInfo, asyncQueryBuffer)
	peersSeen := peer.Set{}
	peers := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), AlphaValue)
	if len(peers) == 0 {
		return nil, kb.ErrLookupFailure
	}
	// setup the Query
	query := dht.newQuery(key.Key(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		pmes, err := dht.findPeerSingle(ctx, p, id)
		if err != nil {
			return nil, err
		}
		var clpeers []peer.PeerInfo
		closer := pmes.GetCloserPeers()
		for _, pbp := range closer {
			pi := pb.PBPeerToPeerInfo(pbp)
			// skip peers already seen
			if _, found := peersSeen[pi.ID]; found {
				continue
			}
			peersSeen[pi.ID] = struct{}{}
			// if peer is connected, send it to our client.
			if pb.Connectedness(*pbp.Connection) == inet.Connected {
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case peerchan <- pi:
				}
			}
			// if peer is the peer we're looking for, don't bother querying it.
			// TODO maybe query it?
			if pb.Connectedness(*pbp.Connection) != inet.Connected {
				clpeers = append(clpeers, pi)
			}
		}
		return &dhtQueryResult{closerPeers: clpeers}, nil
	})
	// run it! run it asynchronously to gen peers as results are found.
	// this does no error checking
	go func() {
		if _, err := query.Run(ctx, peers); err != nil {
			log.Debug(err)
		}
		// close the peerchan channel when done.
		close(peerchan)
	}()
	return peerchan, nil
}
Example 10: findProvidersAsyncRoutine
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key key.Key, count int, peerOut chan peer.PeerInfo) {
	defer log.EventBegin(ctx, "findProvidersAsync", &key).Done()
	defer close(peerOut)
	ps := pset.NewLimited(count)
	provs := dht.providers.GetProviders(ctx, key)
	for _, p := range provs {
		// NOTE: assuming that this list of peers is unique
		if ps.TryAdd(p) {
			select {
			case peerOut <- dht.peerstore.PeerInfo(p):
			case <-ctx.Done():
				return
			}
		}
		// If we have enough peers locally, don't bother with remote RPC
		if ps.Size() >= count {
			return
		}
	}
	// setup the Query
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		pmes, err := dht.findProvidersSingle(ctx, p, key)
		if err != nil {
			return nil, err
		}
		log.Debugf("%d provider entries", len(pmes.GetProviderPeers()))
		provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
		log.Debugf("%d provider entries decoded", len(provs))
		// Add unique providers from request, up to 'count'
		for _, prov := range provs {
			log.Debugf("got provider: %s", prov)
			if ps.TryAdd(prov.ID) {
				log.Debugf("using provider: %s", prov)
				select {
				case peerOut <- prov:
				case <-ctx.Done():
					log.Debug("Context timed out sending more providers")
					return nil, ctx.Err()
				}
			}
			if ps.Size() >= count {
				log.Debugf("got enough providers (%d/%d)", ps.Size(), count)
				return &dhtQueryResult{success: true}, nil
			}
		}
		// Give closer peers back to the query to be queried
		closer := pmes.GetCloserPeers()
		clpeers := pb.PBPeersToPeerInfos(closer)
		log.Debugf("got closer peers: %d %s", len(clpeers), clpeers)
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(clpeers),
		})
		return &dhtQueryResult{closerPeers: clpeers}, nil
	})
	peers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
	_, err := query.Run(ctx, peers)
	if err != nil {
		log.Debugf("Query error: %s", err)
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
		})
	}
}
Example 11: GetClosestPeers
// Kademlia 'node lookup' operation. Returns a channel of the K closest peers
// to the given key
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key key.Key) (<-chan peer.ID, error) {
	e := log.EventBegin(ctx, "getClosestPeers", &key)
	tablepeers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
	if len(tablepeers) == 0 {
		return nil, kb.ErrLookupFailure
	}
	out := make(chan peer.ID, KValue)
	peerset := pset.NewLimited(KValue)
	for _, p := range tablepeers {
		select {
		case out <- p:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		peerset.Add(p)
	}
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		// For DHT query command
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		closer, err := dht.closerPeersSingle(ctx, key, p)
		if err != nil {
			log.Debugf("error getting closer peers: %s", err)
			return nil, err
		}
		var filtered []peer.PeerInfo
		for _, clp := range closer {
			if kb.Closer(clp, dht.self, key) && peerset.TryAdd(clp) {
				select {
				case out <- clp:
				case <-ctx.Done():
					return nil, ctx.Err()
				}
				filtered = append(filtered, dht.peerstore.PeerInfo(clp))
			}
		}
		// For DHT query command
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(filtered),
		})
		return &dhtQueryResult{closerPeers: filtered}, nil
	})
	go func() {
		defer close(out)
		defer e.Done()
		// run it!
		_, err := query.Run(ctx, tablepeers)
		if err != nil {
			log.Debugf("closestPeers query run error: %s", err)
		}
	}()
	return out, nil
}
Example 12: Dial
// Dial connects to a peer over a particular address
// Ensures raddr is part of peer.Addresses()
// Example: d.DialAddr(ctx, peer.Addresses()[0], peer)
func (d *Dialer) Dial(ctx context.Context, raddr ma.Multiaddr, remote peer.ID) (Conn, error) {
	logdial := lgbl.Dial("conn", d.LocalPeer, remote, nil, raddr)
	logdial["encrypted"] = (d.PrivateKey != nil) // log whether this will be an encrypted dial or not.
	defer log.EventBegin(ctx, "connDial", logdial).Done()
	var connOut Conn
	var errOut error
	done := make(chan struct{})
	// do it async to ensure we respect the context's Done channel
	go func() {
		defer func() {
			select {
			case done <- struct{}{}:
			case <-ctx.Done():
			}
		}()
		maconn, err := d.rawConnDial(ctx, raddr, remote)
		if err != nil {
			errOut = err
			return
		}
		if d.Wrapper != nil {
			maconn = d.Wrapper(maconn)
		}
		c, err := newSingleConn(ctx, d.LocalPeer, remote, maconn)
		if err != nil {
			maconn.Close()
			errOut = err
			return
		}
		if d.PrivateKey == nil || EncryptConnections == false {
			log.Warning("dialer %s dialing INSECURELY %s at %s!", d, remote, raddr)
			connOut = c
			return
		}
		c2, err := newSecureConn(ctx, d.PrivateKey, c)
		if err != nil {
			errOut = err
			c.Close()
			return
		}
		connOut = c2
	}()
	select {
	case <-ctx.Done():
		logdial["error"] = ctx.Err()
		logdial["dial"] = "failure"
		return nil, ctx.Err()
	case <-done:
		// whew, finished.
	}
	if errOut != nil {
		logdial["error"] = errOut
		logdial["dial"] = "failure"
		return nil, errOut
	}
	logdial["dial"] = "success"
	return connOut, nil
}
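For comparison, the standard library's net.Dialer provides roughly the behavior that Dial builds by hand here: DialContext gives up when the context ends, and checking ctx.Err() afterwards distinguishes cancellation or timeout from an ordinary network failure. A minimal sketch; the address is from TEST-NET-1 (RFC 5737), so on most machines the dial blocks until the deadline:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// dialWithDeadline bounds a TCP dial with a context, and uses ctx.Err()
// to tell a context-driven abort apart from a plain network error.
func dialWithDeadline(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	conn, err := d.DialContext(ctx, "tcp", addr)
	if err != nil {
		if ctx.Err() != nil {
			return nil, fmt.Errorf("dial aborted by context: %w", ctx.Err())
		}
		return nil, err
	}
	return conn, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	if _, err := dialWithDeadline(ctx, "192.0.2.1:80"); err != nil {
		fmt.Println(err) // dial aborted by context: context deadline exceeded
	}
}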