This article collects typical usage examples of the Go function WithTimeout from github.com/djbarber/ipfs-hack/Godeps/_workspace/src/golang.org/x/net/context. If you have been wondering what WithTimeout does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following 15 code examples of WithTimeout are shown, sorted by popularity by default.
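Before the examples, here is a minimal, self-contained sketch of the canonical WithTimeout pattern (the vendored package mirrors the standard golang.org/x/net/context API): WithTimeout returns a derived Context plus a CancelFunc, and the CancelFunc should always be called, usually via defer, so the timer and the context's resources are released as soon as the work finishes rather than only when the deadline fires.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context" // the vendored copy in ipfs-hack mirrors this API
)

func main() {
	// Derive a context that is automatically cancelled after 50ms.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel() // always release the timer, even on the success path

	select {
	case <-time.After(time.Second): // a simulated slow operation
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println("aborted:", ctx.Err()) // prints context.DeadlineExceeded
	}
}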
Example 1: doWork
func (mq *msgQueue) doWork(ctx context.Context) {
	// allow ten minutes for connections
	// this includes looking them up in the dht
	// dialing them, and handshaking
	conctx, cancel := context.WithTimeout(ctx, time.Minute*10)
	defer cancel()

	err := mq.network.ConnectTo(conctx, mq.p)
	if err != nil {
		log.Infof("cant connect to peer %s: %s", mq.p, err)
		// TODO: cant connect, what now?
		return
	}

	// grab outgoing message
	mq.outlk.Lock()
	wlm := mq.out
	if wlm == nil || wlm.Empty() {
		mq.outlk.Unlock()
		return
	}
	mq.out = nil
	mq.outlk.Unlock()

	sendctx, cancel := context.WithTimeout(ctx, time.Minute*5)
	defer cancel()

	// send wantlist updates
	err = mq.network.SendMessage(sendctx, mq.p, wlm)
	if err != nil {
		log.Infof("bitswap send error: %s", err)
		// TODO: what do we do if this fails?
		return
	}
}
Example 2: pingPeer
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()

		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}

		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
Example 3: TestDeadlineFractionCancel
func TestDeadlineFractionCancel(t *testing.T) {
	ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond)
	ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5)

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 ended too early")
	case <-ctx2.Done():
		t.Fatal("ctx2 ended too early")
	default:
	}

	cancel2()

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 should NOT be cancelled")
	case <-ctx2.Done():
	default:
		t.Fatal("ctx2 should be cancelled")
	}

	cancel1()

	select {
	case <-ctx1.Done():
	case <-ctx2.Done():
	default:
		t.Fatal("ctx1 should be cancelled")
	}
}
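WithDeadlineFraction is a helper internal to this repository, not part of the context package. Its actual implementation is not shown here, but a plausible sketch built only on the standard API illustrates the idea of budgeting a fraction of the parent's remaining time:

// Hypothetical sketch; the repo's real helper may differ in details.
// It derives a context whose timeout is a fraction of the time left
// before the parent's deadline.
func WithDeadlineFraction(ctx context.Context, fraction float64) (context.Context, context.CancelFunc) {
	deadline, ok := ctx.Deadline()
	if !ok {
		// No parent deadline to take a fraction of: just make it cancelable.
		return context.WithCancel(ctx)
	}
	remaining := deadline.Sub(time.Now())
	return context.WithTimeout(ctx, time.Duration(float64(remaining)*fraction))
}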
Example 4: providerConnector
// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")

	for {
		log.Event(parent, "Bitswap.ProviderConnector.Loop")
		select {
		case req := <-bs.findKeys:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in most
			// every situation. Later, this assumption may not hold as true.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			for p := range providers {
				go bs.network.ConnectTo(req.ctx, p)
			}
			cancel()

		case <-parent.Done():
			return
		}
	}
}
Example 5: GetDiagnostic
// GetDiagnostic runs a diagnostics request across the entire network
func (d *Diagnostics) GetDiagnostic(ctx context.Context, timeout time.Duration) ([]*DiagInfo, error) {
	log.Debug("Getting diagnostic.")
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	diagID := newID()
	d.diagLock.Lock()
	d.diagMap[diagID] = time.Now()
	d.diagLock.Unlock()

	log.Debug("Begin Diagnostic")

	peers := d.getPeers()
	log.Debugf("Sending diagnostic request to %d peers.", len(peers))

	pmes := newMessage(diagID)
	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement) // decrease timeout per hop
	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
	if err != nil {
		return nil, fmt.Errorf("diagnostic from peers err: %s", err)
	}

	di := d.getDiagInfo()
	out := []*DiagInfo{di}
	for dpi := range dpeers {
		out = append(out, dpi)
	}
	return out, nil
}
Example 6: TestBasicBitswap
func TestBasicBitswap(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test a one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)
	err := instances[0].Exchange.HasBlock(blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
	if err != nil {
		t.Fatal(err)
	}

	t.Log(blk)
	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
Example 7: TestGetBlockFromPeerAfterPeerAnnounces
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	block := blocks.NewBlock([]byte("block"))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	peers := g.Instances(2)
	hasBlock := peers[0]
	defer hasBlock.Exchange.Close()

	if err := hasBlock.Exchange.HasBlock(block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := peers[1]
	defer wantsBlock.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.Data, received.Data) {
		t.Fatal("Data doesn't match")
	}
}
Example 8: TestFindPeer
func TestFindPeer(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	p, err := dhts[0].FindPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	if p.ID == "" {
		t.Fatal("Failed to find peer.")
	}

	if p.ID != peers[2] {
		t.Fatal("Didnt find expected peer.")
	}
}
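Note that this test (and Example 11 below) discards the CancelFunc with "ctxT, _ := context.WithTimeout(...)". The context package's documentation says cancel should be called as soon as the bounded operation completes; otherwise the timer and the derived context stay alive until the deadline fires, and go vet's lostcancel check flags the discarded-cancel pattern. A safer version of the same call would be:

ctxT, cancel := context.WithTimeout(ctx, time.Second)
defer cancel() // release the timer as soon as the lookup returns
p, err := dhts[0].FindPeer(ctxT, peers[2])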
Example 9: TestGetBlocksSequential
func TestGetBlocksSequential(t *testing.T) {
	var servs = Mocks(4)
	for _, s := range servs {
		defer s.Close()
	}
	bg := blocksutil.NewBlockGenerator()
	blks := bg.Blocks(50)

	var keys []key.Key
	for _, blk := range blks {
		keys = append(keys, blk.Key())
		servs[0].AddBlock(blk)
	}

	t.Log("one instance at a time, get blocks concurrently")

	for i := 1; i < len(servs); i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
		defer cancel()
		out := servs[i].GetBlocks(ctx, keys)
		gotten := make(map[key.Key]*blocks.Block)
		for blk := range out {
			if _, ok := gotten[blk.Key()]; ok {
				t.Fatal("Got duplicate block!")
			}
			gotten[blk.Key()] = blk
		}
		if len(gotten) != len(blks) {
			t.Fatalf("Didnt get enough blocks back: %d/%d", len(gotten), len(blks))
		}
	}
}
Example 10: connectToProviders
func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Get providers for all entries in wantlist (could take a while)
	wg := sync.WaitGroup{}
	for _, e := range entries {
		wg.Add(1)
		go func(k key.Key) {
			defer wg.Done()

			child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
			defer cancel()
			providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
			for prov := range providers {
				go func(p peer.ID) {
					bs.network.ConnectTo(ctx, p)
				}(prov)
			}
		}(e.Key)
	}
	wg.Wait() // make sure all our children do finish.
}
Example 11: TestValueGetSet
func TestValueGetSet(t *testing.T) {
	// t.Skip("skipping test to debug another")

	ctx := context.Background()

	dhtA := setupDHT(ctx, t)
	dhtB := setupDHT(ctx, t)

	defer dhtA.Close()
	defer dhtB.Close()
	defer dhtA.host.Close()
	defer dhtB.host.Close()

	vf := &record.ValidChecker{
		Func: func(key.Key, []byte) error {
			return nil
		},
		Sign: false,
	}
	dhtA.Validator["v"] = vf
	dhtB.Validator["v"] = vf

	connect(t, ctx, dhtA, dhtB)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	dhtA.PutValue(ctxT, "/v/hello", []byte("world"))

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err := dhtA.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err = dhtB.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}
}
Example 12: HandlePeerFound
func (n *IpfsNode) HandlePeerFound(p peer.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
	defer cancel()
	if err := n.PeerHost.Connect(ctx, p); err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}
Example 13: Dial
func Dial(nd *core.IpfsNode, p peer.ID, protocol string) (net.Stream, error) {
	ctx, cancel := context.WithTimeout(nd.Context(), time.Second*30)
	defer cancel()
	err := nd.PeerHost.Connect(ctx, peer.PeerInfo{ID: p})
	if err != nil {
		return nil, err
	}
	return nd.PeerHost.NewStream(pro.ID(protocol), p)
}
Example 14: gatedDialAttempt
// gatedDialAttempt is an attempt to dial a node. It is gated by the swarm's
// dial synchronization systems: dialsync and dialbackoff.
func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error) {
	var logdial = lgbl.Dial("swarm", s.LocalPeer(), p, nil, nil)
	defer log.EventBegin(ctx, "swarmDialAttemptSync", logdial).Done()

	// check if we already have an open connection first
	conn := s.bestConnectionToPeer(p)
	if conn != nil {
		return conn, nil
	}

	// check if there's an ongoing dial to this peer
	if ok, wait := s.dsync.Lock(p); ok {
		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.
		defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err := s.dial(ctxT, p)
		cancel()
		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			log.Event(ctx, "swarmDialBackoffAdd", logdial)
			s.backf.AddBackoff(p) // let others know to backoff

			// ok, we failed. try again. (if loop is done, our error is output)
			return nil, fmt.Errorf("dial attempt failed: %s", err)
		}
		log.Event(ctx, "swarmDialBackoffClear", logdial)
		s.backf.Clear(p) // okay, no longer need to backoff
		return conn, nil
	} else {
		// we did not dial. we must wait for someone else to dial.

		// check whether we should backoff first...
		if s.backf.Backoff(p) {
			log.Event(ctx, "swarmDialBackoff", logdial)
			return nil, ErrDialBackoff
		}

		defer log.EventBegin(ctx, "swarmDialWait", logdial).Done()
		select {
		case <-wait: // wait for that other dial to finish.
			// see if it worked, OR we got an incoming dial in the meantime...
			conn := s.bestConnectionToPeer(p)
			if conn != nil {
				return conn, nil
			}
			return nil, ErrDialFailed
		case <-ctx.Done(): // or we may have to bail...
			return nil, ctx.Err()
		}
	}
}
Example 15: HandleMessage
func (d *Diagnostics) HandleMessage(ctx context.Context, s inet.Stream) error {
	cr := ctxio.NewReader(ctx, s)
	cw := ctxio.NewWriter(ctx, s)
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax) // maxsize
	w := ggio.NewDelimitedWriter(cw)

	// deserialize msg
	pmes := new(pb.Message)
	if err := r.ReadMsg(pmes); err != nil {
		log.Debugf("Failed to decode protobuf message: %v", err)
		return nil
	}

	// Print out diagnostic
	log.Infof("[peer: %s] Got message from [%s]\n",
		d.self.Pretty(), s.Conn().RemotePeer())

	// Make sure we havent already handled this request to prevent loops
	if err := d.startDiag(pmes.GetDiagID()); err != nil {
		return nil
	}

	resp := newMessage(pmes.GetDiagID())
	resp.Data = d.getDiagInfo().Marshal()
	if err := w.WriteMsg(resp); err != nil {
		log.Debugf("Failed to write protobuf message over stream: %s", err)
		return err
	}

	timeout := pmes.GetTimeoutDuration()
	if timeout < HopTimeoutDecrement {
		return fmt.Errorf("timeout too short: %s", timeout)
	}
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement)

	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
	if err != nil {
		log.Debugf("diagnostic from peers err: %s", err)
		return err
	}
	for b := range dpeers {
		resp := newMessage(pmes.GetDiagID())
		resp.Data = b.Marshal()
		if err := w.WriteMsg(resp); err != nil {
			log.Debugf("Failed to write protobuf message over stream: %s", err)
			return err
		}
	}
	return nil
}
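Examples 5 and 15 pair WithTimeout with a per-hop budget: each peer forwards the request with SetTimeoutDuration(timeout - HopTimeoutDecrement), so every downstream hop gets a strictly smaller deadline and the whole fan-out converges before the originator's own context expires. A minimal sketch of that arithmetic (the constant value below is made up for illustration; the repo defines its own HopTimeoutDecrement):

const HopTimeoutDecrement = 10 * time.Second // illustrative value only

// hopBudget computes the timeout to forward to the next hop, refusing
// requests whose remaining budget is already too small to be useful.
func hopBudget(timeout time.Duration) (time.Duration, error) {
	if timeout < HopTimeoutDecrement {
		return 0, fmt.Errorf("timeout too short: %s", timeout)
	}
	return timeout - HopTimeoutDecrement, nil
}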