This article collects typical usage examples of the Golang function gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context.WithTimeout. If you have been wondering what WithTimeout does, how to call it, or what it looks like in practice, the curated examples below should help.
The following shows 15 code examples of the WithTimeout function, ordered by popularity by default.
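Before diving into the examples, here is a minimal, self-contained sketch of the basic WithTimeout pattern. It is illustrative only and assumes the gx go-net package mirrors golang.org/x/net/context, whose WithTimeout has the same signature as the standard library's: it returns a derived Context plus a CancelFunc that should always be called.

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context" // assumed equivalent of the gx go-net/context path above
)

func main() {
    // Derive a context that is automatically cancelled after 100ms.
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel() // always release the timer, even if we return early

    select {
    case <-time.After(1 * time.Second):
        fmt.Println("work finished")
    case <-ctx.Done():
        fmt.Println("gave up:", ctx.Err()) // context.DeadlineExceeded
    }
}

The rule of thumb visible throughout the examples below: derive a child context with a deadline for each bounded operation, and release it with cancel() (usually via defer) as soon as the operation finishes.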
Example 1: pingPeer
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
    outChan := make(chan interface{})
    go func() {
        defer close(outChan)
        if len(n.Peerstore.Addrs(pid)) == 0 {
            // Make sure we can find the node in question
            outChan <- &PingResult{
                Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
            }
            ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
            defer cancel()
            p, err := n.Routing.FindPeer(ctx, pid)
            if err != nil {
                outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
                return
            }
            n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
        }
        outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}
        ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
        defer cancel()
        pings, err := n.Ping.Ping(ctx, pid)
        if err != nil {
            log.Debugf("Ping error: %s", err)
            outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
            return
        }
        var done bool
        var total time.Duration
        for i := 0; i < numPings && !done; i++ {
            select {
            case <-ctx.Done():
                done = true
                break
            case t, ok := <-pings:
                if !ok {
                    done = true
                    break
                }
                outChan <- &PingResult{
                    Success: true,
                    Time: t,
                }
                total += t
                time.Sleep(time.Second)
            }
        }
        averagems := total.Seconds() * 1000 / float64(numPings)
        outChan <- &PingResult{
            Text: fmt.Sprintf("Average latency: %.2fms", averagems),
        }
    }()
    return outChan
}
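A Go detail worth noting in Example 1: inside a select, a bare break only exits the select statement, not the enclosing for loop, which is why the code threads a done flag through the loop condition. A labeled break is an equivalent alternative; the sketch below is a hypothetical rewrite of that receive loop (drain and loop are made-up names), not go-ipfs code.

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

// drain reads up to n values from ch, stopping early if ctx expires.
// The labeled break exits the for loop directly; a bare break inside the
// select would only leave the select statement.
func drain(ctx context.Context, ch <-chan time.Duration, n int) time.Duration {
    var total time.Duration
loop:
    for i := 0; i < n; i++ {
        select {
        case <-ctx.Done():
            break loop
        case t, ok := <-ch:
            if !ok {
                break loop
            }
            total += t
        }
    }
    return total
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    ch := make(chan time.Duration, 2)
    ch <- time.Millisecond
    ch <- 2 * time.Millisecond
    close(ch)
    fmt.Println(drain(ctx, ch, 5))
}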
Example 2: doWork
func (mq *msgQueue) doWork(ctx context.Context) {
    // allow ten minutes for connections
    // this includes looking them up in the dht
    // dialing them, and handshaking
    conctx, cancel := context.WithTimeout(ctx, time.Minute*10)
    defer cancel()
    err := mq.network.ConnectTo(conctx, mq.p)
    if err != nil {
        log.Infof("cant connect to peer %s: %s", mq.p, err)
        // TODO: can't connect, what now?
        return
    }
    // grab outgoing message
    mq.outlk.Lock()
    wlm := mq.out
    if wlm == nil || wlm.Empty() {
        mq.outlk.Unlock()
        return
    }
    mq.out = nil
    mq.outlk.Unlock()
    sendctx, cancel := context.WithTimeout(ctx, time.Minute*5)
    defer cancel()
    // send wantlist updates
    err = mq.network.SendMessage(sendctx, mq.p, wlm)
    if err != nil {
        log.Infof("bitswap send error: %s", err)
        // TODO: what do we do if this fails?
        return
    }
}
Example 3: TestPinRecursiveFail
func TestPinRecursiveFail(t *testing.T) {
    ctx := context.Background()
    dstore := dssync.MutexWrap(ds.NewMapDatastore())
    bstore := blockstore.NewBlockstore(dstore)
    bserv := bs.New(bstore, offline.Exchange(bstore))
    dserv := mdag.NewDAGService(bserv)
    p := NewPinner(dstore, dserv)
    a, _ := randNode()
    b, _ := randNode()
    err := a.AddNodeLinkClean("child", b)
    if err != nil {
        t.Fatal(err)
    }
    // NOTE: This isn't a time-based test; we expect the pin to fail
    mctx, _ := context.WithTimeout(ctx, time.Millisecond)
    err = p.Pin(mctx, a, true)
    if err == nil {
        t.Fatal("should have failed to pin here")
    }
    _, err = dserv.Add(b)
    if err != nil {
        t.Fatal(err)
    }
    // this one is time-based... but shouldn't cause any issues
    mctx, _ = context.WithTimeout(ctx, time.Second)
    err = p.Pin(mctx, a, true)
    if err != nil {
        t.Fatal(err)
    }
}
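Several of the tests in this collection (Examples 3, 5, 8, and 13) discard the CancelFunc with a blank identifier, e.g. mctx, _ := context.WithTimeout(ctx, time.Millisecond). The context still expires at its deadline, but the underlying timer is not released until then, and modern go vet reports this pattern through its lostcancel check. A minimal, self-contained sketch of the tidier form, using only the standard WithTimeout API:

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

func main() {
    parent := context.Background()

    // Keep the CancelFunc and call it once the scope is done, instead of
    // discarding it with `ctx, _ := context.WithTimeout(...)`. The deadline
    // still fires either way, but cancelling promptly releases the timer
    // and anything waiting on ctx.Done().
    ctx, cancel := context.WithTimeout(parent, time.Millisecond)
    defer cancel()

    <-ctx.Done()
    fmt.Println(ctx.Err()) // context.DeadlineExceeded
}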
Example 4: GetValue
// GetValue searches for the value corresponding to the given Key.
func (dht *IpfsDHT) GetValue(ctx context.Context, key key.Key) ([]byte, error) {
    ctx, cancel := context.WithTimeout(ctx, time.Minute)
    defer cancel()
    vals, err := dht.GetValues(ctx, key, 16)
    if err != nil {
        return nil, err
    }
    var recs [][]byte
    for _, v := range vals {
        if v.Val != nil {
            recs = append(recs, v.Val)
        }
    }
    i, err := dht.Selector.BestRecord(key, recs)
    if err != nil {
        return nil, err
    }
    best := recs[i]
    log.Debugf("GetValue %v %v", key, best)
    if best == nil {
        log.Errorf("GetValue yielded correct record with nil value.")
        return nil, routing.ErrNotFound
    }
    fixupRec, err := record.MakePutRecord(dht.peerstore.PrivKey(dht.self), key, best, true)
    if err != nil {
        // probably shouldn't actually 'error' here, as we have found a value we like,
        // but this call failing probably isn't something we want to ignore
        return nil, err
    }
    for _, v := range vals {
        // if someone sent us a different 'less-valid' record, let's correct them
        if !bytes.Equal(v.Val, best) {
            go func(v routing.RecvdVal) {
                if v.From == dht.self {
                    err := dht.putLocal(key, fixupRec)
                    if err != nil {
                        log.Error("Error correcting local dht entry:", err)
                    }
                    return
                }
                ctx, cancel := context.WithTimeout(dht.Context(), time.Second*30)
                defer cancel()
                err := dht.putValueToPeer(ctx, v.From, key, fixupRec)
                if err != nil {
                    log.Error("Error correcting DHT entry: ", err)
                }
            }(v)
        }
    }
    return best, nil
}
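One design point in Example 4: the correction goroutines build their 30-second timeout on top of dht.Context(), the node's long-lived context, rather than on the request ctx that GetValue already bounded to one minute, so the fix-up writes can outlive the original call. The sketch below shows that "detach from the request, bound by the service lifetime" shape with hypothetical names (service, handle); it is not the go-ipfs implementation.

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

// service owns a long-lived context that outlives individual requests.
type service struct {
    ctx context.Context
}

// handle answers quickly, then lets cleanup continue in the background,
// bounded by the service context plus its own timeout rather than by the
// short-lived request context.
func (s *service) handle(reqCtx context.Context) string {
    go func() {
        bg, cancel := context.WithTimeout(s.ctx, 30*time.Second)
        defer cancel()
        // ... perform the background write bounded by bg, e.g. a
        // putValueToPeer-style correction ...
        _ = bg
    }()
    return "answer"
}

func main() {
    svc := &service{ctx: context.Background()}
    reqCtx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    fmt.Println(svc.handle(reqCtx))
}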
Example 5: TestValueGetSet
func TestValueGetSet(t *testing.T) {
    // t.Skip("skipping test to debug another")
    ctx := context.Background()
    dhtA := setupDHT(ctx, t)
    dhtB := setupDHT(ctx, t)
    defer dhtA.Close()
    defer dhtB.Close()
    defer dhtA.host.Close()
    defer dhtB.host.Close()
    vf := &record.ValidChecker{
        Func: func(key.Key, []byte) error {
            return nil
        },
        Sign: false,
    }
    nulsel := func(_ key.Key, bs [][]byte) (int, error) {
        return 0, nil
    }
    dhtA.Validator["v"] = vf
    dhtB.Validator["v"] = vf
    dhtA.Selector["v"] = nulsel
    dhtB.Selector["v"] = nulsel
    connect(t, ctx, dhtA, dhtB)
    ctxT, _ := context.WithTimeout(ctx, time.Second)
    dhtA.PutValue(ctxT, "/v/hello", []byte("world"))
    ctxT, _ = context.WithTimeout(ctx, time.Second*2)
    val, err := dhtA.GetValue(ctxT, "/v/hello")
    if err != nil {
        t.Fatal(err)
    }
    if string(val) != "world" {
        t.Fatalf("Expected 'world' got '%s'", string(val))
    }
    ctxT, _ = context.WithTimeout(ctx, time.Second*2)
    val, err = dhtB.GetValue(ctxT, "/v/hello")
    if err != nil {
        t.Fatal(err)
    }
    if string(val) != "world" {
        t.Fatalf("Expected 'world' got '%s'", string(val))
    }
}
Example 6: GetDiagnostic
// GetDiagnostic runs a diagnostics request across the entire network
func (d *Diagnostics) GetDiagnostic(ctx context.Context, timeout time.Duration) ([]*DiagInfo, error) {
    log.Debug("getting diagnostic")
    ctx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()
    diagID := newID()
    d.diagLock.Lock()
    d.diagMap[diagID] = time.Now()
    d.diagLock.Unlock()
    log.Debug("begin diagnostic")
    peers := d.getPeers()
    log.Debugf("Sending diagnostic request to %d peers.", len(peers))
    pmes := newMessage(diagID)
    pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement) // decrease timeout per hop
    dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
    if err != nil {
        return nil, fmt.Errorf("diagnostic from peers err: %s", err)
    }
    di := d.getDiagInfo()
    out := []*DiagInfo{di}
    for dpi := range dpeers {
        out = append(out, dpi)
    }
    return out, nil
}
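Example 6 shrinks the timeout it forwards to the next hop (timeout - HopTimeoutDecrement) so downstream peers give up before the upstream caller does. An alternative way to express the same idea, shown here purely as an illustration rather than as what go-ipfs does, is to read the remaining budget off the context's own deadline:

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

// remainingBudget returns how much time is left on ctx, minus a safety
// margin to leave room for the reply to travel back up the chain.
func remainingBudget(ctx context.Context, margin time.Duration) (time.Duration, bool) {
    deadline, ok := ctx.Deadline()
    if !ok {
        return 0, false // no deadline set on this context
    }
    left := deadline.Sub(time.Now()) - margin
    if left <= 0 {
        return 0, false
    }
    return left, true
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    if budget, ok := remainingBudget(ctx, 2*time.Second); ok {
        fmt.Println("forward a timeout of roughly", budget)
    }
}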
Example 7: ResolveLinks
// ResolveLinks iteratively resolves names by walking the link hierarchy.
// Every node is fetched from the DAGService, resolving the next name.
// Returns the list of nodes forming the path, starting with ndd. This list is
// guaranteed never to be empty.
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names []string) ([]*merkledag.Node, error) {
    result := make([]*merkledag.Node, 0, len(names)+1)
    result = append(result, ndd)
    nd := ndd // dup arg workaround
    // for each of the path components
    for _, name := range names {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, time.Minute)
        defer cancel()
        nextnode, err := nd.GetLinkedNode(ctx, s.DAG, name)
        if err == merkledag.ErrLinkNotFound {
            n, _ := nd.Multihash()
            return result, ErrNoLink{Name: name, Node: n}
        } else if err != nil {
            return append(result, nextnode), err
        }
        nd = nextnode
        result = append(result, nextnode)
    }
    return result, nil
}
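Example 7 reassigns ctx inside the loop, so each iteration derives its one-minute timeout from the previous iteration's context, and every deferred cancel is held until ResolveLinks returns. That works, but later steps inherit earlier deadlines and the cancels accumulate on long paths. A self-contained sketch of a per-step child context, assuming each step should get a fresh budget (step and resolveAll are hypothetical stand-ins, not go-ipfs functions):

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

// step is a stand-in for a per-component lookup such as GetLinkedNode.
func step(ctx context.Context, name string) error {
    select {
    case <-time.After(10 * time.Millisecond):
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func resolveAll(ctx context.Context, names []string) error {
    for _, name := range names {
        // Fresh one-minute budget per step, derived from the caller's ctx,
        // released as soon as the step completes (no stacked defers, no
        // inherited deadline from the previous iteration).
        stepCtx, cancel := context.WithTimeout(ctx, time.Minute)
        err := step(stepCtx, name)
        cancel()
        if err != nil {
            return err
        }
    }
    return nil
}

func main() {
    fmt.Println(resolveAll(context.Background(), []string{"foo", "bar", "baz"}))
}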
Example 8: TestHasIsBloomCached
func TestHasIsBloomCached(t *testing.T) {
    cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
    bs := NewBlockstore(syncds.MutexWrap(cd))
    for i := 0; i < 1000; i++ {
        bs.Put(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
    }
    ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
    cachedbs, err := testBloomCached(bs, ctx)
    if err != nil {
        t.Fatal(err)
    }
    select {
    case <-cachedbs.rebuildChan:
    case <-ctx.Done():
        t.Fatalf("Timeout wating for rebuild: %d", cachedbs.bloom.ElementsAdded())
    }
    cacheFails := 0
    cd.SetFunc(func() {
        cacheFails++
    })
    for i := 0; i < 1000; i++ {
        cachedbs.Has(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Key())
    }
    if float64(cacheFails)/float64(1000) > float64(0.05) {
        t.Fatal("Bloom filter has cache miss rate of more than 5%")
    }
}
Example 9: TestDeadlineFractionCancel
func TestDeadlineFractionCancel(t *testing.T) {
    ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond)
    ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5)
    select {
    case <-ctx1.Done():
        t.Fatal("ctx1 ended too early")
    case <-ctx2.Done():
        t.Fatal("ctx2 ended too early")
    default:
    }
    cancel2()
    select {
    case <-ctx1.Done():
        t.Fatal("ctx1 should NOT be cancelled")
    case <-ctx2.Done():
    default:
        t.Fatal("ctx2 should be cancelled")
    }
    cancel1()
    select {
    case <-ctx1.Done():
    case <-ctx2.Done():
    default:
        t.Fatal("ctx1 should be cancelled")
    }
}
Example 10: TestBasicBitswap
func TestBasicBitswap(t *testing.T) {
    net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
    sg := NewTestSessionGenerator(net)
    defer sg.Close()
    bg := blocksutil.NewBlockGenerator()
    t.Log("Test a one node trying to get one block from another")
    instances := sg.Instances(2)
    blocks := bg.Blocks(1)
    err := instances[0].Exchange.HasBlock(blocks[0])
    if err != nil {
        t.Fatal(err)
    }
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
    defer cancel()
    blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
    if err != nil {
        t.Fatal(err)
    }
    t.Log(blk)
    for _, inst := range instances {
        err := inst.Exchange.Close()
        if err != nil {
            t.Fatal(err)
        }
    }
}
Example 11: TestGetBlocksSequential
func TestGetBlocksSequential(t *testing.T) {
    var servs = Mocks(4)
    for _, s := range servs {
        defer s.Close()
    }
    bg := blocksutil.NewBlockGenerator()
    blks := bg.Blocks(50)
    var keys []key.Key
    for _, blk := range blks {
        keys = append(keys, blk.Key())
        servs[0].AddBlock(blk)
    }
    t.Log("one instance at a time, get blocks concurrently")
    for i := 1; i < len(servs); i++ {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
        defer cancel()
        out := servs[i].GetBlocks(ctx, keys)
        gotten := make(map[key.Key]blocks.Block)
        for blk := range out {
            if _, ok := gotten[blk.Key()]; ok {
                t.Fatal("Got duplicate block!")
            }
            gotten[blk.Key()] = blk
        }
        if len(gotten) != len(blks) {
            t.Fatalf("Didnt get enough blocks back: %d/%d", len(gotten), len(blks))
        }
    }
}
Example 12: TestGetBlockFromPeerAfterPeerAnnounces
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
    net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
    block := blocks.NewBlock([]byte("block"))
    g := NewTestSessionGenerator(net)
    defer g.Close()
    peers := g.Instances(2)
    hasBlock := peers[0]
    defer hasBlock.Exchange.Close()
    if err := hasBlock.Exchange.HasBlock(block); err != nil {
        t.Fatal(err)
    }
    wantsBlock := peers[1]
    defer wantsBlock.Exchange.Close()
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
    if err != nil {
        t.Log(err)
        t.Fatal("Expected to succeed")
    }
    if !bytes.Equal(block.Data, received.Data) {
        t.Fatal("Data doesn't match")
    }
}
Example 13: TestFindPeer
func TestFindPeer(t *testing.T) {
    // t.Skip("skipping test to debug another")
    if testing.Short() {
        t.SkipNow()
    }
    ctx := context.Background()
    _, peers, dhts := setupDHTS(ctx, 4, t)
    defer func() {
        for i := 0; i < 4; i++ {
            dhts[i].Close()
            dhts[i].host.Close()
        }
    }()
    connect(t, ctx, dhts[0], dhts[1])
    connect(t, ctx, dhts[1], dhts[2])
    connect(t, ctx, dhts[1], dhts[3])
    ctxT, _ := context.WithTimeout(ctx, time.Second)
    p, err := dhts[0].FindPeer(ctxT, peers[2])
    if err != nil {
        t.Fatal(err)
    }
    if p.ID == "" {
        t.Fatal("Failed to find peer.")
    }
    if p.ID != peers[2] {
        t.Fatal("Didnt find expected peer.")
    }
}
Example 14: connectToProviders
func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    // Get providers for all entries in wantlist (could take a while)
    wg := sync.WaitGroup{}
    for _, e := range entries {
        wg.Add(1)
        go func(k key.Key) {
            defer wg.Done()
            child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
            defer cancel()
            providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
            for prov := range providers {
                go func(p peer.ID) {
                    bs.network.ConnectTo(ctx, p)
                }(prov)
            }
        }(e.Key)
    }
    wg.Wait() // make sure all our children do finish.
}
Example 15: providerConnector
// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
    defer log.Info("bitswap client worker shutting down...")
    for {
        log.Event(parent, "Bitswap.ProviderConnector.Loop")
        select {
        case req := <-bs.findKeys:
            keys := req.keys
            if len(keys) == 0 {
                log.Warning("Received batch request for zero blocks")
                continue
            }
            log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})
            // NB: Optimization. Assumes that providers of key[0] are likely to
            // be able to provide for all keys. This currently holds true in most
            // every situation. Later, this assumption may not hold as true.
            child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
            providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
            for p := range providers {
                go bs.network.ConnectTo(req.ctx, p)
            }
            cancel()
        case <-parent.Done():
            return
        }
    }
}
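Example 15 calls cancel() explicitly at the end of the iteration rather than deferring it. In a worker that runs for the lifetime of the node this matters: a defer inside the for loop would not execute until providerConnector returns, so the timers from every iteration would pile up. A self-contained sketch of the same shape with hypothetical names (worker, handle):

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

func handle(ctx context.Context, r string) {
    fmt.Println("handling", r)
}

func worker(parent context.Context, reqs <-chan string) {
    for {
        select {
        case r := <-reqs:
            // Per-request timeout, cancelled explicitly at the end of the
            // iteration. Deferring here would hold one timer per request
            // until the worker itself exits.
            ctx, cancel := context.WithTimeout(parent, time.Second)
            handle(ctx, r)
            cancel()
        case <-parent.Done():
            return
        }
    }
}

func main() {
    parent, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()
    reqs := make(chan string, 1)
    reqs <- "key"
    worker(parent, reqs) // handles "key", then returns when parent expires
}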