This article collects typical usage examples of the Golang function github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context.WithTimeout. If you have been wondering what WithTimeout does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following 15 code examples of the WithTimeout function are shown, ordered by popularity by default.
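Before diving into the examples, it helps to recall the shape of the API: WithTimeout(parent, d) returns a derived Context plus a CancelFunc, and the CancelFunc should be called as soon as the work finishes so the timer is released early. A minimal, self-contained sketch of that pattern (doSomething is a hypothetical stand-in for any context-aware call; the import path matches the vendored package these examples use, and the standard library "context" package on modern Go exposes the same API):

package main

import (
    "fmt"
    "time"

    context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
)

// doSomething is a hypothetical stand-in for any context-aware call:
// it blocks until its simulated work finishes or the context is cancelled.
func doSomething(ctx context.Context) error {
    select {
    case <-time.After(50 * time.Millisecond): // simulated work
        return nil
    case <-ctx.Done():
        return ctx.Err() // context.DeadlineExceeded once the timeout fires
    }
}

func main() {
    // Derive a context that is cancelled automatically after one second.
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel() // always release the timer, even on early return

    if err := doSomething(ctx); err != nil {
        fmt.Println("failed:", err)
    }
}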
Example 1: TestPinRecursiveFail
func TestPinRecursiveFail(t *testing.T) {
    ctx := context.Background()
    dstore := dssync.MutexWrap(ds.NewMapDatastore())
    bstore := blockstore.NewBlockstore(dstore)
    bserv := bs.New(bstore, offline.Exchange(bstore))
    dserv := mdag.NewDAGService(bserv)
    p := NewPinner(dstore, dserv)

    a, _ := randNode()
    b, _ := randNode()
    err := a.AddNodeLinkClean("child", b)
    if err != nil {
        t.Fatal(err)
    }

    // Note: this isn't a time-based test; we expect the pin to fail
    mctx, _ := context.WithTimeout(ctx, time.Millisecond)
    err = p.Pin(mctx, a, true)
    if err == nil {
        t.Fatal("should have failed to pin here")
    }

    _, err = dserv.Add(b)
    if err != nil {
        t.Fatal(err)
    }

    // this one is time-based... but shouldn't cause any issues
    mctx, _ = context.WithTimeout(ctx, time.Second)
    err = p.Pin(mctx, a, true)
    if err != nil {
        t.Fatal(err)
    }
}
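Note how both WithTimeout calls above discard the CancelFunc (mctx, _ := ...). That is tolerable in a short-lived test, but the timer then stays allocated until its deadline fires. Outside tests, the usual pattern releases it explicitly, e.g. the first call rewritten:

    mctx, cancel := context.WithTimeout(ctx, time.Millisecond)
    defer cancel() // release the timer as soon as the enclosing function returns
    err = p.Pin(mctx, a, true)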
Example 2: TestPing
func TestPing(t *testing.T) {
    // t.Skip("skipping test to debug another")
    ctx := context.Background()

    dhtA := setupDHT(ctx, t)
    dhtB := setupDHT(ctx, t)

    peerA := dhtA.self
    peerB := dhtB.self

    defer dhtA.Close()
    defer dhtB.Close()
    defer dhtA.host.Close()
    defer dhtB.host.Close()

    connect(t, ctx, dhtA, dhtB)

    // Test that we can ping the node
    ctxT, _ := context.WithTimeout(ctx, 100*time.Millisecond)
    if _, err := dhtA.Ping(ctxT, peerB); err != nil {
        t.Fatal(err)
    }

    ctxT, _ = context.WithTimeout(ctx, 100*time.Millisecond)
    if _, err := dhtB.Ping(ctxT, peerA); err != nil {
        t.Fatal(err)
    }
}
Example 3: doWork
func (mq *msgQueue) doWork(ctx context.Context) {
    // allow ten minutes for connections
    // this includes looking them up in the dht
    // dialing them, and handshaking
    conctx, cancel := context.WithTimeout(ctx, time.Minute*10)
    defer cancel()

    err := mq.network.ConnectTo(conctx, mq.p)
    if err != nil {
        log.Infof("can't connect to peer %s: %s", mq.p, err)
        // TODO: can't connect, what now?
        return
    }

    // grab outgoing message
    mq.outlk.Lock()
    wlm := mq.out
    if wlm == nil || wlm.Empty() {
        mq.outlk.Unlock()
        return
    }
    mq.out = nil
    mq.outlk.Unlock()

    sendctx, cancel := context.WithTimeout(ctx, time.Minute*5)
    defer cancel()

    // send wantlist updates
    err = mq.network.SendMessage(sendctx, mq.p, wlm)
    if err != nil {
        log.Infof("bitswap send error: %s", err)
        // TODO: what do we do if this fails?
        return
    }
}
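doWork gives each phase its own deadline while both derived contexts remain children of the caller's ctx, so a caller-side cancellation aborts whichever phase is in flight. The same two-phase shape, compressed into a sketch (connect and send are hypothetical stand-ins passed in as parameters):

func twoPhase(ctx context.Context, connect, send func(context.Context) error) error {
    // Phase 1: generous budget for connection setup.
    conctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
    defer cancel()
    if err := connect(conctx); err != nil {
        return err
    }

    // Phase 2: a fresh, tighter budget for the send itself.
    sendctx, sendCancel := context.WithTimeout(ctx, 5*time.Minute)
    defer sendCancel()
    return send(sendctx)
}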
Example 4: pingPeer
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
    outChan := make(chan interface{})
    go func() {
        defer close(outChan)

        if len(n.Peerstore.Addrs(pid)) == 0 {
            // Make sure we can find the node in question
            outChan <- &PingResult{
                Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
            }

            ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
            defer cancel()
            p, err := n.Routing.FindPeer(ctx, pid)
            if err != nil {
                outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
                return
            }
            n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
        }

        outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

        ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
        defer cancel()

        pings, err := n.Ping.Ping(ctx, pid)
        if err != nil {
            log.Debugf("Ping error: %s", err)
            outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
            return
        }

        var done bool
        var total time.Duration
        for i := 0; i < numPings && !done; i++ {
            select {
            case <-ctx.Done():
                done = true
            case t, ok := <-pings:
                if !ok {
                    done = true
                    break
                }
                outChan <- &PingResult{
                    Success: true,
                    Time:    t,
                }
                total += t
                time.Sleep(time.Second)
            }
        }

        averagems := total.Seconds() * 1000 / float64(numPings)
        outChan <- &PingResult{
            Text: fmt.Sprintf("Average latency: %.2fms", averagems),
        }
    }()
    return outChan
}
Example 5: TestValueGetSet
func TestValueGetSet(t *testing.T) {
    // t.Skip("skipping test to debug another")
    ctx := context.Background()

    dhtA := setupDHT(ctx, t)
    dhtB := setupDHT(ctx, t)

    defer dhtA.Close()
    defer dhtB.Close()
    defer dhtA.host.Close()
    defer dhtB.host.Close()

    vf := &record.ValidChecker{
        Func: func(key.Key, []byte) error {
            return nil
        },
        Sign: false,
    }
    nulsel := func(_ key.Key, bs [][]byte) (int, error) {
        return 0, nil
    }

    dhtA.Validator["v"] = vf
    dhtB.Validator["v"] = vf
    dhtA.Selector["v"] = nulsel
    dhtB.Selector["v"] = nulsel

    connect(t, ctx, dhtA, dhtB)

    ctxT, _ := context.WithTimeout(ctx, time.Second)
    dhtA.PutValue(ctxT, "/v/hello", []byte("world"))

    ctxT, _ = context.WithTimeout(ctx, time.Second*2)
    val, err := dhtA.GetValue(ctxT, "/v/hello")
    if err != nil {
        t.Fatal(err)
    }
    if string(val) != "world" {
        t.Fatalf("Expected 'world' got '%s'", string(val))
    }

    ctxT, _ = context.WithTimeout(ctx, time.Second*2)
    val, err = dhtB.GetValue(ctxT, "/v/hello")
    if err != nil {
        t.Fatal(err)
    }
    if string(val) != "world" {
        t.Fatalf("Expected 'world' got '%s'", string(val))
    }
}
Example 6: TestSendToWantingPeer
// TODO simplify this test. get to the _essence_!
func TestSendToWantingPeer(t *testing.T) {
    if testing.Short() {
        t.SkipNow()
    }

    net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
    sg := NewTestSessionGenerator(net)
    defer sg.Close()
    bg := blocksutil.NewBlockGenerator()

    prev := rebroadcastDelay.Set(time.Second / 2)
    defer func() { rebroadcastDelay.Set(prev) }()

    peers := sg.Instances(2)
    peerA := peers[0]
    peerB := peers[1]

    t.Logf("Session %v\n", peerA.Peer)
    t.Logf("Session %v\n", peerB.Peer)

    timeout := time.Second
    waitTime := time.Second * 5

    alpha := bg.Next()
    // peerA requests and waits for block alpha
    ctx, cancel := context.WithTimeout(context.Background(), waitTime)
    defer cancel()
    alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()})
    if err != nil {
        t.Fatal(err)
    }

    // peerB announces to the network that he has block alpha
    ctx, cancel = context.WithTimeout(context.Background(), timeout)
    defer cancel()
    err = peerB.Exchange.HasBlock(ctx, alpha)
    if err != nil {
        t.Fatal(err)
    }

    // At some point, peerA should get alpha (or timeout)
    blkrecvd, ok := <-alphaPromise
    if !ok {
        t.Fatal("context timed out and broke promise channel!")
    }

    if blkrecvd.Key() != alpha.Key() {
        t.Fatal("Wrong block!")
    }
}
Example 7: GetValue
// GetValue searches for the value corresponding to given Key.
func (dht *IpfsDHT) GetValue(ctx context.Context, key key.Key) ([]byte, error) {
    ctx, cancel := context.WithTimeout(ctx, time.Minute)
    defer cancel()

    vals, err := dht.GetValues(ctx, key, 16)
    if err != nil {
        return nil, err
    }

    var recs [][]byte
    for _, v := range vals {
        if v.Val != nil {
            recs = append(recs, v.Val)
        }
    }

    i, err := dht.Selector.BestRecord(key, recs)
    if err != nil {
        return nil, err
    }

    best := recs[i]
    log.Debugf("GetValue %v %v", key, best)
    if best == nil {
        log.Errorf("GetValue yielded correct record with nil value.")
        return nil, routing.ErrNotFound
    }

    fixupRec, err := record.MakePutRecord(dht.peerstore.PrivKey(dht.self), key, best, true)
    if err != nil {
        // probably shouldn't actually 'error' here, as we have found a value we like,
        // but this call failing probably isn't something we want to ignore
        return nil, err
    }

    for _, v := range vals {
        // if someone sent us a different 'less-valid' record, let's correct them
        if !bytes.Equal(v.Val, best) {
            go func(v routing.RecvdVal) {
                ctx, cancel := context.WithTimeout(dht.Context(), time.Second*30)
                defer cancel()
                err := dht.putValueToPeer(ctx, v.From, key, fixupRec)
                if err != nil {
                    log.Error("Error correcting DHT entry: ", err)
                }
            }(v)
        }
    }

    return best, nil
}
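Worth noticing: the correction goroutines derive their 30-second timeout from dht.Context(), the node's long-lived context, rather than from the request-scoped ctx (which has a one-minute budget and is cancelled when GetValue returns), so the fire-and-forget repairs can outlive the original call.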
Example 8: TestFindPeer
func TestFindPeer(t *testing.T) {
    // t.Skip("skipping test to debug another")
    if testing.Short() {
        t.SkipNow()
    }

    ctx := context.Background()

    _, peers, dhts := setupDHTS(ctx, 4, t)
    defer func() {
        for i := 0; i < 4; i++ {
            dhts[i].Close()
            dhts[i].host.Close()
        }
    }()

    connect(t, ctx, dhts[0], dhts[1])
    connect(t, ctx, dhts[1], dhts[2])
    connect(t, ctx, dhts[1], dhts[3])

    ctxT, _ := context.WithTimeout(ctx, time.Second)
    p, err := dhts[0].FindPeer(ctxT, peers[2])
    if err != nil {
        t.Fatal(err)
    }

    if p.ID == "" {
        t.Fatal("Failed to find peer.")
    }

    if p.ID != peers[2] {
        t.Fatal("Didn't find expected peer.")
    }
}
Example 9: providerConnector
// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
    defer log.Info("bitswap client worker shutting down...")

    for {
        log.Event(parent, "Bitswap.ProviderConnector.Loop")
        select {
        case req := <-bs.findKeys:
            keys := req.keys
            if len(keys) == 0 {
                log.Warning("Received batch request for zero blocks")
                continue
            }
            log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})

            // NB: Optimization. Assumes that providers of key[0] are likely to
            // be able to provide for all keys. This currently holds true in most
            // every situation. Later, this assumption may not hold as true.
            child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
            providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
            for p := range providers {
                go bs.network.ConnectTo(req.ctx, p)
            }
            cancel()

        case <-parent.Done():
            return
        }
    }
}
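Here cancel() is called explicitly once the providers channel is drained rather than deferred: a defer inside the for-select loop would not run until the worker function exits, so the per-request timers would accumulate for the life of this long-running loop.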
Example 10: maybeGC
func (gc *GC) maybeGC(ctx context.Context, offset uint64) error {
    storage, err := gc.Repo.GetStorageUsage()
    if err != nil {
        return err
    }

    if storage+offset > gc.StorageMax {
        err := ErrMaxStorageExceeded
        log.Error(err)
        return err
    }

    if storage+offset > gc.StorageGC {
        // Do GC here
        log.Info("Starting repo GC...")
        defer log.EventBegin(ctx, "repoGC").Done()

        // one minute is enough to unlink ~1GB of 100kB blocks on an SSD
        _ctx, cancel := context.WithTimeout(ctx, time.Duration(gc.SlackGB)*time.Minute)
        defer cancel()

        if err := GarbageCollect(gc.Node, _ctx); err != nil {
            return err
        }
        newStorage, err := gc.Repo.GetStorageUsage()
        if err != nil {
            return err
        }
        log.Infof("Repo GC done. Released %s\n", humanize.Bytes(uint64(storage-newStorage)))
        return nil
    }
    return nil
}
Example 11: ReceiveMessage
func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) {
    // This call records changes to wantlists, blocks received,
    // and number of bytes transferred.
    bs.engine.MessageReceived(p, incoming)
    // TODO: this is bad, and could be easily abused.
    // Should only track *useful* messages in ledger

    iblocks := incoming.Blocks()

    if len(iblocks) == 0 {
        return
    }

    // quickly send out cancels, reduces chances of duplicate block receives
    var keys []key.Key
    for _, block := range iblocks {
        if _, found := bs.wm.wl.Contains(block.Key()); !found {
            log.Infof("received un-asked-for block: %s", block)
            continue
        }
        keys = append(keys, block.Key())
    }
    bs.wm.CancelWants(keys)

    wg := sync.WaitGroup{}
    for _, block := range iblocks {
        wg.Add(1)
        go func(b *blocks.Block) {
            defer wg.Done()

            bs.counterLk.Lock()
            bs.blocksRecvd++
            has, err := bs.blockstore.Has(b.Key())
            if err != nil {
                bs.counterLk.Unlock()
                log.Infof("blockstore.Has error: %s", err)
                return
            }
            if err == nil && has {
                bs.dupBlocksRecvd++
            }
            brecvd := bs.blocksRecvd
            bdup := bs.dupBlocksRecvd
            bs.counterLk.Unlock()

            if has {
                return
            }

            k := b.Key()
            log.Event(ctx, "Bitswap.GetBlockRequest.End", &k)

            log.Debugf("got block %s from %s (%d,%d)", b, p, brecvd, bdup)
            hasBlockCtx, cancel := context.WithTimeout(ctx, hasBlockTimeout)
            if err := bs.HasBlock(hasBlockCtx, b); err != nil {
                log.Warningf("ReceiveMessage HasBlock error: %s", err)
            }
            cancel()
        }(block)
    }
    wg.Wait()
}
Example 12: getNode
// getNode returns the node for link. If it returns an error,
// stop processing. If it returns a nil node, just skip it.
//
// The error handling is a little complicated.
func (t *traversal) getNode(link *mdag.Link) (*mdag.Node, error) {
    getNode := func(l *mdag.Link) (*mdag.Node, error) {
        ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
        defer cancel()

        next, err := l.GetNode(ctx, t.opts.DAG)
        if err != nil {
            return nil, err
        }

        skip, err := t.shouldSkip(next)
        if skip {
            next = nil
        }
        return next, err
    }

    next, err := getNode(link)
    if err != nil && t.opts.ErrFunc != nil { // attempt recovery.
        err = t.opts.ErrFunc(err)
        next = nil // skip regardless
    }
    return next, err
}
Example 13: TestBasicBitswap
func TestBasicBitswap(t *testing.T) {
    net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
    sg := NewTestSessionGenerator(net)
    defer sg.Close()
    bg := blocksutil.NewBlockGenerator()

    t.Log("Test one node trying to get one block from another")

    instances := sg.Instances(2)
    blocks := bg.Blocks(1)
    err := instances[0].Exchange.HasBlock(context.Background(), blocks[0])
    if err != nil {
        t.Fatal(err)
    }

    ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
    defer cancel()
    blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
    if err != nil {
        t.Fatal(err)
    }

    t.Log(blk)
    for _, inst := range instances {
        err := inst.Exchange.Close()
        if err != nil {
            t.Fatal(err)
        }
    }
}
Example 14: connectToProviders
func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    // Get providers for all entries in wantlist (could take a while)
    wg := sync.WaitGroup{}
    for _, e := range entries {
        wg.Add(1)
        go func(k key.Key) {
            defer wg.Done()

            child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
            defer cancel()
            providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
            for prov := range providers {
                go func(p peer.ID) {
                    bs.network.ConnectTo(ctx, p)
                }(prov)
            }
        }(e.Key)
    }
    wg.Wait() // make sure all our children do finish.
}
Example 15: InitializeKeyspace
// InitializeKeyspace sets the ipns record for the given key to
// point to an empty directory.
func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error {
    emptyDir := &mdag.Node{Data: ft.FolderPBData()}
    nodek, err := n.DAG.Add(emptyDir)
    if err != nil {
        return err
    }

    ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
    defer cancel()

    err = n.Pinning.Pin(ctx, emptyDir, false)
    if err != nil {
        return err
    }

    err = n.Pinning.Flush()
    if err != nil {
        return err
    }

    pub := nsys.NewRoutingPublisher(n.Routing)
    err = pub.Publish(n.Context(), key, path.FromKey(nodek))
    if err != nil {
        return err
    }

    return nil
}