This article collects typical usage examples of the WithCancel function from github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context in Go. If you are wondering what WithCancel does, how to call it, or simply want to see it used in real code, the curated examples below should help.
The following shows 15 code examples of the WithCancel function, ordered by popularity by default.
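All of the snippets follow the same basic pattern: derive a child context with WithCancel and call the returned CancelFunc (usually via defer) to release it. Before the examples, here is a minimal, self-contained sketch of that pattern using only the standard library; the "work" in the goroutine is purely illustrative:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Derive a cancellable child context from the background context.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // always call cancel to release the context's resources

	done := make(chan struct{})
	go func() {
		defer close(done)
		select {
		case <-time.After(time.Second): // illustrative "work"
			fmt.Println("work finished")
		case <-ctx.Done(): // unblocked as soon as cancel() is called
			fmt.Println("cancelled:", ctx.Err())
		}
	}()

	cancel() // cancel early; the goroutine observes ctx.Done()
	<-done
}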
Example 1: WithDeadlineFraction
func WithDeadlineFraction(ctx context.Context, fraction float64) (context.Context, context.CancelFunc) {
	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}
	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}
	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}
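A hypothetical call site for the helper above might reserve half of whatever time remains on the parent context for a sub-operation. This is a sketch only; doLookup is an assumed placeholder, not part of go-ipfs:

	// Give the lookup at most half of the time left on ctx
	// (or no limit at all if ctx has no deadline).
	subCtx, cancel := WithDeadlineFraction(ctx, 0.5)
	defer cancel()
	if err := doLookup(subCtx); err != nil { // doLookup: hypothetical helper
		return err
	}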
Example 2: InitializeKeyspace
// InitializeKeyspace sets the ipns record for the given key to
// point to an empty directory.
func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error {
	emptyDir := &mdag.Node{Data: ft.FolderPBData()}
	nodek, err := n.DAG.Add(emptyDir)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(n.Context())
	defer cancel()
	err = n.Pinning.Pin(ctx, emptyDir, false)
	if err != nil {
		return err
	}
	err = n.Pinning.Flush()
	if err != nil {
		return err
	}
	pub := nsys.NewRoutingPublisher(n.Routing, n.Repo.Datastore())
	if err := pub.Publish(ctx, key, path.FromKey(nodek)); err != nil {
		return err
	}
	return nil
}
Example 3: SetupInterruptHandler
func (i *cmdInvocation) SetupInterruptHandler(ctx context.Context) (io.Closer, context.Context) {
	intrh := NewIntrHandler()
	ctx, cancelFunc := context.WithCancel(ctx)
	handlerFunc := func(count int, ih *IntrHandler) {
		switch count {
		case 1:
			fmt.Println() // Prevent un-terminated ^C character in terminal
			ih.wg.Add(1)
			go func() {
				defer ih.wg.Done()
				cancelFunc()
			}()
		default:
			fmt.Println("Received another interrupt before graceful shutdown, terminating...")
			os.Exit(-1)
		}
	}
	intrh.Handle(handlerFunc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
	return intrh, ctx
}
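For comparison, here is a stdlib-only sketch of the same idea without go-ipfs's IntrHandler helper: the first SIGINT/SIGTERM cancels the context for a graceful shutdown, and a second signal terminates the process. This is an illustration under those assumptions, not the go-ipfs implementation:

package interrupt // illustrative sketch, not go-ipfs code

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

// WithInterruptCancel returns a context that is cancelled on the first
// SIGINT/SIGTERM; a second signal exits the process immediately.
func WithInterruptCancel(parent context.Context) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigc // first signal: start graceful shutdown
		cancel()
		<-sigc // second signal: give up and exit
		os.Exit(1)
	}()
	return ctx, cancel
}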
Example 4: republishEntries
func (rp *Republisher) republishEntries(p goprocess.Process) error {
	ctx, cancel := context.WithCancel(gpctx.OnClosingContext(p))
	defer cancel()
	for id := range rp.entries {
		log.Debugf("republishing ipns entry for %s", id)
		priv := rp.ps.PrivKey(id)
		// Look for it locally only
		_, ipnskey := namesys.IpnsKeysForID(id)
		p, seq, err := rp.getLastVal(ipnskey)
		if err != nil {
			if err == errNoEntry {
				continue
			}
			return err
		}
		// update record with same sequence number
		eol := time.Now().Add(rp.RecordLifetime)
		err = namesys.PutRecordToRouting(ctx, priv, p, seq, eol, rp.r, id)
		if err != nil {
			return err
		}
	}
	return nil
}
Example 5: TestBasic
func TestBasic(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ds, rt := setupRoot(ctx, t)
	rootdir := rt.GetValue().(*Directory)
	// test making a basic dir
	_, err := rootdir.Mkdir("a")
	if err != nil {
		t.Fatal(err)
	}
	path := "a/b/c/d/e/f/g"
	d := mkdirP(t, rootdir, path)
	fi := getRandFile(t, ds, 1000)
	// test inserting that file
	err = d.AddChild("afile", fi)
	if err != nil {
		t.Fatal(err)
	}
	err = assertFileAtPath(ds, rootdir, fi, "a/b/c/d/e/f/g/afile")
	if err != nil {
		t.Fatal(err)
	}
}
Example 6: Unpin
func Unpin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) {
	var unpinned []key.Key
	for _, p := range paths {
		p, err := path.ParsePath(p)
		if err != nil {
			return nil, err
		}
		k, err := core.ResolveToKey(ctx, n, p)
		if err != nil {
			return nil, err
		}
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		err = n.Pinning.Unpin(ctx, k, recursive)
		if err != nil {
			return nil, err
		}
		unpinned = append(unpinned, k)
	}
	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return unpinned, nil
}
Example 7: connectToProviders
func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Get providers for all entries in wantlist (could take a while)
	wg := sync.WaitGroup{}
	for _, e := range entries {
		wg.Add(1)
		go func(k key.Key) {
			defer wg.Done()
			child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
			defer cancel()
			providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
			for prov := range providers {
				go func(p peer.ID) {
					bs.network.ConnectTo(ctx, p)
				}(prov)
			}
		}(e.Key)
	}
	wg.Wait() // make sure all our children do finish.
}
Example 8: readPrep
func (dm *DagModifier) readPrep() error {
	err := dm.Sync()
	if err != nil {
		return err
	}
	if dm.read == nil {
		ctx, cancel := context.WithCancel(dm.ctx)
		dr, err := uio.NewDagReader(ctx, dm.curNode, dm.dagserv)
		if err != nil {
			return err
		}
		i, err := dr.Seek(int64(dm.curWrOff), os.SEEK_SET)
		if err != nil {
			return err
		}
		if i != int64(dm.curWrOff) {
			return ErrSeekFail
		}
		dm.readCancel = cancel
		dm.read = dr
	}
	return nil
}
Example 9: Unpin
func Unpin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) {
	dagnodes := make([]*merkledag.Node, 0)
	for _, fpath := range paths {
		dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
		if err != nil {
			return nil, err
		}
		dagnodes = append(dagnodes, dagnode)
	}
	var unpinned []key.Key
	for _, dagnode := range dagnodes {
		k, _ := dagnode.Key()
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		err := n.Pinning.Unpin(ctx, k, recursive)
		if err != nil {
			return nil, err
		}
		unpinned = append(unpinned, k)
	}
	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return unpinned, nil
}
Example 10: RunSupernodePutRecordGetRecord
func RunSupernodePutRecordGetRecord(conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 2, conf)
	if err != nil {
		return err
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}
	putter := clients[0]
	getter := clients[1]
	k := key.Key("key")
	note := []byte("a note from putter")
	if err := putter.Routing.PutValue(ctx, k, note); err != nil {
		return fmt.Errorf("failed to put value: %s", err)
	}
	received, err := getter.Routing.GetValue(ctx, k)
	if err != nil {
		return fmt.Errorf("failed to get value: %s", err)
	}
	if 0 != bytes.Compare(note, received) {
		return errors.New("record doesn't match")
	}
	return nil
}
Example 11: TestDoesNotDeadLockIfContextCancelledBeforePublish
func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {
	g := blocksutil.NewBlockGenerator()
	ctx, cancel := context.WithCancel(context.Background())
	n := New()
	defer n.Shutdown()
	t.Log("generate a large number of blocks. exceed default buffer")
	bs := g.Blocks(1000)
	ks := func() []key.Key {
		var keys []key.Key
		for _, b := range bs {
			keys = append(keys, b.Key())
		}
		return keys
	}()
	_ = n.Subscribe(ctx, ks...) // ignore received channel
	t.Log("cancel context before any blocks published")
	cancel()
	for _, b := range bs {
		n.Publish(b)
	}
	t.Log("publishing the large number of blocks to the ignored channel must not deadlock")
}
Example 12: newKeyRoot
// newKeyRoot creates a new KeyRoot for the given key, and starts up a republisher routine
// for it
func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot, error) {
	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}
	name := "/ipns/" + key.Key(hash).String()
	root := new(KeyRoot)
	root.key = k
	root.fs = fs
	root.name = name
	ctx, cancel := context.WithCancel(parent)
	defer cancel()
	pointsTo, err := fs.nsys.Resolve(ctx, name)
	if err != nil {
		err = namesys.InitializeKeyspace(ctx, fs.dserv, fs.nsys, fs.pins, k)
		if err != nil {
			return nil, err
		}
		pointsTo, err = fs.nsys.Resolve(ctx, name)
		if err != nil {
			return nil, err
		}
	}
	mnode, err := fs.resolver.ResolvePath(ctx, pointsTo)
	if err != nil {
		log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err)
		return nil, err
	}
	root.node = mnode
	root.repub = NewRepublisher(root, time.Millisecond*300, time.Second*3)
	go root.repub.Run(parent)
	pbn, err := ft.FromBytes(mnode.Data)
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}
	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(ctx, pointsTo.String(), mnode, root, fs)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(pointsTo.String(), mnode, root, fs)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}
	return root, nil
}
Example 13: rebroadcastWorker
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()
	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()
	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()
	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}
Example 14: TestConsistentAccounting
func TestConsistentAccounting(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	sender := newEngine(ctx, "Ernie")
	receiver := newEngine(ctx, "Bert")
	// Send messages from Ernie to Bert
	for i := 0; i < 1000; i++ {
		m := message.New(false)
		content := []string{"this", "is", "message", "i"}
		m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " "))))
		sender.Engine.MessageSent(receiver.Peer, m)
		receiver.Engine.MessageReceived(sender.Peer, m)
	}
	// Ensure sender records the change
	if sender.Engine.numBytesSentTo(receiver.Peer) == 0 {
		t.Fatal("Sent bytes were not recorded")
	}
	// Ensure sender and receiver have the same values
	if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) {
		t.Fatal("Inconsistent book-keeping. Strategies don't agree")
	}
	// Ensure sender didn't record receiving anything, and that the receiver
	// didn't record sending anything
	if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 {
		t.Fatal("Bert didn't send bytes to Ernie")
	}
}
Example 15: DirectAddCat
func DirectAddCat(data []byte, conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	const numPeers = 2
	// create network
	mn, err := mocknet.FullMeshLinked(ctx, numPeers)
	if err != nil {
		return err
	}
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})
	peers := mn.Peers()
	if len(peers) < numPeers {
		return errors.New("test initialization error")
	}
	adder, err := core.NewIPFSNode(ctx, core.ConfigOption(MocknetTestRepo(peers[0], mn.Host(peers[0]), conf, core.DHTOption)))
	if err != nil {
		return err
	}
	defer adder.Close()
	catter, err := core.NewIPFSNode(ctx, core.ConfigOption(MocknetTestRepo(peers[1], mn.Host(peers[1]), conf, core.DHTOption)))
	if err != nil {
		return err
	}
	defer catter.Close()
	bs1 := []peer.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)}
	bs2 := []peer.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)}
	if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); err != nil {
		return err
	}
	if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil {
		return err
	}
	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}
	readerCatted, err := coreunix.Cat(catter, added)
	if err != nil {
		return err
	}
	// verify
	bufout := new(bytes.Buffer)
	io.Copy(bufout, readerCatted)
	if 0 != bytes.Compare(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}