This article collects typical usage examples of the Golang function WithContextAndTeardown from github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context. If you are wondering what WithContextAndTeardown does, how to call it, or what real-world usage looks like, the curated call sites below should help.
The nine examples that follow show WithContextAndTeardown as it is used inside go-ipfs.
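Before the examples, here is a minimal, self-contained sketch of the pattern all of these call sites share. WithContextAndTeardown(ctx, tf) returns a goprocess.Process that closes when ctx is canceled or when Close() is called on it, and runs the teardown function tf exactly once either way. The service type and all names below are hypothetical, written against the public github.com/jbenet/goprocess API rather than the vendored go-ipfs copy:

package main

import (
    "context"
    "fmt"

    goprocess "github.com/jbenet/goprocess"
    goprocessctx "github.com/jbenet/goprocess/context"
)

// service is a hypothetical component whose lifetime is tied both to a
// context and to an explicit Close call, mirroring the constructors below.
type service struct {
    proc goprocess.Process
}

// teardown releases the component's resources. WithContextAndTeardown
// guarantees it runs at most once, whichever shutdown path fires first.
func (s *service) teardown() error {
    fmt.Println("releasing resources")
    return nil
}

func newService(ctx context.Context) *service {
    s := &service{}
    // The returned process closes when ctx is canceled OR when
    // s.Close() is called; s.teardown runs before it is marked closed.
    s.proc = goprocessctx.WithContextAndTeardown(ctx, s.teardown)
    return s
}

// Close shuts the service down and returns the teardown error, if any.
func (s *service) Close() error { return s.proc.Close() }

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    s := newService(ctx)

    cancel()          // context cancellation triggers teardown asynchronously
    <-s.proc.Closed() // wait until teardown has completed
    _ = s.Close()     // also safe: closing an already-closed process is a no-op
}

This is why the constructors below can simply hand their teardown method to WithContextAndTeardown and delegate any Close method to proc.Close(): the same cleanup runs for context cancellation and for explicit shutdown.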
Example 1: NewNode
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
    if cfg == nil {
        cfg = new(BuildCfg)
    }

    err := cfg.fillDefaults()
    if err != nil {
        return nil, err
    }

    n := &IpfsNode{
        mode:      offlineMode,
        Repo:      cfg.Repo,
        ctx:       ctx,
        Peerstore: peer.NewPeerstore(),
    }
    if cfg.Online {
        n.mode = onlineMode
    }

    // TODO: this is a weird circular-ish dependency, rework it
    n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

    if err := setupNode(ctx, n, cfg); err != nil {
        n.Close()
        return nil, err
    }

    return n, nil
}
Example 2: NewSwarm
// NewSwarm constructs a Swarm, with a Chan.
func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr,
    local peer.ID, peers peer.Peerstore, bwc metrics.Reporter) (*Swarm, error) {

    listenAddrs, err := filterAddrs(listenAddrs)
    if err != nil {
        return nil, err
    }

    s := &Swarm{
        swarm:   ps.NewSwarm(PSTransport),
        local:   local,
        peers:   peers,
        ctx:     ctx,
        dialT:   DialTimeout,
        notifs:  make(map[inet.Notifiee]ps.Notifiee),
        bwc:     bwc,
        Filters: filter.NewFilters(),
    }

    // configure Swarm
    s.proc = goprocessctx.WithContextAndTeardown(ctx, s.teardown)
    s.SetConnHandler(nil) // make sure to setup our own conn handler.

    // setup swarm metrics
    prom.MustRegisterOrGet(peersTotal)
    s.Notify((*metricsNotifiee)(s))

    return s, s.listen(listenAddrs)
}
Example 3: newPeernet
// newPeernet constructs a new peernet
func newPeernet(ctx context.Context, m *mocknet, k ic.PrivKey,
    a ma.Multiaddr) (*peernet, error) {

    p, err := peer.IDFromPublicKey(k.GetPublic())
    if err != nil {
        return nil, err
    }

    // create our own entirely, so that peers knowledge doesn't get shared
    ps := peer.NewPeerstore()
    ps.AddAddr(p, a, peer.PermanentAddrTTL)
    ps.AddPrivKey(p, k)
    ps.AddPubKey(p, k.GetPublic())

    n := &peernet{
        mocknet:     m,
        peer:        p,
        ps:          ps,
        connsByPeer: map[peer.ID]map[*conn]struct{}{},
        connsByLink: map[*link]map[*conn]struct{}{},
        notifs:      make(map[inet.Notifiee]struct{}),
    }

    n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)
    return n, nil
}
Example 4: standardWithRouting
// TODO refactor so maybeRouter isn't special-cased in this way
func standardWithRouting(r repo.Repo, online bool, routingOption RoutingOption, hostOption HostOption) ConfigOption {
    return func(ctx context.Context) (n *IpfsNode, err error) {
        // FIXME perform node construction in the main constructor so it isn't
        // necessary to perform this teardown in this scope.
        success := false
        defer func() {
            if !success && n != nil {
                n.teardown()
            }
        }()

        // TODO move as much of node initialization as possible into
        // NewIPFSNode. The larger these config options are, the harder it is
        // to test all node construction code paths.
        if r == nil {
            return nil, fmt.Errorf("repo required")
        }
        n = &IpfsNode{
            mode: func() mode {
                if online {
                    return onlineMode
                }
                return offlineMode
            }(),
            Repo: r,
        }

        n.ctx = ctx
        n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

        // setup Peerstore
        n.Peerstore = peer.NewPeerstore()

        // setup local peer ID (private key is loaded in online setup)
        if err := n.loadID(); err != nil {
            return nil, err
        }

        n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
        if err != nil {
            return nil, err
        }

        if online {
            do := setupDiscoveryOption(n.Repo.Config().Discovery)
            if err := n.startOnlineServices(ctx, routingOption, hostOption, do); err != nil {
                return nil, err
            }
        } else {
            n.Exchange = offline.Exchange(n.Blockstore)
        }

        success = true
        return n, nil
    }
}
Example 5: WrapTransportListener
func WrapTransportListener(ctx context.Context, ml transport.Listener, local peer.ID, sk ic.PrivKey) (Listener, error) {
    l := &listener{
        Listener: ml,
        local:    local,
        privk:    sk,
    }
    l.proc = goprocessctx.WithContextAndTeardown(ctx, l.teardown)

    log.Debugf("Conn Listener on %s", l.Multiaddr())
    log.Event(ctx, "swarmListen", l)
    return l, nil
}
Example 6: NewIPFSNode
func NewIPFSNode(ctx context.Context, option ConfigOption) (*IpfsNode, error) {
    node, err := option(ctx)
    if err != nil {
        return nil, err
    }

    if node.ctx == nil {
        node.ctx = ctx
    }
    if node.proc == nil {
        node.proc = goprocessctx.WithContextAndTeardown(node.ctx, node.teardown)
    }

    success := false // flip to true after all sub-system inits succeed
    defer func() {
        if !success {
            node.proc.Close()
        }
    }()

    // Need to make sure it's perfectly clear 1) which variables are expected
    // to be initialized at this point, and 2) which variables will be
    // initialized after this point.

    node.Blocks, err = bserv.New(node.Blockstore, node.Exchange)
    if err != nil {
        return nil, err
    }
    if node.Peerstore == nil {
        node.Peerstore = peer.NewPeerstore()
    }
    node.DAG = merkledag.NewDAGService(node.Blocks)
    node.Pinning, err = pin.LoadPinner(node.Repo.Datastore(), node.DAG)
    if err != nil {
        node.Pinning = pin.NewPinner(node.Repo.Datastore(), node.DAG)
    }
    node.Resolver = &path.Resolver{DAG: node.DAG}

    // Setup the mutable ipns filesystem structure
    if node.OnlineMode() {
        fs, err := ipnsfs.NewFilesystem(ctx, node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
        if err != nil && err != kb.ErrLookupFailure {
            return nil, err
        }
        node.IpnsFs = fs
    }

    success = true
    return node, nil
}
Example 7: newPeernet
// newPeernet constructs a new peernet
func newPeernet(ctx context.Context, m *mocknet, p peer.ID, ps peer.Peerstore) (*peernet, error) {
    n := &peernet{
        mocknet:     m,
        peer:        p,
        ps:          ps,
        connsByPeer: map[peer.ID]map[*conn]struct{}{},
        connsByLink: map[*link]map[*conn]struct{}{},
        notifs:      make(map[inet.Notifiee]struct{}),
    }

    n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)
    return n, nil
}
Example 8: Listen
// Listen listens on the particular multiaddr, with given peer and peerstore.
func Listen(ctx context.Context, addr ma.Multiaddr, local peer.ID, sk ic.PrivKey) (Listener, error) {
    ml, err := manetListen(addr)
    if err != nil {
        return nil, err
    }

    l := &listener{
        Listener: ml,
        local:    local,
        privk:    sk,
    }
    l.proc = goprocessctx.WithContextAndTeardown(ctx, l.teardown)

    log.Debugf("Conn Listener on %s", l.Multiaddr())
    log.Event(ctx, "swarmListen", l)
    return l, nil
}
Example 9: NewSwarm
// NewSwarm constructs a Swarm, with a Chan.
func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr,
    local peer.ID, peers peer.Peerstore, bwc metrics.Reporter) (*Swarm, error) {

    listenAddrs, err := filterAddrs(listenAddrs)
    if err != nil {
        return nil, err
    }

    wrap := func(c transport.Conn) transport.Conn {
        return mconn.WrapConn(bwc, c)
    }

    s := &Swarm{
        swarm:  ps.NewSwarm(PSTransport),
        local:  local,
        peers:  peers,
        ctx:    ctx,
        dialT:  DialTimeout,
        notifs: make(map[inet.Notifiee]ps.Notifiee),
        transports: []transport.Transport{
            transport.NewTCPTransport(),
            transport.NewUtpTransport(),
        },
        bwc:         bwc,
        fdRateLimit: make(chan struct{}, concurrentFdDials),
        Filters:     filter.NewFilters(),
        dialer:      conn.NewDialer(local, peers.PrivKey(local), wrap),
    }

    // configure Swarm
    s.proc = goprocessctx.WithContextAndTeardown(ctx, s.teardown)
    s.SetConnHandler(nil) // make sure to setup our own conn handler.

    // setup swarm metrics
    prom.MustRegisterOrGet(peersTotal)
    s.Notify((*metricsNotifiee)(s))

    err = s.setupInterfaces(listenAddrs)
    if err != nil {
        return nil, err
    }

    return s, nil
}
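All nine examples share the same shape: the constructor wires ctx and a teardown method into a goprocess.Process, and shutdown later goes through that process. As a hedged sketch of the other half of the pattern (the actual Close methods live elsewhere in go-ipfs and may differ in detail):

// Close shuts the component down exactly once: proc.Close() runs the
// teardown registered in the constructor and returns its error; later
// calls return immediately with the same result.
func (s *Swarm) Close() error {
    return s.proc.Close()
}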