This article collects typical usage examples of the Go function PBPeersToPeerInfos from github.com/ipfs/go-ipfs/routing/dht/pb. If you have been wondering what PBPeersToPeerInfos does and how to call it, the hand-picked examples below should help.
Six code examples of PBPeersToPeerInfos are shown; by default they are ordered by popularity.
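Before the full examples, a minimal sketch of the conversion step they all share may help. Judging from the call sites below, PBPeersToPeerInfos takes the slice of protobuf peer entries carried by a DHT message (as returned by GetCloserPeers or GetProviderPeers) and returns a []peer.PeerInfo whose entries expose an ID and the advertised Addrs. The variable pmes and the printing are illustrative assumptions, not part of go-ipfs:

// Minimal sketch, assuming pmes is a *pb.Message already received from a remote peer.
closer := pmes.GetCloserPeers()        // raw protobuf peer entries from the wire
infos := pb.PBPeersToPeerInfos(closer) // decoded into []peer.PeerInfo
for _, pi := range infos {
	// Each entry carries the remote peer's ID and the multiaddrs it advertised.
	fmt.Printf("peer %s reachable at %v\n", pi.ID, pi.Addrs)
}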
Example 1: handleAddProvider
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()

	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check the signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}

		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}
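Example 1 keeps only provider records that come from the message originator and include at least one address. That filter can be distilled into a small helper. The function below is a hypothetical sketch, not part of go-ipfs; it assumes the generated protobuf peer type is pb.Message_Peer, as the GetProviderPeers accessor above suggests:

// usableProviderInfos is a hypothetical helper distilled from Example 1: it decodes
// the protobuf peers and keeps only entries that come from the message originator
// and carry at least one dialable address.
func usableProviderInfos(pbps []*pb.Message_Peer, from peer.ID) []peer.PeerInfo {
	var out []peer.PeerInfo
	for _, pi := range pb.PBPeersToPeerInfos(pbps) {
		if pi.ID != from {
			continue // not from the originator; ignore the unverifiable record
		}
		if len(pi.Addrs) == 0 {
			continue // nothing to dial
		}
		out = append(out, pi)
	}
	return out
}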
Example 2: getValueOrPeers
// getValueOrPeers queries a particular peer p for the value for
// key. It returns either the value or a list of closer peers.
// NOTE: It will update the dht's peerstore with any new addresses
// it finds for the given peer.
func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.ID,
	key key.Key) (*pb.Record, []peer.PeerInfo, error) {

	pmes, err := dht.getValueSingle(ctx, p, key)
	if err != nil {
		return nil, nil, err
	}

	// Perhaps we were given closer peers
	peers := pb.PBPeersToPeerInfos(pmes.GetCloserPeers())

	if record := pmes.GetRecord(); record != nil {
		// Success! We were given the value
		log.Debug("getValueOrPeers: got value")

		// make sure record is valid.
		err = dht.verifyRecordOnline(ctx, record)
		if err != nil {
			log.Info("Received invalid record! (discarded)")
			// return a sentinel to signify an invalid record was received
			err = errInvalidRecord
			record = new(pb.Record)
		}
		return record, peers, err
	}

	if len(peers) > 0 {
		log.Debug("getValueOrPeers: peers")
		return nil, peers, nil
	}

	log.Warning("getValueOrPeers: routing.ErrNotFound")
	return nil, nil, routing.ErrNotFound
}
Example 3: FindPeer
func (c *Client) FindPeer(ctx context.Context, id peer.ID) (peer.PeerInfo, error) {
	defer log.EventBegin(ctx, "findPeer", id).Done()

	request := pb.NewMessage(pb.Message_FIND_NODE, string(id), 0)
	response, err := c.proxy.SendRequest(ctx, request) // hide remote
	if err != nil {
		return peer.PeerInfo{}, err
	}
	for _, p := range pb.PBPeersToPeerInfos(response.GetCloserPeers()) {
		if p.ID == id {
			return p, nil
		}
	}
	return peer.PeerInfo{}, errors.New("could not find peer")
}
Example 4: FindProvidersAsync
func (c *Client) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.PeerInfo {
	ctx = logging.ContextWithLoggable(ctx, logging.Uuid("findProviders"))
	defer log.EventBegin(ctx, "findProviders", &k).Done()
	ch := make(chan peer.PeerInfo)
	go func() {
		defer close(ch)
		request := pb.NewMessage(pb.Message_GET_PROVIDERS, string(k), 0)
		response, err := c.proxy.SendRequest(ctx, request)
		if err != nil {
			log.Debug(err)
			return
		}

		for _, p := range pb.PBPeersToPeerInfos(response.GetProviderPeers()) {
			select {
			case <-ctx.Done():
				log.Debug(ctx.Err())
				return
			case ch <- p:
			}
		}
	}()
	return ch
}
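As a usage note, the channel returned by Example 4 is closed by the worker goroutine, so a caller can simply range over it. The sketch below is hypothetical; the client c, the key k, and the 10-second timeout are assumptions made for illustration:

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

// The goroutine closes the channel on completion, error, or context expiry,
// so this loop always terminates.
for pi := range c.FindProvidersAsync(ctx, k, 20) {
	fmt.Printf("provider %s (%d addrs)\n", pi.ID, len(pi.Addrs))
}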
Example 5: FindPeer
// FindPeer searches for a peer with given ID.
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.PeerInfo, error) {
	defer log.EventBegin(ctx, "FindPeer", id).Done()

	// Check if we're already connected to them
	if pi := dht.FindLocal(id); pi.ID != "" {
		return pi, nil
	}

	peers := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), AlphaValue)
	if len(peers) == 0 {
		return peer.PeerInfo{}, kb.ErrLookupFailure
	}

	// Sanity...
	for _, p := range peers {
		if p == id {
			log.Debug("Found target peer in list of closest peers...")
			return dht.peerstore.PeerInfo(p), nil
		}
	}

	// setup the Query
	parent := ctx
	query := dht.newQuery(key.Key(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		pmes, err := dht.findPeerSingle(ctx, p, id)
		if err != nil {
			return nil, err
		}

		closer := pmes.GetCloserPeers()
		clpeerInfos := pb.PBPeersToPeerInfos(closer)

		// see if we got the peer here
		for _, npi := range clpeerInfos {
			if npi.ID == id {
				return &dhtQueryResult{
					peer:    npi,
					success: true,
				}, nil
			}
		}

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			Responses: pointerizePeerInfos(clpeerInfos),
		})

		return &dhtQueryResult{closerPeers: clpeerInfos}, nil
	})

	// run it!
	result, err := query.Run(ctx, peers)
	if err != nil {
		return peer.PeerInfo{}, err
	}

	log.Debugf("FindPeer %v %v", id, result.success)
	if result.peer.ID == "" {
		return peer.PeerInfo{}, routing.ErrNotFound
	}

	return result.peer, nil
}
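A hypothetical call site for Example 5's FindPeer; the dht instance, ctx, and target ID pid are assumptions made for illustration:

pi, err := dht.FindPeer(ctx, pid)
if err != nil {
	// routing.ErrNotFound is returned when the query finishes without a match.
	return err
}
// On success the PeerInfo carries whatever addresses the lookup discovered.
fmt.Println("found", pi.ID, "at", pi.Addrs)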
Example 6: findProvidersAsyncRoutine
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key key.Key, count int, peerOut chan peer.PeerInfo) {
	defer log.EventBegin(ctx, "findProvidersAsync", &key).Done()
	defer close(peerOut)

	ps := pset.NewLimited(count)
	provs := dht.providers.GetProviders(ctx, key)
	for _, p := range provs {
		// NOTE: assuming that this list of peers is unique
		if ps.TryAdd(p) {
			select {
			case peerOut <- dht.peerstore.PeerInfo(p):
			case <-ctx.Done():
				return
			}
		}

		// If we have enough peers locally, don't bother with remote RPC
		if ps.Size() >= count {
			return
		}
	}

	// setup the Query
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		pmes, err := dht.findProvidersSingle(ctx, p, key)
		if err != nil {
			return nil, err
		}

		log.Debugf("%d provider entries", len(pmes.GetProviderPeers()))
		provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
		log.Debugf("%d provider entries decoded", len(provs))

		// Add unique providers from request, up to 'count'
		for _, prov := range provs {
			log.Debugf("got provider: %s", prov)
			if ps.TryAdd(prov.ID) {
				log.Debugf("using provider: %s", prov)
				select {
				case peerOut <- prov:
				case <-ctx.Done():
					log.Debug("Context timed out sending more providers")
					return nil, ctx.Err()
				}
			}
			if ps.Size() >= count {
				log.Debugf("got enough providers (%d/%d)", ps.Size(), count)
				return &dhtQueryResult{success: true}, nil
			}
		}

		// Give closer peers back to the query to be queried
		closer := pmes.GetCloserPeers()
		clpeers := pb.PBPeersToPeerInfos(closer)
		log.Debugf("got closer peers: %d %s", len(clpeers), clpeers)

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(clpeers),
		})

		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	peers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
	_, err := query.Run(ctx, peers)
	if err != nil {
		log.Debugf("Query error: %s", err)
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
		})
	}
}