This article collects typical usage examples of the Golang peer.ID function from gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer. If you are unsure what peer.ID does or how to use it, the selected examples below should help.
Fifteen code examples of the ID function are shown, drawn from real projects.
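Before the examples, one point that makes them easier to read: in this gx-vendored version of go-libp2p, peer.ID is declared as a string-backed type, so peer.ID(x) is a plain type conversion, not a validating constructor. A minimal sketch of that idea, assuming the import path from the title (the printed output is illustrative; Pretty() base58-encodes the raw bytes):

package main

import (
    "fmt"

    peer "gx/ipfs/QmUBogf4nUefBjmYjn6jfsfPJRkmDGSeMhNj4usRKq69f4/go-libp2p/p2p/peer"
)

func main() {
    // peer.ID is a string-backed type; no validation happens here.
    id := peer.ID("testing")
    fmt.Println(id.Pretty()) // prints the base58 encoding of the raw bytes
}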
Example 1: TestProviderManager
func TestProviderManager(t *testing.T) {
    ctx := context.Background()
    mid := peer.ID("testing")
    p := NewProviderManager(ctx, mid)
    a := key.Key("test")
    p.AddProvider(ctx, a, peer.ID("testingprovider"))
    resp := p.GetProviders(ctx, a)
    if len(resp) != 1 {
        t.Fatal("Could not retrieve provider.")
    }
    p.proc.Close()
}
Example 2: closerPeersSingle
func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key key.Key, p peer.ID) ([]peer.ID, error) {
    // The key travels in the peer-ID slot of the FIND_NODE message,
    // hence the peer.ID(key) conversion.
    pmes, err := dht.findPeerSingle(ctx, p, peer.ID(key))
    if err != nil {
        return nil, err
    }

    var out []peer.ID
    for _, pbp := range pmes.GetCloserPeers() {
        pid := peer.ID(pbp.GetId())
        if pid != dht.self { // don't add self
            dht.peerstore.AddAddrs(pid, pbp.Addresses(), peer.TempAddrTTL)
            out = append(out, pid)
        }
    }
    return out, nil
}
Example 3: handleFindPeer
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
    defer log.EventBegin(ctx, "handleFindPeer", p).Done()
    resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
    var closest []peer.ID

    // if looking for self... special case where we send it on CloserPeers.
    if peer.ID(pmes.GetKey()) == dht.self {
        closest = []peer.ID{dht.self}
    } else {
        closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
    }

    if closest == nil {
        log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
        return resp, nil
    }

    var withAddresses []peer.PeerInfo
    closestinfos := peer.PeerInfos(dht.peerstore, closest)
    for _, pi := range closestinfos {
        if len(pi.Addrs) > 0 {
            withAddresses = append(withAddresses, pi)
            log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
        }
    }

    resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
    return resp, nil
}
Example 4: RandPeerID
// RandPeerID generates a random "valid" peer ID. It does not need to
// generate a real key pair, since it behaves as if the key were lost
// right away; reading randomness and hashing it is fine. To generate a
// proper key pair and the corresponding peer ID, use:
//   sk, pk, _ := testutil.RandKeyPair()
//   id, _ := peer.IDFromPublicKey(pk)
func RandPeerID() (peer.ID, error) {
    buf := make([]byte, 16)
    if _, err := io.ReadFull(u.NewTimeSeededRand(), buf); err != nil {
        return "", err
    }
    h := u.Hash(buf)
    return peer.ID(h), nil
}
Example 5: newEngine
func newEngine(ctx context.Context, idStr string) peerAndEngine {
    return peerAndEngine{
        Peer: peer.ID(idStr),
        //Strategy: New(true),
        Engine: NewEngine(ctx,
            blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))),
    }
}
Example 6: GetGraphJson
func GetGraphJson(dinfo []*DiagInfo) []byte {
    out := make(map[string]interface{})
    names := make(map[string]int)
    var nodes []*node
    for _, di := range dinfo {
        names[di.ID] = len(nodes)
        val := di.BwIn + di.BwOut + 10
        // include the routing table key, for proper routing table display
        rtk := peer.ID(rtable.ConvertPeerID(peer.ID(di.ID))).Pretty()
        nodes = append(nodes, &node{Name: di.ID, Value: val, RtKey: rtk})
    }

    var links []*link
    linkexists := make([][]bool, len(nodes))
    for i := range linkexists {
        linkexists[i] = make([]bool, len(nodes))
    }

    for _, di := range dinfo {
        myid := names[di.ID]
        for _, con := range di.Connections {
            thisid := names[con.ID]
            if !linkexists[thisid][myid] {
                links = append(links, &link{
                    Source: myid,
                    Target: thisid,
                    Value:  3,
                })
                linkexists[myid][thisid] = true
            }
        }
    }

    out["nodes"] = nodes
    out["links"] = links
    b, err := json.Marshal(out)
    if err != nil {
        panic(err)
    }
    return b
}
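The node and link types used by GetGraphJson are not part of the snippet. A plausible minimal definition, inferred from how the fields are used above; the JSON tags (in the style a D3 force graph expects) and the exact field types are assumptions, not taken from the source:

// Hypothetical declarations matching GetGraphJson's usage above.
type node struct {
    Name  string `json:"name"`
    Value uint64 `json:"value"`
    RtKey string `json:"rtkey"`
}

type link struct {
    Source int `json:"source"`
    Target int `json:"target"`
    Value  int `json:"value"`
}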
Example 7: storeProvidersToPeerstore
func storeProvidersToPeerstore(ps peer.Peerstore, p peer.ID, providers []*dhtpb.Message_Peer) {
    for _, provider := range providers {
        providerID := peer.ID(provider.GetId())
        if providerID != p {
            log.Errorf("provider message came from third-party %s", p)
            continue
        }
        for _, maddr := range provider.Addresses() {
            // as a router, we want to store addresses for peers who have provided
            ps.AddAddr(p, maddr, peer.AddressTTL)
        }
    }
}
Example 8: GetPublicKey
func GetPublicKey(r IpfsRouting, ctx context.Context, pkhash []byte) (ci.PubKey, error) {
    if dht, ok := r.(PubKeyFetcher); ok {
        // If we have a DHT as our routing system, use optimized fetcher
        return dht.GetPublicKey(ctx, peer.ID(pkhash))
    }

    key := key.Key("/pk/" + string(pkhash))
    pkval, err := r.GetValue(ctx, key)
    if err != nil {
        return nil, err
    }
    // get PublicKey from node.Data
    return ci.UnmarshalPublicKey(pkval)
}
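A note on the pkhash parameter: in this codebase a peer's ID is the multihash of its public key, which is why the function can convert it directly with peer.ID(pkhash). A hedged sketch of a call site, where r, ctx, and somePeer are hypothetical placeholders:

// The raw bytes of a peer ID are exactly the multihash of its public key,
// so they can serve as pkhash here.
pk, err := GetPublicKey(r, ctx, []byte(somePeer))
if err != nil {
    return err
}
// pk can now verify signatures from somePeer, as in Example 9 below.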
Example 9: verify
func verify(ps peer.Peerstore, r *dhtpb.Record) error {
    v := make(record.Validator)
    v["pk"] = record.PublicKeyValidator
    p := peer.ID(r.GetAuthor())
    pk := ps.PubKey(p)
    if pk == nil {
        return fmt.Errorf("do not have public key for %s", p)
    }
    if err := record.CheckRecordSig(r, pk); err != nil {
        return err
    }
    if err := v.VerifyRecord(r); err != nil {
        return err
    }
    return nil
}
Example 10: verifyRecordLocally
// verifyRecordLocally attempts to verify a record. If we do not have the
// public key locally, we fail; we do not search the DHT.
func (dht *IpfsDHT) verifyRecordLocally(r *pb.Record) error {
    if len(r.Signature) > 0 {
        // First, validate the signature.
        p := peer.ID(r.GetAuthor())
        pk := dht.peerstore.PubKey(p)
        if pk == nil {
            return fmt.Errorf("do not have public key for %s", p)
        }
        if err := record.CheckRecordSig(r, pk); err != nil {
            return err
        }
    }
    return dht.Validator.VerifyRecord(r)
}
Example 11: verifyRecordOnline
// verifyRecordOnline verifies a record, searching the DHT for the public key
// if necessary. The reason there is a distinction in the functions is that
// retrieving arbitrary public keys from the DHT as a result of passively
// receiving records (e.g. through a PUT_VALUE or ADD_PROVIDER) can cause a
// massive amplification attack on the DHT. Use with care.
func (dht *IpfsDHT) verifyRecordOnline(ctx context.Context, r *pb.Record) error {
    if len(r.Signature) > 0 {
        // get the public key, search for it if necessary.
        p := peer.ID(r.GetAuthor())
        pk, err := dht.GetPublicKey(ctx, p)
        if err != nil {
            return err
        }
        if err := record.CheckRecordSig(r, pk); err != nil {
            return err
        }
    }
    return dht.Validator.VerifyRecord(r)
}
Example 12: loadID
func (n *IpfsNode) loadID() error {
    if n.Identity != "" {
        return errors.New("identity already loaded")
    }

    cfg, err := n.Repo.Config()
    if err != nil {
        return err
    }

    cid := cfg.Identity.PeerID
    if cid == "" {
        return errors.New("identity was not set in config (was 'ipfs init' run?)")
    }

    n.Identity = peer.ID(b58.Decode(cid))
    return nil
}
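loadID turns the base58 PeerID string from the config back into raw bytes. A small round-trip sketch, assuming the b58 alias from the snippet above and a hypothetical cidString holding a valid base58 peer ID:

// For well-formed input, Pretty() inverts b58.Decode.
raw := b58.Decode(cidString) // cidString: hypothetical base58 peer ID
id := peer.ID(raw)
fmt.Println(id.Pretty() == cidString) // true for valid input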
Example 13: TestTableMultithreaded
// Looks for race conditions in table operations. For a more 'certain'
// test, increase the loop counter from 1000 to a much higher number
// and set GOMAXPROCS above 1.
func TestTableMultithreaded(t *testing.T) {
    local := peer.ID("localPeer")
    m := peer.NewMetrics()
    tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)

    var peers []peer.ID
    for i := 0; i < 500; i++ {
        peers = append(peers, tu.RandPeerIDFatal(t))
    }

    done := make(chan struct{})
    go func() {
        for i := 0; i < 1000; i++ {
            n := rand.Intn(len(peers))
            tab.Update(peers[n])
        }
        done <- struct{}{}
    }()
    go func() {
        for i := 0; i < 1000; i++ {
            n := rand.Intn(len(peers))
            tab.Update(peers[n])
        }
        done <- struct{}{}
    }()
    go func() {
        for i := 0; i < 1000; i++ {
            n := rand.Intn(len(peers))
            tab.Find(peers[n])
        }
        done <- struct{}{}
    }()
    <-done
    <-done
    <-done
}
Example 14: handleMessage
func (s *Server) handleMessage(
    ctx context.Context, p peer.ID, req *dhtpb.Message) (peer.ID, *dhtpb.Message) {

    defer log.EventBegin(ctx, "routingMessageReceived", req, p).Done()

    var response = dhtpb.NewMessage(req.GetType(), req.GetKey(), req.GetClusterLevel())
    switch req.GetType() {

    case dhtpb.Message_GET_VALUE:
        rawRecord, err := getRoutingRecord(s.routingBackend, key.Key(req.GetKey()))
        if err != nil {
            return "", nil
        }
        response.Record = rawRecord
        return p, response

    case dhtpb.Message_PUT_VALUE:
        // FIXME: verify complains that the peer's ID is not present in the
        // peerstore. Mocknet problem?
        // if err := verify(s.peerstore, req.GetRecord()); err != nil {
        //     log.Event(ctx, "validationFailed", req, p)
        //     return "", nil
        // }
        putRoutingRecord(s.routingBackend, key.Key(req.GetKey()), req.GetRecord())
        return p, req

    case dhtpb.Message_FIND_NODE:
        p := s.peerstore.PeerInfo(peer.ID(req.GetKey()))
        pri := []dhtpb.PeerRoutingInfo{
            {
                PeerInfo: p,
                // Connectedness: TODO
            },
        }
        response.CloserPeers = dhtpb.PeerRoutingInfosToPBPeers(pri)
        return p.ID, response

    case dhtpb.Message_ADD_PROVIDER:
        for _, provider := range req.GetProviderPeers() {
            providerID := peer.ID(provider.GetId())
            if providerID == p {
                store := []*dhtpb.Message_Peer{provider}
                storeProvidersToPeerstore(s.peerstore, p, store)
                if err := putRoutingProviders(s.routingBackend, key.Key(req.GetKey()), store); err != nil {
                    return "", nil
                }
            } else {
                log.Event(ctx, "addProviderBadRequest", p, req)
            }
        }
        return "", nil

    case dhtpb.Message_GET_PROVIDERS:
        providers, err := getRoutingProviders(s.routingBackend, key.Key(req.GetKey()))
        if err != nil {
            return "", nil
        }
        response.ProviderPeers = providers
        return p, response

    case dhtpb.Message_PING:
        return p, req

    default:
    }
    return "", nil
}
Example 15:
Arguments: []cmds.Argument{
    cmds.StringArg("peerid", false, false, "Peer.ID of node to look up.").EnableStdin(),
},
Options: []cmds.Option{
    cmds.StringOption("format", "f", "Optional output format."),
},
Run: func(req cmds.Request, res cmds.Response) {
    node, err := req.InvocContext().GetNode()
    if err != nil {
        res.SetError(err, cmds.ErrNormal)
        return
    }

    var id peer.ID
    if len(req.Arguments()) > 0 {
        id = peer.ID(b58.Decode(req.Arguments()[0]))
        if len(id) == 0 {
            res.SetError(cmds.ClientError("Invalid peer id"), cmds.ErrClient)
            return
        }
    } else {
        id = node.Identity
    }

    if id == node.Identity {
        output, err := printSelf(node)
        if err != nil {
            res.SetError(err, cmds.ErrNormal)
            return
        }
        res.SetOutput(output)