This page collects typical usage examples of the Golang MockHostOption function from github.com/ipfs/go-ipfs/core/mock. If you are wondering what MockHostOption does, how to call it, or what real-world uses look like, the curated function examples below may help.
Four code examples of the MockHostOption function are shown below, sorted by popularity by default.
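All four examples share one pattern: create an in-memory network with mocknet.New, then pass mock.MockHostOption(mn) as the Host option of core.BuildCfg so the node's libp2p host is attached to that fake network instead of real transports. As a quick orientation, here is a minimal sketch of just that wiring; the helper name is hypothetical, and the imports follow the examples below:

func newMockNode(ctx context.Context, mn mocknet.Mocknet) (*core.IpfsNode, error) { // hypothetical helper
	return core.NewNode(ctx, &core.BuildCfg{
		Online: true,                     // enable the networking subsystems
		Host:   mock.MockHostOption(mn), // attach the node's host to the in-memory mocknet
	})
}

Nodes built this way can only reach each other after links are created, which is why every example calls mn.LinkAll() once all peers exist.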
Example 1: InitializeSupernodeNetwork
func InitializeSupernodeNetwork(
	ctx context.Context,
	numServers, numClients int,
	conf testutil.LatencyConfig) ([]*core.IpfsNode, []*core.IpfsNode, error) {

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency:   conf.NetworkLatency,
		Bandwidth: math.MaxInt32,
	})

	// routing datastore shared by every supernode server
	routingDatastore := ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore()))
	var servers []*core.IpfsNode
	for i := 0; i < numServers; i++ {
		bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeServer(routingDatastore),
		})
		if err != nil {
			return nil, nil, err
		}
		servers = append(servers, bootstrap)
	}

	// gather the servers' peer infos so the clients can route through them
	var bootstrapInfos []pstore.PeerInfo
	for _, n := range servers {
		info := n.Peerstore.PeerInfo(n.PeerHost.ID())
		bootstrapInfos = append(bootstrapInfos, info)
	}

	var clients []*core.IpfsNode
	for i := 0; i < numClients; i++ {
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeClient(bootstrapInfos...),
		})
		if err != nil {
			return nil, nil, err
		}
		clients = append(clients, n)
	}
	mn.LinkAll()

	// bootstrap every client off the supernode servers
	bcfg := core.BootstrapConfigWithPeers(bootstrapInfos)
	for _, n := range clients {
		if err := n.Bootstrap(bcfg); err != nil {
			return nil, nil, err
		}
	}
	return servers, clients, nil
}
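A hedged sketch of how this helper might be driven; the test name, peer counts, and latency value are illustrative, not from the source:

func TestSupernodeSmoke(t *testing.T) { // hypothetical test
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// 2 servers and 4 clients on one in-memory network, with a made-up 50ms latency
	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 4,
		testutil.LatencyConfig{NetworkLatency: 50 * time.Millisecond})
	if err != nil {
		t.Fatal(err)
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}
}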
Example 2: RunThreeLeggedCat
func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	const numPeers = 3

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer bootstrap.Close()

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()

	mn.LinkAll()

	bis := bootstrap.Peerstore.PeerInfo(bootstrap.PeerHost.ID())
	bcfg := core.BootstrapConfigWithPeers([]peer.PeerInfo{bis})
	if err := adder.Bootstrap(bcfg); err != nil {
		return err
	}
	if err := catter.Bootstrap(bcfg); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	readerCatted, err := coreunix.Cat(ctx, catter, added)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	io.Copy(bufout, readerCatted)
	if 0 != bytes.Compare(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
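This helper adds data on one peer and cats it back on another, with a third node acting purely as the bootstrap rendezvous. A minimal sketch of a driver; the test name and payload are made up:

func TestThreeLeggedCat(t *testing.T) { // hypothetical test
	// a zero-valued LatencyConfig means no artificial network latency
	if err := RunThreeLeggedCat([]byte("hello mocknet"), testutil.LatencyConfig{}); err != nil {
		t.Fatal(err)
	}
}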
Example 3: benchCat
func benchCat(b *testing.B, data []byte, conf testutil.LatencyConfig) error {
	b.StopTimer()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()

	err = mn.LinkAll()
	if err != nil {
		return err
	}

	bs1 := []peer.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)}
	bs2 := []peer.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)}
	if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); err != nil {
		return err
	}
	if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	b.StartTimer()
	readerCatted, err := coreunix.Cat(ctx, catter, added)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	io.Copy(bufout, readerCatted)
	if 0 != bytes.Compare(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
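Because benchCat manages the benchmark timer itself (StopTimer during setup, StartTimer just before the Cat), the surrounding Benchmark function only needs the b.N loop. A minimal sketch; the benchmark name and payload size are assumptions:

func BenchmarkMockCat(b *testing.B) { // hypothetical benchmark
	data := make([]byte, 1024*1024) // 1 MiB of zeros as a stand-in payload
	for i := 0; i < b.N; i++ {
		if err := benchCat(b, data, testutil.LatencyConfig{}); err != nil {
			b.Fatal(err)
		}
	}
}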
Example 4: TestRepublish
func TestRepublish(t *testing.T) {
	// set cache life to zero for testing low-period repubs
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// create network
	mn := mocknet.New(ctx)

	var nodes []*core.IpfsNode
	for i := 0; i < 10; i++ {
		nd, err := core.NewNode(ctx, &core.BuildCfg{
			Online: true,
			Host:   mock.MockHostOption(mn),
		})
		if err != nil {
			t.Fatal(err)
		}

		nd.Namesys = namesys.NewNameSystem(nd.Routing, nd.Repo.Datastore(), 0)
		nodes = append(nodes, nd)
	}

	mn.LinkAll()

	bsinf := core.BootstrapConfigWithPeers(
		[]pstore.PeerInfo{
			nodes[0].Peerstore.PeerInfo(nodes[0].Identity),
		},
	)

	for _, n := range nodes[1:] {
		if err := n.Bootstrap(bsinf); err != nil {
			t.Fatal(err)
		}
	}

	// have one node publish a record that is valid for 1 second
	publisher := nodes[3]
	p := path.FromString("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") // does not need to be valid
	rp := namesys.NewRoutingPublisher(publisher.Routing, publisher.Repo.Datastore())
	err := rp.PublishWithEOL(ctx, publisher.PrivateKey, p, time.Now().Add(time.Second))
	if err != nil {
		t.Fatal(err)
	}

	name := "/ipns/" + publisher.Identity.Pretty()
	if err := verifyResolution(nodes, name, p); err != nil {
		t.Fatal(err)
	}

	// Now wait a second; the record will be invalid and we should fail to resolve
	time.Sleep(time.Second)
	if err := verifyResolutionFails(nodes, name); err != nil {
		t.Fatal(err)
	}

	// The republishers that are contained within the nodes have their timeout set
	// to 12 hours. Instead of trying to tweak those, we're just going to pretend
	// they don't exist and make our own.
	repub := NewRepublisher(publisher.Routing, publisher.Repo.Datastore(), publisher.Peerstore)
	repub.Interval = time.Second
	repub.RecordLifetime = time.Second * 5
	repub.AddName(publisher.Identity)

	proc := goprocess.Go(repub.Run)
	defer proc.Close()

	// now wait a couple seconds for it to fire
	time.Sleep(time.Second * 2)

	// we should be able to resolve them now
	if err := verifyResolution(nodes, name, p); err != nil {
		t.Fatal(err)
	}
}
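Note that this test relies on real wall-clock sleeps (about three seconds of time.Sleep plus the one-second record lifetime), so it can be timing-sensitive on a slow or heavily loaded machine; running it in isolation with go test -run TestRepublish makes failures easier to attribute.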