This article collects typical usage examples of the Exchange function from the Golang package github.com/ipfs/go-ipfs/exchange/offline. If you are wondering what the Exchange function does, how to call it, or simply want to see it used in real code, the curated examples below should help.
The 15 code examples that follow are sorted by popularity by default.
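Every example below follows the same core pattern: wrap a datastore in a blockstore, pass the blockstore to offline.Exchange to get an exchange interface that serves blocks only from local storage (it never touches the network), then layer a blockservice and usually a DAGService on top. Here is that pattern in isolation as a minimal sketch. The helper name newOfflineDAGService is ours for illustration; the package aliases (ds, dssync, blockstore, bs, offline, mdag) follow the examples below, and the sketch assumes a go-ipfs version whose blockservice constructor does not return an error (in some of the versions shown below, New returns an error as a second value).

func newOfflineDAGService() mdag.DAGService {
    // An in-memory datastore, wrapped so it is safe for concurrent use.
    dstore := dssync.MutexWrap(ds.NewMapDatastore())

    // A blockstore layered over the datastore.
    bstore := blockstore.NewBlockstore(dstore)

    // offline.Exchange resolves every request against the local
    // blockstore and never goes to the network, which is what makes
    // it useful in tests and for offline nodes.
    blockserv := bs.New(bstore, offline.Exchange(bstore))

    return mdag.NewDAGService(blockserv)
}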
Example 1: BenchmarkHandle10KBlocks
func BenchmarkHandle10KBlocks(b *testing.B) {
    bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
    var testdata []*blocks.Block
    for i := 0; i < 10000; i++ {
        // string(i) yields the UTF-8 encoding of the rune i, so each of
        // the 10000 blocks has distinct content.
        testdata = append(testdata, blocks.NewBlock([]byte(string(i))))
    }
    b.ResetTimer()
    b.SetBytes(10000)
    for i := 0; i < b.N; i++ {
        b.StopTimer()
        w := NewWorker(offline.Exchange(bstore), Config{
            NumWorkers:       1,
            ClientBufferSize: 0,
            WorkerBufferSize: 0,
        })
        b.StartTimer()

        for _, block := range testdata {
            if err := w.HasBlock(block); err != nil {
                b.Fatal(err)
            }
        }

        b.StopTimer()
        w.Close()
        b.StartTimer()
    }
}
Example 2: getMockDagServ
func getMockDagServ(t testing.TB) mdag.DAGService {
    dstore := ds.NewMapDatastore()
    tsds := sync.MutexWrap(dstore)
    bstore := blockstore.NewBlockstore(tsds)
    bserv := bs.New(bstore, offline.Exchange(bstore))
    return mdag.NewDAGService(bserv)
}
Example 3: setupNode
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
    // setup local peer ID (private key is loaded in online setup)
    if err := n.loadID(); err != nil {
        return err
    }

    var err error
    n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
    if err != nil {
        return err
    }

    if cfg.Online {
        do := setupDiscoveryOption(n.Repo.Config().Discovery)
        if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
            return err
        }
    } else {
        n.Exchange = offline.Exchange(n.Blockstore)
    }

    n.Blocks = bserv.New(n.Blockstore, n.Exchange)
    n.DAG = dag.NewDAGService(n.Blocks)

    n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG)
    if err != nil {
        // TODO: we should move towards only running 'NewPinner' explicitly on
        // node init instead of implicitly here as a result of the pinner keys
        // not being found in the datastore.
        // This is kinda sketchy and could cause data loss.
        n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG)
    }

    n.Resolver = &path.Resolver{DAG: n.DAG}
    return nil
}
Example 4: TestDuplicateSemantics
func TestDuplicateSemantics(t *testing.T) {
    ctx := context.Background()
    dstore := dssync.MutexWrap(ds.NewMapDatastore())
    bstore := blockstore.NewBlockstore(dstore)
    bserv := bs.New(bstore, offline.Exchange(bstore))
    dserv := mdag.NewDAGService(bserv)

    // TODO does pinner need to share datastore with blockservice?
    p := NewPinner(dstore, dserv, dserv)

    a, _ := randNode()
    _, err := dserv.Add(a)
    if err != nil {
        t.Fatal(err)
    }

    // pin recursively
    err = p.Pin(ctx, a, true)
    if err != nil {
        t.Fatal(err)
    }

    // pinning directly should fail
    err = p.Pin(ctx, a, false)
    if err == nil {
        t.Fatal("expected direct pin to fail")
    }

    // pinning recursively again should succeed
    err = p.Pin(ctx, a, true)
    if err != nil {
        t.Fatal(err)
    }
}
Example 5: TestPinRecursiveFail
func TestPinRecursiveFail(t *testing.T) {
    ctx := context.Background()
    dstore := dssync.MutexWrap(ds.NewMapDatastore())
    bstore := blockstore.NewBlockstore(dstore)
    bserv := bs.New(bstore, offline.Exchange(bstore))
    dserv := mdag.NewDAGService(bserv)

    p := NewPinner(dstore, dserv)

    a, _ := randNode()
    b, _ := randNode()
    err := a.AddNodeLinkClean("child", b)
    if err != nil {
        t.Fatal(err)
    }

    // NOTE: This isn't a time-based test; we expect the pin to fail
    // because b was never added to the DAG service.
    mctx, _ := context.WithTimeout(ctx, time.Millisecond)
    err = p.Pin(mctx, a, true)
    if err == nil {
        t.Fatal("should have failed to pin here")
    }

    _, err = dserv.Add(b)
    if err != nil {
        t.Fatal(err)
    }

    // this one is time-based... but shouldn't cause any issues
    mctx, _ = context.WithTimeout(ctx, time.Second)
    err = p.Pin(mctx, a, true)
    if err != nil {
        t.Fatal(err)
    }
}
Example 6: TestFetchGraph
func TestFetchGraph(t *testing.T) {
    var dservs []DAGService
    bsis := bstest.Mocks(2)
    for _, bsi := range bsis {
        dservs = append(dservs, NewDAGService(bsi))
    }

    read := io.LimitReader(u.NewTimeSeededRand(), 1024*32)
    root, err := imp.BuildDagFromReader(dservs[0], chunk.NewSizeSplitter(read, 512))
    if err != nil {
        t.Fatal(err)
    }

    err = FetchGraph(context.TODO(), root, dservs[1])
    if err != nil {
        t.Fatal(err)
    }

    // create an offline dagstore and ensure all blocks were fetched
    bs := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore))
    offline_ds := NewDAGService(bs)
    ks := key.NewKeySet()

    err = EnumerateChildren(context.Background(), offline_ds, root, ks, false)
    if err != nil {
        t.Fatal(err)
    }
}
Example 7: GetOfflineLinkService
func (n *dagService) GetOfflineLinkService() LinkService {
    if n.Blocks.Exchange().IsOnline() {
        bsrv := bserv.New(n.Blocks.Blockstore(), offline.Exchange(n.Blocks.Blockstore()))
        return NewDAGService(bsrv)
    }
    return n
}
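A note on Example 7: if the node's current exchange is online (backed by the network), the method builds a fresh blockservice over the same blockstore but with an offline exchange, so link traversal only ever reads blocks that are already local; if the exchange is already offline, the service simply returns itself.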
Example 8: getMockDagServAndBstore
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore, pin.ManualPinner) {
    dstore := ds.NewMapDatastore()
    tsds := sync.MutexWrap(dstore)
    bstore := blockstore.NewBlockstore(tsds)
    bserv := bs.New(bstore, offline.Exchange(bstore))
    dserv := mdag.NewDAGService(bserv)
    return dserv, bstore, pin.NewPinner(tsds, dserv).GetManual()
}
Example 9: getMockDagServAndBstore
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlockstore) {
    dstore := ds.NewMapDatastore()
    tsds := sync.MutexWrap(dstore)
    bstore := blockstore.NewBlockstore(tsds)
    bserv := bs.New(bstore, offline.Exchange(bstore))
    dserv := mdag.NewDAGService(bserv)
    return dserv, bstore
}
Example 10: Mock
func Mock(t testing.TB) dag.DAGService {
    bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
    bserv, err := bsrv.New(bstore, offline.Exchange(bstore))
    if err != nil {
        t.Fatal(err)
    }
    return dag.NewDAGService(bserv)
}
Example 11: getDagserv
func getDagserv(t *testing.T) merkledag.DAGService {
    db := dssync.MutexWrap(ds.NewMapDatastore())
    bs := bstore.NewBlockstore(db)
    blockserv, err := bserv.New(bs, offline.Exchange(bs))
    if err != nil {
        t.Fatal(err)
    }
    return merkledag.NewDAGService(blockserv)
}
Example 12: NewMockNode
// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*core.IpfsNode, error) {
    ctx := context.Background()

    // Generate Identity
    ident, err := testutil.RandIdentity()
    if err != nil {
        return nil, err
    }
    p := ident.ID()

    c := config.Config{
        Identity: config.Identity{
            PeerID: p.String(),
        },
    }

    nd, err := core.Offline(&repo.Mock{
        C: c,
        D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
    })(ctx)
    if err != nil {
        return nil, err
    }

    nd.PrivateKey = ident.PrivateKey()
    nd.Peerstore = peer.NewPeerstore()
    nd.Peerstore.AddPrivKey(p, ident.PrivateKey())
    nd.Peerstore.AddPubKey(p, ident.PublicKey())
    nd.Identity = p

    nd.PeerHost, err = mocknet.New(nd.Context()).AddPeer(ident.PrivateKey(), ident.Address()) // effectively offline
    if err != nil {
        return nil, err
    }

    // Routing
    nd.Routing = offrt.NewOfflineRouter(nd.Repo.Datastore(), nd.PrivateKey)

    // Bitswap
    bstore := blockstore.NewBlockstore(nd.Repo.Datastore())
    bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
    if err != nil {
        return nil, err
    }

    nd.DAG = mdag.NewDAGService(bserv)
    nd.Pinning = pin.NewPinner(nd.Repo.Datastore(), nd.DAG)

    // Namespace resolver
    nd.Namesys = nsys.NewNameSystem(nd.Routing)

    // Path resolver
    nd.Resolver = &path.Resolver{DAG: nd.DAG}

    return nd, nil
}
Example 13: TestRecurivePathResolution
func TestRecurivePathResolution(t *testing.T) {
    ctx := context.Background()

    dstore := sync.MutexWrap(datastore.NewMapDatastore())
    bstore := blockstore.NewBlockstore(dstore)
    bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
    if err != nil {
        t.Fatal(err)
    }
    dagService := merkledag.NewDAGService(bserv)

    a, _ := randNode()
    b, _ := randNode()
    c, cKey := randNode()

    err = b.AddNodeLink("grandchild", c)
    if err != nil {
        t.Fatal(err)
    }

    err = a.AddNodeLink("child", b)
    if err != nil {
        t.Fatal(err)
    }

    err = dagService.AddRecursive(a)
    if err != nil {
        t.Fatal(err)
    }

    aKey, err := a.Key()
    if err != nil {
        t.Fatal(err)
    }

    segments := []string{aKey.String(), "child", "grandchild"}
    p, err := path.FromSegments("/ipfs/", segments...)
    if err != nil {
        t.Fatal(err)
    }

    resolver := &path.Resolver{DAG: dagService}
    node, err := resolver.ResolvePath(ctx, p)
    if err != nil {
        t.Fatal(err)
    }

    key, err := node.Key()
    if err != nil {
        t.Fatal(err)
    }
    if key.String() != cKey.String() {
        t.Fatal(fmt.Errorf(
            "recursive path resolution failed for %s: %s != %s",
            p.String(), key.String(), cKey.String()))
    }
}
Example 14: standardWithRouting
// TODO refactor so maybeRouter isn't special-cased in this way
func standardWithRouting(r repo.Repo, online bool, routingOption RoutingOption, hostOption HostOption) ConfigOption {
    return func(ctx context.Context) (n *IpfsNode, err error) {
        // FIXME perform node construction in the main constructor so it isn't
        // necessary to perform this teardown in this scope.
        success := false
        defer func() {
            if !success && n != nil {
                n.teardown()
            }
        }()

        // TODO move as much of node initialization as possible into
        // NewIPFSNode. The larger these config options are, the harder it is
        // to test all node construction code paths.
        if r == nil {
            return nil, fmt.Errorf("repo required")
        }
        n = &IpfsNode{
            mode: func() mode {
                if online {
                    return onlineMode
                }
                return offlineMode
            }(),
            Repo: r,
        }

        n.ctx = ctx
        n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

        // setup Peerstore
        n.Peerstore = peer.NewPeerstore()

        // setup local peer ID (private key is loaded in online setup)
        if err := n.loadID(); err != nil {
            return nil, err
        }

        n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
        if err != nil {
            return nil, err
        }

        if online {
            do := setupDiscoveryOption(n.Repo.Config().Discovery)
            if err := n.startOnlineServices(ctx, routingOption, hostOption, do); err != nil {
                return nil, err
            }
        } else {
            n.Exchange = offline.Exchange(n.Blockstore)
        }

        success = true
        return n, nil
    }
}
Example 15: TestMultisetRoundtrip
func TestMultisetRoundtrip(t *testing.T) {
    dstore := dssync.MutexWrap(datastore.NewMapDatastore())
    bstore := blockstore.NewBlockstore(dstore)
    bserv := blockservice.New(bstore, offline.Exchange(bstore))
    dag := merkledag.NewDAGService(bserv)

    fn := func(m map[key.Key]uint16) bool {
        // Generate a smaller range for refcounts than full uint64, as
        // otherwise this just becomes overly CPU heavy, splitting it
        // out into too many items. That means we need to convert to
        // the right kind of map. As storeMultiset mutates the map as
        // part of its bookkeeping, this is actually good.
        refcounts := copyMap(m)

        ctx := context.Background()
        n, err := storeMultiset(ctx, dag, refcounts, ignoreKeys)
        if err != nil {
            t.Fatalf("storing multiset: %v", err)
        }

        root := &merkledag.Node{}
        const linkName = "dummylink"
        if err := root.AddNodeLink(linkName, n); err != nil {
            t.Fatalf("adding link to root node: %v", err)
        }

        roundtrip, err := loadMultiset(ctx, dag, root, linkName, ignoreKeys)
        if err != nil {
            t.Fatalf("loading multiset: %v", err)
        }

        orig := copyMap(m)
        success := true
        for k, want := range orig {
            if got, ok := roundtrip[k]; ok {
                if got != want {
                    success = false
                    t.Logf("refcount changed: %v -> %v for %q", want, got, k)
                }
                delete(orig, k)
                delete(roundtrip, k)
            }
        }
        for k, v := range orig {
            success = false
            t.Logf("refcount missing: %v for %q", v, k)
        }
        for k, v := range roundtrip {
            success = false
            t.Logf("refcount extra: %v for %q", v, k)
        }
        return success
    }
    if err := quick.Check(fn, nil); err != nil {
        t.Fatal(err)
    }
}