This article collects typical usage examples of the Golang function NewDAGService from github.com/ipfs/go-ipfs/merkledag. If you are wondering what NewDAGService does, how to call it, or want to see it used in real code, the curated examples below should help.
15 code examples of the NewDAGService function are shown below, sorted by popularity by default.
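Before the individual examples, here is a minimal sketch of the construction chain they all share: an in-memory datastore is wrapped in a blockstore, an offline blockservice is built on top of it, and NewDAGService wraps that blockservice. The import paths and aliases are assumptions based on the older go-ipfs tree these examples come from and may need adjusting for your version; note that in some versions blockservice.New also returns an error (as several examples below show).

package main

import (
	// NOTE: assumed import paths for the older go-ipfs layout; adjust to your tree.
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"

	blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	bserv "github.com/ipfs/go-ipfs/blockservice"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
	mdag "github.com/ipfs/go-ipfs/merkledag"
)

// newOfflineDAGService builds a DAGService backed by an in-memory,
// thread-safe datastore and an offline exchange (no network fetches).
func newOfflineDAGService() mdag.DAGService {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)

	// Offline exchange: blocks are served only from the local blockstore.
	blocks := bserv.New(bstore, offline.Exchange(bstore))

	// The DAGService reads and writes merkledag nodes through the blockservice.
	return mdag.NewDAGService(blocks)
}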
Example 1: TestPinRecursiveFail
func TestPinRecursiveFail(t *testing.T) {
	ctx := context.Background()

	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	p := NewPinner(dstore, dserv)

	a, _ := randNode()
	b, _ := randNode()
	err := a.AddNodeLinkClean("child", b)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE: this isn't a time-based test, we expect the pin to fail
	mctx, _ := context.WithTimeout(ctx, time.Millisecond)
	err = p.Pin(mctx, a, true)
	if err == nil {
		t.Fatal("should have failed to pin here")
	}

	_, err = dserv.Add(b)
	if err != nil {
		t.Fatal(err)
	}

	// this one is time-based... but shouldn't cause any issues
	mctx, _ = context.WithTimeout(ctx, time.Second)
	err = p.Pin(mctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
Example 2: TestDuplicateSemantics
func TestDuplicateSemantics(t *testing.T) {
	ctx := context.Background()

	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	// TODO does pinner need to share datastore with blockservice?
	p := NewPinner(dstore, dserv, dserv)

	a, _ := randNode()
	_, err := dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// pin it recursively
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}

	// pinning directly should fail
	err = p.Pin(ctx, a, false)
	if err == nil {
		t.Fatal("expected direct pin to fail")
	}

	// pinning recursively again should succeed
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
Example 3: setupNode
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	var err error
	n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
	if err != nil {
		return err
	}

	if cfg.Online {
		do := setupDiscoveryOption(n.Repo.Config().Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}
	return nil
}
Example 4: getMockDagServ
func getMockDagServ(t testing.TB) mdag.DAGService {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	return mdag.NewDAGService(bserv)
}
Example 5: getMockDagServAndBstore
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore, pin.ManualPinner) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore, pin.NewPinner(tsds, dserv).GetManual()
}
Example 6: getMockDagServAndBstore
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlockstore) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore
}
Example 7: Mock
func Mock(t testing.TB) dag.DAGService {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bserv, err := bsrv.New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Fatal(err)
	}
	return dag.NewDAGService(bserv)
}
Example 8: TestRecurivePathResolution
func TestRecurivePathResolution(t *testing.T) {
	ctx := context.Background()

	dstore := sync.MutexWrap(datastore.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Fatal(err)
	}
	dagService := merkledag.NewDAGService(bserv)

	a, _ := randNode()
	b, _ := randNode()
	c, cKey := randNode()

	err = b.AddNodeLink("grandchild", c)
	if err != nil {
		t.Fatal(err)
	}

	err = a.AddNodeLink("child", b)
	if err != nil {
		t.Fatal(err)
	}

	err = dagService.AddRecursive(a)
	if err != nil {
		t.Fatal(err)
	}

	aKey, err := a.Key()
	if err != nil {
		t.Fatal(err)
	}

	segments := []string{aKey.String(), "child", "grandchild"}
	p, err := path.FromSegments("/ipfs/", segments...)
	if err != nil {
		t.Fatal(err)
	}

	resolver := &path.Resolver{DAG: dagService}
	node, err := resolver.ResolvePath(ctx, p)
	if err != nil {
		t.Fatal(err)
	}

	key, err := node.Key()
	if err != nil {
		t.Fatal(err)
	}
	if key.String() != cKey.String() {
		t.Fatal(fmt.Errorf(
			"recursive path resolution failed for %s: %s != %s",
			p.String(), key.String(), cKey.String()))
	}
}
Example 9: getDagserv
func getDagserv(t *testing.T) merkledag.DAGService {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv, err := bserv.New(bs, offline.Exchange(bs))
	if err != nil {
		t.Fatal(err)
	}
	return merkledag.NewDAGService(blockserv)
}
Example 10: NewMockNode
// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*core.IpfsNode, error) {
	ctx := context.Background()

	// Generate Identity
	ident, err := testutil.RandIdentity()
	if err != nil {
		return nil, err
	}
	p := ident.ID()

	c := config.Config{
		Identity: config.Identity{
			PeerID: p.String(),
		},
	}

	nd, err := core.Offline(&repo.Mock{
		C: c,
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
	})(ctx)
	if err != nil {
		return nil, err
	}

	nd.PrivateKey = ident.PrivateKey()
	nd.Peerstore = peer.NewPeerstore()
	nd.Peerstore.AddPrivKey(p, ident.PrivateKey())
	nd.Peerstore.AddPubKey(p, ident.PublicKey())
	nd.Identity = p

	nd.PeerHost, err = mocknet.New(nd.Context()).AddPeer(ident.PrivateKey(), ident.Address()) // effectively offline
	if err != nil {
		return nil, err
	}

	// Routing
	nd.Routing = offrt.NewOfflineRouter(nd.Repo.Datastore(), nd.PrivateKey)

	// Bitswap
	bstore := blockstore.NewBlockstore(nd.Repo.Datastore())
	bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
	if err != nil {
		return nil, err
	}

	nd.DAG = mdag.NewDAGService(bserv)
	nd.Pinning = pin.NewPinner(nd.Repo.Datastore(), nd.DAG)

	// Namespace resolver
	nd.Namesys = nsys.NewNameSystem(nd.Routing)

	// Path resolver
	nd.Resolver = &path.Resolver{DAG: nd.DAG}

	return nd, nil
}
Example 11: TestMultisetRoundtrip
func TestMultisetRoundtrip(t *testing.T) {
	dstore := dssync.MutexWrap(datastore.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := blockservice.New(bstore, offline.Exchange(bstore))
	dag := merkledag.NewDAGService(bserv)

	fn := func(m map[key.Key]uint16) bool {
		// Generate a smaller range for refcounts than full uint64, as
		// otherwise this just becomes overly cpu heavy, splitting it
		// out into too many items. That means we need to convert to
		// the right kind of map. As storeMultiset mutates the map as
		// part of its bookkeeping, this is actually good.
		refcounts := copyMap(m)

		ctx := context.Background()
		n, err := storeMultiset(ctx, dag, refcounts, ignoreKeys)
		if err != nil {
			t.Fatalf("storing multiset: %v", err)
		}

		root := &merkledag.Node{}
		const linkName = "dummylink"
		if err := root.AddNodeLink(linkName, n); err != nil {
			t.Fatalf("adding link to root node: %v", err)
		}

		roundtrip, err := loadMultiset(ctx, dag, root, linkName, ignoreKeys)
		if err != nil {
			t.Fatalf("loading multiset: %v", err)
		}

		orig := copyMap(m)
		success := true
		for k, want := range orig {
			if got, ok := roundtrip[k]; ok {
				if got != want {
					success = false
					t.Logf("refcount changed: %v -> %v for %q", want, got, k)
				}
				delete(orig, k)
				delete(roundtrip, k)
			}
		}
		for k, v := range orig {
			success = false
			t.Logf("refcount missing: %v for %q", v, k)
		}
		for k, v := range roundtrip {
			success = false
			t.Logf("refcount extra: %v for %q", v, k)
		}
		return success
	}
	if err := quick.Check(fn, nil); err != nil {
		t.Fatal(err)
	}
}
Example 12: NewIPFSNode
func NewIPFSNode(ctx context.Context, option ConfigOption) (*IpfsNode, error) {
	node, err := option(ctx)
	if err != nil {
		return nil, err
	}

	if node.ctx == nil {
		node.ctx = ctx
	}
	if node.proc == nil {
		node.proc = goprocessctx.WithContextAndTeardown(node.ctx, node.teardown)
	}

	success := false // flip to true after all sub-system inits succeed
	defer func() {
		if !success {
			node.proc.Close()
		}
	}()

	// Need to make sure it's perfectly clear 1) which variables are expected
	// to be initialized at this point, and 2) which variables will be
	// initialized after this point.

	node.Blocks, err = bserv.New(node.Blockstore, node.Exchange)
	if err != nil {
		return nil, err
	}

	if node.Peerstore == nil {
		node.Peerstore = peer.NewPeerstore()
	}
	node.DAG = merkledag.NewDAGService(node.Blocks)
	node.Pinning, err = pin.LoadPinner(node.Repo.Datastore(), node.DAG)
	if err != nil {
		node.Pinning = pin.NewPinner(node.Repo.Datastore(), node.DAG)
	}
	node.Resolver = &path.Resolver{DAG: node.DAG}

	// Setup the mutable ipns filesystem structure
	if node.OnlineMode() {
		fs, err := ipnsfs.NewFilesystem(ctx, node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
		if err != nil && err != kb.ErrLookupFailure {
			return nil, err
		}
		node.IpnsFs = fs
	}

	success = true
	return node, nil
}
Example 13: testAddWPosInfo
func testAddWPosInfo(t *testing.T, rawLeaves bool) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}

	bs := &testBlockstore{GCBlockstore: node.Blockstore, expectedPath: "/tmp/foo.txt", t: t}
	bserv := blockservice.New(bs, node.Exchange)
	dserv := dag.NewDAGService(bserv)
	adder, err := NewAdder(context.Background(), node.Pinning, bs, dserv)
	if err != nil {
		t.Fatal(err)
	}
	adder.Out = make(chan interface{})
	adder.Progress = true
	adder.RawLeaves = rawLeaves

	data := make([]byte, 5*1024*1024)
	rand.New(rand.NewSource(2)).Read(data) // Rand.Read never returns an error

	fileData := ioutil.NopCloser(bytes.NewBuffer(data))
	fileInfo := dummyFileInfo{"foo.txt", int64(len(data)), time.Now()}
	file := files.NewReaderFile("foo.txt", "/tmp/foo.txt", fileData, &fileInfo)

	go func() {
		defer close(adder.Out)
		err = adder.AddFile(file)
		if err != nil {
			t.Fatal(err)
		}
	}()
	for range adder.Out {
	}

	if bs.countAtOffsetZero != 2 {
		t.Fatal("expected 2 blocks with an offset at zero (one root and one leaf), got", bs.countAtOffsetZero)
	}
	if bs.countAtOffsetNonZero != 19 {
		// note: the exact number will depend on the size and the sharding algo. used
		t.Fatal("expected 19 blocks with an offset > 0, got", bs.countAtOffsetNonZero)
	}
}
Example 14: TestFlush
func TestFlush(t *testing.T) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)
	p := NewPinner(dstore, dserv, dserv)
	_, k := randNode()

	p.PinWithMode(k, Recursive)
	if err := p.Flush(); err != nil {
		t.Fatal(err)
	}
	assertPinned(t, p, k, "expected key to still be pinned")
}
Example 15: GC
// GC performs a mark and sweep garbage collection of the blocks in the blockstore.
// First, it creates a 'marked' set and adds to it the following:
// - all recursively pinned blocks, plus all of their descendants (recursively)
// - bestEffortRoots, plus all of their descendants (recursively)
// - all directly pinned blocks
// - all blocks utilized internally by the pinner
//
// The routine then iterates over every block in the blockstore and
// deletes any block that is not found in the marked set.
func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner, bestEffortRoots []key.Key) (<-chan key.Key, error) {
	unlocker := bs.GCLock()

	bsrv := bserv.New(bs, offline.Exchange(bs))
	ds := dag.NewDAGService(bsrv)

	gcs, err := ColoredSet(ctx, pn, ds, bestEffortRoots)
	if err != nil {
		return nil, err
	}

	keychan, err := bs.AllKeysChan(ctx)
	if err != nil {
		return nil, err
	}

	output := make(chan key.Key)
	go func() {
		defer close(output)
		defer unlocker.Unlock()
		for {
			select {
			case k, ok := <-keychan:
				if !ok {
					return
				}
				if !gcs.Has(k) {
					err := bs.DeleteBlock(k)
					if err != nil {
						log.Debugf("Error removing key from blockstore: %s", err)
						return
					}
					select {
					case output <- k:
					case <-ctx.Done():
						return
					}
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return output, nil
}