This page collects typical usage examples of the Golang function github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync.MutexWrap. If you are wondering what exactly MutexWrap does, how to call it, or what real-world usage looks like, the curated code examples below should help.
The following shows 15 code examples of the MutexWrap function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
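Before the examples, here is a minimal sketch of the pattern they all share: MutexWrap takes any datastore and returns a thread-safe wrapper that can then be handed to other components (for example a blockstore). The import aliases (ds, dssync) are assumptions chosen to match the examples below, and the exact Put/Get signatures depend on the go-datastore version vendored in your tree, so only the wrapping step is shown.

package main

import (
	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
)

func main() {
	// MutexWrap guards every datastore operation with a mutex, so the
	// in-memory MapDatastore can safely be shared across goroutines.
	d := dssync.MutexWrap(ds.NewMapDatastore())

	// The wrapper still satisfies the datastore interface, so it can be
	// passed anywhere a plain datastore is expected, e.g.
	// blockstore.NewBlockstore(d) as in the examples below.
	_ = d
}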
Example 1: BenchmarkHandle10KBlocks
func BenchmarkHandle10KBlocks(b *testing.B) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	var testdata []*blocks.Block
	for i := 0; i < 10000; i++ {
		testdata = append(testdata, blocks.NewBlock([]byte(string(i))))
	}
	b.ResetTimer()
	b.SetBytes(10000)
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		w := NewWorker(offline.Exchange(bstore), Config{
			NumWorkers:       1,
			ClientBufferSize: 0,
			WorkerBufferSize: 0,
		})
		b.StartTimer()

		for _, block := range testdata {
			if err := w.HasBlock(block); err != nil {
				b.Fatal(err)
			}
		}

		b.StopTimer()
		w.Close()
		b.StartTimer()
	}
}
Example 2: fillDefaults
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}
Example 3: MockCmdsCtx
func MockCmdsCtx() (commands.Context, error) {
	// Generate Identity
	ident, err := testutil.RandIdentity()
	if err != nil {
		return commands.Context{}, err
	}
	p := ident.ID()

	conf := config.Config{
		Identity: config.Identity{
			PeerID: p.String(),
		},
	}

	node, err := core.NewIPFSNode(context.Background(), core.Offline(&repo.Mock{
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
		C: conf,
	}))
	if err != nil {
		return commands.Context{}, err
	}

	return commands.Context{
		Online:     true,
		ConfigRoot: "/tmp/.mockipfsconfig",
		LoadConfig: func(path string) (*config.Config, error) {
			return &conf, nil
		},
		ConstructNode: func() (*core.IpfsNode, error) {
			return node, nil
		},
	}, nil
}
Example 4: TestDuplicateSemantics
func TestDuplicateSemantics(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)

	// TODO does pinner need to share datastore with blockservice?
	p := NewPinner(dstore, dserv)

	a, _ := randNode()
	_, err := dserv.Add(a)
	if err != nil {
		t.Fatal(err)
	}

	// pin recursively
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}

	// pinning directly should fail
	err = p.Pin(ctx, a, false)
	if err == nil {
		t.Fatal("expected direct pin to fail")
	}

	// pinning recursively again should succeed
	err = p.Pin(ctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
Example 5: MocknetTestRepo
func MocknetTestRepo(p peer.ID, h host.Host, conf testutil.LatencyConfig, routing core.RoutingOption) core.ConfigOption {
	return func(ctx context.Context) (*core.IpfsNode, error) {
		const kWriteCacheElems = 100
		const alwaysSendToPeer = true
		dsDelay := delay.Fixed(conf.BlockstoreLatency)
		r := &repo.Mock{
			D: ds2.CloserWrap(syncds.MutexWrap(ds2.WithDelay(datastore.NewMapDatastore(), dsDelay))),
		}
		ds := r.Datastore()

		n := &core.IpfsNode{
			Peerstore: h.Peerstore(),
			Repo:      r,
			PeerHost:  h,
			Identity:  p,
		}
		dhtt, err := routing(ctx, n.PeerHost, n.Repo.Datastore())
		if err != nil {
			return nil, err
		}

		bsn := bsnet.NewFromIpfsHost(h, dhtt)
		bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds), kWriteCacheElems)
		if err != nil {
			return nil, err
		}
		exch := bitswap.New(ctx, p, bsn, bstore, alwaysSendToPeer)
		n.Blockstore = bstore
		n.Exchange = exch
		n.Routing = dhtt
		return n, nil
	}
}
Example 6: runFileCattingWorker
func runFileCattingWorker(ctx context.Context, n *core.IpfsNode) error {
	conf, err := config.Init(ioutil.Discard, *nBitsForKeypair)
	if err != nil {
		return err
	}

	r := &repo.Mock{
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
		C: *conf,
	}
	dummy, err := core.NewNode(ctx, &core.BuildCfg{
		Repo: r,
	})
	if err != nil {
		return err
	}

	errs := make(chan error)

	go func() {
		defer dummy.Close()
		var i int64 = 1
		for {
			buf := new(bytes.Buffer)
			if err := random.WritePseudoRandomBytes(sizeOfIthFile(i), buf, *seed); err != nil {
				errs <- err
			}
			// add to a dummy node to discover the key
			k, err := coreunix.Add(dummy, bytes.NewReader(buf.Bytes()))
			if err != nil {
				errs <- err
			}
			e := elog.EventBegin(ctx, "cat", logging.LoggableF(func() map[string]interface{} {
				return map[string]interface{}{
					"key":       k,
					"localPeer": n.Identity,
				}
			}))
			if r, err := coreunix.Cat(ctx, n, k); err != nil {
				e.Done()
				log.Printf("failed to cat file. seed: %d #%d key: %s err: %s", *seed, i, k, err)
			} else {
				log.Println("found file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				io.Copy(ioutil.Discard, r)
				e.Done()
				log.Println("catted file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				i++
			}
			time.Sleep(time.Second)
		}
	}()

	err = <-errs
	if err != nil {
		log.Fatal(err)
	}
	return nil
}
Example 7: InitializeSupernodeNetwork
func InitializeSupernodeNetwork(
	ctx context.Context,
	numServers, numClients int,
	conf testutil.LatencyConfig) ([]*core.IpfsNode, []*core.IpfsNode, error) {

	// create network
	mn, err := mocknet.FullMeshLinked(ctx, numServers+numClients)
	if err != nil {
		return nil, nil, err
	}

	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency:   conf.NetworkLatency,
		Bandwidth: math.MaxInt32,
	})

	peers := mn.Peers()
	if len(peers) < numServers+numClients {
		return nil, nil, errors.New("test initialization error")
	}
	clientPeers, serverPeers := peers[0:numClients], peers[numClients:]

	routingDatastore := ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore()))
	var servers []*core.IpfsNode
	for i := range iter.N(numServers) {
		p := serverPeers[i]
		bootstrap, err := core.NewIPFSNode(ctx, MocknetTestRepo(p, mn.Host(p), conf,
			corerouting.SupernodeServer(routingDatastore)))
		if err != nil {
			return nil, nil, err
		}
		servers = append(servers, bootstrap)
	}

	var bootstrapInfos []peer.PeerInfo
	for _, n := range servers {
		info := n.Peerstore.PeerInfo(n.PeerHost.ID())
		bootstrapInfos = append(bootstrapInfos, info)
	}

	var clients []*core.IpfsNode
	for i := range iter.N(numClients) {
		p := clientPeers[i]
		n, err := core.NewIPFSNode(ctx, MocknetTestRepo(p, mn.Host(p), conf,
			corerouting.SupernodeClient(bootstrapInfos...)))
		if err != nil {
			return nil, nil, err
		}
		clients = append(clients, n)
	}

	bcfg := core.BootstrapConfigWithPeers(bootstrapInfos)
	for _, n := range clients {
		if err := n.Bootstrap(bcfg); err != nil {
			return nil, nil, err
		}
	}
	return servers, clients, nil
}
Example 8: newEngine
func newEngine(ctx context.Context, idStr string) peerAndEngine {
	return peerAndEngine{
		Peer: peer.ID(idStr),
		//Strategy: New(true),
		Engine: NewEngine(ctx,
			blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))),
	}
}
Example 9: TestReturnsErrorWhenSizeNegative
func TestReturnsErrorWhenSizeNegative(t *testing.T) {
	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
	_, err := WriteCached(bs, -1)
	if err != nil {
		return
	}
	t.Fail()
}
Example 10: Mock
func Mock(t testing.TB) dag.DAGService {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bserv, err := bsrv.New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Fatal(err)
	}
	return dag.NewDAGService(bserv)
}
Example 11: getMockDagServAndBstore
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore, pin.ManualPinner) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore, pin.NewPinner(tsds, dserv).GetManual()
}
Example 12: NewPeerstore
// NewPeerstore creates a threadsafe collection of peers.
func NewPeerstore() Peerstore {
	return &peerstore{
		keybook:     *newKeybook(),
		metrics:     *(NewMetrics()).(*metrics),
		AddrManager: AddrManager{},
		ds:          dssync.MutexWrap(ds.NewMapDatastore()),
	}
}
Example 13: getDagserv
func getDagserv(t *testing.T) merkledag.DAGService {
	db := dssync.MutexWrap(ds.NewMapDatastore())
	bs := bstore.NewBlockstore(db)
	blockserv, err := bserv.New(bs, offline.Exchange(bs))
	if err != nil {
		t.Fatal(err)
	}
	return merkledag.NewDAGService(blockserv)
}
Example 14: NewMockNode
// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*core.IpfsNode, error) {
	ctx := context.Background()

	// Generate Identity
	ident, err := testutil.RandIdentity()
	if err != nil {
		return nil, err
	}
	p := ident.ID()

	c := config.Config{
		Identity: config.Identity{
			PeerID: p.String(),
		},
	}

	nd, err := core.Offline(&repo.Mock{
		C: c,
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
	})(ctx)
	if err != nil {
		return nil, err
	}

	nd.PrivateKey = ident.PrivateKey()
	nd.Peerstore = peer.NewPeerstore()
	nd.Peerstore.AddPrivKey(p, ident.PrivateKey())
	nd.Peerstore.AddPubKey(p, ident.PublicKey())
	nd.Identity = p

	nd.PeerHost, err = mocknet.New(nd.Context()).AddPeer(ident.PrivateKey(), ident.Address()) // effectively offline
	if err != nil {
		return nil, err
	}

	// Routing
	nd.Routing = offrt.NewOfflineRouter(nd.Repo.Datastore(), nd.PrivateKey)

	// Bitswap
	bstore := blockstore.NewBlockstore(nd.Repo.Datastore())
	bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
	if err != nil {
		return nil, err
	}

	nd.DAG = mdag.NewDAGService(bserv)

	nd.Pinning = pin.NewPinner(nd.Repo.Datastore(), nd.DAG)

	// Namespace resolver
	nd.Namesys = nsys.NewNameSystem(nd.Routing)

	// Path resolver
	nd.Resolver = &path.Resolver{DAG: nd.DAG}

	return nd, nil
}
Example 15: TestRecurivePathResolution
func TestRecurivePathResolution(t *testing.T) {
	ctx := context.Background()

	dstore := sync.MutexWrap(datastore.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Fatal(err)
	}
	dagService := merkledag.NewDAGService(bserv)

	a, _ := randNode()
	b, _ := randNode()
	c, cKey := randNode()

	err = b.AddNodeLink("grandchild", c)
	if err != nil {
		t.Fatal(err)
	}

	err = a.AddNodeLink("child", b)
	if err != nil {
		t.Fatal(err)
	}

	err = dagService.AddRecursive(a)
	if err != nil {
		t.Fatal(err)
	}

	aKey, err := a.Key()
	if err != nil {
		t.Fatal(err)
	}

	segments := []string{aKey.String(), "child", "grandchild"}
	p, err := path.FromSegments("/ipfs/", segments...)
	if err != nil {
		t.Fatal(err)
	}

	resolver := &path.Resolver{DAG: dagService}
	node, err := resolver.ResolvePath(ctx, p)
	if err != nil {
		t.Fatal(err)
	}

	key, err := node.Key()
	if err != nil {
		t.Fatal(err)
	}
	if key.String() != cKey.String() {
		t.Fatal(fmt.Errorf(
			"recursive path resolution failed for %s: %s != %s",
			p.String(), key.String(), cKey.String()))
	}
}