This article collects typical usage examples of the Go Background function from github.com/heems/bssim/Godeps/_workspace/src/golang.org/x/net/context. If you are wondering what the Background function does, how to call it, or what real usage looks like, the curated code examples below may help.
The section below presents 15 code examples of the Background function, sorted by popularity by default.
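Every example below follows the same pattern: start from context.Background() as the root context and derive cancellable or deadline-bound child contexts from it. As a quick orientation, here is a minimal, self-contained sketch of that pattern. It assumes the standard library "context" package rather than the vendored golang.org/x/net/context path used in the examples, but the Background / WithTimeout / WithCancel API is the same.
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Background returns an empty root context: never cancelled, no deadline, no values.
	root := context.Background()

	// Derive a child context that is cancelled automatically after 50 milliseconds.
	ctx, cancel := context.WithTimeout(root, 50*time.Millisecond)
	defer cancel() // always call cancel to release the timer

	select {
	case <-time.After(time.Second):
		fmt.Println("work finished")
	case <-ctx.Done():
		// ctx.Err() reports why the context ended (context.DeadlineExceeded here).
		fmt.Println("gave up:", ctx.Err())
	}
}
The same Background-then-derive idiom appears throughout the tests below, usually paired with defer cancel() so that goroutines waiting on ctx.Done() are released when the test ends.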
Example 1: TestGetBlocks
func TestGetBlocks(t *testing.T) {
store := bstore()
ex := Exchange(store)
g := blocksutil.NewBlockGenerator()
expected := g.Blocks(2)
for _, b := range expected {
if err := ex.HasBlock(context.Background(), b); err != nil {
t.Fail()
}
}
request := func() []key.Key {
var ks []key.Key
for _, b := range expected {
ks = append(ks, b.Key())
}
return ks
}()
received, err := ex.GetBlocks(context.Background(), request)
if err != nil {
t.Fatal(err)
}
var count int
for _ = range received {
count++
}
if len(expected) != count {
t.Fail()
}
}
Example 2: TestClientOverMax
func TestClientOverMax(t *testing.T) {
rs := NewServer()
k := key.Key("hello")
numProvidersForHelloKey := 100
for i := 0; i < numProvidersForHelloKey; i++ {
pi := testutil.RandIdentityOrFatal(t)
err := rs.Client(pi).Provide(context.Background(), k)
if err != nil {
t.Fatal(err)
}
}
max := 10
pi := testutil.RandIdentityOrFatal(t)
client := rs.Client(pi)
providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
i := 0
for _ = range providersFromClient {
i++
}
if i != max {
t.Fatal("Too many providers returned")
}
}
Example 3: TestClientFindProviders
func TestClientFindProviders(t *testing.T) {
pi := testutil.RandIdentityOrFatal(t)
rs := NewServer()
client := rs.Client(pi)
k := key.Key("hello")
err := client.Provide(context.Background(), k)
if err != nil {
t.Fatal(err)
}
// This is bad... but simulating networks is hard
time.Sleep(time.Millisecond * 300)
max := 100
providersFromClient := client.FindProvidersAsync(context.Background(), key.Key("hello"), max)
isInClient := false
// check that the providing peer shows up in the async results
for p := range providersFromClient {
if p.ID == pi.ID {
isInClient = true
}
}
if !isInClient {
t.Fatal("Despite client providing key, client didn't receive peer when finding providers")
}
}
Example 4: TestGetBlockFromPeerAfterPeerAnnounces
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
block := blocks.NewBlock([]byte("block"))
g := NewTestSessionGenerator(net)
defer g.Close()
peers := g.Instances(2)
hasBlock := peers[0]
defer hasBlock.Exchange.Close()
if err := hasBlock.Exchange.HasBlock(context.Background(), block); err != nil {
t.Fatal(err)
}
wantsBlock := peers[1]
defer wantsBlock.Exchange.Close()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
if err != nil {
t.Log(err)
t.Fatal("Expected to succeed")
}
if !bytes.Equal(block.Data, received.Data) {
t.Fatal("Data doesn't match")
}
}
Example 5: TestCanceledContext
// TODO does dht ensure won't receive self as a provider? probably not.
func TestCanceledContext(t *testing.T) {
rs := NewServer()
k := key.Key("hello")
// avoid leaking goroutine, without using the context to signal
// (we want the goroutine to keep trying to publish on a
// cancelled context until we've tested that it doesn't do anything.)
done := make(chan struct{})
defer func() { done <- struct{}{} }()
t.Log("async'ly announce infinite stream of providers for key")
i := 0
go func() { // infinite stream
for {
select {
case <-done:
t.Log("exiting async worker")
return
default:
}
pi, err := testutil.RandIdentity()
if err != nil {
t.Error(err)
}
err = rs.Client(pi).Provide(context.Background(), k)
if err != nil {
t.Error(err)
}
i++
}
}()
local := testutil.RandIdentityOrFatal(t)
client := rs.Client(local)
t.Log("warning: max is finite so this test is non-deterministic")
t.Log("context cancellation could simply take lower priority")
t.Log("and result in receiving the max number of results")
max := 1000
t.Log("cancel the context before consuming")
ctx, cancelFunc := context.WithCancel(context.Background())
cancelFunc()
providers := client.FindProvidersAsync(ctx, k, max)
numProvidersReturned := 0
for _ = range providers {
numProvidersReturned++
}
t.Log(numProvidersReturned)
if numProvidersReturned == max {
t.Fatal("Context cancel had no effect")
}
}
Example 6: ExampleEventLogger
func ExampleEventLogger() {
{
log := EventLogger(nil)
e := log.EventBegin(context.Background(), "dial")
e.Done()
}
{
log := EventLogger(nil)
e := log.EventBegin(context.Background(), "dial")
_ = e.Close() // implements io.Closer for convenience
}
}
Example 7: getFileCmd
func getFileCmd(nodes []int, file string) error {
file = normalizePath(file)
blocks, ok := files[file]
if !ok {
return fmt.Errorf("tried to get file '%s', which has not been added", file)
}
var wg sync.WaitGroup
// Get blocks and then Has them
for _, node := range nodes {
// remove blocks peer already has or nah?
// I'm assuming that peers with the first block of the file have the whole file,
// which i think is ok for the simulation, but i might have to change this later
alreadyhas, err := peers[node].Blockstore().Has(files[file][0])
check(err)
if alreadyhas {
continue
}
wg.Add(1)
go func(i int) {
timer := recorder.NewTimer()
ctx, cancel := context.WithTimeout(context.Background(), deadline)
defer cancel()
received, _ := peers[i].Exchange.GetBlocks(ctx, blocks)
for j := 0; j < len(blocks); j++ {
blockTimer := recorder.NewTimer()
x := <-received
if x == nil {
wg.Done()
return
}
recorder.EndBlockTime(blockTimer, peers[i].Peer.Pretty())
fmt.Println(i, x, j)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
err := peers[i].Exchange.HasBlock(ctx, x)
if err != nil {
fmt.Println("error when adding block", i, err)
}
cancel()
}
recorder.EndFileTime(timer, peers[i].Peer.Pretty(), file)
// peers[i].Exchange.Close()
wg.Done()
}(node)
}
wg.Wait()
testGet(nodes, file)
return nil
}
Example 8: createTestNetwork
// Creates test network using delays in config
// Returns a fully connected mocknet and an array of the instances in the network
func createTestNetwork() (mocknet.Mocknet, []bs.Instance) {
vv := convertTimeField("visibility_delay")
q := convertTimeField("query_delay")
//md := convertTimeField("message_delay")
delayCfg := mockrouting.DelayConfig{ValueVisibility: vv, Query: q}
n, err := strconv.Atoi(config["node_count"])
check(err)
mn := mocknet.New(context.Background())
snet, err := tn.StreamNet(context.Background(), mn, mockrouting.NewServerWithDelay(delayCfg))
check(err)
instances := genInstances(n, &mn, &snet)
return mn, instances
}
Example 9: TestDoesNotDeadLockIfContextCancelledBeforePublish
func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {
g := blocksutil.NewBlockGenerator()
ctx, cancel := context.WithCancel(context.Background())
n := New()
defer n.Shutdown()
t.Log("generate a large number of blocks. exceed default buffer")
bs := g.Blocks(1000)
ks := func() []key.Key {
var keys []key.Key
for _, b := range bs {
keys = append(keys, b.Key())
}
return keys
}()
_ = n.Subscribe(ctx, ks...) // ignore received channel
t.Log("cancel context before any blocks published")
cancel()
for _, b := range bs {
n.Publish(b)
}
t.Log("publishing the large number of blocks to the ignored channel must not deadlock")
}
Example 10: TestFindPeer
func TestFindPeer(t *testing.T) {
// t.Skip("skipping test to debug another")
if testing.Short() {
t.SkipNow()
}
ctx := context.Background()
_, peers, dhts := setupDHTS(ctx, 4, t)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
dhts[i].host.Close()
}
}()
connect(t, ctx, dhts[0], dhts[1])
connect(t, ctx, dhts[1], dhts[2])
connect(t, ctx, dhts[1], dhts[3])
ctxT, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
p, err := dhts[0].FindPeer(ctxT, peers[2])
if err != nil {
t.Fatal(err)
}
if p.ID == "" {
t.Fatal("Failed to find peer.")
}
if p.ID != peers[2] {
t.Fatal("Didnt find expected peer.")
}
}
Example 11: TestDeadlineFractionCancel
func TestDeadlineFractionCancel(t *testing.T) {
ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond)
ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5)
select {
case <-ctx1.Done():
t.Fatal("ctx1 ended too early")
case <-ctx2.Done():
t.Fatal("ctx2 ended too early")
default:
}
cancel2()
select {
case <-ctx1.Done():
t.Fatal("ctx1 should NOT be cancelled")
case <-ctx2.Done():
default:
t.Fatal("ctx2 should be cancelled")
}
cancel1()
select {
case <-ctx1.Done():
case <-ctx2.Done():
default:
t.Fatal("ctx1 should be cancelled")
}
}
Example 12: TestDialBadAddrs
func TestDialBadAddrs(t *testing.T) {
m := func(s string) ma.Multiaddr {
maddr, err := ma.NewMultiaddr(s)
if err != nil {
t.Fatal(err)
}
return maddr
}
ctx := context.Background()
s := makeSwarms(ctx, t, 1)[0]
test := func(a ma.Multiaddr) {
p := testutil.RandPeerIDFatal(t)
s.peers.AddAddr(p, a, peer.PermanentAddrTTL)
if _, err := s.Dial(ctx, p); err == nil {
t.Error("swarm should not dial: %s", m)
}
}
test(m("/ip6/fe80::1")) // link local
test(m("/ip6/fe80::100")) // link local
test(m("/ip4/127.0.0.1/udp/1234/utp")) // utp
}
Example 13: TestSimultOpen
func TestSimultOpen(t *testing.T) {
// t.Skip("skipping for another test")
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
// connect everyone
{
var wg sync.WaitGroup
connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
// copy for other peer
log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.local, dst, addr)
s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
if _, err := s.Dial(ctx, dst); err != nil {
t.Fatal("error swarm dialing to peer", err)
}
wg.Done()
}
log.Info("Connecting swarms simultaneously.")
wg.Add(2)
go connect(swarms[0], swarms[1].local, swarms[1].ListenAddresses()[0])
go connect(swarms[1], swarms[0].local, swarms[0].ListenAddresses()[0])
wg.Wait()
}
for _, s := range swarms {
s.Close()
}
}
Example 14: TestConsistentAccounting
func TestConsistentAccounting(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sender := newEngine(ctx, "Ernie")
receiver := newEngine(ctx, "Bert")
// Send messages from Ernie to Bert
for i := 0; i < 1000; i++ {
m := message.New(false)
content := []string{"this", "is", "message", "i"}
m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " "))))
sender.Engine.MessageSent(receiver.Peer, m)
receiver.Engine.MessageReceived(sender.Peer, m)
}
// Ensure sender records the change
if sender.Engine.numBytesSentTo(receiver.Peer) == 0 {
t.Fatal("Sent bytes were not recorded")
}
// Ensure sender and receiver have the same values
if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) {
t.Fatal("Inconsistent book-keeping. Strategies don't agree")
}
// Ensure sender didn't record receiving anything, and that the receiver
// didn't record sending anything
if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 {
t.Fatal("Bert didn't send bytes to Ernie")
}
}
Example 15: TestValidAfter
func TestValidAfter(t *testing.T) {
pi := testutil.RandIdentityOrFatal(t)
var key = key.Key("mock key")
var ctx = context.Background()
conf := DelayConfig{
ValueVisibility: delay.Fixed(1 * time.Hour),
Query: delay.Fixed(0),
}
rs := NewServerWithDelay(conf)
rs.Client(pi).Provide(ctx, key)
var providers []peer.PeerInfo
providers, err := rs.Client(pi).FindProviders(ctx, key)
if err != nil {
t.Fatal(err)
}
if len(providers) > 0 {
t.Fail()
}
conf.ValueVisibility.Set(0)
providers, err = rs.Client(pi).FindProviders(ctx, key)
if err != nil {
t.Fatal(err)
}
t.Log("providers", providers)
if len(providers) != 1 {
t.Fail()
}
}