This article collects typical usage examples of the Golang function github.com/uber/tchannel-go/testutils.NewOpts. If you have been wondering what NewOpts does, how to call it, or what real-world uses look like, the curated code samples here should help.
15 code examples of the NewOpts function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Golang code samples.
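
Before the examples, it may help to see the pattern they all share: testutils.NewOpts() returns an options builder whose Set* methods chain, and the finished options are passed to helpers such as testutils.NewServer, testutils.NewClient, or testutils.WithTestServer. The following is a minimal sketch of that pattern, not taken from the library's own tests; it assumes tchannel-go and testify are available on the import path, and the test name and the "hello-service" service name are illustrative only.

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/uber/tchannel-go"
	"github.com/uber/tchannel-go/testutils"
)

// TestNewOptsSketch shows the typical builder pattern: chain setters on
// testutils.NewOpts() and hand the result to testutils.NewServer / NewClient.
func TestNewOptsSketch(t *testing.T) {
	// "hello-service" is an illustrative service name, not one used by the library.
	opts := testutils.NewOpts().
		SetServiceName("hello-service").
		SetFramePool(tchannel.NewSyncFramePool())

	server := testutils.NewServer(t, opts)
	defer server.Close()

	// Passing nil options gives a client channel with default settings.
	client := testutils.NewClient(t, nil)
	defer client.Close()

	ctx, cancel := tchannel.NewContext(time.Second)
	defer cancel()
	require.NoError(t, client.Ping(ctx, server.PeerInfo().HostPort), "Ping failed")
}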
Example 1: TestRelayConnection
func TestRelayConnection(t *testing.T) {
	var errTest = errors.New("test")
	var wantHostPort string
	getHost := func(call relay.CallFrame, conn relay.Conn) (relay.Peer, error) {
		matches := conn.RemoteProcessPrefixMatches()
		assert.Equal(t, []bool{true, true, true, false}, matches, "Unexpected prefix matches.")
		assert.Equal(t, wantHostPort, conn.RemoteHostPort(), "Unexpected RemoteHostPort")
		return relay.Peer{}, errTest
	}

	// Note: we cannot use WithTestServer since we override the RelayHosts.
	opts := testutils.NewOpts().
		SetServiceName("relay").
		SetRelayHosts(hostsFunc(getHost)).
		SetProcessPrefixes("nod", "nodejs-hyperbahn", "", "hyperbahn")
	relay := testutils.NewServer(t, opts)
	defer relay.Close()

	// Create a client that is listening so we can set the expected host:port.
	clientOpts := testutils.NewOpts().SetProcessName("nodejs-hyperbahn")
	client := testutils.NewServer(t, clientOpts)
	wantHostPort = client.PeerInfo().HostPort
	defer client.Close()

	err := testutils.CallEcho(client, relay.PeerInfo().HostPort, relay.ServiceName(), nil)
	require.Error(t, err, "Expected CallEcho to fail")
	assert.Contains(t, err.Error(), errTest.Error(), "Unexpected error")
}
Example 2: TestStatsCalls
func TestStatsCalls(t *testing.T) {
	defer testutils.SetTimeout(t, time.Second)()

	initialTime := time.Date(2015, 2, 1, 10, 10, 0, 0, time.UTC)
	clientNow, clientNowFn := testutils.NowStub(initialTime)
	serverNow, serverNowFn := testutils.NowStub(initialTime)
	clientNowFn(100 * time.Millisecond)
	serverNowFn(50 * time.Millisecond)

	clientStats := newRecordingStatsReporter()
	serverStats := newRecordingStatsReporter()
	serverOpts := testutils.NewOpts().
		SetStatsReporter(serverStats).
		SetTimeNow(serverNow)
	WithVerifiedServer(t, serverOpts, func(serverCh *Channel, hostPort string) {
		handler := raw.Wrap(newTestHandler(t))
		serverCh.Register(handler, "echo")
		serverCh.Register(handler, "app-error")

		ch := testutils.NewClient(t, testutils.NewOpts().
			SetStatsReporter(clientStats).
			SetTimeNow(clientNow))
		defer ch.Close()

		ctx, cancel := NewContext(time.Second * 5)
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", []byte("Headers"), []byte("Body"))
		require.NoError(t, err)

		outboundTags := tagsForOutboundCall(serverCh, ch, "echo")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.success", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags := tagsForInboundCall(serverCh, ch, "echo")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.success", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)

		_, _, resp, err := raw.Call(ctx, ch, hostPort, testServiceName, "app-error", nil, nil)
		require.NoError(t, err)
		require.True(t, resp.ApplicationError(), "expected application error")

		outboundTags = tagsForOutboundCall(serverCh, ch, "app-error")
		clientStats.Expected.IncCounter("outbound.calls.send", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.per-attempt.app-errors", outboundTags, 1)
		clientStats.Expected.IncCounter("outbound.calls.app-errors", outboundTags, 1)
		clientStats.Expected.RecordTimer("outbound.calls.per-attempt.latency", outboundTags, 100*time.Millisecond)
		clientStats.Expected.RecordTimer("outbound.calls.latency", outboundTags, 100*time.Millisecond)
		inboundTags = tagsForInboundCall(serverCh, ch, "app-error")
		serverStats.Expected.IncCounter("inbound.calls.recvd", inboundTags, 1)
		serverStats.Expected.IncCounter("inbound.calls.app-errors", inboundTags, 1)
		serverStats.Expected.RecordTimer("inbound.calls.latency", inboundTags, 50*time.Millisecond)
	})

	clientStats.Validate(t)
	serverStats.Validate(t)
}
Example 3: BenchmarkBothSerial
func BenchmarkBothSerial(b *testing.B) {
	serverAddr, err := setupBenchServer()
	require.NoError(b, err, "setupBenchServer failed")

	opts := testutils.NewOpts().SetFramePool(tchannel.NewSyncFramePool())
	clientCh := testutils.NewClient(b, opts)
	for _, addr := range serverAddr {
		clientCh.Peers().Add(addr)
	}

	thriftClient := thrift.NewClient(clientCh, "bench-server", nil)
	client := gen.NewTChanSecondServiceClient(thriftClient)
	ctx, cancel := thrift.NewContext(10 * time.Millisecond)
	client.Echo(ctx, "make connection")
	cancel()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ctx, cancel := thrift.NewContext(10 * time.Millisecond)
		defer cancel()

		_, err := client.Echo(ctx, "hello world")
		if err != nil {
			b.Errorf("Echo failed: %v", err)
		}
	}
}
Example 4: TestRequestSubChannel
func TestRequestSubChannel(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	tchan := testutils.NewServer(t, testutils.NewOpts().SetServiceName("svc1"))
	defer tchan.Close()

	clientCh := testutils.NewClient(t, nil)
	defer clientCh.Close()
	clientCh.Peers().Add(tchan.PeerInfo().HostPort)

	tests := []tchannel.Registrar{tchan, tchan.GetSubChannel("svc2"), tchan.GetSubChannel("svc3")}
	for _, ch := range tests {
		mockHandler := new(mocks.TChanSecondService)
		server := NewServer(ch)
		server.Register(gen.NewTChanSecondServiceServer(mockHandler))

		client := NewClient(clientCh, ch.ServiceName(), nil)
		secondClient := gen.NewTChanSecondServiceClient(client)

		echoArg := ch.ServiceName()
		echoRes := echoArg + "-echo"
		mockHandler.On("Echo", ctxArg(), echoArg).Return(echoRes, nil)
		res, err := secondClient.Echo(ctx, echoArg)
		assert.NoError(t, err, "Echo failed")
		assert.Equal(t, echoRes, res)
	}
}
Example 5: TestIntrospectNumConnections
func TestIntrospectNumConnections(t *testing.T) {
	// Disable the relay, since the relay does not maintain a 1:1 mapping between
	// incoming and outgoing connections.
	opts := testutils.NewOpts().NoRelay()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()

		assert.Equal(t, 0, ts.Server().IntrospectNumConnections(), "Expected no connection on new server")
		for i := 0; i < 10; i++ {
			client := ts.NewClient(nil)
			defer client.Close()

			require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
			assert.Equal(t, 1, client.IntrospectNumConnections(), "Client should have single connection")
			assert.Equal(t, i+1, ts.Server().IntrospectNumConnections(), "Incorrect number of server connections")
		}

		// Make sure that a closed connection will reduce NumConnections.
		client := ts.NewClient(nil)
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
		assert.Equal(t, 11, ts.Server().IntrospectNumConnections(), "Number of connections expected to increase")

		client.Close()
		require.True(t, testutils.WaitFor(100*time.Millisecond, func() bool {
			return ts.Server().IntrospectNumConnections() == 10
		}), "Closed connection did not get removed, num connections is %v", ts.Server().IntrospectNumConnections())
	})
}
Example 6: TestRelayConcurrentCalls
// TestRelayConcurrentCalls makes many concurrent calls and ensures that
// we don't try to reuse any frames once they've been released.
func TestRelayConcurrentCalls(t *testing.T) {
	pool := NewProtectMemFramePool()
	opts := testutils.NewOpts().SetRelayOnly().SetFramePool(pool)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := benchmark.NewServer(
			benchmark.WithNoLibrary(),
			benchmark.WithServiceName("s1"),
		)
		defer server.Close()
		ts.RelayHosts().Add("s1", server.HostPort())

		client := benchmark.NewClient([]string{ts.HostPort()},
			benchmark.WithNoDurations(),
			benchmark.WithNoLibrary(),
			benchmark.WithNumClients(20),
			benchmark.WithServiceName("s1"),
			benchmark.WithTimeout(time.Minute),
		)
		defer client.Close()
		require.NoError(t, client.Warmup(), "Client warmup failed")

		_, err := client.RawCall(1000)
		assert.NoError(t, err, "RawCalls failed")
	})
}
Example 7: TestDirtyFrameRequests
func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{25000, 50000, 75000}

	// Create the largest required random cache.
	testutils.RandBytes(argSizes[len(argSizes)-1])

	opts := testutils.NewOpts().
		SetServiceName("swap-server").
		SetFramePool(dirtyFramePool{})
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")

		for _, argSize := range argSizes {
			ctx, cancel := NewContext(time.Second)
			defer cancel()

			arg2, arg3 := testutils.RandBytes(argSize), testutils.RandBytes(argSize)
			res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
			if assert.NoError(t, err, "Call failed") {
				assert.Equal(t, arg2, res3, "Result arg3 wrong")
				assert.Equal(t, arg3, res2, "Result arg2 wrong")
			}
		}
	})
}
Example 8: TestRelayHandleLargeLocalCall
func TestRelayHandleLargeLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay").
		AddLogFilter("Received fragmented callReq", 1).
		// Expect 4 callReqContinues for 256 kb payload that we cannot relay.
		AddLogFilter("Failed to relay frame.", 4)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		client := ts.NewClient(nil)
		testutils.RegisterEcho(ts.Relay(), nil)

		// This large call should fail with a bad request.
		err := testutils.CallEcho(client, ts.HostPort(), "relay", &raw.Args{
			Arg2: testutils.RandBytes(128 * 1024),
			Arg3: testutils.RandBytes(128 * 1024),
		})
		if assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for large call to relay") {
			assert.Contains(t, err.Error(), "cannot receive fragmented calls")
		}

		// We may get an error before the call is finished flushing.
		// Do a ping to ensure everything has been flushed.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping failed")
	})
}
Example 9: runRetryTest
func runRetryTest(t *testing.T, f func(r *retryTest)) {
	r := &retryTest{}
	defer testutils.SetTimeout(t, time.Second)()
	r.setup()
	defer testutils.ResetSleepStub(&timeSleep)

	withSetup(t, func(hypCh *tchannel.Channel, hostPort string) {
		json.Register(hypCh, json.Handlers{"ad": r.adHandler}, nil)

		// Advertise failures cause warning log messages.
		opts := testutils.NewOpts().
			SetServiceName("my-client").
			AddLogFilter("Hyperbahn client registration failed", 10)
		serverCh := testutils.NewServer(t, opts)
		defer serverCh.Close()

		var err error
		r.ch = serverCh
		r.client, err = NewClient(serverCh, configFor(hostPort), &ClientOptions{
			Handler:      r,
			FailStrategy: FailStrategyIgnore,
		})
		require.NoError(t, err, "NewClient")
		defer r.client.Close()
		f(r)
		r.mock.AssertExpectations(t)
	})
}
Example 10: TestInboundEphemeralPeerRemoved
func TestInboundEphemeralPeerRemoved(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()

	// No relay, since we look for the exact host:port in peer lists.
	opts := testutils.NewOpts().NoRelay()
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		client := testutils.NewClient(t, nil)
		assert.NoError(t, client.Ping(ctx, hostPort), "Ping to server failed")

		// Server should have a host:port in the root peers for the client.
		var clientHP string
		peers := ch.RootPeers().Copy()
		for k := range peers {
			clientHP = k
		}

		waitTillInboundEmpty(t, ch, clientHP, func() {
			client.Close()
		})
		assert.Equal(t, ChannelClosed, client.State(), "Client should be closed")

		_, ok := ch.RootPeers().Get(clientHP)
		assert.False(t, ok, "server's root peers should remove peer for client on close")
	})
}
Example 11: TestReadTimeout
func TestReadTimeout(t *testing.T) {
	// The error frame may fail to send since the connection closes before the handler sends it
	// or the handler connection may be closed as it sends when the other side closes the conn.
	opts := testutils.NewOpts().
		AddLogFilter("Couldn't send outbound error frame", 1).
		AddLogFilter("Connection error", 1, "site", "read frames").
		AddLogFilter("Connection error", 1, "site", "write frames").
		AddLogFilter("simpleHandler OnError", 1,
			"error", "failed to send error frame, connection state connectionClosed")

	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		sn := ts.ServiceName()
		calls := relaytest.NewMockStats()

		for i := 0; i < 10; i++ {
			ctx, cancel := NewContext(time.Second)
			handler := func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				defer cancel()
				return nil, ErrTimeout
			}
			ts.RegisterFunc("call", handler)

			_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, nil)
			assert.Equal(t, err, context.Canceled, "Call should fail due to cancel")
			calls.Add(sn, sn, "call").Failed("timeout").End()
		}

		ts.AssertRelayStats(calls)
	})
}
Example 12: TestFragmentationSlowReader
func TestFragmentationSlowReader(t *testing.T) {
	startReading, handlerComplete := make(chan struct{}), make(chan struct{})
	handler := func(ctx context.Context, call *InboundCall) {
		<-ctx.Done()
		<-startReading

		_, err := raw.ReadArgs(call)
		assert.Error(t, err, "ReadArgs should fail since frames will be dropped due to slow reading")
		close(handlerComplete)
	}

	// Inbound forward will timeout and cause a warning log.
	opts := testutils.NewOpts().AddLogFilter("Unable to forward frame", 1)
	WithVerifiedServer(t, opts, func(ch *Channel, hostPort string) {
		ch.Register(HandlerFunc(handler), "echo")

		arg2 := testutils.RandBytes(MaxFramePayloadSize * MexChannelBufferSize)
		arg3 := testutils.RandBytes(MaxFramePayloadSize * (MexChannelBufferSize + 1))

		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()

		_, _, _, err := raw.Call(ctx, ch, hostPort, testServiceName, "echo", arg2, arg3)
		assert.Error(t, err, "Call should timeout due to slow reader")

		close(startReading)
		<-handlerComplete
	})
	goroutines.VerifyNoLeaks(t, nil)
}
Example 13: setupBenchServer
func setupBenchServer() ([]string, error) {
	ch, err := testutils.NewServerChannel(testutils.NewOpts().
		SetServiceName(benchServerName).
		SetFramePool(tchannel.NewSyncFramePool()))
	if err != nil {
		return nil, err
	}
	fmt.Println(benchServerName, "started on", ch.PeerInfo().HostPort)

	server := thrift.NewServer(ch)
	server.Register(gen.NewTChanSecondServiceServer(benchSecondHandler{}))

	if !*useHyperbahn {
		return []string{ch.PeerInfo().HostPort}, nil
	}

	// Set up a Hyperbahn client and advertise it.
	nodes := strings.Split(*hyperbahnNodes, ",")
	config := hyperbahn.Configuration{InitialNodes: nodes}
	hc, err := hyperbahn.NewClient(ch, config, nil)
	if err != nil {
		return nil, err
	}
	if err := hc.Advertise(); err != nil {
		return nil, err
	}
	return nodes, nil
}
Example 14: TestCloseAfterTimeout
func TestCloseAfterTimeout(t *testing.T) {
	// Disable log verification since connections are closed after a timeout
	// and the relay might still be reading/writing to the connection.
	// TODO: Ideally, we only disable log verification on the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ts.Register(raw.Wrap(testHandler), "block")

		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()

		// Make a call, wait for it to timeout.
		clientCh := ts.NewClient(nil)
		_, _, _, err := raw.Call(ctx, clientCh, ts.HostPort(), ts.ServiceName(), "block", nil, nil)
		require.Equal(t, ErrTimeout, err, "Expected call to timeout")

		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")

		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
}
Example 15: TestRaceExchangesWithClose
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup

	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()

	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()

		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})
		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})

		client := ts.NewClient(opts)
		defer client.Close()

		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()

		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall

		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()

				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}

		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")

		close(completeCall)
		<-callDone
	})

	// Wait for all calls to complete.
	wg.Wait()
}