This article collects typical usage examples of the Go function github.com/uber/tchannel-go/testutils.WithTestServer. If you are wondering what WithTestServer does, how to call it, or what real-world usage looks like, the curated examples below should help.
The section below shows 15 code examples of the WithTestServer function, sorted by popularity by default.
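Before the numbered examples, here is a minimal sketch of the pattern they all share. The test name is hypothetical, and it assumes the same in-package imports the examples below rely on (testutils, raw, require). Judging from those examples, WithTestServer creates a test channel (and, depending on the options, a relay), invokes the callback with a *testutils.TestServer, and tears everything down afterwards.
// A minimal, hypothetical round-trip test assembled from pieces used in the
// examples below (testutils.RegisterEcho, ts.NewClient, testutils.CallEcho).
func TestWithTestServerSketch(t *testing.T) {
	testutils.WithTestServer(t, testutils.NewOpts(), func(ts *testutils.TestServer) {
		// Register an echo handler on the server side of the test channel.
		testutils.RegisterEcho(ts.Server(), nil)
		// Call the server from a fresh client channel; the TestServer exposes
		// its own host:port and service name.
		client := ts.NewClient(nil)
		defer client.Close()
		require.NoError(t, testutils.CallEcho(client, ts.HostPort(), ts.ServiceName(), &raw.Args{}),
			"echo round trip failed")
	})
}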
Example 1: TestReadTimeout
func TestReadTimeout(t *testing.T) {
	// The error frame may fail to send since the connection closes before the handler sends it
	// or the handler connection may be closed as it sends when the other side closes the conn.
	opts := testutils.NewOpts().
		AddLogFilter("Couldn't send outbound error frame", 1).
		AddLogFilter("Connection error", 1, "site", "read frames").
		AddLogFilter("Connection error", 1, "site", "write frames").
		AddLogFilter("simpleHandler OnError", 1,
			"error", "failed to send error frame, connection state connectionClosed")
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		sn := ts.ServiceName()
		calls := relaytest.NewMockStats()
		for i := 0; i < 10; i++ {
			ctx, cancel := NewContext(time.Second)
			handler := func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
				defer cancel()
				return nil, ErrTimeout
			}
			ts.RegisterFunc("call", handler)
			_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, nil)
			assert.Equal(t, err, context.Canceled, "Call should fail due to cancel")
			calls.Add(sn, sn, "call").Failed("timeout").End()
		}
		ts.AssertRelayStats(calls)
	})
}
Example 2: TestFragmentation
func TestFragmentation(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		arg2 := make([]byte, MaxFramePayloadSize*2)
		for i := 0; i < len(arg2); i++ {
			arg2[i] = byte('a' + (i % 10))
		}
		arg3 := make([]byte, MaxFramePayloadSize*3)
		for i := 0; i < len(arg3); i++ {
			arg3[i] = byte('A' + (i % 10))
		}
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		respArg2, respArg3, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", arg2, arg3)
		require.NoError(t, err)
		assert.Equal(t, arg2, respArg2)
		assert.Equal(t, arg3, respArg3)
		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}
Example 3: TestTimeout
func TestTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		// onError may be called when the block call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ts.Register(raw.Wrap(testHandler), "block")
		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)
		// Verify the server-side receives an error from the context.
		select {
		case err := <-testHandler.blockErr:
			assert.Equal(t, context.DeadlineExceeded, err, "Server should have received timeout")
		case <-time.After(time.Second):
			t.Errorf("Server did not receive call, may need higher timeout")
		}
		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "block").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example 4: TestRoundTrip
func TestRoundTrip(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		handler := newTestHandler(t)
		ts.Register(raw.Wrap(handler), "echo")
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		call, err := ts.Server().BeginCall(ctx, ts.HostPort(), ts.ServiceName(), "echo", &CallOptions{Format: JSON})
		require.NoError(t, err)
		assert.NotEmpty(t, call.RemotePeer().HostPort)
		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(testArg2))
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(testArg3))
		var respArg2 []byte
		require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&respArg2))
		assert.Equal(t, testArg2, []byte(respArg2))
		var respArg3 []byte
		require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&respArg3))
		assert.Equal(t, testArg3, []byte(respArg3))
		assert.Equal(t, JSON, handler.format)
		assert.Equal(t, ts.ServiceName(), handler.caller)
		assert.Equal(t, JSON, call.Response().Format(), "response Format should match request Format")
	})
}
Example 5: TestIntrospection
// Purpose of this test is to ensure introspection doesn't cause any panics
// and we have coverage of the introspection code.
func TestIntrospection(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		client := testutils.NewClient(t, nil)
		defer client.Close()
		ctx, cancel := json.NewContext(time.Second)
		defer cancel()
		var resp map[string]interface{}
		peer := client.Peers().GetOrAdd(ts.HostPort())
		err := json.CallPeer(ctx, peer, ts.ServiceName(), "_gometa_introspect", map[string]interface{}{
			"includeExchanges":  true,
			"includeEmptyPeers": true,
			"includeTombstones": true,
		}, &resp)
		require.NoError(t, err, "Call _gometa_introspect failed")
		err = json.CallPeer(ctx, peer, ts.ServiceName(), "_gometa_runtime", map[string]interface{}{
			"includeGoStacks": true,
		}, &resp)
		require.NoError(t, err, "Call _gometa_runtime failed")
		if !ts.HasRelay() {
			// Try making the call on the "tchannel" service which is where meta handlers
			// are registered. This will only work when we call it directly as the relay
			// will not forward the tchannel service.
			err = json.CallPeer(ctx, peer, "tchannel", "_gometa_runtime", map[string]interface{}{
				"includeGoStacks": true,
			}, &resp)
			require.NoError(t, err, "Call _gometa_runtime failed")
		}
	})
}
Example 6: TestRelayIDClash
func TestRelayIDClash(t *testing.T) {
	opts := serviceNameOpts("s1").SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2"))
		unblock := make(chan struct{})
		testutils.RegisterEcho(s1, func() {
			<-unblock
		})
		testutils.RegisterEcho(s2, nil)
		var wg sync.WaitGroup
		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				testutils.AssertEcho(t, s2, ts.HostPort(), s1.ServiceName())
			}()
		}
		for i := 0; i < 5; i++ {
			testutils.AssertEcho(t, s1, ts.HostPort(), s2.ServiceName())
		}
		close(unblock)
		wg.Wait()
	})
}
Example 7: TestIntrospectNumConnections
func TestIntrospectNumConnections(t *testing.T) {
	// Disable the relay, since the relay does not maintain a 1:1 mapping between
	// incoming and outgoing connections.
	opts := testutils.NewOpts().NoRelay()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		assert.Equal(t, 0, ts.Server().IntrospectNumConnections(), "Expected no connection on new server")
		for i := 0; i < 10; i++ {
			client := ts.NewClient(nil)
			defer client.Close()
			require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
			assert.Equal(t, 1, client.IntrospectNumConnections(), "Client should have single connection")
			assert.Equal(t, i+1, ts.Server().IntrospectNumConnections(), "Incorrect number of server connections")
		}
		// Make sure that a closed connection will reduce NumConnections.
		client := ts.NewClient(nil)
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping from new client failed")
		assert.Equal(t, 11, ts.Server().IntrospectNumConnections(), "Number of connections expected to increase")
		client.Close()
		require.True(t, testutils.WaitFor(100*time.Millisecond, func() bool {
			return ts.Server().IntrospectNumConnections() == 10
		}), "Closed connection did not get removed, num connections is %v", ts.Server().IntrospectNumConnections())
	})
}
Example 8: TestTimeoutCallsThenClose
func TestTimeoutCallsThenClose(t *testing.T) {
	// Test needs at least 2 CPUs to trigger race conditions.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	opts := serviceNameOpts("s1").SetRelayOnly().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		s1 := ts.Server()
		s2 := ts.NewServer(serviceNameOpts("s2").DisableLogVerification())
		unblockEcho := make(chan struct{})
		testutils.RegisterEcho(s1, func() {
			<-unblockEcho
		})
		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()
		var callers sync.WaitGroup
		for i := 0; i < 100; i++ {
			callers.Add(1)
			go func() {
				defer callers.Done()
				raw.Call(ctx, s2, ts.HostPort(), "s1", "echo", nil, nil)
			}()
		}
		close(unblockEcho)
		// Wait for all the callers to end.
		callers.Wait()
	})
}
Example 9: TestRelayConcurrentCalls
// TestRelayConcurrentCalls makes many concurrent calls and ensures that
// we don't try to reuse any frames once they've been released.
func TestRelayConcurrentCalls(t *testing.T) {
	pool := NewProtectMemFramePool()
	opts := testutils.NewOpts().SetRelayOnly().SetFramePool(pool)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := benchmark.NewServer(
			benchmark.WithNoLibrary(),
			benchmark.WithServiceName("s1"),
		)
		defer server.Close()
		ts.RelayHosts().Add("s1", server.HostPort())
		client := benchmark.NewClient([]string{ts.HostPort()},
			benchmark.WithNoDurations(),
			benchmark.WithNoLibrary(),
			benchmark.WithNumClients(20),
			benchmark.WithServiceName("s1"),
			benchmark.WithTimeout(time.Minute),
		)
		defer client.Close()
		require.NoError(t, client.Warmup(), "Client warmup failed")
		_, err := client.RawCall(1000)
		assert.NoError(t, err, "RawCalls failed")
	})
}
Example 10: TestRelayHandleLargeLocalCall
func TestRelayHandleLargeLocalCall(t *testing.T) {
	opts := testutils.NewOpts().SetRelayOnly().
		SetRelayLocal("relay").
		AddLogFilter("Received fragmented callReq", 1).
		// Expect 4 callReqContinues for 256 kb payload that we cannot relay.
		AddLogFilter("Failed to relay frame.", 4)
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		client := ts.NewClient(nil)
		testutils.RegisterEcho(ts.Relay(), nil)
		// This large call should fail with a bad request.
		err := testutils.CallEcho(client, ts.HostPort(), "relay", &raw.Args{
			Arg2: testutils.RandBytes(128 * 1024),
			Arg3: testutils.RandBytes(128 * 1024),
		})
		if assert.Equal(t, ErrCodeBadRequest, GetSystemErrorCode(err), "Expected BadRequest for large call to relay") {
			assert.Contains(t, err.Error(), "cannot receive fragmented calls")
		}
		// We may get an error before the call is finished flushing.
		// Do a ping to ensure everything has been flushed.
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		require.NoError(t, client.Ping(ctx, ts.HostPort()), "Ping failed")
	})
}
Example 11: TestWriteErrorAfterTimeout
func TestWriteErrorAfterTimeout(t *testing.T) {
	// TODO: Make this test block at different points (e.g. before, during read/write).
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		timedOut := make(chan struct{})
		done := make(chan struct{})
		handler := func(ctx context.Context, call *InboundCall) {
			<-ctx.Done()
			<-timedOut
			_, err := raw.ReadArgs(call)
			assert.Equal(t, ErrTimeout, err, "Read args should fail with timeout")
			response := call.Response()
			assert.Equal(t, ErrTimeout, response.SendSystemError(ErrServerBusy), "SendSystemError should fail")
			close(done)
		}
		ts.Register(HandlerFunc(handler), "call")
		ctx, cancel := NewContext(testutils.Timeout(30 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "call", nil, testutils.RandBytes(100000))
		assert.Equal(t, err, ErrTimeout, "Call should timeout")
		close(timedOut)
		select {
		case <-done:
		case <-time.After(time.Second):
			t.Errorf("Handler not called, timeout may be too low")
		}
		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "call").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example 12: TestCloseAfterTimeout
func TestCloseAfterTimeout(t *testing.T) {
	// Disable log verification since connections are closed after a timeout
	// and the relay might still be reading/writing to the connection.
	// TODO: Ideally, we only disable log verification on the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ts.Register(raw.Wrap(testHandler), "block")
		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()
		// Make a call, wait for it to timeout.
		clientCh := ts.NewClient(nil)
		_, _, _, err := raw.Call(ctx, clientCh, ts.HostPort(), ts.ServiceName(), "block", nil, nil)
		require.Equal(t, ErrTimeout, err, "Expected call to timeout")
		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")
		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
}
Example 13: TestRaceExchangesWithClose
func TestRaceExchangesWithClose(t *testing.T) {
	var wg sync.WaitGroup
	ctx, cancel := NewContext(testutils.Timeout(70 * time.Millisecond))
	defer cancel()
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		server := ts.Server()
		gotCall := make(chan struct{})
		completeCall := make(chan struct{})
		testutils.RegisterFunc(server, "dummy", func(ctx context.Context, args *raw.Args) (*raw.Res, error) {
			return &raw.Res{}, nil
		})
		testutils.RegisterEcho(server, func() {
			close(gotCall)
			<-completeCall
		})
		client := ts.NewClient(opts)
		defer client.Close()
		callDone := make(chan struct{})
		go func() {
			assert.NoError(t, testutils.CallEcho(client, ts.HostPort(), server.ServiceName(), &raw.Args{}), "Echo failed")
			close(callDone)
		}()
		// Wait until the server receives a call, so it has an active inbound.
		<-gotCall
		// Start a bunch of clients to trigger races between connecting and close.
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// We don't use ts.NewClient here to avoid data races.
				c := testutils.NewClient(t, opts)
				defer c.Close()
				c.Ping(ctx, ts.HostPort())
				raw.Call(ctx, c, ts.HostPort(), server.ServiceName(), "dummy", nil, nil)
			}()
		}
		// Now try to close the channel; it should block since there are active exchanges.
		server.Close()
		assert.Equal(t, ChannelStartClose, ts.Server().State(), "Server should be in StartClose")
		close(completeCall)
		<-callDone
	})
	// Wait for all calls to complete.
	wg.Wait()
}
Example 14: withRelayedEcho
func withRelayedEcho(t testing.TB, f func(relay, server, client *Channel, ts *testutils.TestServer)) {
	opts := serviceNameOpts("test").SetRelayOnly()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testutils.RegisterEcho(ts.Server(), nil)
		client := ts.NewClient(serviceNameOpts("client"))
		client.Peers().Add(ts.HostPort())
		f(ts.Relay(), ts.Server(), client, ts)
	})
}
Example 15: TestPing
func TestPing(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		clientCh := ts.NewClient(nil)
		defer clientCh.Close()
		require.NoError(t, clientCh.Ping(ctx, ts.HostPort()))
	})
}