This article collects typical usage examples of the Golang Wrap function from github.com/uber/tchannel-go/raw. If you have been wondering what raw.Wrap does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The sections below show 15 code examples of the Wrap function, ordered by popularity.
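Before diving into the examples, here is a minimal, self-contained sketch of what raw.Wrap is for: it adapts a value implementing the raw.Handler interface (a Handle method plus an OnError method) into a tchannel.Handler that can be passed to Channel.Register. This sketch is illustrative rather than taken from the examples below; the service name and listen address are placeholders, and the raw.Args/raw.Res field names are assumed from the tchannel-go raw package, so check them against the library version you use (older releases take golang.org/x/net/context instead of the standard context package).
package main

import (
	"context"
	"log"

	"github.com/uber/tchannel-go"
	"github.com/uber/tchannel-go/raw"
)

// echoHandler implements raw.Handler by echoing arg2/arg3 back to the caller.
type echoHandler struct{}

func (echoHandler) Handle(ctx context.Context, args *raw.Args) (*raw.Res, error) {
	return &raw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
}

// OnError is invoked when the response cannot be written, e.g. after a timeout.
func (echoHandler) OnError(ctx context.Context, err error) {
	log.Printf("echo handler error: %v", err)
}

func main() {
	ch, err := tchannel.NewChannel("echo-service", nil)
	if err != nil {
		log.Fatalf("failed to create channel: %v", err)
	}
	// raw.Wrap converts the raw.Handler into a tchannel.Handler for registration.
	ch.Register(raw.Wrap(echoHandler{}), "echo")
	if err := ch.ListenAndServe("127.0.0.1:0"); err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	log.Printf("listening on %v", ch.PeerInfo().HostPort)
	select {} // block so the channel keeps serving requests
}
The test examples below follow the same pattern: each wraps a test handler with raw.Wrap, registers it under a method name such as "echo", "block", or "swap", and then drives it with helpers like raw.Call or raw.WriteArgs.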
Example 1: TestReuseConnection
func TestReuseConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()
	// Since we're specifically testing that connections between hosts are re-used,
	// we can't interpose a relay in this test.
	s1Opts := testutils.NewOpts().SetServiceName("s1").NoRelay()
	testutils.WithTestServer(t, s1Opts, func(ts *testutils.TestServer) {
		ch2 := ts.NewServer(&testutils.ChannelOpts{ServiceName: "s2"})
		hostPort2 := ch2.PeerInfo().HostPort
		defer ch2.Close()
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		ch2.Register(raw.Wrap(newTestHandler(t)), "echo")
		outbound, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outboundConn, outboundNetConn := OutboundConnection(outbound)
		// Try to make another call at the same time, should reuse the same connection.
		outbound2, err := ts.Server().BeginCall(ctx, hostPort2, "s2", "echo", nil)
		require.NoError(t, err)
		outbound2Conn, _ := OutboundConnection(outbound)
		assert.Equal(t, outboundConn, outbound2Conn)
		// Wait for the connection to be marked as active in ch2.
		assert.True(t, testutils.WaitFor(time.Second, func() bool {
			return ch2.IntrospectState(nil).NumConnections > 0
		}), "ch2 does not have any active connections")
		// When ch2 tries to call the test server, it should reuse the existing
		// inbound connection to the test server. Of course, this only works if the
		// test server -> ch2 call wasn't relayed.
		outbound3, err := ch2.BeginCall(ctx, ts.HostPort(), "s1", "echo", nil)
		require.NoError(t, err)
		_, outbound3NetConn := OutboundConnection(outbound3)
		assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr())
		assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr())
		// Ensure all calls can complete in parallel.
		var wg sync.WaitGroup
		for _, call := range []*OutboundCall{outbound, outbound2, outbound3} {
			wg.Add(1)
			go func(call *OutboundCall) {
				defer wg.Done()
				resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3"))
				require.NoError(t, err)
				assert.Equal(t, resp1, []byte("arg2"), "result does match argument")
				assert.Equal(t, resp2, []byte("arg3"), "result does match argument")
			}(call)
		}
		wg.Wait()
	})
}
Example 2: register
// Register the different endpoints of the test subject
func register(ch *tchannel.Channel) {
	ch.Register(raw.Wrap(echoRawHandler{}), "echo/raw")
	ch.Register(raw.Wrap(handlerTimeoutRawHandler{}), "handlertimeout/raw")
	json.Register(ch, json.Handlers{"echo": echoJSONHandler}, onError)
	tserver := thrift.NewServer(ch)
	tserver.Register(echo.NewTChanEchoServer(&echoThriftHandler{}))
	tserver.Register(gauntlet_tchannel.NewTChanThriftTestServer(&thriftTestHandler{}))
	tserver.Register(gauntlet_tchannel.NewTChanSecondServiceServer(&secondServiceHandler{}))
}
Example 3: TestReuseConnection
func TestReuseConnection(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()
	s1Opts := &testutils.ChannelOpts{ServiceName: "s1"}
	WithVerifiedServer(t, s1Opts, func(ch1 *Channel, hostPort1 string) {
		s2Opts := &testutils.ChannelOpts{ServiceName: "s2"}
		WithVerifiedServer(t, s2Opts, func(ch2 *Channel, hostPort2 string) {
			ch1.Register(raw.Wrap(newTestHandler(t)), "echo")
			ch2.Register(raw.Wrap(newTestHandler(t)), "echo")
			// We need the servers to have their peers set before making outgoing calls
			// for the outgoing calls to contain the correct peerInfo.
			require.True(t, testutils.WaitFor(time.Second, func() bool {
				return !ch1.PeerInfo().IsEphemeral() && !ch2.PeerInfo().IsEphemeral()
			}))
			outbound, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil)
			require.NoError(t, err)
			outboundConn, outboundNetConn := OutboundConnection(outbound)
			// Try to make another call at the same time, should reuse the same connection.
			outbound2, err := ch1.BeginCall(ctx, hostPort2, "s2", "echo", nil)
			require.NoError(t, err)
			outbound2Conn, _ := OutboundConnection(outbound)
			assert.Equal(t, outboundConn, outbound2Conn)
			// When ch2 tries to call ch1, it should reuse the inbound connection from ch1.
			outbound3, err := ch2.BeginCall(ctx, hostPort1, "s1", "echo", nil)
			require.NoError(t, err)
			_, outbound3NetConn := OutboundConnection(outbound3)
			assert.Equal(t, outboundNetConn.RemoteAddr(), outbound3NetConn.LocalAddr())
			assert.Equal(t, outboundNetConn.LocalAddr(), outbound3NetConn.RemoteAddr())
			// Ensure all calls can complete in parallel.
			var wg sync.WaitGroup
			for _, call := range []*OutboundCall{outbound, outbound2, outbound3} {
				wg.Add(1)
				go func(call *OutboundCall) {
					defer wg.Done()
					resp1, resp2, _, err := raw.WriteArgs(call, []byte("arg2"), []byte("arg3"))
					require.NoError(t, err)
					assert.Equal(t, resp1, []byte("arg2"), "result does match argument")
					assert.Equal(t, resp2, []byte("arg3"), "result does match argument")
				}(call)
			}
			wg.Wait()
		})
	})
}
Example 4: TestRoundTrip
func TestRoundTrip(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		handler := newTestHandler(t)
		ts.Register(raw.Wrap(handler), "echo")
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		call, err := ts.Server().BeginCall(ctx, ts.HostPort(), ts.ServiceName(), "echo", &CallOptions{Format: JSON})
		require.NoError(t, err)
		assert.NotEmpty(t, call.RemotePeer().HostPort)
		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(testArg2))
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(testArg3))
		var respArg2 []byte
		require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&respArg2))
		assert.Equal(t, testArg2, []byte(respArg2))
		var respArg3 []byte
		require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&respArg3))
		assert.Equal(t, testArg3, []byte(respArg3))
		assert.Equal(t, JSON, handler.format)
		assert.Equal(t, ts.ServiceName(), handler.caller)
		assert.Equal(t, JSON, call.Response().Format(), "response Format should match request Format")
	})
}
Example 5: Benchmark_TChannel_YARPCToTChannel
func Benchmark_TChannel_YARPCToTChannel(b *testing.B) {
	serverCh, err := tchannel.NewChannel("server", nil)
	require.NoError(b, err, "failed to build server TChannel")
	defer serverCh.Close()
	serverCh.Register(traw.Wrap(tchannelEcho{t: b}), "echo")
	require.NoError(b, serverCh.ListenAndServe(":0"), "failed to start up TChannel")
	clientCh, err := tchannel.NewChannel("client", nil)
	require.NoError(b, err, "failed to build client TChannel")
	clientCfg := yarpc.Config{
		Name: "client",
		Outbounds: yarpc.Outbounds{
			"server": {
				Unary: ytchannel.NewOutbound(clientCh, ytchannel.HostPort(serverCh.PeerInfo().HostPort)),
			},
		},
	}
	withDispatcher(b, clientCfg, func(client yarpc.Dispatcher) {
		b.ResetTimer()
		runYARPCClient(b, raw.New(client.Channel("server")))
	})
}
Example 6: TestRequestStateRetry
func TestRequestStateRetry(t *testing.T) {
	ctx, cancel := NewContext(time.Second)
	defer cancel()
	server := testutils.NewServer(t, nil)
	defer server.Close()
	server.Register(raw.Wrap(newTestHandler(t)), "echo")
	client := testutils.NewClient(t, nil)
	defer client.Close()
	counter := 0
	sc := client.GetSubChannel(server.PeerInfo().ServiceName)
	err := client.RunWithRetry(ctx, func(ctx context.Context, rs *RequestState) error {
		defer func() { counter++ }()
		assert.Equal(t, counter, len(rs.SelectedPeers), "SelectedPeers should not be reused")
		if counter < 4 {
			client.Peers().Add(testutils.GetClosedHostPort(t))
		} else {
			client.Peers().Add(server.PeerInfo().HostPort)
		}
		_, err := raw.CallV2(ctx, sc, raw.CArgs{
			Operation:   "echo",
			CallOptions: &CallOptions{RequestState: rs},
		})
		return err
	})
	assert.NoError(t, err, "RunWithRetry should succeed")
	assert.Equal(t, 5, counter, "RunWithRetry should retry 5 times")
}
Example 7: TestCloseAfterTimeout
func TestCloseAfterTimeout(t *testing.T) {
	// Disable log verification since connections are closed after a timeout
	// and the relay might still be reading/writing to the connection.
	// TODO: Ideally, we only disable log verification on the relay.
	opts := testutils.NewOpts().DisableLogVerification()
	testutils.WithTestServer(t, opts, func(ts *testutils.TestServer) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ts.Register(raw.Wrap(testHandler), "block")
		ctx, cancel := NewContext(100 * time.Millisecond)
		defer cancel()
		// Make a call, wait for it to timeout.
		clientCh := ts.NewClient(nil)
		_, _, _, err := raw.Call(ctx, clientCh, ts.HostPort(), ts.ServiceName(), "block", nil, nil)
		require.Equal(t, ErrTimeout, err, "Expected call to timeout")
		// The client channel should also close immediately.
		clientCh.Close()
		assertStateChangesTo(t, clientCh, ChannelClosed)
		assert.True(t, clientCh.Closed(), "Channel should be closed")
		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
}
Example 8: NewServer
// NewServer returns a new Server that can receive Thrift calls or raw calls.
func NewServer(optFns ...Option) Server {
	opts := getOptions(optFns)
	if opts.external {
		return newExternalServer(opts)
	}
	ch, err := tchannel.NewChannel(opts.svcName, &tchannel.ChannelOptions{
		Logger: tchannel.NewLevelLogger(tchannel.NewLogger(os.Stderr), tchannel.LogLevelWarn),
	})
	if err != nil {
		panic("failed to create channel: " + err.Error())
	}
	if err := ch.ListenAndServe("127.0.0.1:0"); err != nil {
		panic("failed to listen on port 0: " + err.Error())
	}
	s := &internalServer{
		ch:   ch,
		opts: opts,
	}
	tServer := thrift.NewServer(ch)
	tServer.Register(gen.NewTChanSecondServiceServer(handler{calls: &s.thriftCalls}))
	ch.Register(raw.Wrap(rawHandler{calls: &s.rawCalls}), "echo")
	if len(opts.advertiseHosts) > 0 {
		if err := s.Advertise(opts.advertiseHosts); err != nil {
			panic("failed to advertise: " + err.Error())
		}
	}
	return s
}
Example 9: setupClient
func (pt *peerSelectionTest) setupClient(t testing.TB) {
	pt.client, _ = pt.NewService(t, "client", "client")
	pt.client.Register(raw.Wrap(newTestHandler(pt.t)), "echo")
	for _, server := range pt.servers {
		pt.client.Peers().Add(server.PeerInfo().HostPort)
	}
}
Example 10: TestDirtyFrameRequests
func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{50000, 100000, 150000}
	WithVerifiedServer(t, &testutils.ChannelOpts{
		ServiceName: "swap-server",
		DefaultConnectionOptions: ConnectionOptions{
			FramePool: dirtyFramePool{},
		},
	}, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")
		for _, arg2Size := range argSizes {
			for _, arg3Size := range argSizes {
				ctx, cancel := NewContext(time.Second)
				defer cancel()
				arg2, arg3 := testutils.RandBytes(arg2Size), testutils.RandBytes(arg3Size)
				res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
				if assert.NoError(t, err, "Call failed") {
					assert.Equal(t, arg2, res3, "Result arg3 wrong")
					assert.Equal(t, arg3, res2, "Result arg2 wrong")
				}
			}
		}
	})
}
Example 11: TestCloseAfterTimeout
func TestCloseAfterTimeout(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		testHandler := onErrorTestHandler{newTestHandler(t), func(_ context.Context, err error) {}}
		ch.Register(raw.Wrap(testHandler), "block")
		ctx, cancel := NewContext(10 * time.Millisecond)
		defer cancel()
		// Make a call, wait for it to timeout.
		clientCh, err := testutils.NewClient(nil)
		require.NoError(t, err, "NewClient failed")
		peerInfo := ch.PeerInfo()
		_, _, _, err = raw.Call(ctx, clientCh, peerInfo.HostPort, peerInfo.ServiceName, "block", nil, nil)
		require.Error(t, err, "Expected call to timeout")
		// The client channel should also close immediately.
		clientCh.Close()
		runtime.Gosched()
		assert.Equal(t, ChannelClosed, clientCh.State())
		assert.True(t, clientCh.Closed(), "Channel should be closed")
		// Unblock the testHandler so that a goroutine isn't leaked.
		<-testHandler.blockErr
	})
	VerifyNoBlockedGoroutines(t)
}
Example 12: TestTimeout
func TestTimeout(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		// onError may be called when the block call tries to write the call response.
		onError := func(ctx context.Context, err error) {
			assert.Equal(t, ErrTimeout, err, "onError err should be ErrTimeout")
			assert.Equal(t, context.DeadlineExceeded, ctx.Err(), "Context should timeout")
		}
		testHandler := onErrorTestHandler{newTestHandler(t), onError}
		ts.Register(raw.Wrap(testHandler), "block")
		ctx, cancel := NewContext(testutils.Timeout(15 * time.Millisecond))
		defer cancel()
		_, _, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "block", []byte("Arg2"), []byte("Arg3"))
		assert.Equal(t, ErrTimeout, err)
		// Verify the server-side receives an error from the context.
		select {
		case err := <-testHandler.blockErr:
			assert.Equal(t, context.DeadlineExceeded, err, "Server should have received timeout")
		case <-time.After(time.Second):
			t.Errorf("Server did not receive call, may need higher timeout")
		}
		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "block").Failed("timeout").End()
		ts.AssertRelayStats(calls)
	})
}
Example 13: TestFragmentation
func TestFragmentation(t *testing.T) {
	testutils.WithTestServer(t, nil, func(ts *testutils.TestServer) {
		ts.Register(raw.Wrap(newTestHandler(t)), "echo")
		arg2 := make([]byte, MaxFramePayloadSize*2)
		for i := 0; i < len(arg2); i++ {
			arg2[i] = byte('a' + (i % 10))
		}
		arg3 := make([]byte, MaxFramePayloadSize*3)
		for i := 0; i < len(arg3); i++ {
			arg3[i] = byte('A' + (i % 10))
		}
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		respArg2, respArg3, _, err := raw.Call(ctx, ts.Server(), ts.HostPort(), ts.ServiceName(), "echo", arg2, arg3)
		require.NoError(t, err)
		assert.Equal(t, arg2, respArg2)
		assert.Equal(t, arg3, respArg3)
		calls := relaytest.NewMockStats()
		calls.Add(ts.ServiceName(), ts.ServiceName(), "echo").Succeeded().End()
		ts.AssertRelayStats(calls)
	})
}
Example 14: TestRoundTrip
func TestRoundTrip(t *testing.T) {
	WithVerifiedServer(t, nil, func(ch *Channel, hostPort string) {
		handler := newTestHandler(t)
		ch.Register(raw.Wrap(handler), "echo")
		ctx, cancel := NewContext(time.Second)
		defer cancel()
		call, err := ch.BeginCall(ctx, hostPort, testServiceName, "echo", &CallOptions{Format: JSON})
		require.NoError(t, err)
		require.NoError(t, NewArgWriter(call.Arg2Writer()).Write(testArg2))
		require.NoError(t, NewArgWriter(call.Arg3Writer()).Write(testArg3))
		var respArg2 []byte
		require.NoError(t, NewArgReader(call.Response().Arg2Reader()).Read(&respArg2))
		assert.Equal(t, testArg2, []byte(respArg2))
		var respArg3 []byte
		require.NoError(t, NewArgReader(call.Response().Arg3Reader()).Read(&respArg3))
		assert.Equal(t, testArg3, []byte(respArg3))
		assert.Equal(t, JSON, handler.format)
		assert.Equal(t, testServiceName, handler.caller)
		assert.Equal(t, JSON, call.Response().Format(), "response Format should match request Format")
	})
}
Example 15: TestDirtyFrameRequests
func TestDirtyFrameRequests(t *testing.T) {
	argSizes := []int{25000, 50000, 75000}
	// Create the largest required random cache.
	testutils.RandBytes(argSizes[len(argSizes)-1])
	opts := testutils.NewOpts().
		SetServiceName("swap-server").
		SetFramePool(dirtyFramePool{})
	WithVerifiedServer(t, opts, func(serverCh *Channel, hostPort string) {
		peerInfo := serverCh.PeerInfo()
		serverCh.Register(raw.Wrap(&swapper{t}), "swap")
		for _, argSize := range argSizes {
			ctx, cancel := NewContext(time.Second)
			defer cancel()
			arg2, arg3 := testutils.RandBytes(argSize), testutils.RandBytes(argSize)
			res2, res3, _, err := raw.Call(ctx, serverCh, hostPort, peerInfo.ServiceName, "swap", arg2, arg3)
			if assert.NoError(t, err, "Call failed") {
				assert.Equal(t, arg2, res3, "Result arg3 wrong")
				assert.Equal(t, arg3, res2, "Result arg2 wrong")
			}
		}
	})
}