This article collects typical usage examples of the On function from the Golang package google.golang.org/grpc/stats. If you have been wondering what the Golang On function does, how to call it, or where to find examples of it in use, the hand-picked code samples here may help.
A total of 15 code examples of the On function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
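Before the examples, here is a distilled sketch of the gate that every snippet below shares: collection only happens when stats.On() reports true, which in this legacy, experimental version of the google.golang.org/grpc/stats package means a handler has been registered and stats.Start() has been called (see Example 10). The helper name sendWithStats and its send argument are placeholders invented for illustration, not real gRPC functions, and current gRPC releases expose stats collection through per-connection stats.Handler options instead.
package example

import (
	"time"

	"golang.org/x/net/context" // assumed: the context import these snippets were written against
	"google.golang.org/grpc/stats"
)

// sendWithStats shows the shape of the pattern only: allocate the payload
// stats object when stats.On() reports true, let the send path fill it in,
// and hand it to stats.HandleRPC afterwards. send stands in for the transport
// write (t.Write) performed in the real examples.
func sendWithStats(ctx context.Context, msg []byte, send func([]byte) error) error {
	var outPayload *stats.OutPayload
	if stats.On() { // false until a handler is registered and stats.Start() runs
		outPayload = &stats.OutPayload{Client: true}
	}
	if err := send(msg); err != nil {
		return err
	}
	if outPayload != nil {
		outPayload.SentTime = time.Now() // same bookkeeping as Examples 3, 5 and 7
		stats.HandleRPC(ctx, outPayload)
	}
	return nil
}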
Example 1: RecvMsg
func (ss *serverStream) RecvMsg(m interface{}) (err error) {
defer func() {
if ss.trInfo != nil {
ss.mu.Lock()
if ss.trInfo.tr != nil {
if err == nil {
ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
} else if err != io.EOF {
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
ss.trInfo.tr.SetError()
}
}
ss.mu.Unlock()
}
}()
var inPayload *stats.InPayload
if stats.On() {
inPayload = &stats.InPayload{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
if err == io.EOF {
return err
}
if err == io.ErrUnexpectedEOF {
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
if inPayload != nil {
stats.HandleRPC(ss.s.Context(), inPayload)
}
return nil
}
Example 2: Close
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
func (t *http2Client) Close() (err error) {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return
}
if t.state == reachable || t.state == draining {
close(t.errorChan)
}
t.state = closing
t.mu.Unlock()
close(t.shutdownChan)
err = t.conn.Close()
t.mu.Lock()
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
// Notify all active streams.
for _, s := range streams {
s.mu.Lock()
if !s.headerDone {
close(s.headerChan)
s.headerDone = true
}
s.mu.Unlock()
s.write(recvMsg{err: ErrConnClosing})
}
if stats.On() {
connEnd := &stats.ConnEnd{
Client: true,
}
stats.HandleConn(t.ctx, connEnd)
}
return
}
Example 3: sendResponse
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
var (
cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
if cp != nil {
cbuf = new(bytes.Buffer)
}
if stats.On() {
outPayload = &stats.OutPayload{}
}
p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
if err != nil {
// This typically indicates a fatal issue (e.g., memory
// corruption or hardware faults) the application program
// cannot handle.
//
// TODO(zhaoq): There exist other options also such as only closing the
// faulty stream locally and remotely (Other streams can keep going). Find
// the optimal option.
grpclog.Fatalf("grpc: Server failed to encode response %v", err)
}
err = t.Write(stream, p, opts)
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
stats.HandleRPC(stream.Context(), outPayload)
}
return err
}
Example 4: WriteStatus
// WriteStatus sends stream status to the client and terminates the stream.
// No further I/O operations can be performed on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
var headersSent, hasHeader bool
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
return nil
}
if s.headerOk {
headersSent = true
}
if s.header.Len() > 0 {
hasHeader = true
}
s.mu.Unlock()
if !headersSent && hasHeader {
t.WriteHeader(s, nil)
headersSent = true
}
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
return err
}
t.hBuf.Reset()
if !headersSent {
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
}
t.hEnc.WriteField(
hpack.HeaderField{
Name: "grpc-status",
Value: strconv.Itoa(int(statusCode)),
})
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)})
// Attach the trailer metadata.
for k, v := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
if isReservedHeader(k) {
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, true); err != nil {
t.Close()
return err
}
if stats.On() {
outTrailer := &stats.OutTrailer{
WireLength: bufLen,
}
stats.HandleRPC(s.Context(), outTrailer)
}
t.closeStream(s)
t.writableChan <- 0
return nil
}
Example 5: SendMsg
func (cs *clientStream) SendMsg(m interface{}) (err error) {
if cs.tracing {
cs.mu.Lock()
if cs.trInfo.tr != nil {
cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
}
cs.mu.Unlock()
}
// TODO Investigate how to signal the stats handling party.
// generate error stats if err != nil && err != io.EOF?
defer func() {
if err != nil {
cs.finish(err)
}
if err == nil {
return
}
if err == io.EOF {
// Specialize the process for server streaming. SendMsg is only called
// once when creating the stream object. io.EOF needs to be skipped when
// the RPC finishes early (before the stream object is created).
// TODO: It is probably better to move this into the generated code.
if !cs.desc.ClientStreams && cs.desc.ServerStreams {
err = nil
}
return
}
if _, ok := err.(transport.ConnectionError); !ok {
cs.closeTransportStream(err)
}
err = toRPCErr(err)
}()
var outPayload *stats.OutPayload
if stats.On() {
outPayload = &stats.OutPayload{
Client: true,
}
}
out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
defer func() {
if cs.cbuf != nil {
cs.cbuf.Reset()
}
}()
if err != nil {
return Errorf(codes.Internal, "grpc: %v", err)
}
err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
stats.HandleRPC(cs.statsCtx, outPayload)
}
return err
}
Example 6: WriteHeader
// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
s.mu.Lock()
if s.headerOk || s.state == streamDone {
s.mu.Unlock()
return ErrIllegalHeaderWrite
}
s.headerOk = true
if md.Len() > 0 {
if s.header.Len() > 0 {
s.header = metadata.Join(s.header, md)
} else {
s.header = md
}
}
md = s.header
s.mu.Unlock()
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
return err
}
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
if s.sendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
for k, v := range md {
if isReservedHeader(k) {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
continue
}
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, false); err != nil {
return err
}
if stats.On() {
outHeader := &stats.OutHeader{
WireLength: bufLen,
}
stats.HandleRPC(s.Context(), outHeader)
}
t.writableChan <- 0
return nil
}
Example 7: sendRequest
// sendRequest writes out the various pieces of information of an RPC, such as its Context and Message.
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
stream, err := t.NewStream(ctx, callHdr)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
// If err is connection error, t will be closed, no need to close stream here.
if _, ok := err.(transport.ConnectionError); !ok {
t.CloseStream(stream, err)
}
}
}()
var (
cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
if compressor != nil {
cbuf = new(bytes.Buffer)
}
if stats.On() {
outPayload = &stats.OutPayload{
Client: true,
}
}
outBuf, err := encode(codec, args, compressor, cbuf, outPayload)
if err != nil {
return nil, Errorf(codes.Internal, "grpc: %v", err)
}
err = t.Write(stream, outBuf, opts)
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
stats.Handle(ctx, outPayload)
}
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status.
if err != nil && err != io.EOF {
return nil, err
}
// Sent successfully.
return stream, nil
}
Example 8: Close
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() (err error) {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return errors.New("transport: Close() was already called")
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
close(t.shutdownChan)
err = t.conn.Close()
// Cancel all active streams.
for _, s := range streams {
s.cancel()
}
if stats.On() {
connEnd := &stats.ConnEnd{}
stats.HandleConn(t.ctx, connEnd)
}
return
}
Example 9: recvResponse
// recvResponse receives and parses an RPC response.
// On error, it returns the error and indicates whether the call should be retried.
//
// TODO(zhaoq): Check whether the received message sequence is valid.
// TODO ctx is used for stats collection and processing. It is the context passed from the application.
func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
// Try to acquire header metadata from the server if there is any.
defer func() {
if err != nil {
if _, ok := err.(transport.ConnectionError); !ok {
t.CloseStream(stream, err)
}
}
}()
c.headerMD, err = stream.Header()
if err != nil {
return
}
p := &parser{r: stream}
var inPayload *stats.InPayload
if stats.On() {
inPayload = &stats.InPayload{
Client: true,
}
}
for {
if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil {
if err == io.EOF {
break
}
return
}
}
if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary.
stats.Handle(ctx, inPayload)
}
c.trailerMD = stream.Trailer()
return nil
}
Example 10: TestStartStop
func TestStartStop(t *testing.T) {
stats.RegisterRPCHandler(nil)
stats.RegisterConnHandler(nil)
stats.Start()
if stats.On() {
t.Fatalf("stats.Start() with nil handler, stats.On() = true, want false")
}
stats.RegisterRPCHandler(func(ctx context.Context, s stats.RPCStats) {})
stats.RegisterConnHandler(nil)
stats.Start()
if !stats.On() {
t.Fatalf("stats.Start() with non-nil handler, stats.On() = false, want true")
}
stats.Stop()
stats.RegisterRPCHandler(nil)
stats.RegisterConnHandler(func(ctx context.Context, s stats.ConnStats) {})
stats.Start()
if !stats.On() {
t.Fatalf("stats.Start() with non-nil conn handler, stats.On() = false, want true")
}
stats.Stop()
stats.RegisterRPCHandler(func(ctx context.Context, s stats.RPCStats) {})
stats.RegisterConnHandler(func(ctx context.Context, s stats.ConnStats) {})
if stats.On() {
t.Fatalf("after stats.RegisterRPCHandler(), stats.On() = true, want false")
}
stats.Start()
if !stats.On() {
t.Fatalf("after stats.Start(_), stats.On() = false, want true")
}
stats.Stop()
if stats.On() {
t.Fatalf("after stats.Stop(), stats.On() = true, want false")
}
}
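Example 10 exercises the registration side of the same API. As a usage sketch, the program below registers a handler that tallies finished and failed RPCs from the *stats.End events emitted in Examples 12 and 13. The counter names and the program structure are made up for illustration, and the sketch again assumes the legacy RegisterRPCHandler/Start/Stop functions shown above rather than the stats.Handler interface used by current gRPC releases.
package main

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/net/context" // assumed: the context import these snippets were written against
	"google.golang.org/grpc/stats"
)

var rpcFinished, rpcFailed int64 // illustrative counters

func main() {
	// Register a handler first, then turn collection on, mirroring the order
	// the test above enforces (On() stays false until Start is called).
	stats.RegisterRPCHandler(func(ctx context.Context, s stats.RPCStats) {
		if end, ok := s.(*stats.End); ok { // emitted once per RPC, see Examples 12 and 13
			atomic.AddInt64(&rpcFinished, 1)
			if end.Error != nil {
				atomic.AddInt64(&rpcFailed, 1)
			}
		}
	})
	stats.Start()
	defer stats.Stop()

	fmt.Println("collecting:", stats.On()) // true from Start() until Stop()

	// ... run gRPC clients or servers here; the transports shown on this page
	// will now call stats.HandleRPC / stats.HandleConn for each event ...

	fmt.Printf("finished=%d failed=%d\n",
		atomic.LoadInt64(&rpcFinished), atomic.LoadInt64(&rpcFailed))
}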
Example 11: newHTTP2Server
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
framer := newFramer(conn)
// Send initial settings as connection preface to client.
var settings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.
maxStreams := config.MaxStreams
if maxStreams == 0 {
maxStreams = math.MaxUint32
} else {
settings = append(settings, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
Val: maxStreams,
})
}
if initialWindowSize != defaultWindowSize {
settings = append(settings, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(initialWindowSize)})
}
if err := framer.writeSettings(true, settings...); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
var buf bytes.Buffer
t := &http2Server{
ctx: context.Background(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
state: reachable,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
}
if stats.On() {
t.ctx = stats.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{}
stats.HandleConn(t.ctx, connBegin)
}
go t.controller()
t.writableChan <- 0
return t, nil
}
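Examples 2, 8, 11 and 15 show the connection-level counterpart: when stats are on, the transport tags its context once with stats.TagConn and later reports stats.ConnBegin and stats.ConnEnd through stats.HandleConn. Below is a condensed sketch of that lifecycle; the helper names are invented for illustration and the net.Conn plumbing is reduced to placeholders.
package example

import (
	"net"

	"golang.org/x/net/context" // assumed: the context import these snippets were written against
	"google.golang.org/grpc/stats"
)

// tagAndReportConn mirrors what newHTTP2Server (above) and newHTTP2Client
// (Example 15) do when a connection is established; the returned context is
// the one later handed to stats.HandleConn on shutdown.
func tagAndReportConn(ctx context.Context, conn net.Conn) context.Context {
	if !stats.On() {
		return ctx
	}
	ctx = stats.TagConn(ctx, &stats.ConnTagInfo{
		RemoteAddr: conn.RemoteAddr(),
		LocalAddr:  conn.LocalAddr(),
	})
	stats.HandleConn(ctx, &stats.ConnBegin{})
	return ctx
}

// reportConnClose is the matching shutdown call, as in Examples 2 and 8
// (Client is true on the client-side transport).
func reportConnClose(ctx context.Context, client bool) {
	if stats.On() {
		stats.HandleConn(ctx, &stats.ConnEnd{Client: client})
	}
}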
Example 12: invoke
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo
for _, o := range opts {
if err := o.before(&c); err != nil {
return toRPCErr(err)
}
}
defer func() {
for _, o := range opts {
o.after(&c)
}
}()
if EnableTracing {
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
defer c.traceInfo.tr.Finish()
c.traceInfo.firstLine.client = true
if deadline, ok := ctx.Deadline(); ok {
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
}
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
defer func() {
if e != nil {
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
c.traceInfo.tr.SetError()
}
}()
}
if stats.On() {
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
stats.Handle(ctx, begin)
}
defer func() {
if stats.On() {
end := &stats.End{
Client: true,
EndTime: time.Now(),
Error: e,
}
stats.Handle(ctx, end)
}
}()
topts := &transport.Options{
Last: true,
Delay: false,
}
for {
var (
err error
t transport.ClientTransport
stream *transport.Stream
// Record the put handler from Balancer.Get(...). It is called once the
// RPC has completed or failed.
put func()
)
// TODO(zhaoq): Need a formal spec of fail-fast.
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
}
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
}
t, put, err = cc.getTransport(ctx, gopts)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok {
return err
}
if err == errConnClosing || err == errConnUnavailable {
if c.failFast {
return Errorf(codes.Unavailable, "%v", err)
}
continue
}
// All the other errors are treated as Internal errors.
return Errorf(codes.Internal, "%v", err)
}
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
if err != nil {
if put != nil {
put()
put = nil
}
// Retry a non-failfast RPC when
// i) there is a connection error; or
// ii) the server started to drain before this RPC was initiated.
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
//......... (rest of this example omitted) .........
Example 13: processStreamingRPC
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
if stats.On() {
begin := &stats.Begin{
BeginTime: time.Now(),
}
stats.HandleRPC(stream.Context(), begin)
}
defer func() {
if stats.On() {
end := &stats.End{
EndTime: time.Now(),
}
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
stats.HandleRPC(stream.Context(), end)
}
}()
if s.opts.cp != nil {
stream.SetSendCompress(s.opts.cp.Type())
}
ss := &serverStream{
t: t,
s: stream,
p: &parser{r: stream},
codec: s.opts.codec,
cp: s.opts.cp,
dc: s.opts.dc,
maxMsgSize: s.opts.maxMsgSize,
trInfo: trInfo,
}
if ss.cp != nil {
ss.cbuf = new(bytes.Buffer)
}
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
defer func() {
ss.mu.Lock()
if err != nil && err != io.EOF {
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
ss.trInfo.tr.SetError()
}
ss.trInfo.tr.Finish()
ss.trInfo.tr = nil
ss.mu.Unlock()
}()
}
var appErr error
if s.opts.streamInt == nil {
appErr = sd.Handler(srv.server, ss)
} else {
info := &StreamServerInfo{
FullMethod: stream.Method(),
IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams,
}
appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler)
}
if appErr != nil {
if err, ok := appErr.(*rpcError); ok {
ss.statusCode = err.code
ss.statusDesc = err.desc
} else if err, ok := appErr.(transport.StreamError); ok {
ss.statusCode = err.Code
ss.statusDesc = err.Desc
} else {
ss.statusCode = convertCode(appErr)
ss.statusDesc = appErr.Error()
}
}
if trInfo != nil {
ss.mu.Lock()
if ss.statusCode != codes.OK {
ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
ss.trInfo.tr.SetError()
} else {
ss.trInfo.tr.LazyLog(stringer("OK"), false)
}
ss.mu.Unlock()
}
errWrite := t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
if ss.statusCode != codes.OK {
return Errorf(ss.statusCode, ss.statusDesc)
}
return errWrite
}
Example 14: operateHeaders
// operateHeaders takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
buf := newRecvBuffer()
s := &Stream{
id: frame.Header().StreamID,
st: t,
buf: buf,
fc: &inFlow{limit: initialWindowSize},
}
var state decodeState
for _, hf := range frame.Fields {
state.processHeaderField(hf)
}
if err := state.err; err != nil {
if se, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
}
return
}
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
s.recvCompress = state.encoding
if state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
} else {
s.ctx, s.cancel = context.WithCancel(t.ctx)
}
pr := &peer.Peer{
Addr: t.remoteAddr,
}
// Attach Auth info if there is any.
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
s.ctx = peer.NewContext(s.ctx, pr)
// Cache the current stream to the context so that the server application
// can find out. Required when the server wants to send some metadata
// back to the client (unary call only).
s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(state.mdata) > 0 {
s.ctx = metadata.NewContext(s.ctx, state.mdata)
}
s.dec = &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
}
s.recvCompress = state.encoding
s.method = state.method
if t.inTapHandle != nil {
var err error
info := &tap.Info{
FullMethodName: state.method,
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
// TODO: Log the real error.
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return
}
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
return
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return
}
if s.id%2 != 1 || s.id <= t.maxStreamID {
t.mu.Unlock()
// illegal gRPC stream id.
grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id)
return true
}
t.maxStreamID = s.id
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
t.activeStreams[s.id] = s
t.mu.Unlock()
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
}
s.ctx = traceCtx(s.ctx, s.method)
if stats.On() {
s.ctx = stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
FullMethod: s.method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
}
stats.HandleRPC(s.ctx, inHeader)
//......... (rest of this example omitted) .........
Example 15: newHTTP2Client
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. A non-nil error is returned if
// construction fails.
func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) {
scheme := "http"
conn, err := dial(ctx, opts.Dialer, addr.Addr)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err)
}
return nil, connectionErrorf(true, err, "transport: %v", err)
}
// Any further errors will close the underlying connection
defer func(conn net.Conn) {
if err != nil {
conn.Close()
}
}(conn)
var authInfo credentials.AuthInfo
if creds := opts.TransportCredentials; creds != nil {
scheme = "https"
conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
if err != nil {
// Credentials handshake errors are typically considered permanent
// to avoid retrying on e.g. bad certificates.
temp := isTemporary(err)
return nil, connectionErrorf(temp, err, "transport: %v", err)
}
}
ua := primaryUA
if opts.UserAgent != "" {
ua = opts.UserAgent + " " + ua
}
var buf bytes.Buffer
t := &http2Client{
ctx: ctx,
target: addr.Addr,
userAgent: ua,
md: addr.Metadata,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: authInfo,
// The client initiated stream id is odd starting from 1.
nextID: 1,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
errorChan: make(chan struct{}),
goAway: make(chan struct{}),
framer: newFramer(conn),
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
scheme: scheme,
state: reachable,
activeStreams: make(map[uint32]*Stream),
creds: opts.PerRPCCredentials,
maxStreams: math.MaxInt32,
streamSendQuota: defaultWindowSize,
}
// Start the reader goroutine for incoming message. Each transport has
// a dedicated goroutine which reads HTTP2 frame from network. Then it
// dispatches the frame to the corresponding stream entity.
go t.reader()
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
}
if n != len(clientPreface) {
t.Close()
return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
}
if initialWindowSize != defaultWindowSize {
err = t.framer.writeSettings(true, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(initialWindowSize),
})
} else {
err = t.framer.writeSettings(true)
}
if err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
go t.controller()
t.writableChan <- 0
if stats.On() {
t.ctx = stats.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
//......... (rest of this example omitted) .........