This article collects typical usage examples of the ClientConn type from the Golang package google.golang.org/grpc. If you are wondering what grpc.ClientConn is for, how to use it, or want to see it used in real code, the curated examples below should help.
A total of 15 ClientConn code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
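Before the examples, here is a minimal, hedged sketch of the common ClientConn lifecycle (dial, hand the connection to a generated client, close it when done). The address "localhost:50051" and the dial options are placeholders chosen for illustration and are not taken from the examples below.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// grpc.Dial returns a *grpc.ClientConn; the target address is a placeholder.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),             // no TLS, for illustration only
		grpc.WithTimeout(5*time.Second), // fail fast if the server is unreachable
		grpc.WithBlock(),                // wait until the connection is established
	)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer conn.Close() // always release the connection when done

	// Pass conn to a generated client constructor, e.g. pb.NewKVClient(conn),
	// as the examples below do.
	_ = conn
}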
Example 1: cleanup
func cleanup(ctx *testutils.RktRunCtx, svc *gexpect.ExpectSubprocess, conn *grpc.ClientConn, imagePath string) {
	t := new(testing.T) // Print no messages.
	os.Remove(imagePath)
	conn.Close()
	stopAPIService(t, svc)
	ctx.Cleanup()
}
Example 2: ExecuteFetchAsApp
// ExecuteFetchAsApp is part of the tmclient.TabletManagerClient interface.
func (client *Client) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, query []byte, maxRows int) (*querypb.QueryResult, error) {
	var c tabletmanagerservicepb.TabletManagerClient
	var err error
	if usePool {
		c, err = client.dialPool(tablet)
		if err != nil {
			return nil, err
		}
	} else {
		var cc *grpc.ClientConn
		cc, c, err = client.dial(tablet)
		if err != nil {
			return nil, err
		}
		defer cc.Close()
	}
	response, err := c.ExecuteFetchAsApp(ctx, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{
		Query:   query,
		MaxRows: uint64(maxRows),
	})
	if err != nil {
		return nil, err
	}
	return response.Result, nil
}
Example 3: benchmarkEchoGRPC
func benchmarkEchoGRPC(b *testing.B, size int) {
	var conn *grpc.ClientConn
	var client EchoClient
	benchmarkEcho(b, size, listenAndServeGRPC,
		func(addr net.Addr) {
			var err error
			conn, err = grpc.Dial(addr.String(), grpc.WithTransportCredentials(credentials.NewTLS(clientTLSConfig)))
			if err != nil {
				b.Fatal(err)
			}
			client = NewEchoClient(conn)
		},
		func() {
			if err := conn.Close(); err != nil {
				b.Fatal(err)
			}
		},
		func(echoMsg string) string {
			resp, err := client.Echo(context.Background(), &EchoRequest{Msg: echoMsg})
			if err != nil {
				b.Fatal(err)
			}
			return resp.Msg
		},
	)
}
Example 4: compactKV
func (c *cluster) compactKV(rev int64) error {
	var (
		conn *grpc.ClientConn
		err  error
	)
	if rev <= 0 {
		return nil
	}
	for i, u := range c.GRPCURLs {
		conn, err = grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err = kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if err != nil {
			if strings.Contains(err.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, err)
				err = nil // in case compact was requested more than once
			}
		}
	}
	return err
}
Example 5: ExecuteFetchAsDba
// ExecuteFetchAsDba is part of the tmclient.TabletManagerClient interface.
func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, query []byte, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) {
	var c tabletmanagerservicepb.TabletManagerClient
	var err error
	if usePool {
		c, err = client.dialPool(tablet)
		if err != nil {
			return nil, err
		}
	} else {
		var cc *grpc.ClientConn
		cc, c, err = client.dial(tablet)
		if err != nil {
			return nil, err
		}
		defer cc.Close()
	}
	response, err := c.ExecuteFetchAsDba(ctx, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
		Query:          query,
		DbName:         topoproto.TabletDbName(tablet),
		MaxRows:        uint64(maxRows),
		DisableBinlogs: disableBinlogs,
		ReloadSchema:   reloadSchema,
	})
	if err != nil {
		return nil, err
	}
	return response.Result, nil
}
Example 6: Put
// Put returns a connection to the pool, or closes and discards the connection
// in case the pool channel is at capacity.
func (p *pool) Put(conn *grpc.ClientConn) error {
	select {
	case p.conns <- conn:
		return nil
	default:
		return conn.Close()
	}
}
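For context, a pool like this usually pairs Put with a Get that reuses an idle connection or dials a new one. The sketch below is only an assumed counterpart: the pool fields (conns, addr, opts) and the dial-on-empty behavior are guesses for illustration, not part of the example above.

// Hypothetical Get counterpart to the Put example above; field names are assumptions.
func (p *pool) Get() (*grpc.ClientConn, error) {
	select {
	case conn := <-p.conns:
		// Reuse an idle connection from the pool channel.
		return conn, nil
	default:
		// Pool is empty: dial a fresh connection (p.addr and p.opts are assumed fields).
		return grpc.Dial(p.addr, p.opts...)
	}
}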
Example 7: removeConn
func (ctx *Context) removeConn(key string, conn *grpc.ClientConn) {
	if err := conn.Close(); err != nil && !grpcutil.IsClosedConnection(err) {
		if log.V(1) {
			log.Errorf("failed to close client connection: %s", err)
		}
	}
	delete(ctx.conns.cache, key)
}
Example 8: createConnection
func (c *commImpl) createConnection(endpoint string, expectedPKIID common.PKIidType) (*connection, error) {
	var err error
	var cc *grpc.ClientConn
	var stream proto.Gossip_GossipStreamClient
	var pkiID common.PKIidType
	c.logger.Debug("Entering", endpoint, expectedPKIID)
	defer c.logger.Debug("Exiting")
	if c.isStopping() {
		return nil, fmt.Errorf("Stopping")
	}
	cc, err = grpc.Dial(endpoint, append(c.opts, grpc.WithBlock())...)
	if err != nil {
		return nil, err
	}
	cl := proto.NewGossipClient(cc)
	if _, err = cl.Ping(context.Background(), &proto.Empty{}); err != nil {
		cc.Close()
		return nil, err
	}
	if stream, err = cl.GossipStream(context.Background()); err == nil {
		pkiID, err = c.authenticateRemotePeer(stream)
		if err == nil {
			// expectedPKIID is nil when the remote peer's PKI-ID is not yet known.
			if expectedPKIID != nil && !bytes.Equal(pkiID, expectedPKIID) {
				c.logger.Warning("Remote endpoint claims to be a different peer, expected", expectedPKIID, "but got", pkiID)
				cc.Close() // release the connection before rejecting the peer
				return nil, fmt.Errorf("Authentication failure")
			}
			conn := newConnection(cl, cc, stream, nil)
			conn.pkiID = pkiID
			conn.logger = c.logger
			h := func(m *proto.GossipMessage) {
				c.logger.Debug("Got message:", m)
				c.msgPublisher.DeMultiplex(&ReceivedMessageImpl{
					conn:          conn,
					lock:          conn,
					GossipMessage: m,
				})
			}
			conn.handler = h
			return conn, nil
		}
	}
	cc.Close()
	return nil, err
}
Example 9: TestConnection_Correct
func TestConnection_Correct(t *testing.T) {
	config.SetupTestConfig("./../../peer")
	viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
	var tmpConn *grpc.ClientConn
	var err error
	if TLSEnabled() {
		tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, true, InitTLSForPeer())
	}
	tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, false, nil)
	if err != nil {
		t.Fatalf("error connection to server at host:port = %s\n", viper.GetString("peer.address"))
	}
	tmpConn.Close()
}
Example 10: TestConnection_WrongAddress
func TestConnection_WrongAddress(t *testing.T) {
	config.SetupTestConfig("./../../peer")
	viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
	viper.Set("peer.address", "0.0.0.0:30304")
	var tmpConn *grpc.ClientConn
	var err error
	if TLSEnabled() {
		tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, true, InitTLSForPeer())
	}
	tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, false, nil)
	if err == nil {
		fmt.Printf("error connection to server - at host:port = %s\n", viper.GetString("peer.address"))
		t.Error("error connection to server - connection should fail")
		tmpConn.Close()
	}
}
Example 11: compactKV
func (c *cluster) compactKV(rev int64) error {
	var (
		conn *grpc.ClientConn
		err  error
	)
	for _, u := range c.GRPCURLs {
		conn, err = grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		_, err = kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev})
		cancel()
		conn.Close()
		if err == nil {
			return nil
		}
	}
	return err
}
Example 12: retryConnection
// retryConnection establishes a new connection
func (c *Client) retryConnection(oldConn *grpc.ClientConn, err error) (*grpc.ClientConn, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if err != nil {
		c.errors = append(c.errors, err)
	}
	if c.cancel == nil {
		return nil, c.ctx.Err()
	}
	if oldConn != c.conn {
		// conn has already been updated
		return c.conn, nil
	}
	oldConn.Close()
	if st, _ := oldConn.State(); st != grpc.Shutdown {
		// wait for shutdown so grpc doesn't leak sleeping goroutines
		oldConn.WaitForStateChange(c.ctx, st)
	}
	conn, dialErr := c.cfg.RetryDialer(c)
	if dialErr != nil {
		c.errors = append(c.errors, dialErr)
		return nil, dialErr
	}
	c.conn = conn
	return c.conn, nil
}
Example 13: invokeOnWithTimeout
func (b *BDDContext) invokeOnWithTimeout(composeServices []string, timeoutInSecs string, callBack func(context.Context, pb.EndorserClient) (proposalResponse *pb.ProposalResponse, err error)) (map[string]*pb.ProposalResponse, error) {
	var err error
	resultsMap := make(map[string]*pb.ProposalResponse)
	errRetFunc := func() error {
		return fmt.Errorf("Error when invoking endorser(s) on '%s': %s", composeServices, err)
	}
	var (
		durationToWait time.Duration
		ctx            context.Context
		cancel         context.CancelFunc
	)
	if durationToWait, err = time.ParseDuration(fmt.Sprintf("%ss", timeoutInSecs)); err != nil {
		return nil, errRetFunc()
	}
	ctx, cancel = context.WithTimeout(context.Background(), durationToWait)
	defer cancel()
	for _, cs := range composeServices {
		go func(composeService string) {
			var proposalResponse *pb.ProposalResponse
			// Now use the endorser client to create and send the proposal
			println("Calling endorser for compose service:", composeService)
			var grpcClient *grpc.ClientConn
			if grpcClient, err = NewGrpcClient("172.17.0.4:7051"); err != nil {
				return
			}
			defer grpcClient.Close()
			endorserClient := pb.NewEndorserClient(grpcClient)
			if proposalResponse, err = callBack(ctx, endorserClient); err != nil {
				return
			}
			resultsMap[composeService] = proposalResponse
		}(cs)
	}
	return resultsMap, err
}
Example 14: userSendsProposalToEndorsersWithTimeoutOfSeconds
func (b *BDDContext) userSendsProposalToEndorsersWithTimeoutOfSeconds(enrollID, proposalAlias, timeoutInSecs string, endorsersTable *gherkin.DataTable) (err error) {
	var proposal *pb.Proposal
	var keyedProposalResponsesMap KeyedProposalResponseMap
	keyedProposalResponsesMap = make(KeyedProposalResponseMap)
	errRetFunc := func() error {
		return fmt.Errorf("Error sending proposal '%s' for user '%s': %s", proposalAlias, enrollID, err)
	}
	var userRegistration *UserRegistration
	if userRegistration, err = b.GetUserRegistration(enrollID); err != nil {
		return errRetFunc()
	}
	// Get the proposal from the user
	if proposal, err = userRegistration.GetProposal(proposalAlias); err != nil {
		return errRetFunc()
	}
	var ctx context.Context
	var cancel context.CancelFunc
	if ctx, cancel, err = getContextAndCancelForTimeoutInSecs(context.Background(), timeoutInSecs); err != nil {
		return errRetFunc()
	}
	defer cancel()
	// Loop through endorsers and send proposals
	var endorsers []string
	if endorsers, err = b.GetArgsForUser(endorsersTable.Rows[0].Cells, userRegistration); err != nil {
		return errRetFunc()
	}
	respQueue := make(chan *KeyedProposalResponse)
	for _, e := range endorsers {
		go func(endorser string) {
			var localErr error
			var proposalResponse *pb.ProposalResponse
			// Now use the endorser client to send the proposal
			var grpcClient *grpc.ClientConn
			if grpcClient, localErr = b.getGrpcClientForComposeService(endorser); localErr != nil {
				respQueue <- &KeyedProposalResponse{endorser, nil, fmt.Errorf("Error calling endorser '%s': %s", endorser, localErr)}
				return
			}
			defer grpcClient.Close()
			proposalBytes, err := utils.GetBytesProposal(proposal)
			if err != nil {
				respQueue <- &KeyedProposalResponse{endorser, nil, fmt.Errorf("Error serializing proposal bytes")}
				return
			}
			// FIXME: the endorser needs to be given a signed proposal - who should sign?
			signedProposal := &pb.SignedProposal{ProposalBytes: proposalBytes, Signature: []byte("signature")}
			endorserClient := pb.NewEndorserClient(grpcClient)
			if proposalResponse, localErr = endorserClient.ProcessProposal(ctx, signedProposal); localErr != nil {
				respQueue <- &KeyedProposalResponse{endorser, nil, fmt.Errorf("Error calling endorser '%s': %s", endorser, localErr)}
				return
			}
			respQueue <- &KeyedProposalResponse{endorser, proposalResponse, nil}
		}(e)
	}
	go func() {
		for i := 0; i < len(endorsers); i++ {
			result := <-respQueue
			keyedProposalResponsesMap[result.endorser] = result
			if result.err != nil {
				// TODO: think about whether to break on first failure, or allow the remaining responses to be collected
			}
		}
		cancel()
	}()
	<-ctx.Done()
	if ctx.Err() != context.Canceled {
		err = ctx.Err()
		return errRetFunc()
	}
	userRegistration.lastResult = keyedProposalResponsesMap
	return nil
}
Example 15: Start
func (b *balancer) Start(target string, config grpc.BalancerConfig) error {
	// TODO: Fall back to the basic direct connection if there is no name resolver.
	if b.r == nil {
		return errors.New("there is no name resolver installed")
	}
	b.mu.Lock()
	if b.done {
		b.mu.Unlock()
		return grpc.ErrClientConnClosing
	}
	b.addrCh = make(chan []grpc.Address)
	w, err := b.r.Resolve(target)
	if err != nil {
		b.mu.Unlock()
		return err
	}
	b.w = w
	b.mu.Unlock()
	balancerAddrCh := make(chan remoteBalancerInfo, 1)
	// Spawn a goroutine to monitor the name resolution of remote load balancer.
	go func() {
		for {
			if err := b.watchAddrUpdates(w, balancerAddrCh); err != nil {
				grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
				close(balancerAddrCh)
				return
			}
		}
	}()
	// Spawn a goroutine to talk to the remote load balancer.
	go func() {
		var cc *grpc.ClientConn
		for {
			rb, ok := <-balancerAddrCh
			if cc != nil {
				cc.Close()
			}
			if !ok {
				// b is closing.
				return
			}
			// Talk to the remote load balancer to get the server list.
			var err error
			creds := config.DialCreds
			if creds == nil {
				cc, err = grpc.Dial(rb.addr, grpc.WithInsecure())
			} else {
				if rb.name != "" {
					if err := creds.OverrideServerName(rb.name); err != nil {
						grpclog.Printf("Failed to override the server name in the credentials: %v", err)
						continue
					}
				}
				cc, err = grpc.Dial(rb.addr, grpc.WithTransportCredentials(creds))
			}
			if err != nil {
				grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
				return
			}
			b.mu.Lock()
			b.seq++ // tick when getting a new balancer address
			seq := b.seq
			b.next = 0
			b.mu.Unlock()
			go func(cc *grpc.ClientConn) {
				lbc := lbpb.NewLoadBalancerClient(cc)
				for {
					if retry := b.callRemoteBalancer(lbc, seq); !retry {
						cc.Close()
						return
					}
				}
			}(cc)
		}
	}()
	return nil
}