This article collects typical usage examples of the DialHTTP function from Go's net/rpc package: what the function does, how to call it, and what real call sites look like. The 15 code examples below are drawn from real projects and are ordered roughly by popularity.
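Before the collected examples, here is a minimal end-to-end sketch for orientation. rpc.DialHTTP only works against a server that exposed its RPC handler over HTTP via rpc.HandleHTTP (Example 15 below does exactly that on the server side). The Echo service, its method, and the port are illustrative placeholders, not taken from any of the quoted projects:

package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/rpc"
)

// Echo is a placeholder service used only in this sketch.
type Echo struct{}

// Repeat copies its argument into the reply.
func (e *Echo) Repeat(args string, reply *string) error {
	*reply = args
	return nil
}

func main() {
	// Server side: register the service and serve net/rpc over HTTP.
	rpc.Register(new(Echo))
	rpc.HandleHTTP()
	l, err := net.Listen("tcp", "localhost:4321")
	if err != nil {
		log.Fatal(err)
	}
	go http.Serve(l, nil)

	// Client side: DialHTTP performs an HTTP CONNECT handshake, then speaks
	// the plain net/rpc wire protocol over the hijacked connection.
	client, err := rpc.DialHTTP("tcp", "localhost:4321")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var reply string
	if err := client.Call("Echo.Repeat", "hello", &reply); err != nil {
		log.Fatal(err)
	}
	fmt.Println(reply) // prints "hello"
}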
Example 1: ReconnectToLB
func (pc *pacClient) ReconnectToLB() error {
fmt.Println("reconnect called")
args := &loadbalancerrpc.RouteArgs{Attempt: loadbalancerrpc.RETRY, HostPort: pc.serverHostPort}
reply := new(loadbalancerrpc.RouteReply)
pc.loadBalancer.Call("LoadBalancer.RouteToServer", args, reply)
if reply.Status == loadbalancerrpc.MOSTFAIL {
fmt.Println("SERVER ALL FAILED")
return errors.New("reconnect fail, most servers dead")
}
serverConn, err := rpc.DialHTTP("tcp", reply.HostPort)
for err != nil {
fmt.Println("trying to get new server")
args := &loadbalancerrpc.RouteArgs{Attempt: loadbalancerrpc.RETRY, HostPort: reply.HostPort}
pc.loadBalancer.Call("LoadBalancer.RouteToServer", args, reply)
if reply.Status == loadbalancerrpc.MOSTFAIL {
fmt.Println("SERVER ALL FAILED")
return errors.New("reconnect fail, most servers dead")
}
serverConn, err = rpc.DialHTTP("tcp", reply.HostPort)
}
pc.serverHostPort = reply.HostPort
pc.serverConn = serverConn
return nil
}
Example 2: AcceptByNode
func (pn *paxosNode) AcceptByNode(nodeID int, hostPort string, args *paxosrpc.ProposeArgs, retChan chan int) {
pn.nodeMutex.Lock()
client, ok := pn.nodeClientMap[nodeID]
var err error
if !ok {
client, err = rpc.DialHTTP("tcp", pn.hostMap[nodeID])
for i := 0; i < 5 && err != nil; i++ {
client, err = rpc.DialHTTP("tcp", pn.hostMap[nodeID])
}
if err == nil {
pn.nodeClientMap[nodeID] = client
} else {
pn.nodeMutex.Unlock()
retChan <- 0
return
}
}
pn.nodeMutex.Unlock()
acceptArgs := paxosrpc.AcceptArgs{
Key: args.Key,
N: args.N,
V: args.V,
}
acceptReply := paxosrpc.AcceptReply{}
acceptcall := client.Go("PaxosNode.RecvAccept", acceptArgs, &acceptReply, nil)
select {
case <-acceptcall.Done:
if acceptcall.Error != nil {
retChan <- 0
return
}
switch acceptReply.Status {
case paxosrpc.OK:
// fmt.Println("accept by node ok accept", nodeID)
retChan <- 1
case paxosrpc.Reject:
// fmt.Println("accept by node reject accept", nodeID)
retChan <- 0
}
case <-time.After(10 * time.Millisecond):
// fmt.Println("accept by node timeout", nodeID)
retChan <- 0
return
}
}
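The select on acceptcall.Done above is the standard net/rpc idiom for putting a timeout on a call. Distilled into a standalone helper (the method name is a caller-supplied placeholder; imports of errors, net/rpc, and time are assumed):

// callWithTimeout issues an asynchronous call via client.Go and gives up after
// the given duration. net/rpc has no cancellation, so a timed-out call may
// still complete later and write into reply; reply must therefore stay valid.
func callWithTimeout(client *rpc.Client, method string, args, reply interface{}, timeout time.Duration) error {
	call := client.Go(method, args, reply, nil)
	select {
	case <-call.Done:
		return call.Error
	case <-time.After(timeout):
		return errors.New("rpc call timed out: " + method)
	}
}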
Example 3: dialDaemon
// Connect to the daemon for RPC communication.
// Starts the daemon if it is not yet running.
func dialDaemon() *rpc.Client {
// try to call the daemon
client, err := rpc.DialHTTP("tcp", "localhost"+Port)
// if the daemon does not seem to be running, start it.
if SpawnDaemon {
const SLEEP = 10 * time.Millisecond
if err != nil {
forkDaemon()
time.Sleep(SLEEP)
}
// try again to call the daemon,
// giving it some time to come up.
trials := 0
for err != nil && trials < 10 {
client, err = rpc.DialHTTP("tcp", "localhost"+Port)
time.Sleep(SLEEP)
trials++
}
}
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
return client
}
Example 4: Connect
// Connect dials the RPC server: an address beginning with '/' is treated as a Unix socket path, anything else as a TCP host:port.
func (self *Client) Connect() (err error) {
if len(self.socket) > 0 && self.socket[0] != '/' {
self.Client, err = rpc.DialHTTP("tcp", self.socket)
} else {
self.Client, err = rpc.DialHTTP("unix", self.socket)
}
return
}
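The '/' prefix test is the whole dispatch: rpc.DialHTTP accepts any network name that net.Dial does, so one code path can serve both TCP and Unix-socket deployments. A standalone sketch of the same branch, with made-up addresses in the comments:

// dialEither mirrors Connect above; the addresses are illustrative only.
func dialEither(addr string) (*rpc.Client, error) {
	if len(addr) > 0 && addr[0] != '/' {
		return rpc.DialHTTP("tcp", addr) // e.g. "127.0.0.1:8080"
	}
	return rpc.DialHTTP("unix", addr) // e.g. "/var/run/app.sock"
}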
Example 5: connect
func connect(addr string) *rpc.Client {
client, err := rpc.DialHTTP("tcp", addr)
for err != nil {
logrus.Error(err.Error())
time.Sleep(1 * time.Second)
client, err = rpc.DialHTTP("tcp", addr)
}
return client
}
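A loop like connect blocks forever if the address never becomes reachable. A common variation, sketched here rather than taken from the original project, bounds the attempts and surfaces the last error:

// connectWithRetry tries at most attempts times, one second apart,
// and returns the last dial error instead of retrying indefinitely.
func connectWithRetry(addr string, attempts int) (*rpc.Client, error) {
	var err error
	for i := 0; i < attempts; i++ {
		var client *rpc.Client
		client, err = rpc.DialHTTP("tcp", addr)
		if err == nil {
			return client, nil
		}
		time.Sleep(time.Second)
	}
	return nil, err
}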
Example 6: NewPacClient
func NewPacClient(loadHostPort string, port int, ID string) (PacClient, error) {
pac := new(pacClient)
pac.loadHostPort = loadHostPort
pac.ID = ID
pac.logs = make(map[int]string)
cli, err := rpc.DialHTTP("tcp", loadHostPort)
if err != nil {
return nil, err
}
pac.loadBalancer = cli
args := &loadbalancerrpc.RouteArgs{Attempt: loadbalancerrpc.INIT, HostPort: ""}
var reply loadbalancerrpc.RouteReply
cli.Call("LoadBalancer.RouteToServer", args, &reply)
for reply.Status == loadbalancerrpc.NotReady {
fmt.Println("retrying to connect")
time.Sleep(1000 * time.Millisecond)
err = cli.Call("LoadBalancer.RouteToServer", args, &reply)
}
if reply.Status == loadbalancerrpc.MOSTFAIL {
return nil, errors.New("most servers failed")
}
//connect to server
cli2, err := rpc.DialHTTP("tcp", reply.HostPort)
pac.serverConn = cli2
pac.serverHostPort = reply.HostPort
if err != nil {
err1 := pac.ReconnectToLB()
if err1 != nil {
fmt.Println("SERVER ALL FAILED")
return nil, errors.New("reconnect fail, most servers dead")
}
}
fmt.Println("Server connected", reply.HostPort)
pac.GetLogs()
go pac.RefreshTimer()
return pac, nil
}
Example 7: NewLoadBalancer
func NewLoadBalancer(port int, monitorhostport string) *LoadBalancer {
lb := new(LoadBalancer)
lsplog.Vlogf(1, "[NewLoadBalancer] Dialing master... %v", monitorhostport)
// connection to the monitor
// server to communicate with workers
lb.server, _ = lsp12.NewLspServer(port+10, &lsp12.LspParams{5, 2000})
lb.monitor, _ = rpc.DialHTTP("tcp", monitorhostport)
name, _ := os.Hostname()
addrs, _ := net.LookupHost(name)
lb.myAddress = addrs[0]
args := &commproto.RegisterLoadBalancerArgs{fmt.Sprintf("%s:%v", addrs[0], port)}
var reply commproto.RegisterLoadBalancerReply
lb.monitor.Call("MonitorRPC.RegisterLoadBalancer", args, &reply)
lsplog.Vlogf(1, "[NewLoadBalancer] Completed registration %s %+v", reply.Buddy, reply.LoadBalancersHostPort)
lb.buddy = reply.Buddy
lb.loadBalancers = reply.LoadBalancersHostPort
// get workers
lb.workers = reply.Workers
lb.numberOfWorkers = len(lb.workers)
lsplog.Vlogf(2, "[LoadBalancer] Establishing conn to workers")
// establish RPC connections with the workers
lb.workersRPC = make(map[string]*rpc.Client)
for i := 0; i < len(lb.workers); i++ {
lb.workersRPC[lb.workers[i]], _ = rpc.DialHTTP("tcp", lb.workers[i])
}
lsplog.Vlogf(2, "[LoadBalancer] Establishing conn to LB")
// map for RPC connections to the other load balancers (not dialed yet)
lb.LbsRPC = make(map[string]*rpc.Client)
lsplog.Vlogf(2, "[LoadBalancer] Established connections")
lb.clientDict = make(map[uint16]*commproto.ClientS)
lb.replicatedInformation = make(map[string]int)
// connection to the switch
addr, errResolve := lspnet.ResolveUDPAddr("udp", fmt.Sprintf(":%d", port))
if errResolve != nil {
return nil
}
connexion, errDial := lspnet.ListenUDP("udp", addr)
if errDial != nil {
return nil
}
lb.connSwitch = connexion
lsplog.Vlogf(1, "[LoadBalancer] Received buddy: %s loadBalancers: %s", reply.Buddy, lb.loadBalancers)
go lb.runLoadBalancer()
go lb.buddyHeartbeat()
return lb
}
Example 8: getConnection
func (ss *Storageserver) getConnection(clientCallback string) *rpc.Client {
ss.callbackLocker.Lock()
defer ss.callbackLocker.Unlock()
cli := ss.callbackConnections[clientCallback]
var e error
if cli == nil {
cli, e = rpc.DialHTTP("tcp", clientCallback)
for e != nil {
cli, e = rpc.DialHTTP("tcp", clientCallback)
}
ss.callbackConnections[clientCallback] = cli
}
return cli
}
Example 9: findConnection
func (ls *libstore) findConnection(serverHostPort string) *rpc.Client {
client, exist := ls.connectionMap[serverHostPort]
if exist {
return client
}
client, err := rpc.DialHTTP("tcp", serverHostPort)
for err != nil {
client, err = rpc.DialHTTP("tcp", serverHostPort)
}
ls.connectionMap[serverHostPort] = client
return client
}
Example 10: JoinGroup
//Join the group by finding the successor and getting all the required data from it
func (self *Ring) JoinGroup(address string) (err error) {
client, err := rpc.DialHTTP("tcp", address)
if err != nil {
log.Fatal("dialing:", err)
}
//Get Successor
hostPort := net.JoinHostPort(self.Address, self.Port)
hashedKey := data.Hasher(hostPort + time.Now().String()) // TODO this is a hack
successor := self.callForSuccessor(hashedKey, address)
argi := data.NewLocationStore(hashedKey, hostPort)
client, err = rpc.DialHTTP("tcp", successor.Address)
if err != nil {
log.Fatal("dialing:", err)
}
fmt.Println(successor)
//Get the smallest key less than this key and initiate the data transfer
var data_t []*data.DataStore
err = client.Call("Ring.GetEntryData", argi, &data_t)
//TODO: Iterate through the array and add items like below, except all at once. Straightforward.
length := len(data_t)
for i := 0; i < length; i++ {
//Insert Key into my table
self.KeyValTable.Insert(*(data_t[i]))
//Insert Value of Key as my Id
newMember := data.NewGroupMember(data_t[i].Key, hostPort, 0, Joining)
self.updateMember(newMember)
//Start Gossiping
if self.isGossiping == false {
go self.Gossip()
}
}
if self.isGossiping == false {
go self.Gossip()
fmt.Println("Am i done")
}
//Make hashed key my id
finalMember := data.NewGroupMember(hashedKey, hostPort, 0, Stable)
self.updateMember(finalMember)
return
}
Example 11: NewLibstore
// NewLibstore creates a new instance of a TribServer's libstore. masterServerHostPort
// is the master storage server's host:port. myHostPort is this Libstore's host:port
// (i.e. the callback address that the storage servers should use to send back
// notifications when leases are revoked).
//
// The mode argument is a debugging flag that determines how the Libstore should
// request/handle leases. If mode is Never, then the Libstore should never request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to false). If mode is Always, then the Libstore should always request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to true). If mode is Normal, then the Libstore should make its own
// decisions on whether or not a lease should be requested from the storage server,
// based on the requirements specified in the project PDF handout. Note that the
// value of the mode flag may also determine whether or not the Libstore should
// register to receive RPCs from the storage servers.
//
// To register the Libstore to receive RPCs from the storage servers, the following
// line of code should suffice:
//
// rpc.RegisterName("LeaseCallbacks", librpc.Wrap(libstore))
//
// Note that unlike in the NewTribServer and NewStorageServer functions, there is no
// need to create a brand new HTTP handler to serve the requests (the Libstore may
// simply reuse the TribServer's HTTP handler since the two run in the same process).
func NewLibstore(masterServerHostPort, myHostPort string, mode LeaseMode) (Libstore, error) {
master_server, err := rpc.DialHTTP("tcp", masterServerHostPort)
if err != nil {
return nil, errors.New("Cannot connect to the master server")
}
// Call GetServers to get storage servers' information
var args storagerpc.GetServersArgs
var reply storagerpc.GetServersReply
master_server.Call("StorageServer.GetServers", args, &reply)
if reply.Status == storagerpc.NotReady {
for i := 0; i < 5; i++ {
time.Sleep(1 * time.Second)
master_server.Call("StorageServer.GetServers", args, &reply)
if reply.Status == storagerpc.OK {
break
}
}
}
master_server.Close()
if reply.Status == storagerpc.NotReady {
return nil, errors.New("Storage Server is not ready yet")
}
// Register RPC connection for each storage server
ls := &libstore{}
// Sort the servers by NodeID
sort.Sort(SortNodeByNodeID(reply.Servers))
ls.servers = reply.Servers
ls.rpc_connection = make([]*rpc.Client, len(ls.servers))
ls.host_port = myHostPort
ls.lease_mode = mode
ls.query_record = make(map[string]*list.List)
ls.value_cache = make(map[string]*ValueCacheElement)
ls.list_cache = make(map[string]*ListCacheElement)
ls.query_record_locker = new(sync.Mutex)
ls.value_cache_locker = new(sync.Mutex)
ls.list_cache_locker = new(sync.Mutex)
go ls.CacheCleaner()
err = rpc.RegisterName("LeaseCallbacks", librpc.Wrap(ls))
if err != nil {
return nil, errors.New("Could not register Libstore")
}
for i, server := range ls.servers {
ls.rpc_connection[i], _ = rpc.DialHTTP("tcp", server.HostPort)
}
return ls, nil
}
Example 12: NewLibstore
// NewLibstore creates a new instance of a TribServer's libstore. masterServerHostPort
// is the master storage server's host:port. myHostPort is this Libstore's host:port
// (i.e. the callback address that the storage servers should use to send back
// notifications when leases are revoked).
//
// The mode argument is a debugging flag that determines how the Libstore should
// request/handle leases. If mode is Never, then the Libstore should never request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to false). If mode is Always, then the Libstore should always request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to true). If mode is Normal, then the Libstore should make its own
// decisions on whether or not a lease should be requested from the storage server,
// based on the requirements specified in the project PDF handout. Note that the
// value of the mode flag may also determine whether or not the Libstore should
// register to receive RPCs from the storage servers.
//
// To register the Libstore to receive RPCs from the storage servers, the following
// line of code should suffice:
//
// rpc.RegisterName("LeaseCallbacks", librpc.Wrap(libstore))
//
// Note that unlike in the NewTribServer and NewStorageServer functions, there is no
// need to create a brand new HTTP handler to serve the requests (the Libstore may
// simply reuse the TribServer's HTTP handler since the two run in the same process).
func NewLibstore(masterServerHostPort, myHostPort string, mode LeaseMode) (Libstore, error) {
ls := &libstore{
myHostPort: myHostPort,
mode: mode,
storageServers: make(map[uint32]*storagerpc.Node),
cache: newCache(),
storageRPCHandler: make(map[uint32]*rpc.Client),
accessInfoHub: newAccessInfoHub(),
}
// connect to the master server and get the server list
master, err := rpc.DialHTTP("tcp", masterServerHostPort)
if err != nil {
return nil, err
}
var args storagerpc.GetServersArgs
var reply storagerpc.GetServersReply
ok := false
for i := 0; i < maximumTrials; i++ {
err = master.Call("StorageServer.GetServers", &args, &reply)
if reply.Status == storagerpc.OK {
ok = true
break
}
time.Sleep(time.Second)
}
if !ok {
return nil, errors.New("Cannot get servers after " + strconv.Itoa(maximumTrials) + " trials")
}
// adding the server list
for i := range reply.Servers {
s := reply.Servers[i] // copy, so each map entry gets its own Node rather than aliasing the loop variable
ls.storageServers[s.NodeID] = &s
ls.storageRPCHandler[s.NodeID], err = rpc.DialHTTP("tcp", s.HostPort)
if err != nil {
return nil, err
}
}
// register the callback
rpc.RegisterName("LeaseCallbacks", librpc.Wrap(ls))
go ls.gc()
return ls, nil
}
Example 13: SendStore
func SendStore(k *Kademlia, key ID, value []byte, nodeID ID) error {
c, ok := LookupContact(k, nodeID)
if !ok {
return errors.New("node not found")
}
address := c.Address()
client, err := rpc.DialHTTP("tcp", address)
if err != nil {
k.removeContact(c.NodeID)
return nil
}
defer client.Close() // also runs on the early error return below
msgID := NewRandomID()
req := StoreRequest{k.Self, msgID, key, value}
var res StoreResult
err = client.Call("Kademlia.Store", req, &res)
if err != nil {
return err
}
return res.Err
}
Example 14: NewLBClient
// NewLBClient creates a new LBClient.
func NewLBClient(addr string) (s *LBClient, err error) {
s = new(LBClient)
s.addr = addr
rpcClient, err := rpc.DialHTTP("tcp", s.addr)
s.rpcClient = rpcClient
return s, err
}
Example 15: NewStorageServer
// NewStorageServer creates and starts a new StorageServer. masterServerHostPort
// is the master storage server's host:port address. If empty, then this server
// is the master; otherwise, this server is a slave. numNodes is the total number of
// servers in the ring. port is the port number that this server should listen on.
// nodeID is a random, unsigned 32-bit ID identifying this server.
//
// This function should return only once all storage servers have joined the ring,
// and should return a non-nil error if the storage server could not be started.
func NewStorageServer(masterServerHostPort string, numNodes, port int, nodeID uint32) (StorageServer, error) {
// Set up this server's info
serverInfo := storagerpc.Node{HostPort: fmt.Sprintf("localhost:%d", port), NodeID: nodeID}
var ss storageServer
if masterServerHostPort == "" {
// If this is the master server, set up a list of servers
var servers = make([]storagerpc.Node, numNodes)
servers[0] = serverInfo
// Create the master server
ss = storageServer{topMap: make(map[string]interface{}), nodeID: nodeID,
servers: servers, count: 1, countLock: sync.Mutex{}, keyLocks: make(map[string]chan int)}
} else {
// Try to connect to the master at most five times
args := storagerpc.RegisterArgs{ServerInfo: serverInfo}
var reply storagerpc.RegisterReply
var err error
var master *rpc.Client
for try := 1; try <= 5; try++ {
master, err = rpc.DialHTTP("tcp", masterServerHostPort)
if err == nil {
break
}
if try == 5 {
return nil, err
}
time.Sleep(time.Millisecond * 20)
}
for i := 1; i <= 5; i++ {
master.Call("StorageServer.RegisterServer", args, &reply)
if reply.Status == storagerpc.OK {
// All servers are connected, create this slave server
ss = storageServer{topMap: make(map[string]interface{}), nodeID: nodeID,
servers: reply.Servers, count: numNodes, countLock: sync.Mutex{}, keyLocks: make(map[string]chan int)}
break
}
// Wait briefly, then try registering with the master again
if i == 5 {
return nil, errors.New("couldn't connect to master")
}
time.Sleep(time.Millisecond * 20)
}
}
// Start listening for connections from other storageServers and libstores
rpc.RegisterName("StorageServer", &ss)
rpc.HandleHTTP()
l, e := net.Listen("tcp", serverInfo.HostPort)
if e != nil {
return nil, errors.New("Storage server couldn't start listening")
}
go http.Serve(l, nil)
return &ss, nil
}