This article collects typical usage examples of the Host type from github.com/flynn/flynn/pkg/cluster in Go. If you are wondering what the Host type does, or how to use it, the examples selected here may help.
The following presents 15 code examples of the Host type, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Go code examples.
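Before the examples themselves, here is a minimal sketch of one way to obtain a *cluster.Host client in the first place, following the pattern used in Example 3 below (watching the "flynn-host" service in discoverd and calling cluster.NewHost). The helper name firstHostClient and the exact import spelling are illustrative assumptions, not part of the collected examples:

package example // illustrative only

import (
	"fmt"

	discoverd "github.com/flynn/flynn/discoverd/client"
	"github.com/flynn/flynn/pkg/cluster"
)

// firstHostClient waits for the first "flynn-host" instance to appear in
// discoverd and returns a client for it, mirroring how Example 3 below
// constructs a *cluster.Host with cluster.NewHost.
func firstHostClient(dc *discoverd.Client) (*cluster.Host, error) {
	events := make(chan *discoverd.Event)
	stream, err := dc.Service("flynn-host").Watch(events)
	if err != nil {
		return nil, err
	}
	defer stream.Close()
	for e := range events {
		if e.Kind == discoverd.EventKindUp {
			// the host ID and address come from the service metadata, as in Example 3
			return cluster.NewHost(e.Instance.Meta["id"], e.Instance.Addr, nil, nil), nil
		}
	}
	return nil, fmt.Errorf("flynn-host service stream closed before any instance came up")
}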
Example 1: makeIshApp
/*
Make an 'ish' application on the given host, returning it when
it has registered readiness with discoverd.
The caller will want to defer cmd.Kill() to clean up.
*/
func makeIshApp(cluster *cluster.Client, h *cluster.Host, dc *discoverd.Client, extraConfig host.ContainerConfig) (*exec.Cmd, *discoverd.Instance, error) {
	// pick a unique string to use as the service name so this works with concurrent tests.
	serviceName := "ish-service-" + random.String(6)
	// run a job that accepts tcp connections and performs tasks we ask of it in its container
	cmd := exec.JobUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), &host.Job{
		Config: host.ContainerConfig{
			Args:  []string{"/bin/ish"},
			Ports: []host.Port{{Proto: "tcp"}},
			Env: map[string]string{
				"NAME": serviceName,
			},
		}.Merge(extraConfig),
	})
	cmd.HostID = h.ID()
	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}
	// wait for the job to heartbeat and return its address
	services, err := dc.Instances(serviceName, time.Second*100)
	if err != nil {
		cmd.Kill()
		return nil, nil, err
	}
	if len(services) != 1 {
		cmd.Kill()
		return nil, nil, fmt.Errorf("test setup: expected exactly one service instance, got %d", len(services))
	}
	return cmd, services[0], nil
}
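For a concrete caller of this helper, see Example 13 below; a trimmed sketch of that call pattern looks like this (clus, h0, s, t, and runIshCommand all come from that test suite, not from this function):

// start an ish app on host h0, run a command in it, and rely on the deferred Kill for cleanup
cmd, service, err := makeIshApp(clus, h0, s.discoverdClient(t), host.ContainerConfig{})
t.Assert(err, c.IsNil)
defer cmd.Kill()
resp, err := runIshCommand(service, "echo hello")
t.Assert(err, c.IsNil)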
Example 2: runPs
func runPs(args *docopt.Args, client cluster.Host) error {
	all, err := client.ListJobs()
	if err != nil {
		return fmt.Errorf("could not get local jobs: %s", err)
	}
	jobs := make(sortJobs, 0, len(all))
	for _, job := range all {
		if !args.Bool["-a"] && !args.Bool["--all"] && job.Status != host.StatusStarting && job.Status != host.StatusRunning {
			continue
		}
		jobs = append(jobs, job)
	}
	sort.Sort(sort.Reverse(jobs))
	if args.Bool["-q"] || args.Bool["--quiet"] {
		for _, job := range jobs {
			fmt.Println(job.Job.ID)
		}
		return nil
	}
	w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0)
	defer w.Flush()
	fmt.Fprintln(w, "JOB ID\tSTATE\tSTARTED\tCONTROLLER APP\tCONTROLLER TYPE")
	for _, job := range jobs {
		fmt.Fprintf(w, "%s\t%s\t%s ago\t%s\t%s\n", job.Job.ID, job.Status, units.HumanDuration(time.Now().UTC().Sub(job.StartedAt)), job.Job.Metadata["flynn-controller.app_name"], job.Job.Metadata["flynn-controller.type"])
	}
	return nil
}
Example 3: TestUpdateTags
func (s *HostSuite) TestUpdateTags(t *c.C) {
	events := make(chan *discoverd.Event)
	stream, err := s.discoverdClient(t).Service("flynn-host").Watch(events)
	t.Assert(err, c.IsNil)
	defer stream.Close()
	nextEvent := func() *discoverd.Event {
		select {
		case e, ok := <-events:
			if !ok {
				t.Fatal("unexpected close of discoverd stream")
			}
			return e
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for discoverd event")
		}
		return nil
	}
	var client *cluster.Host
	for {
		e := nextEvent()
		if e.Kind == discoverd.EventKindUp && client == nil {
			client = cluster.NewHost(e.Instance.Meta["id"], e.Instance.Addr, nil, nil)
		}
		if e.Kind == discoverd.EventKindCurrent {
			break
		}
	}
	if client == nil {
		t.Fatal("did not initialize flynn-host client")
	}
	t.Assert(client.UpdateTags(map[string]string{"foo": "bar"}), c.IsNil)
	var meta map[string]string
	for {
		e := nextEvent()
		if e.Kind == discoverd.EventKindUpdate && e.Instance.Meta["id"] == client.ID() {
			meta = e.Instance.Meta
			break
		}
	}
	t.Assert(meta["tag:foo"], c.Equals, "bar")
	// setting the value to an empty string should delete the tag
	t.Assert(client.UpdateTags(map[string]string{"foo": ""}), c.IsNil)
	for {
		e := nextEvent()
		if e.Kind == discoverd.EventKindUpdate && e.Instance.Meta["id"] == client.ID() {
			meta = e.Instance.Meta
			break
		}
	}
	if _, ok := meta["tag:foo"]; ok {
		t.Fatal("expected tag to be deleted but is still present")
	}
}
Example 4: jobLog
func jobLog(req *http.Request, app *ct.App, params martini.Params, hc cluster.Host, w http.ResponseWriter, r ResponseHelper) {
	attachReq := &host.AttachReq{
		JobID: params["jobs_id"],
		Flags: host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagLogs,
	}
	tail := req.FormValue("tail") != ""
	if tail {
		attachReq.Flags |= host.AttachFlagStream
	}
	wait := req.FormValue("wait") != ""
	attachClient, err := hc.Attach(attachReq, wait)
	if err != nil {
		if err == cluster.ErrWouldWait {
			w.WriteHeader(404)
		} else {
			r.Error(err)
		}
		return
	}
	if cn, ok := w.(http.CloseNotifier); ok {
		go func() {
			<-cn.CloseNotify()
			attachClient.Close()
		}()
	} else {
		defer attachClient.Close()
	}
	sse := strings.Contains(req.Header.Get("Accept"), "text/event-stream")
	if sse {
		w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	} else {
		w.Header().Set("Content-Type", "application/vnd.flynn.attach")
	}
	w.WriteHeader(200)
	// Send headers right away if tailing
	if wf, ok := w.(http.Flusher); ok && tail {
		wf.Flush()
	}
	fw := flushWriter{w, tail}
	if sse {
		ssew := NewSSELogWriter(w)
		exit, err := attachClient.Receive(flushWriter{ssew.Stream("stdout"), tail}, flushWriter{ssew.Stream("stderr"), tail})
		if err != nil {
			fw.Write([]byte("event: error\ndata: {}\n\n"))
			return
		}
		if tail {
			fmt.Fprintf(fw, "event: exit\ndata: {\"status\": %d}\n\n", exit)
			return
		}
		fw.Write([]byte("event: eof\ndata: {}\n\n"))
	} else {
		io.Copy(fw, attachClient.Conn())
	}
}
Example 5: ProvisionVolume
func ProvisionVolume(h *cluster.Host, job *host.Job) error {
	vol, err := h.CreateVolume("default")
	if err != nil {
		return err
	}
	job.Config.Volumes = []host.VolumeBinding{{
		Target:    "/data",
		VolumeID:  vol.ID,
		Writeable: true,
	}}
	return nil
}
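Example 15 below shows the intended call pattern for this helper: provision the volume on the chosen host, then submit the job to that same host. A trimmed sketch of those lines (secondHost and job as defined in that example):

// attach a volume to the job, then start the job on the host that owns the volume
if err := utils.ProvisionVolume(secondHost, job); err != nil {
	return fmt.Errorf("error creating postgres volume on %s: %s", secondHost.ID(), err)
}
if err := secondHost.AddJob(job); err != nil {
	return fmt.Errorf("error starting additional postgres job on %s: %s", secondHost.ID(), err)
}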
Example 6: hostRaftStatus
func hostRaftStatus(host *cluster.Host, peers []string, leader string) (raftStatus string) {
	raftStatus = "proxy"
	ip, _, _ := net.SplitHostPort(host.Addr())
	for _, addr := range peers {
		discIp := ip + ":1111"
		if addr == discIp {
			raftStatus = "peer"
			if leader == discIp {
				raftStatus = raftStatus + " (leader)"
			}
			break
		}
	}
	return
}
Example 7: runStop
func runStop(args *docopt.Args, client cluster.Host) error {
	success := true
	for _, id := range args.All["ID"].([]string) {
		if err := client.StopJob(id); err != nil {
			fmt.Printf("could not stop job %s: %s\n", id, err)
			success = false
			continue
		}
		fmt.Println(id, "stopped")
	}
	if !success {
		return errors.New("could not stop all jobs")
	}
	return nil
}
Example 8: jobLog
func jobLog(req *http.Request, app *ct.App, params martini.Params, hc cluster.Host, w http.ResponseWriter, r ResponseHelper) {
	attachReq := &host.AttachReq{
		JobID: params["jobs_id"],
		Flags: host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagLogs,
	}
	tail := req.FormValue("tail") != ""
	if tail {
		attachReq.Flags |= host.AttachFlagStream
	}
	wait := req.FormValue("wait") != ""
	attachClient, err := hc.Attach(attachReq, wait)
	if err != nil {
		if err == cluster.ErrWouldWait {
			w.WriteHeader(404)
		} else {
			r.Error(err)
		}
		return
	}
	defer attachClient.Close()
	sse := strings.Contains(req.Header.Get("Accept"), "text/event-stream")
	if sse {
		w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	} else {
		w.Header().Set("Content-Type", "application/vnd.flynn.attach")
	}
	w.WriteHeader(200)
	// Send headers right away if tailing
	if wf, ok := w.(http.Flusher); ok && tail {
		wf.Flush()
	}
	// TODO: use http.CloseNotifier to clean up when client disconnects
	if sse {
		ssew := NewSSELogWriter(w)
		attachClient.Receive(flushWriter{ssew.Stream("stdout"), tail}, flushWriter{ssew.Stream("stderr"), tail})
		// TODO: include exit code here if tailing
		flushWriter{w, tail}.Write([]byte("event: eof\ndata: {}\n\n"))
	} else {
		io.Copy(flushWriter{w, tail}, attachClient.Conn())
	}
}
Example 9: runLog
func runLog(args *docopt.Args, client cluster.Host) error {
	attachReq := &host.AttachReq{
		JobID: args.String["ID"],
		Flags: host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagLogs,
	}
	if args.Bool["-f"] || args.Bool["--follow"] {
		attachReq.Flags |= host.AttachFlagStream
	}
	attachClient, err := client.Attach(attachReq, false)
	if err != nil {
		if err == cluster.ErrWouldWait {
			return fmt.Errorf("no such job")
		}
		return err
	}
	defer attachClient.Close()
	attachClient.Receive(os.Stdout, os.Stderr)
	return nil
}
Example 10: startJob
func startJob(s *State, hc *cluster.Host, job *host.Job) (*Job, error) {
	data := &Job{HostID: hc.ID(), JobID: job.ID}
	jobStatus := make(chan error)
	events := make(chan *host.Event)
	stream, err := hc.StreamEvents(data.JobID, events)
	if err != nil {
		return nil, err
	}
	go func() {
		defer stream.Close()
		for e := range events {
			switch e.Event {
			case "start", "stop":
				jobStatus <- nil
				return
			case "error":
				job, err := hc.GetJob(data.JobID)
				if err != nil {
					jobStatus <- err
					return
				}
				if job.Error == nil {
					jobStatus <- fmt.Errorf("bootstrap: unknown error from host")
					return
				}
				jobStatus <- fmt.Errorf("bootstrap: host error while launching job: %q", *job.Error)
				return
			default:
			}
		}
		jobStatus <- fmt.Errorf("bootstrap: host job stream disconnected unexpectedly: %q", stream.Err())
	}()
	if err := hc.AddJob(job); err != nil {
		return nil, err
	}
	return data, <-jobStatus
}
Example 11: startJob
func startJob(s *State, hc *cluster.Host, job *host.Job) error {
	jobStatus := make(chan error)
	events := make(chan *host.Event)
	stream, err := hc.StreamEvents(job.ID, events)
	if err != nil {
		return err
	}
	go func() {
		defer stream.Close()
	loop:
		for {
			select {
			case e, ok := <-events:
				if !ok {
					break loop
				}
				switch e.Event {
				case "start", "stop":
					jobStatus <- nil
					return
				case "error":
					job, err := hc.GetJob(job.ID)
					if err != nil {
						jobStatus <- err
						return
					}
					if job.Error == nil {
						jobStatus <- fmt.Errorf("bootstrap: unknown error from host")
						return
					}
					jobStatus <- fmt.Errorf("bootstrap: host error while launching job: %q", *job.Error)
					return
				default:
				}
			case <-time.After(30 * time.Second):
				jobStatus <- errors.New("bootstrap: timed out waiting for job event")
				return
			}
		}
		jobStatus <- fmt.Errorf("bootstrap: host job stream disconnected unexpectedly: %q", stream.Err())
	}()
	if err := hc.AddJob(job); err != nil {
		return err
	}
	return <-jobStatus
}
Example 12: JobUsingHost
func JobUsingHost(h *cluster.Host, artifact host.Artifact, job *host.Job) *Cmd {
	command := Job(artifact, job)
	command.HostID = h.ID()
	command.host = h
	return command
}
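A hypothetical usage sketch, by analogy with Example 1's JobUsingCluster call: it assumes exec.DockerImage (seen in Example 1) yields the artifact this variant expects, and imageURI is a placeholder, so treat it as an illustration rather than the package's documented API:

// pin the job to a specific host h instead of letting the cluster pick one
cmd := exec.JobUsingHost(h, exec.DockerImage(imageURI), &host.Job{
	Config: host.ContainerConfig{Args: []string{"/bin/ish"}},
})
if err := cmd.Start(); err != nil {
	return err
}
defer cmd.Kill()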
Example 13: doVolumeTransmitAPI
func (s *VolumeSuite) doVolumeTransmitAPI(h0, h1 *cluster.Host, t *c.C) {
	clus := s.clusterClient(t)
	// create a volume!
	vol, err := h0.CreateVolume("default")
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h0.DestroyVolume(vol.ID), c.IsNil)
	}()
	// create a job and use it to add data to the volume
	cmd, service, err := makeIshApp(clus, h0, s.discoverdClient(t), host.ContainerConfig{
		Volumes: []host.VolumeBinding{{
			Target:    "/vol",
			VolumeID:  vol.ID,
			Writeable: true,
		}},
	})
	t.Assert(err, c.IsNil)
	defer cmd.Kill()
	resp, err := runIshCommand(service, "echo 'testcontent' > /vol/alpha ; echo $?")
	t.Assert(err, c.IsNil)
	t.Assert(resp, c.Equals, "0\n")
	// take a snapshot
	snapInfo, err := h0.CreateSnapshot(vol.ID)
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h0.DestroyVolume(snapInfo.ID), c.IsNil)
	}()
	// make a volume on another host to yank the snapshot content into
	vol2, err := h1.CreateVolume("default")
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h1.DestroyVolume(vol2.ID), c.IsNil)
	}()
	// transfer the snapshot to the new volume on the other host
	snapInfo2, err := h1.PullSnapshot(vol2.ID, h0.ID(), snapInfo.ID)
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h1.DestroyVolume(snapInfo2.ID), c.IsNil)
	}()
	// start a job on the other host that mounts and inspects the transmitted volume
	cmd, service, err = makeIshApp(clus, h1, s.discoverdClient(t), host.ContainerConfig{
		Volumes: []host.VolumeBinding{{
			Target:    "/vol",
			VolumeID:  vol2.ID,
			Writeable: false,
		}},
	})
	t.Assert(err, c.IsNil)
	defer cmd.Kill()
	// read data back from the volume
	resp, err = runIshCommand(service, "cat /vol/alpha")
	t.Assert(err, c.IsNil)
	t.Assert(resp, c.Equals, "testcontent\n")
}
Example 14: watchHost
func (c *context) watchHost(h *cluster.Host, ready chan struct{}) {
	if !c.hosts.Add(h.ID()) {
		if ready != nil {
			ready <- struct{}{}
		}
		return
	}
	defer c.hosts.Remove(h.ID())
	g := grohl.NewContext(grohl.Data{"fn": "watchHost", "host.id": h.ID()})
	c.hosts.Set(h.ID(), h)
	g.Log(grohl.Data{"at": "start"})
	ch := make(chan *host.Event)
	h.StreamEvents("all", ch)
	if ready != nil {
		ready <- struct{}{}
	}
	// Call PutJob in a goroutine so we don't block receiving job events whilst potentially
	// making multiple requests to the controller (e.g. if the controller is down).
	//
	// Use a channel (rather than spawning a goroutine per event) so that events are delivered in order.
	jobs := make(chan *ct.Job, 10)
	go func() {
		for job := range jobs {
			putJobAttempts.Run(func() error {
				if err := c.PutJob(job); err != nil {
					g.Log(grohl.Data{"at": "put_job_error", "job.id": job.ID, "state": job.State, "err": err})
					// ignore validation / not found errors
					if httphelper.IsValidationError(err) || err == controller.ErrNotFound {
						return nil
					}
					return err
				}
				g.Log(grohl.Data{"at": "put_job", "job.id": job.ID, "state": job.State})
				return nil
			})
		}
	}()
	for event := range ch {
		meta := event.Job.Job.Metadata
		appID := meta["flynn-controller.app"]
		releaseID := meta["flynn-controller.release"]
		jobType := meta["flynn-controller.type"]
		if appID == "" || releaseID == "" {
			continue
		}
		job := &ct.Job{
			ID:        event.JobID,
			AppID:     appID,
			ReleaseID: releaseID,
			Type:      jobType,
			State:     jobState(event),
			Meta:      jobMetaFromMetadata(meta),
		}
		g.Log(grohl.Data{"at": "event", "job.id": event.JobID, "event": event.Event})
		jobs <- job
		// get a read lock on the mutex to ensure we are not currently
		// syncing with the cluster
		c.mtx.RLock()
		j := c.jobs.Get(h.ID(), event.JobID)
		c.mtx.RUnlock()
		if j == nil {
			continue
		}
		j.startedAt = event.Job.StartedAt
		if event.Event != "error" && event.Event != "stop" {
			continue
		}
		g.Log(grohl.Data{"at": "remove", "job.id": event.JobID, "event": event.Event})
		c.jobs.Remove(h.ID(), event.JobID)
		go func(event *host.Event) {
			c.mtx.RLock()
			j.Formation.RestartJob(jobType, h.ID(), event.JobID)
			c.mtx.RUnlock()
		}(event)
	}
	// TODO: check error/reconnect
}
Example 15: FixPostgres
//......... part of the code omitted here .........
		watchCh := make(chan *discoverd.Event)
		upCh := make(chan string)
		stream, err := service.Watch(watchCh)
		if err != nil {
			return nil, fmt.Errorf("error watching discoverd service: %s", err)
		}
		go func() {
			var current bool
			for event := range watchCh {
				if event.Kind == discoverd.EventKindCurrent {
					current = true
					continue
				}
				if !current || event.Kind != discoverd.EventKindUp {
					continue
				}
				if event.Instance.Meta["FLYNN_JOB_ID"] == jobID {
					upCh <- event.Instance.Addr
				}
			}
		}()
		return func() (string, error) {
			f.l.Info("waiting for postgres instance to start", "job.id", jobID)
			defer stream.Close()
			select {
			case addr := <-upCh:
				return addr, nil
			case <-time.After(time.Minute):
				return "", fmt.Errorf("timed out waiting for postgres instance to come up")
			}
		}, nil
	}
	var wait func() (string, error)
	have := len(instances)
	want := 2
	if state.Singleton {
		want = 1
	}
	if have >= want {
		return fmt.Errorf("already have enough postgres instances, unable to fix")
	}
	f.l.Info("attempting to start missing postgres jobs", "want", want, "have", have)
	if leader == nil {
		// if no postgres, attempt to start
		job.ID = cluster.GenerateJobID(host.ID(), "")
		f.FixJobEnv(job)
		f.l.Info("starting postgres primary job", "job.id", job.ID)
		wait, err = waitForInstance(job.ID)
		if err != nil {
			return err
		}
		if err := host.AddJob(job); err != nil {
			return fmt.Errorf("error starting postgres primary job on %s: %s", host.ID(), err)
		}
		have++
	}
	if want > have {
		// if not enough postgres instances, start another
		var secondHost *cluster.Host
		for _, h := range f.hosts {
			if h.ID() != host.ID() {
				secondHost = h
				break
			}
		}
		if secondHost == nil {
			// if there are no other hosts, use the same one we put the primary on
			secondHost = host
		}
		job.ID = cluster.GenerateJobID(secondHost.ID(), "")
		f.FixJobEnv(job)
		f.l.Info("starting second postgres job", "job.id", job.ID)
		if wait == nil {
			wait, err = waitForInstance(job.ID)
			if err != nil {
				return err
			}
		}
		if err := utils.ProvisionVolume(secondHost, job); err != nil {
			return fmt.Errorf("error creating postgres volume on %s: %s", secondHost.ID(), err)
		}
		if err := secondHost.AddJob(job); err != nil {
			return fmt.Errorf("error starting additional postgres job on %s: %s", secondHost.ID(), err)
		}
	}
	if wait != nil {
		addr, err := wait()
		if err != nil {
			return err
		}
		if leader != nil {
			addr = leader.Addr
		}
		f.l.Info("waiting for postgres to come up read-write")
		return pgmanager.NewClient(addr).WaitForReadWrite(5 * time.Minute)
	}
	return nil
}