This page collects typical usage examples of the Host.ID method from the Go package github.com/flynn/flynn/pkg/cluster. If you are wondering what Host.ID does, how to call it, or what real-world uses look like, the curated examples here may help. You can also explore further usage examples of the containing type, github.com/flynn/flynn/pkg/cluster.Host.
Eight code examples of the Host.ID method are shown below, sorted by popularity.
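Before the examples, here is a minimal sketch of how a *cluster.Host client can be obtained and its ID() read. It is an illustration only: the host ID and address are made-up placeholders, and the two trailing nil arguments simply mirror the cluster.NewHost call in Example 2 below.

package main

import (
	"fmt"

	"github.com/flynn/flynn/pkg/cluster"
)

func main() {
	// Construct a host client from a known ID and address (placeholder values);
	// the final two arguments are left nil, as in Example 2 below.
	h := cluster.NewHost("host0", "10.0.0.1:1113", nil, nil)

	// Host.ID returns the identifier the client was created with. The examples
	// below use it to pin jobs to a host (cmd.HostID = h.ID()) and to key host maps.
	fmt.Println("host id:", h.ID())
}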
Example 1: makeIshApp
/*
Make an 'ish' application on the given host, returning it when
it has registered readiness with discoverd.
User will want to defer cmd.Kill() to clean up.
*/
func makeIshApp(cluster *cluster.Client, h *cluster.Host, dc *discoverd.Client, extraConfig host.ContainerConfig) (*exec.Cmd, *discoverd.Instance, error) {
	// pick a unique string to use as service name so this works with concurrent tests.
	serviceName := "ish-service-" + random.String(6)
	// run a job that accepts tcp connections and performs tasks we ask of it in its container
	cmd := exec.JobUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), &host.Job{
		Config: host.ContainerConfig{
			Args:  []string{"/bin/ish"},
			Ports: []host.Port{{Proto: "tcp"}},
			Env: map[string]string{
				"NAME": serviceName,
			},
		}.Merge(extraConfig),
	})
	cmd.HostID = h.ID()
	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}
	// wait for the job to heartbeat and return its address
	services, err := dc.Instances(serviceName, time.Second*100)
	if err != nil {
		cmd.Kill()
		return nil, nil, err
	}
	if len(services) != 1 {
		cmd.Kill()
		return nil, nil, fmt.Errorf("test setup: expected exactly one service instance, got %d", len(services))
	}
	return cmd, services[0], nil
}
Example 2: TestUpdateTags
func (s *HostSuite) TestUpdateTags(t *c.C) {
	events := make(chan *discoverd.Event)
	stream, err := s.discoverdClient(t).Service("flynn-host").Watch(events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	nextEvent := func() *discoverd.Event {
		select {
		case e, ok := <-events:
			if !ok {
				t.Fatal("unexpected close of discoverd stream")
			}
			return e
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for discoverd event")
		}
		return nil
	}

	var client *cluster.Host
	for {
		e := nextEvent()
		if e.Kind == discoverd.EventKindUp && client == nil {
			client = cluster.NewHost(e.Instance.Meta["id"], e.Instance.Addr, nil, nil)
		}
		if e.Kind == discoverd.EventKindCurrent {
			break
		}
	}
	if client == nil {
		t.Fatal("did not initialize flynn-host client")
	}

	t.Assert(client.UpdateTags(map[string]string{"foo": "bar"}), c.IsNil)

	var meta map[string]string
	for {
		e := nextEvent()
		if e.Kind == discoverd.EventKindUpdate && e.Instance.Meta["id"] == client.ID() {
			meta = e.Instance.Meta
			break
		}
	}
	t.Assert(meta["tag:foo"], c.Equals, "bar")

	// setting to empty string should delete the tag
	t.Assert(client.UpdateTags(map[string]string{"foo": ""}), c.IsNil)

	for {
		e := nextEvent()
		if e.Kind == discoverd.EventKindUpdate && e.Instance.Meta["id"] == client.ID() {
			meta = e.Instance.Meta
			break
		}
	}
	if _, ok := meta["tag:foo"]; ok {
		t.Fatal("expected tag to be deleted but is still present")
	}
}
Example 3: doVolumeTransmitAPI
func (s *VolumeSuite) doVolumeTransmitAPI(h0, h1 *cluster.Host, t *c.C) {
	clus := s.clusterClient(t)

	// create a volume!
	vol, err := h0.CreateVolume("default")
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h0.DestroyVolume(vol.ID), c.IsNil)
	}()
	// create a job and use it to add data to the volume
	cmd, service, err := makeIshApp(clus, h0, s.discoverdClient(t), host.ContainerConfig{
		Volumes: []host.VolumeBinding{{
			Target:    "/vol",
			VolumeID:  vol.ID,
			Writeable: true,
		}},
	})
	t.Assert(err, c.IsNil)
	defer cmd.Kill()
	resp, err := runIshCommand(service, "echo 'testcontent' > /vol/alpha ; echo $?")
	t.Assert(err, c.IsNil)
	t.Assert(resp, c.Equals, "0\n")

	// take a snapshot
	snapInfo, err := h0.CreateSnapshot(vol.ID)
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h0.DestroyVolume(snapInfo.ID), c.IsNil)
	}()
	// make a volume on another host to yank the snapshot content into
	vol2, err := h1.CreateVolume("default")
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h1.DestroyVolume(vol2.ID), c.IsNil)
	}()
	// transfer the snapshot to the new volume on the other host
	snapInfo2, err := h1.PullSnapshot(vol2.ID, h0.ID(), snapInfo.ID)
	t.Assert(err, c.IsNil)
	defer func() {
		t.Assert(h1.DestroyVolume(snapInfo2.ID), c.IsNil)
	}()

	// start a job on the other host that mounts and inspects the transmitted volume
	cmd, service, err = makeIshApp(clus, h1, s.discoverdClient(t), host.ContainerConfig{
		Volumes: []host.VolumeBinding{{
			Target:    "/vol",
			VolumeID:  vol2.ID,
			Writeable: false,
		}},
	})
	t.Assert(err, c.IsNil)
	defer cmd.Kill()

	// read data back from the volume
	resp, err = runIshCommand(service, "cat /vol/alpha")
	t.Assert(err, c.IsNil)
	t.Assert(resp, c.Equals, "testcontent\n")
}
Example 4: startJob
func startJob(s *State, hc *cluster.Host, job *host.Job) (*Job, error) {
	data := &Job{HostID: hc.ID(), JobID: job.ID}

	jobStatus := make(chan error)
	events := make(chan *host.Event)
	stream, err := hc.StreamEvents(data.JobID, events)
	if err != nil {
		return nil, err
	}
	go func() {
		defer stream.Close()
		for e := range events {
			switch e.Event {
			case "start", "stop":
				jobStatus <- nil
				return
			case "error":
				job, err := hc.GetJob(data.JobID)
				if err != nil {
					jobStatus <- err
					return
				}
				if job.Error == nil {
					jobStatus <- fmt.Errorf("bootstrap: unknown error from host")
					return
				}
				jobStatus <- fmt.Errorf("bootstrap: host error while launching job: %q", *job.Error)
				return
			default:
			}
		}
		jobStatus <- fmt.Errorf("bootstrap: host job stream disconnected unexpectedly: %q", stream.Err())
	}()

	if err := hc.AddJob(job); err != nil {
		return nil, err
	}

	return data, <-jobStatus
}
Example 5: JobUsingHost
func JobUsingHost(h *cluster.Host, artifact host.Artifact, job *host.Job) *Cmd {
	command := Job(artifact, job)
	command.HostID = h.ID()
	command.host = h
	return command
}
Example 6: watchHost
func (c *context) watchHost(h *cluster.Host, ready chan struct{}) {
	if !c.hosts.Add(h.ID()) {
		if ready != nil {
			ready <- struct{}{}
		}
		return
	}
	defer c.hosts.Remove(h.ID())

	g := grohl.NewContext(grohl.Data{"fn": "watchHost", "host.id": h.ID()})

	c.hosts.Set(h.ID(), h)

	g.Log(grohl.Data{"at": "start"})

	ch := make(chan *host.Event)
	h.StreamEvents("all", ch)
	if ready != nil {
		ready <- struct{}{}
	}

	// Call PutJob in a goroutine so we don't block receiving job events whilst potentially
	// making multiple requests to the controller (e.g. if the controller is down).
	//
	// Use a channel (rather than spawning a goroutine per event) so that events are delivered in order.
	jobs := make(chan *ct.Job, 10)
	go func() {
		for job := range jobs {
			putJobAttempts.Run(func() error {
				if err := c.PutJob(job); err != nil {
					g.Log(grohl.Data{"at": "put_job_error", "job.id": job.ID, "state": job.State, "err": err})
					// ignore validation / not found errors
					if httphelper.IsValidationError(err) || err == controller.ErrNotFound {
						return nil
					}
					return err
				}
				g.Log(grohl.Data{"at": "put_job", "job.id": job.ID, "state": job.State})
				return nil
			})
		}
	}()

	for event := range ch {
		meta := event.Job.Job.Metadata
		appID := meta["flynn-controller.app"]
		releaseID := meta["flynn-controller.release"]
		jobType := meta["flynn-controller.type"]

		if appID == "" || releaseID == "" {
			continue
		}

		job := &ct.Job{
			ID:        event.JobID,
			AppID:     appID,
			ReleaseID: releaseID,
			Type:      jobType,
			State:     jobState(event),
			Meta:      jobMetaFromMetadata(meta),
		}
		g.Log(grohl.Data{"at": "event", "job.id": event.JobID, "event": event.Event})
		jobs <- job

		// get a read lock on the mutex to ensure we are not currently
		// syncing with the cluster
		c.mtx.RLock()
		j := c.jobs.Get(h.ID(), event.JobID)
		c.mtx.RUnlock()
		if j == nil {
			continue
		}
		j.startedAt = event.Job.StartedAt

		if event.Event != "error" && event.Event != "stop" {
			continue
		}

		g.Log(grohl.Data{"at": "remove", "job.id": event.JobID, "event": event.Event})

		c.jobs.Remove(h.ID(), event.JobID)
		go func(event *host.Event) {
			c.mtx.RLock()
			j.Formation.RestartJob(jobType, h.ID(), event.JobID)
			c.mtx.RUnlock()
		}(event)
	}
	// TODO: check error/reconnect
}
Example 7: FixPostgres
func (f *ClusterFixer) FixPostgres() error {
	f.l.Info("checking postgres")
	service := discoverd.NewService("postgres")
	leader, _ := service.Leader()
	if leader == nil || leader.Addr == "" {
		f.l.Info("no running postgres leader")
		leader = nil
	} else {
		f.l.Info("found running postgres leader")
	}
	instances, _ := service.Instances()
	f.l.Info(fmt.Sprintf("found %d running postgres instances", len(instances)))

	f.l.Info("getting postgres status")
	var status *pgmanager.Status
	if leader != nil && leader.Addr != "" {
		client := pgmanager.NewClient(leader.Addr)
		var err error
		status, err = client.Status()
		if err != nil {
			f.l.Error("error getting status from postgres leader", "error", err)
		}
	}
	if status != nil && status.Postgres.ReadWrite {
		f.l.Info("postgres claims to be read-write")
		return nil
	}

	f.l.Info("getting postgres service metadata")
	meta, err := discoverd.NewService("postgres").GetMeta()
	if err != nil {
		return fmt.Errorf("error getting postgres state from discoverd: %s", err)
	}

	var state pgstate.State
	if err := json.Unmarshal(meta.Data, &state); err != nil {
		return fmt.Errorf("error decoding postgres state: %s", err)
	}
	if state.Primary == nil {
		return fmt.Errorf("no primary in postgres state")
	}

	f.l.Info("getting postgres primary job info", "job.id", state.Primary.Meta["FLYNN_JOB_ID"])
	job, host, err := f.GetJob(state.Primary.Meta["FLYNN_JOB_ID"])
	if err != nil {
		if state.Sync != nil {
			f.l.Error("unable to get primary job info", "error", err)
			f.l.Info("getting postgres sync job info", "job.id", state.Sync.Meta["FLYNN_JOB_ID"])
			job, host, err = f.GetJob(state.Sync.Meta["FLYNN_JOB_ID"])
			if err != nil {
				return fmt.Errorf("unable to get postgres primary or sync job details: %s", err)
			}
		} else {
			return fmt.Errorf("unable to get postgres primary job details: %s", err)
		}
	}

	if leader != nil && state.Singleton {
		return fmt.Errorf("postgres leader is running in singleton mode, unable to fix")
	}

	waitForInstance := func(jobID string) (func() (string, error), error) {
		watchCh := make(chan *discoverd.Event)
		upCh := make(chan string)
		stream, err := service.Watch(watchCh)
		if err != nil {
			return nil, fmt.Errorf("error watching discoverd service: %s", err)
		}
		go func() {
			var current bool
			for event := range watchCh {
				if event.Kind == discoverd.EventKindCurrent {
					current = true
					continue
				}
				if !current || event.Kind != discoverd.EventKindUp {
					continue
				}
				if event.Instance.Meta["FLYNN_JOB_ID"] == jobID {
					upCh <- event.Instance.Addr
				}
			}
		}()
		return func() (string, error) {
			f.l.Info("waiting for postgres instance to start", "job.id", jobID)
			defer stream.Close()
			select {
			case addr := <-upCh:
				return addr, nil
			case <-time.After(time.Minute):
				return "", fmt.Errorf("timed out waiting for postgres instance to come up")
			}
		}, nil
	}

	var wait func() (string, error)
	have := len(instances)
	want := 2
	if state.Singleton {
		want = 1
//......... the remainder of this example is omitted .........
Example 8: FixSirenia
func (f *ClusterFixer) FixSirenia(svc string) error {
	log := f.l.New("fn", "FixSirenia", "service", svc)

	service := discoverd.NewService(svc)
	instances, _ := service.Instances()
	leader, _ := service.Leader()

	log.Info("getting service metadata")
	meta, err := service.GetMeta()
	if err != nil {
		return fmt.Errorf("error getting sirenia state from discoverd: %s", err)
	}

	var state state.State
	if err := json.Unmarshal(meta.Data, &state); err != nil {
		return fmt.Errorf("error decoding state: %s", err)
	}
	if state.Primary == nil {
		return fmt.Errorf("no primary in sirenia state")
	}

	log.Info("getting primary job info", "job.id", state.Primary.Meta["FLYNN_JOB_ID"])
	primaryJob, primaryHost, err := f.GetJob(state.Primary.Meta["FLYNN_JOB_ID"])
	if err != nil {
		log.Error("unable to get primary job info")
	}
	var syncJob *host.Job
	var syncHost *cluster.Host
	if state.Sync != nil {
		log.Info("getting sync job info", "job.id", state.Sync.Meta["FLYNN_JOB_ID"])
		syncJob, syncHost, err = f.GetJob(state.Sync.Meta["FLYNN_JOB_ID"])
		if err != nil {
			log.Error("unable to get sync job info")
		}
	}

	waitForInstance := func(jobID string) (func() (string, error), error) {
		watchCh := make(chan *discoverd.Event)
		upCh := make(chan string)
		stream, err := service.Watch(watchCh)
		if err != nil {
			return nil, fmt.Errorf("error watching discoverd service: %s", err)
		}
		go func() {
			var current bool
			for event := range watchCh {
				if event.Kind == discoverd.EventKindCurrent {
					current = true
					continue
				}
				if !current || event.Kind != discoverd.EventKindUp {
					continue
				}
				if event.Instance.Meta["FLYNN_JOB_ID"] == jobID {
					upCh <- event.Instance.Addr
				}
			}
		}()
		return func() (string, error) {
			log.Info("waiting for instance to start", "job.id", jobID)
			defer stream.Close()
			select {
			case addr := <-upCh:
				return addr, nil
			case <-time.After(time.Minute):
				return "", fmt.Errorf("timed out waiting for sirenia instance to come up")
			}
		}, nil
	}

	log.Info("terminating unassigned sirenia instances")
outer:
	for _, i := range instances {
		if i.Addr == state.Primary.Addr || (state.Sync != nil && i.Addr == state.Sync.Addr) {
			continue
		}
		for _, a := range state.Async {
			if i.Addr == a.Addr {
				continue outer
			}
		}
		// job not assigned in state, attempt to terminate it
		if jobID, ok := i.Meta["FLYNN_JOB_ID"]; ok {
			hostID, err := cluster.ExtractHostID(jobID)
			if err != nil {
				log.Error("error extracting host id from jobID", "jobID", jobID, "err", err)
			}
			h := f.Host(hostID)
			if h != nil {
				if err := h.StopJob(jobID); err != nil {
					log.Error("error stopping unassigned sirenia job", "jobID", jobID)
				}
			} else {
				log.Error("host not found", "hostID", hostID)
			}
		}
	}

	isRunning := func(addr string) bool {
		for _, i := range instances {
//......... the remainder of this example is omitted .........