This article collects typical usage examples of the Golang GenerateJobID function from github.com/flynn/flynn/pkg/cluster. If you are wondering what GenerateJobID does, how to call it, or what real-world usage looks like, the curated examples here should help.
Fifteen code examples of the GenerateJobID function are shown below, sorted by popularity by default.
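Before the examples: GenerateJobID itself is not shown on this page. Judging from the calls below, it takes a host ID and a job UUID and returns a cluster-wide job ID that embeds both (the older Examples 6 and 8 use a one-argument form from an earlier version of the package). The sketch below is only a rough approximation inferred from that usage, assuming the composite "<hostID>-<uuid>" layout implied by Example 1's migration test; consult pkg/cluster in the Flynn repository for the real implementation.

// Approximate sketch of GenerateJobID, inferred from the examples on this
// page; not the authoritative Flynn implementation.
package cluster

import "github.com/flynn/flynn/pkg/random"

// GenerateJobID combines a host ID and a job UUID into a single cluster-wide
// job ID. When uuid is empty (several examples below pass ""), a fresh UUID
// is generated, so every call still yields a unique ID.
func GenerateJobID(hostID, uuid string) string {
	if uuid == "" {
		uuid = random.UUID()
	}
	return hostID + "-" + uuid
}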
Example 1: TestMigrateJobStates
// TestMigrateJobStates checks that migrating to ID 9 does not break existing
// job records
func (MigrateSuite) TestMigrateJobStates(c *C) {
	db := setupTestDB(c, "controllertest_migrate_job_states")
	m := &testMigrator{c: c, db: db}

	// start from ID 7
	m.migrateTo(7)

	// insert a job
	hostID := "host1"
	uuid := random.UUID()
	jobID := cluster.GenerateJobID(hostID, uuid)
	appID := random.UUID()
	releaseID := random.UUID()
	c.Assert(db.Exec(`INSERT INTO apps (app_id, name) VALUES ($1, $2)`, appID, "migrate-app"), IsNil)
	c.Assert(db.Exec(`INSERT INTO releases (release_id) VALUES ($1)`, releaseID), IsNil)
	c.Assert(db.Exec(`INSERT INTO job_cache (job_id, app_id, release_id, state) VALUES ($1, $2, $3, $4)`, jobID, appID, releaseID, "up"), IsNil)

	// migrate to 8 and check job states are still constrained
	m.migrateTo(8)
	err := db.Exec(`UPDATE job_cache SET state = 'foo' WHERE job_id = $1`, jobID)
	c.Assert(err, NotNil)
	if !postgres.IsPostgresCode(err, postgres.ForeignKeyViolation) {
		c.Fatalf("expected postgres foreign key violation, got %s", err)
	}

	// migrate to 9 and check job IDs are correct, pending state is valid
	m.migrateTo(9)
	var clusterID, dbUUID, dbHostID string
	c.Assert(db.QueryRow("SELECT cluster_id, job_id, host_id FROM job_cache WHERE cluster_id = $1", jobID).Scan(&clusterID, &dbUUID, &dbHostID), IsNil)
	c.Assert(clusterID, Equals, jobID)
	c.Assert(dbUUID, Equals, uuid)
	c.Assert(dbHostID, Equals, hostID)
	c.Assert(db.Exec(`UPDATE job_cache SET state = 'pending' WHERE job_id = $1`, uuid), IsNil)
}
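Migration 9 in this test splits the composite ID stored in cluster_id back into separate job_id (UUID) and host_id columns. Code that needs the same split outside the database could use a helper like the one below; parseJobID is a hypothetical illustration, assuming the "<hostID>-<uuid>" layout sketched above and a host ID that contains no dash (true for IDs such as "host1" used here). It only needs the standard library "strings" package.

// parseJobID is a hypothetical helper (not part of the Flynn API shown on this
// page) that splits a composite job ID of the form "<hostID>-<uuid>" back into
// its parts. It assumes the host ID itself contains no dash; the UUID keeps
// its internal dashes because SplitN only splits at the first one.
func parseJobID(jobID string) (hostID, uuid string, ok bool) {
	parts := strings.SplitN(jobID, "-", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", false
	}
	return parts[0], parts[1], true
}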
Example 2: StartAppJob
func (f *ClusterFixer) StartAppJob(app, typ, service string) ([]*discoverd.Instance, error) {
	f.l.Info(fmt.Sprintf("no %s %s process running, getting release details from hosts", app, typ))
	releases := f.FindAppReleaseJobs(app, typ)
	if len(releases) == 0 {
		return nil, fmt.Errorf("didn't find any %s %s release jobs", app, typ)
	}
	// get a job template from the first release
	var job *host.Job
	for _, job = range releases[0] {
		break
	}
	host := f.hosts[0]
	job.ID = cluster.GenerateJobID(host.ID(), "")

	// provision new temporary volumes
	for i, v := range job.Config.Volumes {
		if v.DeleteOnStop {
			f.l.Info(fmt.Sprintf("provisioning volume for %s %s job", app, typ), "job.id", job.ID, "release", job.Metadata["flynn-controller.release"])
			vol, err := host.CreateVolume("default")
			if err != nil {
				return nil, fmt.Errorf("error provisioning volume for %s %s job: %s", app, typ, err)
			}
			job.Config.Volumes[i].VolumeID = vol.ID
		}
	}

	f.FixJobEnv(job)

	// run it on the host
	f.l.Info(fmt.Sprintf("starting %s %s job", app, typ), "job.id", job.ID, "release", job.Metadata["flynn-controller.release"])
	if err := host.AddJob(job); err != nil {
		return nil, fmt.Errorf("error starting %s %s job: %s", app, typ, err)
	}
	f.l.Info("waiting for job to start")
	return discoverd.GetInstances(service, time.Minute)
}
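A caller typically reaches StartAppJob only when a system app has no running instances. The fragment below is a hypothetical usage sketch (the app, process type, and service names are illustrative, and f is assumed to be an existing *ClusterFixer); it is not code from the Flynn repository.

// Hypothetical call site: restart the controller's web process and wait for
// it to register with discoverd before continuing.
instances, err := f.StartAppJob("controller", "web", "controller")
if err != nil {
	return err
}
f.l.Info("controller is running again", "addr", instances[0].Addr)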
Example 3: TestJobGet
func (s *S) TestJobGet(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "job-get"})
	release := s.createTestRelease(c, &ct.Release{})
	s.createTestFormation(c, &ct.Formation{ReleaseID: release.ID, AppID: app.ID})
	uuid := random.UUID()
	hostID := "host0"
	jobID := cluster.GenerateJobID(hostID, uuid)
	s.createTestJob(c, &ct.Job{
		ID:        jobID,
		UUID:      uuid,
		HostID:    hostID,
		AppID:     app.ID,
		ReleaseID: release.ID,
		Type:      "web",
		State:     ct.JobStateStarting,
		Meta:      map[string]string{"some": "info"},
	})

	// test getting the job with both the job ID and the UUID
	for _, id := range []string{jobID, uuid} {
		job, err := s.c.GetJob(app.ID, id)
		c.Assert(err, IsNil)
		c.Assert(job.ID, Equals, jobID)
		c.Assert(job.UUID, Equals, uuid)
		c.Assert(job.HostID, Equals, hostID)
		c.Assert(job.AppID, Equals, app.ID)
		c.Assert(job.ReleaseID, Equals, release.ID)
		c.Assert(job.Meta, DeepEquals, map[string]string{"some": "info"})
	}
}
Example 4: JobConfig
func JobConfig(f *ct.ExpandedFormation, name, hostID string, uuid string) *host.Job {
	t := f.Release.Processes[name]
	env := make(map[string]string, len(f.Release.Env)+len(t.Env)+4)
	for k, v := range f.Release.Env {
		env[k] = v
	}
	for k, v := range t.Env {
		env[k] = v
	}
	id := cluster.GenerateJobID(hostID, uuid)
	env["FLYNN_APP_ID"] = f.App.ID
	env["FLYNN_APP_NAME"] = f.App.Name
	env["FLYNN_RELEASE_ID"] = f.Release.ID
	env["FLYNN_PROCESS_TYPE"] = name
	env["FLYNN_JOB_ID"] = id
	metadata := make(map[string]string, len(f.App.Meta)+4)
	for k, v := range f.App.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = f.App.ID
	metadata["flynn-controller.app_name"] = f.App.Name
	metadata["flynn-controller.release"] = f.Release.ID
	metadata["flynn-controller.formation"] = "true"
	metadata["flynn-controller.type"] = name
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Config: host.ContainerConfig{
			Cmd:         t.Cmd,
			Env:         env,
			HostNetwork: t.HostNetwork,
		},
		Resurrect: t.Resurrect,
		Resources: t.Resources,
	}
	if f.App.Meta["flynn-system-app"] == "true" {
		job.Partition = "system"
	}
	if len(t.Entrypoint) > 0 {
		job.Config.Entrypoint = t.Entrypoint
	}
	if f.ImageArtifact != nil {
		job.ImageArtifact = f.ImageArtifact.HostArtifact()
	}
	if len(f.FileArtifacts) > 0 {
		job.FileArtifacts = make([]*host.Artifact, len(f.FileArtifacts))
		for i, artifact := range f.FileArtifacts {
			job.FileArtifacts[i] = artifact.HostArtifact()
		}
	}
	job.Config.Ports = make([]host.Port, len(t.Ports))
	for i, p := range t.Ports {
		job.Config.Ports[i].Proto = p.Proto
		job.Config.Ports[i].Port = p.Port
		job.Config.Ports[i].Service = p.Service
	}
	return job
}
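A caller of this JobConfig variant would typically pick a host, build the job, and submit it with AddJob. The snippet below is a hypothetical usage sketch, assuming an existing *ct.ExpandedFormation and *cluster.Host; it is not taken from the Flynn scheduler itself.

// startWebJob is a hypothetical caller of the JobConfig helper above: it
// builds a job for the "web" process type on the given host and starts it.
func startWebJob(f *ct.ExpandedFormation, h *cluster.Host) error {
	job := JobConfig(f, "web", h.ID(), random.UUID())
	if err := h.AddJob(job); err != nil {
		return fmt.Errorf("error starting web job: %s", err)
	}
	return nil
}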
Example 5: FixFlannel
func (f *ClusterFixer) FixFlannel() error {
	f.l.Info("checking flannel")

	flannelJobs := make(map[string]*host.Job, len(f.hosts))
	for _, h := range f.hosts {
		jobs, err := h.ListJobs()
		if err != nil {
			return fmt.Errorf("error getting jobs list from %s: %s", h.ID(), err)
		}
		for _, j := range jobs {
			if j.Status != host.StatusRunning ||
				j.Job.Metadata["flynn-controller.app_name"] != "flannel" ||
				j.Job.Metadata["flynn-controller.type"] != "app" {
				continue
			}
			flannelJobs[h.ID()] = j.Job
			break
		}
	}
	if len(flannelJobs) == len(f.hosts) {
		f.l.Info("flannel looks good")
		return nil
	}

	var job *host.Job
	if len(flannelJobs) == 0 {
		f.l.Info("flannel not running, starting it on each host")
		releases := f.FindAppReleaseJobs("flannel", "app")
		if len(releases) == 0 {
			return fmt.Errorf("didn't find flannel release jobs")
		}
		for _, j := range releases[0] {
			job = j
			break
		}
	} else {
		f.l.Info("flannel is not running on each host, starting missing jobs")
		for _, job = range flannelJobs {
			break
		}
	}

	for _, h := range f.hosts {
		if _, ok := flannelJobs[h.ID()]; ok {
			continue
		}
		job.ID = cluster.GenerateJobID(h.ID(), "")
		f.FixJobEnv(job)
		if err := h.AddJob(job); err != nil {
			return fmt.Errorf("error starting flannel job: %s", err)
		}
		f.l.Info("started flannel job", "job.id", job.ID)
	}

	f.l.Info("flannel fix complete")
	return nil
}
Example 6: TestKillJob
func (s *S) TestKillJob(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "killjob"})
	hostID := fakeHostID()
	jobID := cluster.GenerateJobID(hostID)
	hc := tu.NewFakeHostClient(hostID)
	s.cc.AddHost(hc)

	c.Assert(s.c.DeleteJob(app.ID, jobID), IsNil)
	c.Assert(hc.IsStopped(jobID), Equals, true)
}
Example 7: CrashJob
func (c *FakeHostClient) CrashJob(uuid string) error {
	c.jobsMtx.Lock()
	defer c.jobsMtx.Unlock()
	id := cluster.GenerateJobID(c.hostID, uuid)
	c.stopped[id] = true
	job, ok := c.Jobs[id]
	if ok {
		job.Status = host.StatusCrashed
		c.Jobs[id] = job
		return c.stop(id)
	} else {
		return ct.NotFoundError{Resource: id}
	}
}
Example 8: JobConfig
func JobConfig(f *ct.ExpandedFormation, name, hostID string) *host.Job {
	t := f.Release.Processes[name]
	env := make(map[string]string, len(f.Release.Env)+len(t.Env)+4)
	for k, v := range f.Release.Env {
		env[k] = v
	}
	for k, v := range t.Env {
		env[k] = v
	}
	id := cluster.GenerateJobID(hostID)
	env["FLYNN_APP_ID"] = f.App.ID
	env["FLYNN_APP_NAME"] = f.App.Name
	env["FLYNN_RELEASE_ID"] = f.Release.ID
	env["FLYNN_PROCESS_TYPE"] = name
	env["FLYNN_JOB_ID"] = id
	job := &host.Job{
		ID: id,
		Metadata: map[string]string{
			"flynn-controller.app":      f.App.ID,
			"flynn-controller.app_name": f.App.Name,
			"flynn-controller.release":  f.Release.ID,
			"flynn-controller.type":     name,
		},
		Artifact: host.Artifact{
			Type: f.Artifact.Type,
			URI:  f.Artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:         t.Cmd,
			Env:         env,
			HostNetwork: t.HostNetwork,
		},
		Resurrect: t.Resurrect,
		Resources: t.Resources,
	}
	if len(t.Entrypoint) > 0 {
		job.Config.Entrypoint = t.Entrypoint
	}
	job.Config.Ports = make([]host.Port, len(t.Ports))
	for i, p := range t.Ports {
		job.Config.Ports[i].Proto = p.Proto
		job.Config.Ports[i].Port = p.Port
		job.Config.Ports[i].Service = p.Service
	}
	return job
}
Example 9: StartAppJob
func (f *ClusterFixer) StartAppJob(app, typ, service string) ([]*discoverd.Instance, error) {
	f.l.Info(fmt.Sprintf("no %s %s process running, getting release details from hosts", app, typ))
	releases := f.FindAppReleaseJobs(app, typ)
	if len(releases) == 0 {
		return nil, fmt.Errorf("didn't find any %s %s release jobs", app, typ)
	}
	// get a job template from the first release
	var job *host.Job
	for _, job = range releases[0] {
		break
	}
	job.ID = cluster.GenerateJobID(f.hosts[0].ID(), "")
	f.FixJobEnv(job)

	// run it on a host
	f.l.Info(fmt.Sprintf("starting %s %s job", app, typ), "job.id", job.ID, "release", job.Metadata["flynn-controller.release"])
	if err := f.hosts[0].AddJob(job); err != nil {
		return nil, fmt.Errorf("error starting %s %s job: %s", app, typ, err)
	}
	f.l.Info("waiting for job to start")
	return discoverd.GetInstances(service, time.Minute)
}
Example 10: FixDiscoverd
func (f *ClusterFixer) FixDiscoverd() error {
	f.l.Info("ensuring discoverd is running on all hosts")
	releases := f.FindAppReleaseJobs("discoverd", "app")
	if len(releases) == 0 {
		return fmt.Errorf("didn't find any discoverd release jobs")
	}
outer:
	for hostID, job := range releases[0] {
		for _, h := range f.hosts {
			if h.ID() != hostID {
				continue
			}

			// check if discoverd is already running on this host
			jobs, err := h.ListJobs()
			if err != nil {
				return fmt.Errorf("error listing jobs on %s: %s", h.ID(), err)
			}
			for _, j := range jobs {
				if j.Status == host.StatusRunning &&
					j.Job.Metadata["flynn-controller.app_name"] == "discoverd" &&
					j.Job.Metadata["flynn-controller.type"] == "app" {
					continue outer
				}
			}

			job.ID = cluster.GenerateJobID(h.ID(), "")
			f.FixJobEnv(job)
			if err := h.AddJob(job); err != nil {
				return fmt.Errorf("error starting discoverd on %s: %s", h.ID(), err)
			}
			f.l.Info("started discoverd instance", "job.id", job.ID)
			break
		}
	}
	return nil
}
Example 11: TestKillJob
func (s *S) TestKillJob(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "killjob"})
	release := s.createTestRelease(c, &ct.Release{})
	hostID := fakeHostID()
	uuid := random.UUID()
	jobID := cluster.GenerateJobID(hostID, uuid)
	s.createTestJob(c, &ct.Job{
		ID:        jobID,
		UUID:      uuid,
		HostID:    hostID,
		AppID:     app.ID,
		ReleaseID: release.ID,
		Type:      "web",
		State:     ct.JobStateStarting,
		Meta:      map[string]string{"some": "info"},
	})
	hc := tu.NewFakeHostClient(hostID, false)
	hc.AddJob(&host.Job{ID: jobID})
	s.cc.AddHost(hc)

	err := s.c.DeleteJob(app.ID, jobID)
	c.Assert(err, IsNil)
	c.Assert(hc.IsStopped(jobID), Equals, true)
}
Example 12: RunJob
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}
	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	var artifactIDs []string
	if len(newJob.ArtifactIDs) > 0 {
		artifactIDs = newJob.ArtifactIDs
	} else if len(release.ArtifactIDs) > 0 {
		artifactIDs = release.ArtifactIDs
	} else {
		httphelper.ValidationError(w, "release.ArtifactIDs", "cannot be empty")
		return
	}

	artifacts := make([]*ct.Artifact, len(artifactIDs))
	artifactList, err := c.artifactRepo.ListIDs(artifactIDs...)
	if err != nil {
		respondWithError(w, err)
		return
	}
	for i, id := range artifactIDs {
		artifacts[i] = artifactList[id]
	}

	var entrypoint ct.ImageEntrypoint
	if e := utils.GetEntrypoint(artifacts, ""); e != nil {
		entrypoint = *e
	}

	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")

	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	client := hosts[random.Math.Intn(len(hosts))]

	uuid := random.UUID()
	hostID := client.ID()
	id := cluster.GenerateJobID(hostID, uuid)
	app := c.getApp(ctx)
	env := make(map[string]string, len(entrypoint.Env)+len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	for k, v := range entrypoint.Env {
		env[k] = v
	}
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Config: host.ContainerConfig{
			Args:       entrypoint.Args,
			Env:        env,
			WorkingDir: entrypoint.WorkingDir,
			Uid:        entrypoint.Uid,
			Gid:        entrypoint.Gid,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
		Partition: string(newJob.Partition),
	}
	resource.SetDefaults(&job.Resources)
//......... remainder of this code omitted .........
Example 13: RunJob
func (c *controllerAPI) RunJob(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	var newJob ct.NewJob
	if err := httphelper.DecodeJSON(req, &newJob); err != nil {
		respondWithError(w, err)
		return
	}
	if err := schema.Validate(newJob); err != nil {
		respondWithError(w, err)
		return
	}
	data, err := c.releaseRepo.Get(newJob.ReleaseID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	release := data.(*ct.Release)
	data, err = c.artifactRepo.Get(release.ArtifactID)
	if err != nil {
		respondWithError(w, err)
		return
	}
	artifact := data.(*ct.Artifact)

	attach := strings.Contains(req.Header.Get("Upgrade"), "flynn-attach/0")

	hosts, err := c.clusterClient.Hosts()
	if err != nil {
		respondWithError(w, err)
		return
	}
	if len(hosts) == 0 {
		respondWithError(w, errors.New("no hosts found"))
		return
	}
	client := hosts[random.Math.Intn(len(hosts))]

	id := cluster.GenerateJobID(client.ID(), "")
	app := c.getApp(ctx)
	env := make(map[string]string, len(release.Env)+len(newJob.Env)+4)
	env["FLYNN_APP_ID"] = app.ID
	env["FLYNN_RELEASE_ID"] = release.ID
	env["FLYNN_PROCESS_TYPE"] = ""
	env["FLYNN_JOB_ID"] = id
	if newJob.ReleaseEnv {
		for k, v := range release.Env {
			env[k] = v
		}
	}
	for k, v := range newJob.Env {
		env[k] = v
	}
	metadata := make(map[string]string, len(newJob.Meta)+3)
	for k, v := range newJob.Meta {
		metadata[k] = v
	}
	metadata["flynn-controller.app"] = app.ID
	metadata["flynn-controller.app_name"] = app.Name
	metadata["flynn-controller.release"] = release.ID
	job := &host.Job{
		ID:       id,
		Metadata: metadata,
		Artifact: host.Artifact{
			Type: artifact.Type,
			URI:  artifact.URI,
		},
		Config: host.ContainerConfig{
			Cmd:        newJob.Cmd,
			Env:        env,
			TTY:        newJob.TTY,
			Stdin:      attach,
			DisableLog: newJob.DisableLog,
		},
		Resources: newJob.Resources,
	}
	resource.SetDefaults(&job.Resources)
	if len(newJob.Entrypoint) > 0 {
		job.Config.Entrypoint = newJob.Entrypoint
	}

	var attachClient cluster.AttachClient
	if attach {
		attachReq := &host.AttachReq{
			JobID:  job.ID,
			Flags:  host.AttachFlagStdout | host.AttachFlagStderr | host.AttachFlagStdin | host.AttachFlagStream,
			Height: uint16(newJob.Lines),
			Width:  uint16(newJob.Columns),
		}
		attachClient, err = client.Attach(attachReq, true)
		if err != nil {
			respondWithError(w, fmt.Errorf("attach failed: %s", err.Error()))
			return
		}
		defer attachClient.Close()
	}

	if err := client.AddJob(job); err != nil {
		respondWithError(w, fmt.Errorf("schedule failed: %s", err.Error()))
		return
	}
//......... remainder of this code omitted .........
Example 14: FixPostgres
//......... earlier part of this code omitted .........
		watchCh := make(chan *discoverd.Event)
		upCh := make(chan string)
		stream, err := service.Watch(watchCh)
		if err != nil {
			return nil, fmt.Errorf("error watching discoverd service: %s", err)
		}
		go func() {
			var current bool
			for event := range watchCh {
				if event.Kind == discoverd.EventKindCurrent {
					current = true
					continue
				}
				if !current || event.Kind != discoverd.EventKindUp {
					continue
				}
				if event.Instance.Meta["FLYNN_JOB_ID"] == jobID {
					upCh <- event.Instance.Addr
				}
			}
		}()
		return func() (string, error) {
			f.l.Info("waiting for postgres instance to start", "job.id", jobID)
			defer stream.Close()
			select {
			case addr := <-upCh:
				return addr, nil
			case <-time.After(time.Minute):
				return "", fmt.Errorf("timed out waiting for postgres instance to come up")
			}
		}, nil
	}

	var wait func() (string, error)
	have := len(instances)
	want := 2
	if state.Singleton {
		want = 1
	}
	if have >= want {
		return fmt.Errorf("already have enough postgres instances, unable to fix")
	}
	f.l.Info("attempting to start missing postgres jobs", "want", want, "have", have)
	if leader == nil {
		// if no postgres, attempt to start
		job.ID = cluster.GenerateJobID(host.ID(), "")
		f.FixJobEnv(job)
		f.l.Info("starting postgres primary job", "job.id", job.ID)
		wait, err = waitForInstance(job.ID)
		if err != nil {
			return err
		}
		if err := host.AddJob(job); err != nil {
			return fmt.Errorf("error starting postgres primary job on %s: %s", host.ID(), err)
		}
		have++
	}
	if want > have {
		// if not enough postgres instances, start another
		var secondHost *cluster.Host
		for _, h := range f.hosts {
			if h.ID() != host.ID() {
				secondHost = h
				break
			}
		}
		if secondHost == nil {
			// if there are no other hosts, use the same one we put the primary on
			secondHost = host
		}
		job.ID = cluster.GenerateJobID(secondHost.ID(), "")
		f.FixJobEnv(job)
		f.l.Info("starting second postgres job", "job.id", job.ID)
		if wait == nil {
			wait, err = waitForInstance(job.ID)
			if err != nil {
				return err
			}
		}
		if err := utils.ProvisionVolume(secondHost, job); err != nil {
			return fmt.Errorf("error creating postgres volume on %s: %s", secondHost.ID(), err)
		}
		if err := secondHost.AddJob(job); err != nil {
			return fmt.Errorf("error starting additional postgres job on %s: %s", secondHost.ID(), err)
		}
	}
	if wait != nil {
		addr, err := wait()
		if err != nil {
			return err
		}
		if leader != nil {
			addr = leader.Addr
		}
		f.l.Info("waiting for postgres to come up read-write")
		return pgmanager.NewClient(addr).WaitForReadWrite(5 * time.Minute)
	}
	return nil
}
Example 15: Restore
/*
Restore prior state from the save location defined at construction time.
If the state save file is empty, nothing is loaded, and no error is returned.
*/
func (s *State) Restore(backend Backend, buffers host.LogBuffers) (func(), error) {
	if err := s.Acquire(); err != nil {
		return nil, err
	}
	defer s.Release()
	s.backend = backend

	var resurrect []*host.Job
	if err := s.stateDB.View(func(tx *bolt.Tx) error {
		jobsBucket := tx.Bucket([]byte("jobs"))
		backendJobsBucket := tx.Bucket([]byte("backend-jobs"))
		backendGlobalBucket := tx.Bucket([]byte("backend-global"))
		persistentBucket := tx.Bucket([]byte("persistent-jobs"))

		// restore jobs
		if err := jobsBucket.ForEach(func(k, v []byte) error {
			job := &host.ActiveJob{}
			if err := json.Unmarshal(v, job); err != nil {
				return err
			}
			if job.CreatedAt.IsZero() {
				job.CreatedAt = time.Now()
			}
			s.jobs[string(k)] = job
			return nil
		}); err != nil {
			return err
		}

		// hand opaque blobs back to backend so it can do its restore
		backendJobsBlobs := make(map[string][]byte)
		if err := backendJobsBucket.ForEach(func(k, v []byte) error {
			backendJobsBlobs[string(k)] = v
			return nil
		}); err != nil {
			return err
		}
		backendGlobalBlob := backendGlobalBucket.Get([]byte("backend"))
		if err := backend.UnmarshalState(s.jobs, backendJobsBlobs, backendGlobalBlob, buffers); err != nil {
			return err
		}

		// resurrect any persistent jobs which are not running
		if err := persistentBucket.ForEach(func(k, v []byte) error {
			for _, job := range s.jobs {
				if job.Job.ID == string(v) && !backend.JobExists(job.Job.ID) {
					resurrect = append(resurrect, job.Job)
				}
			}
			return nil
		}); err != nil {
			return err
		}

		return nil
	}); err != nil && err != io.EOF {
		return nil, fmt.Errorf("could not restore from host persistence db: %s", err)
	}

	return func() {
		if len(resurrect) == 0 {
			return
		}
		var wg sync.WaitGroup
		wg.Add(len(resurrect))
		for _, job := range resurrect {
			go func(job *host.Job) {
				// generate a new job id, this is a new job
				newJob := job.Dup()
				newJob.ID = cluster.GenerateJobID(s.id, "")
				if _, ok := newJob.Config.Env["FLYNN_JOB_ID"]; ok {
					newJob.Config.Env["FLYNN_JOB_ID"] = newJob.ID
				}
				log.Printf("resurrecting %s as %s", job.ID, newJob.ID)
				s.AddJob(newJob)
				backend.Run(newJob, nil, nil)
				wg.Done()
			}(job)
		}
		wg.Wait()
	}, nil
}