本文整理汇总了Golang中github.com/flynn/go-check.C.Fatal方法的典型用法代码示例。如果您正苦于以下问题：Golang C.Fatal方法的具体用法？Golang C.Fatal怎么用？Golang C.Fatal使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/flynn/go-check.C
的用法示例。
在下文中一共展示了C.Fatal方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: waitForDeploymentEvents
// waitForDeploymentEvents consumes deployment events from stream until a
// terminal event ("complete" or "failed") arrives, failing the test if the
// stream closes or 60s pass between events, then asserts the collected
// events match expected field-by-field.
func waitForDeploymentEvents(t *c.C, stream chan *ct.DeploymentEvent, expected []*ct.DeploymentEvent) {
	debugf(t, "waiting for %d deployment events", len(expected))
	received := make([]*ct.DeploymentEvent, 0, len(expected))
outer:
	for {
		select {
		case event, open := <-stream:
			if !open {
				t.Fatal("unexpected close of deployment event stream")
			}
			received = append(received, event)
			if event.Status == "complete" || event.Status == "failed" {
				debugf(t, "got deployment event: %s", event.Status)
				break outer
			}
			debugf(t, "got deployment event: %s %s", event.JobType, event.JobState)
		case <-time.After(60 * time.Second):
			t.Fatal("timed out waiting for deployment event")
		}
	}
	for idx, want := range expected {
		got := received[idx]
		t.Assert(got.ReleaseID, c.Equals, want.ReleaseID)
		t.Assert(got.JobType, c.Equals, want.JobType)
		t.Assert(got.JobState, c.Equals, want.JobState)
		t.Assert(got.Status, c.Equals, want.Status)
		t.Assert(got.Error, c.Equals, want.Error)
	}
}
示例2: TestAttachFinishedInteractiveJob
// TestAttachFinishedInteractiveJob runs a short interactive (TTY) job and
// then checks that requesting its logs fails, since interactive jobs keep
// no logs.
func (s *HostSuite) TestAttachFinishedInteractiveJob(t *c.C) {
	cluster := s.clusterClient(t)

	// run a quick interactive job
	cmd := exec.CommandUsingCluster(cluster, exec.DockerImage(imageURIs["test-apps"]), "/bin/true")
	cmd.TTY = true
	runErr := make(chan error)
	go func() {
		runErr <- cmd.Run()
	}()
	select {
	case err := <-runErr:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for interactive job")
	}

	h, err := cluster.Host(cmd.HostID)
	t.Assert(err, c.IsNil)

	// Getting the logs for the job should fail, as it has none because it was
	// interactive
	attachErr := make(chan error)
	go func() {
		// use a goroutine-local err: the original wrote the enclosing err,
		// which races with the test goroutine if the timeout below fires
		// before Attach returns
		_, err := h.Attach(&host.AttachReq{JobID: cmd.Job.ID, Flags: host.AttachFlagLogs}, false)
		attachErr <- err
	}()
	select {
	case err := <-attachErr:
		t.Assert(err, c.NotNil)
	case <-time.After(time.Second):
		t.Error("timed out waiting for attach")
	}
}
示例3: TestTCPApp
// TestTCPApp scales up an echo service, adds a TCP route for it, and checks
// that data written to the routed port is echoed back verbatim.
func (s *SchedulerSuite) TestTCPApp(t *c.C) {
	app, _ := s.createApp(t)

	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "echoer=1"), Succeeds)

	newRoute := flynn(t, "/", "-a", app.Name, "route", "add", "tcp", "-s", "echo-service")
	t.Assert(newRoute, Succeeds)
	t.Assert(newRoute.Output, Matches, `.+ on port \d+`)
	// the port is the last whitespace-separated token of the route output
	str := strings.Split(strings.TrimSpace(string(newRoute.Output)), " ")
	port := str[len(str)-1]

	// use Attempts to give the processes time to start
	if err := Attempts.Run(func() error {
		servAddr := routerIP + ":" + port
		conn, err := net.Dial("tcp", servAddr)
		if err != nil {
			return err
		}
		defer conn.Close()
		msg := []byte("hello there!\n")
		if _, err := conn.Write(msg); err != nil {
			return err
		}
		// read until the buffer is full: a single conn.Read may return
		// fewer bytes than were written (short read), which previously
		// made the comparison below flaky
		reply := make([]byte, len(msg))
		for n := 0; n < len(reply); {
			m, err := conn.Read(reply[n:])
			if err != nil {
				return err
			}
			n += m
		}
		t.Assert(reply, c.DeepEquals, msg)
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
示例4: TestResourceLimits
// TestResourceLimits runs a job with explicit resource limits and verifies
// (via assertResourceLimits) that the limits are visible inside the job.
func (s *HostSuite) TestResourceLimits(t *c.C) {
	job := &host.Job{
		Config:    host.ContainerConfig{Args: []string{"sh", "-c", resourceCmd}},
		Resources: testResources(),
	}
	cmd := exec.JobUsingCluster(s.clusterClient(t), exec.DockerImage(imageURIs["test-apps"]), job)

	var stdout bytes.Buffer
	cmd.Stdout = &stdout

	done := make(chan error)
	go func() {
		done <- cmd.Run()
	}()
	select {
	case err := <-done:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for resource limits job")
	}

	assertResourceLimits(t, stdout.String())
}
示例5: TestDevStdout
// TestDevStdout checks that shell writes to /dev/stdout and /dev/stderr
// inside a job (including from subshells) are routed to the job's stdout
// and stderr streams respectively.
func (s *HostSuite) TestDevStdout(t *c.C) {
	cmd := exec.CommandUsingCluster(
		s.clusterClient(t),
		s.createArtifact(t, "test-apps"),
		"sh",
	)
	cmd.Stdin = strings.NewReader(`
echo foo > /dev/stdout
echo bar > /dev/stderr
echo "SUBSHELL: $(echo baz > /dev/stdout)"
echo "SUBSHELL: $(echo qux 2>&1 > /dev/stderr)" >&2`)

	var outBuf, errBuf bytes.Buffer
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf

	done := make(chan error)
	go func() {
		done <- cmd.Run()
	}()
	select {
	case err := <-done:
		t.Assert(err, c.IsNil)
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for /dev/stdout job")
	}

	t.Assert(outBuf.String(), c.Equals, "foo\nSUBSHELL: baz\n")
	t.Assert(errBuf.String(), c.Equals, "bar\nSUBSHELL: qux\n")
}
示例6: waitForJobEvents
// waitForJobEvents collects up/down job events from events until one has
// been seen per entry in expected, then asserts each event's release ID,
// state, and job type.
func (s *DeployerSuite) waitForJobEvents(t *c.C, jobType string, events chan *ct.Job, expected []*ct.Job) {
	debugf(t, "waiting for %d job events", len(expected))
	var seen []*ct.Job
collect:
	for {
		select {
		case job, open := <-events:
			if !open {
				t.Fatal("unexpected close of job event stream")
			}
			// only track up and down events as we can't always
			// predict the order of pending / starting / stopping
			// events when scaling multiple jobs
			if job.State == ct.JobStateUp || job.State == ct.JobStateDown {
				seen = append(seen, job)
				if len(seen) == len(expected) {
					break collect
				}
			}
		case <-time.After(60 * time.Second):
			t.Fatal("timed out waiting for job events")
		}
	}
	for i, want := range expected {
		t.Assert(seen[i].ReleaseID, c.Equals, want.ReleaseID)
		t.Assert(seen[i].State, c.Equals, want.State)
		t.Assert(seen[i].Type, c.Equals, jobType)
	}
}
示例7: TestVolumeDeleteOnStop
// TestVolumeDeleteOnStop provisions a volume for a job with DeleteOnStop set
// to true and then false, runs the job, and verifies after the job's cleanup
// event that the volume was deleted (or kept) accordingly.
func (s *HostSuite) TestVolumeDeleteOnStop(t *c.C) {
	hosts, err := s.clusterClient(t).Hosts()
	t.Assert(err, c.IsNil)
	t.Assert(hosts, c.Not(c.HasLen), 0)
	h := hosts[0]

	// stream job events so we can wait for cleanup events
	events := make(chan *host.Event)
	stream, err := h.StreamEvents("all", events)
	t.Assert(err, c.IsNil)
	defer stream.Close()
	// waitCleanup blocks until a cleanup event for jobID arrives, failing
	// the test after 30s; events for other jobs are skipped.
	waitCleanup := func(jobID string) {
		timeout := time.After(30 * time.Second)
		for {
			select {
			case event := <-events:
				if event.JobID == jobID && event.Event == host.JobEventCleanup {
					return
				}
			case <-timeout:
				t.Fatal("timed out waiting for cleanup event")
			}
		}
	}

	for _, deleteOnStop := range []bool{true, false} {
		job := &host.Job{
			Config: host.ContainerConfig{
				Args:       []string{"sh", "-c", "ls -d /foo"},
				DisableLog: true,
			},
		}

		// provision a volume
		req := &ct.VolumeReq{Path: "/foo", DeleteOnStop: deleteOnStop}
		vol, err := utils.ProvisionVolume(req, h, job)
		t.Assert(err, c.IsNil)
		// NOTE(review): defer inside a loop runs at function return, not per
		// iteration — volumes accumulate until the whole test finishes.
		defer h.DestroyVolume(vol.ID)

		// run the job; /foo existing in the container shows the volume was
		// mounted at the requested path
		cmd := exec.JobUsingCluster(s.clusterClient(t), s.createArtifact(t, "test-apps"), job)
		cmd.HostID = h.ID()
		out, err := cmd.CombinedOutput()
		t.Assert(err, c.IsNil)
		t.Assert(string(out), c.Equals, "/foo\n")

		// wait for a cleanup event
		waitCleanup(job.ID)

		// check if the volume was deleted or not
		vol, err = h.GetVolume(vol.ID)
		if deleteOnStop {
			t.Assert(hh.IsObjectNotFoundError(err), c.Equals, true)
		} else {
			t.Assert(err, c.IsNil)
		}
	}
}
示例8: TestCancel
// TestCancel pushes an app whose build hangs, interrupts the push the way a
// shell's Ctrl-C would (SIGINT to the git process group), and verifies the
// slugbuilder job goes up and down within 10 seconds.
func (s *GitDeploySuite) TestCancel(t *c.C) {
	r := s.newGitRepo(t, "cancel-hang")
	t.Assert(r.flynn("create", "cancel-hang"), Succeeds)
	t.Assert(r.flynn("env", "set", "FOO=bar", "BUILDPACK_URL=https://github.com/kr/heroku-buildpack-inline"), Succeeds)

	// start watching for slugbuilder events
	watcher, err := s.controllerClient(t).WatchJobEvents("cancel-hang", "")
	t.Assert(err, c.IsNil)

	// start push
	cmd := exec.Command("git", "push", "flynn", "master")
	// put the command in its own process group (to emulate the way shells handle Ctrl-C)
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	cmd.Dir = r.dir
	var stdout io.Reader
	stdout, _ = cmd.StdoutPipe()
	cmd.Stderr = cmd.Stdout
	out := &bytes.Buffer{}
	stdout = io.TeeReader(stdout, out)
	err = cmd.Start()
	t.Assert(err, c.IsNil)

	// watchdog: kill the push if it takes longer than 30s
	done := make(chan struct{})
	go func() {
		select {
		case <-done:
		case <-time.After(30 * time.Second):
			cmd.Process.Signal(syscall.SIGTERM)
			cmd.Wait()
			// NOTE(review): calling t.Fatal from a goroutine other than the
			// test's own is unreliable — confirm this watchdog fails the
			// test cleanly rather than crashing the runner.
			t.Fatal("git push timed out")
		}
	}()

	// wait for the build to print its sentinel line, proving it is hanging
	sc := bufio.NewScanner(stdout)
	found := false
	for sc.Scan() {
		if strings.Contains(sc.Text(), "hanging...") {
			found = true
			break
		}
	}
	t.Log(out.String())
	t.Assert(found, c.Equals, true)

	// send Ctrl-C to git process group and check delivery succeeded
	// (the original ignored syscall.Kill's return value and re-asserted
	// the stale err from cmd.Start)
	t.Assert(syscall.Kill(-cmd.Process.Pid, syscall.SIGINT), c.IsNil)
	go io.Copy(ioutil.Discard, stdout)
	cmd.Wait()
	close(done)

	// check that slugbuilder exits immediately
	err = watcher.WaitFor(ct.JobEvents{"slugbuilder": {ct.JobStateUp: 1, ct.JobStateDown: 1}}, 10*time.Second, nil)
	t.Assert(err, c.IsNil)
}
示例9: TestAppEvents
// TestAppEvents checks that streaming events for an app only receives events
// for that particular app.
func (s *ControllerSuite) TestAppEvents(t *c.C) {
	client := s.controllerClient(t)
	app1, release1 := s.createApp(t)
	app2, release2 := s.createApp(t)

	// stream events for app1
	jobEvents := make(chan *ct.Job)
	stream, err := client.StreamJobEvents(app1.ID, jobEvents)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// runJob runs /bin/true attached (and unlogged) under the given release
	runJob := func(appID, releaseID string) {
		rwc, err := client.RunJobAttached(appID, &ct.NewJob{
			ReleaseID:  releaseID,
			Args:       []string{"/bin/true"},
			DisableLog: true,
		})
		t.Assert(err, c.IsNil)
		rwc.Close()
	}

	// generate events for app2 and wait for them
	watcher, err := client.WatchJobEvents(app2.ID, release2.ID)
	t.Assert(err, c.IsNil)
	defer watcher.Close()
	runJob(app2.ID, release2.ID)
	err = watcher.WaitFor(
		ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}},
		10*time.Second,
		func(e *ct.Job) error {
			debugf(t, "got %s job event for app2", e.State)
			return nil
		},
	)
	t.Assert(err, c.IsNil)

	// generate events for app1
	runJob(app1.ID, release1.ID)

	// check the stream only gets events for app1
	for {
		select {
		case event, open := <-jobEvents:
			if !open {
				t.Fatal("unexpected close of job event stream")
			}
			t.Assert(event.AppID, c.Equals, app1.ID)
			debugf(t, "got %s job event for app1", event.State)
			if event.State == ct.JobStateDown {
				return
			}
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for job events for app1")
		}
	}
}
示例10: TestDockerPush
// TestDockerPush builds a Docker image with an ENV and a CMD, pushes it with
// `flynn docker push`, and verifies the resulting release's env/meta/process
// type, that updated env vars survive a re-push, and that the scaled app is
// reachable via discoverd.
func (s *CLISuite) TestDockerPush(t *c.C) {
	// build image with ENV and CMD
	repo := "cli-test-push"
	s.buildDockerImage(t, repo,
		`ENV FOO=BAR`,
		`CMD ["/bin/pingserv"]`,
	)

	// create app
	client := s.controllerClient(t)
	app := &ct.App{Name: "cli-test-docker-push"}
	t.Assert(client.CreateApp(app), c.IsNil)

	// flynn docker push image
	t.Assert(flynn(t, "/", "-a", app.Name, "docker", "push", repo), Succeeds)

	// check app was released with correct env, meta and process type
	release, err := client.GetAppRelease(app.ID)
	t.Assert(err, c.IsNil)
	t.Assert(release.Env["FOO"], c.Equals, "BAR")
	t.Assert(release.Meta["docker-receive"], c.Equals, "true")
	t.Assert(release.Processes, c.HasLen, 1)
	proc, ok := release.Processes["app"]
	if !ok {
		t.Fatal(`release missing "app" process type`)
	}
	// process args come from the image CMD
	t.Assert(proc.Args, c.DeepEquals, []string{"/bin/pingserv"})

	// check updated env vars are not overwritten
	//
	// need to remove the tag before pushing as we are using Docker 1.9
	// which does not overwrite tags.
	// TODO: remove this when upgrading Docker > 1.9
	u, err := url.Parse(s.clusterConf(t).DockerPushURL)
	t.Assert(err, c.IsNil)
	tag := fmt.Sprintf("%s/%s:latest", u.Host, app.Name)
	t.Assert(run(t, exec.Command("docker", "rmi", tag)), Succeeds)
	t.Assert(flynn(t, "/", "-a", app.Name, "env", "set", "FOO=BAZ"), Succeeds)
	t.Assert(flynn(t, "/", "-a", app.Name, "docker", "push", repo), Succeeds)
	t.Assert(flynn(t, "/", "-a", app.Name, "env", "get", "FOO"), Outputs, "BAZ\n")

	// check the release can be scaled up
	t.Assert(flynn(t, "/", "-a", app.Name, "scale", "app=1"), Succeeds)

	// check the job is reachable with the app's name in discoverd
	instances, err := s.discoverdClient(t).Instances(app.Name+"-web", 10*time.Second)
	t.Assert(err, c.IsNil)
	res, err := hh.RetryClient.Get("http://" + instances[0].Addr)
	t.Assert(err, c.IsNil)
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	t.Assert(err, c.IsNil)
	t.Assert(string(body), c.Equals, "OK")
}
示例11: TestAddFailingJob
// TestAddFailingJob adds a job with a nonexistent partition and verifies the
// host emits a create event followed by an error event whose job error names
// the invalid partition.
func (s *HostSuite) TestAddFailingJob(t *c.C) {
	// get a host and watch events
	hosts, err := s.clusterClient(t).Hosts()
	t.Assert(err, c.IsNil)
	t.Assert(hosts, c.Not(c.HasLen), 0)
	h := hosts[0]
	jobID := random.UUID()
	events := make(chan *host.Event)
	stream, err := h.StreamEvents(jobID, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// add a job with a non existent partition
	t.Assert(h.AddJob(&host.Job{
		ID: jobID,
		ImageArtifact: &host.Artifact{
			Type: host.ArtifactTypeDocker,
			URI:  "http://example.com?name=foo&id=bar",
		},
		Partition: "nonexistent",
	}), c.IsNil)

	// check we get a create then error event
	seen := make(map[string]*host.Event, 2)
	for len(seen) < 2 {
		select {
		case e, open := <-events:
			if !open {
				t.Fatalf("job event stream closed unexpectedly: %s", stream.Err())
			}
			if _, dup := seen[e.Event]; dup {
				t.Fatalf("unexpected event: %v", e)
			}
			seen[e.Event] = e
		case <-time.After(30 * time.Second):
			t.Fatal("timed out waiting for job event")
		}
	}
	t.Assert(seen[host.JobEventCreate], c.NotNil)
	e := seen[host.JobEventError]
	t.Assert(e, c.NotNil)
	t.Assert(e.Job, c.NotNil)
	t.Assert(e.Job.Error, c.NotNil)
	t.Assert(*e.Job.Error, c.Equals, `host: invalid job partition "nonexistent"`)
}
示例12: TestUpdateTags
// TestUpdateTags sets a tag on a flynn-host instance, verifies it shows up
// in the host's discoverd metadata, then clears it by setting the tag to the
// empty string and verifies it is removed.
func (s *HostSuite) TestUpdateTags(t *c.C) {
	events := make(chan *discoverd.Event)
	stream, err := s.discoverdClient(t).Service("flynn-host").Watch(events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// nextEvent returns the next discoverd event, failing the test if the
	// stream closes or 10s elapse
	nextEvent := func() *discoverd.Event {
		select {
		case e, open := <-events:
			if !open {
				t.Fatal("unexpected close of discoverd stream")
			}
			return e
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for discoverd event")
		}
		return nil
	}

	// build a client for the first host seen, consuming events until the
	// "current" marker
	var client *cluster.Host
	for {
		e := nextEvent()
		if client == nil && e.Kind == discoverd.EventKindUp {
			client = cluster.NewHost(e.Instance.Meta["id"], e.Instance.Addr, nil, nil)
		}
		if e.Kind == discoverd.EventKindCurrent {
			break
		}
	}
	if client == nil {
		t.Fatal("did not initialize flynn-host client")
	}

	// waitUpdate blocks until an update event for our host arrives and
	// returns its metadata
	waitUpdate := func() map[string]string {
		for {
			e := nextEvent()
			if e.Kind == discoverd.EventKindUpdate && e.Instance.Meta["id"] == client.ID() {
				return e.Instance.Meta
			}
		}
	}

	t.Assert(client.UpdateTags(map[string]string{"foo": "bar"}), c.IsNil)
	meta := waitUpdate()
	t.Assert(meta["tag:foo"], c.Equals, "bar")

	// setting to empty string should delete the tag
	t.Assert(client.UpdateTags(map[string]string{"foo": ""}), c.IsNil)
	meta = waitUpdate()
	if _, ok := meta["tag:foo"]; ok {
		t.Fatal("expected tag to be deleted but is still present")
	}
}
示例13: TestPushImage
// TestPushImage pushes a locally built Docker image to docker-receive,
// waits for the corresponding artifact event, and checks a release built
// from that artifact can run a job using the image's filesystem.
func (s *DockerReceiveSuite) TestPushImage(t *c.C) {
	// build a Docker image
	repo := "docker-receive-test-push"
	s.buildDockerImage(t, repo, "RUN echo foo > /foo.txt")

	// subscribe to artifact events
	client := s.controllerClient(t)
	events := make(chan *ct.Event)
	stream, err := client.StreamEvents(ct.StreamEventsOptions{
		ObjectTypes: []ct.EventType{ct.EventTypeArtifact},
	}, events)
	t.Assert(err, c.IsNil)
	defer stream.Close()

	// push the Docker image to docker-receive
	u, err := url.Parse(s.clusterConf(t).DockerPushURL)
	t.Assert(err, c.IsNil)
	tag := fmt.Sprintf("%s/%s:latest", u.Host, repo)
	t.Assert(run(t, exec.Command("docker", "tag", "--force", repo, tag)), Succeeds)
	t.Assert(run(t, exec.Command("docker", "push", tag)), Succeeds)

	// wait for an artifact to be created for our repository
	var artifact ct.Artifact
	for found := false; !found; {
		select {
		case event, open := <-events:
			if !open {
				t.Fatalf("event stream closed unexpectedly: %s", stream.Err())
			}
			t.Assert(json.Unmarshal(event.Data, &artifact), c.IsNil)
			found = artifact.Meta["docker-receive.repository"] == repo
		case <-time.After(30 * time.Second):
			t.Fatal("timed out waiting for artifact")
		}
	}

	// create a release with the Docker artifact
	app := &ct.App{}
	t.Assert(client.CreateApp(app), c.IsNil)
	release := &ct.Release{ArtifactIDs: []string{artifact.ID}}
	t.Assert(client.CreateRelease(release), c.IsNil)
	t.Assert(client.SetAppRelease(app.ID, release.ID), c.IsNil)

	// check running a job uses the image
	t.Assert(flynn(t, "/", "-a", app.ID, "run", "cat", "/foo.txt"), SuccessfulOutputContains, "foo")
}
示例14: TestClusterBackups
// TestClusterBackups restores each backup found in --backups-dir in order,
// skipping when the flag is unset and failing when the directory is empty.
func (s *ZZBackupSuite) TestClusterBackups(t *c.C) {
	dir := args.BootConfig.BackupsDir
	if dir == "" {
		t.Skip("--backups-dir not set")
	}
	entries, err := ioutil.ReadDir(dir)
	t.Assert(err, c.IsNil)
	if len(entries) == 0 {
		t.Fatal("backups dir is empty")
	}
	for i, entry := range entries {
		s.testClusterBackup(t, i, filepath.Join(dir, entry.Name()))
	}
}
示例15: TestRun
// TestRun exercises `flynn run`: log capture (off by default, on with
// --enable-log), detached runs printing the job ID, stdin/stderr plumbing,
// and exit-code propagation.
func (s *CLISuite) TestRun(t *c.C) {
	app := s.newCliTestApp(t)
	defer app.cleanup()

	// this shouldn't be logged
	t.Assert(app.sh("echo foo"), Outputs, "foo\n")
	// drain the events
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})

	// this should be logged due to the --enable-log flag
	t.Assert(app.flynn("run", "--enable-log", "echo", "hello"), Outputs, "hello\n")
	app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})

	// a detached run should print the job ID rather than the job's output
	detached := app.flynn("run", "-d", "echo", "world")
	t.Assert(detached, Succeeds)
	t.Assert(detached, c.Not(Outputs), "world\n")

	id := strings.TrimSpace(detached.Output)
	jobID := app.waitFor(ct.JobEvents{"": {ct.JobStateUp: 1, ct.JobStateDown: 1}})
	t.Assert(jobID, c.Equals, id)
	// detached runs log by default: only "hello" and "world" were logged
	t.Assert(app.flynn("log", "--raw-output"), Outputs, "hello\nworld\n")

	// test stdin and stderr
	streams := app.flynnCmd("run", "sh", "-c", "cat 1>&2")
	stdin, err := streams.StdinPipe()
	t.Assert(err, c.IsNil)
	go func() {
		stdin.Write([]byte("goto stderr"))
		stdin.Close()
	}()
	var stderr bytes.Buffer
	var stdout bytes.Buffer
	streams.Stderr = &stderr
	streams.Stdout = &stdout
	t.Assert(streams.Run(), c.IsNil)
	t.Assert(stderr.String(), c.Equals, "goto stderr")
	t.Assert(stdout.String(), c.Equals, "")

	// test exit code
	exit := app.sh("exit 42")
	t.Assert(exit, c.Not(Succeeds))
	if msg, ok := exit.Err.(*exec.ExitError); ok { // there is error code
		code := msg.Sys().(syscall.WaitStatus).ExitStatus()
		t.Assert(code, c.Equals, 42)
	} else {
		t.Fatal("There was no error code!")
	}
}