本文整理汇总了Golang中github.com/openshift/origin/pkg/build/api.GetBuildPodName函数的典型用法代码示例。如果您正苦于以下问题:Golang GetBuildPodName函数的具体用法?Golang GetBuildPodName怎么用?Golang GetBuildPodName使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了GetBuildPodName函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: Describe
// Describe returns the description of a build: its metadata, status,
// duration, owning build config, build pod name, spec details, and
// (when settings.ShowEvents is set) events gathered from both the build
// object and its build pod.
func (d *BuildDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) {
	c := d.osClient.Builds(namespace)
	build, err := c.Get(name)
	if err != nil {
		return "", err
	}
	// Event lookup is best effort: on failure fall back to an empty list so
	// the rest of the description still renders.
	events, _ := d.kubeClient.Events(namespace).Search(build)
	if events == nil {
		events = &kapi.EventList{}
	}
	// get also pod events and merge it all into one list for describe
	if pod, err := d.kubeClient.Pods(namespace).Get(buildapi.GetBuildPodName(build)); err == nil {
		if podEvents, _ := d.kubeClient.Events(namespace).Search(pod); podEvents != nil {
			events.Items = append(events.Items, podEvents.Items...)
		}
	}
	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, build.ObjectMeta)
		fmt.Fprintln(out, "")
		status := bold(build.Status.Phase)
		if build.Status.Message != "" {
			status += " (" + build.Status.Message + ")"
		}
		formatString(out, "Status", status)
		// Only show a start time when one was actually recorded.
		if build.Status.StartTimestamp != nil && !build.Status.StartTimestamp.IsZero() {
			formatString(out, "Started", build.Status.StartTimestamp.Time.Format(time.RFC1123))
		}
		// Create the time object with second-level precision so we don't get
		// output like "duration: 1.2724395728934s"
		formatString(out, "Duration", describeBuildDuration(build))
		if build.Status.Config != nil {
			formatString(out, "Build Config", build.Status.Config.Name)
		}
		formatString(out, "Build Pod", buildapi.GetBuildPodName(build))
		describeCommonSpec(build.Spec.CommonSpec, out)
		describeBuildTriggerCauses(build.Spec.TriggeredBy, out)
		if settings.ShowEvents {
			kctl.DescribeEvents(events, out)
		}
		return nil
	})
}
示例2: List
// List lists all Pods associated with a Build.
//
// Despite the name, this lister performs reconciliation: for every
// incomplete, non-pipeline build it checks whether the expected build pod
// still exists. If the pod is missing (or carries a different build label),
// a synthetic pod object is pushed through lw.store.Delete so downstream
// consumers observe the deletion. It always returns an empty PodList on
// success.
func (lw *buildPodDeleteLW) List(options kapi.ListOptions) (runtime.Object, error) {
	glog.V(5).Info("Checking for deleted build pods")
	buildList, err := lw.Client.Builds(kapi.NamespaceAll).List(options)
	if err != nil {
		glog.V(4).Infof("Failed to find any builds due to error %v", err)
		return nil, err
	}
	for _, build := range buildList.Items {
		glog.V(5).Infof("Found build %s/%s", build.Namespace, build.Name)
		// Completed builds no longer need their pods tracked.
		if buildutil.IsBuildComplete(&build) {
			glog.V(5).Infof("Ignoring build %s/%s because it is complete", build.Namespace, build.Name)
			continue
		}
		// Pipeline builds do not run in a build pod.
		if build.Spec.Strategy.JenkinsPipelineStrategy != nil {
			glog.V(5).Infof("Ignoring build %s/%s because it is a pipeline build", build.Namespace, build.Name)
			continue
		}
		pod, err := lw.KubeClient.Pods(build.Namespace).Get(buildapi.GetBuildPodName(&build))
		if err != nil {
			if !kerrors.IsNotFound(err) {
				glog.V(4).Infof("Error getting pod for build %s/%s: %v", build.Namespace, build.Name, err)
				return nil, err
			} else {
				// NotFound means the pod is gone; treat as missing.
				pod = nil
			}
		} else {
			// A pod with the right name but a different build label belongs
			// to some other build; treat it as missing for this one.
			if buildName := buildapi.GetBuildName(pod); buildName != build.Name {
				pod = nil
			}
		}
		if pod == nil {
			// Construct a placeholder pod carrying the expected name and
			// namespace so the delete event identifies the right object.
			deletedPod := &kapi.Pod{
				ObjectMeta: kapi.ObjectMeta{
					Name:      buildapi.GetBuildPodName(&build),
					Namespace: build.Namespace,
				},
			}
			glog.V(4).Infof("No build pod found for build %s/%s, sending delete event for build pod", build.Namespace, build.Name)
			err := lw.store.Delete(deletedPod)
			if err != nil {
				// Best effort: a failed enqueue is logged, not returned.
				glog.V(4).Infof("Error queuing delete event: %v", err)
			}
		} else {
			glog.V(5).Infof("Found build pod %s/%s for build %s", pod.Namespace, pod.Name, build.Name)
		}
	}
	return &kapi.PodList{}, nil
}
示例3: CancelBuild
// CancelBuild updates a build status to Cancelled, after its associated pod is deleted.
// Builds that are not in a cancellable phase are ignored. A pod that has
// already disappeared (NotFound) is not treated as an error.
func (bc *BuildController) CancelBuild(build *buildapi.Build) error {
	if !isBuildCancellable(build) {
		glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase)
		return nil
	}

	glog.V(4).Infof("Cancelling build %s/%s.", build.Namespace, build.Name)

	pod, err := bc.PodManager.GetPod(build.Namespace, buildapi.GetBuildPodName(build))
	switch {
	case err == nil:
		// The pod exists; remove it. A concurrent deletion is fine.
		if deleteErr := bc.PodManager.DeletePod(build.Namespace, pod); deleteErr != nil && !errors.IsNotFound(deleteErr) {
			return fmt.Errorf("Couldn't delete build pod %s/%s: %v", build.Namespace, pod.Name, deleteErr)
		}
	case !errors.IsNotFound(err):
		return fmt.Errorf("Failed to get pod for build %s/%s: %v", build.Namespace, build.Name, err)
	}

	// Record the cancelled state and completion time on the build itself.
	build.Status.Phase = buildapi.BuildPhaseCancelled
	build.Status.Reason = ""
	build.Status.Message = ""
	completed := unversioned.Now()
	build.Status.CompletionTimestamp = &completed
	if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
		return fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err)
	}

	glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name)
	return nil
}
示例4: canRunAsRoot
// canRunAsRoot checks with admission control whether a build pod requesting
// to run as UID 0 (root) would be admitted under the build's service
// account. Admission errors are logged at V(2) and treated as "not allowed";
// the function returns true only when admission succeeds.
func (bs *SourceBuildStrategy) canRunAsRoot(build *buildapi.Build) bool {
	// UID 0 is root; a pointer is needed for the SecurityContext field.
	rootUser := int64(0)
	// Construct a minimal stand-in pod mirroring what the strategy would
	// create, with RunAsUser explicitly set to root.
	pod := &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{
			Name:      buildapi.GetBuildPodName(build),
			Namespace: build.Namespace,
		},
		Spec: kapi.PodSpec{
			ServiceAccountName: build.Spec.ServiceAccount,
			Containers: []kapi.Container{
				{
					Name:  "sti-build",
					Image: bs.Image,
					SecurityContext: &kapi.SecurityContext{
						RunAsUser: &rootUser,
					},
				},
			},
			RestartPolicy: kapi.RestartPolicyNever,
		},
	}
	// Evaluate the pod as if it were being created by the build's service account.
	userInfo := serviceaccount.UserInfo(build.Namespace, build.Spec.ServiceAccount, "")
	attrs := admission.NewAttributesRecord(pod, pod, kapi.Kind("Pod").WithVersion(""), pod.Namespace, pod.Name, kapi.Resource("pods").WithVersion(""), "", admission.Create, userInfo)
	err := bs.AdmissionControl.Admit(attrs)
	if err != nil {
		glog.V(2).Infof("Admit for root user returned error: %v", err)
	}
	return err == nil
}
示例5: HandleBuildDeletion
// HandleBuildDeletion deletes a build pod if the corresponding build has been deleted
func (bc *BuildDeleteController) HandleBuildDeletion(build *buildapi.Build) error {
	glog.V(4).Infof("Handling deletion of build %s", build.Name)

	// Pipeline builds do not have a build pod to clean up.
	if build.Spec.Strategy.JenkinsPipelineStrategy != nil {
		glog.V(4).Infof("Ignoring build with jenkins pipeline strategy")
		return nil
	}

	podName := buildapi.GetBuildPodName(build)
	pod, getErr := bc.PodManager.GetPod(build.Namespace, podName)
	switch {
	case getErr != nil && !errors.IsNotFound(getErr):
		glog.V(2).Infof("Failed to find pod with name %s for build %s in namespace %s due to error: %v", podName, build.Name, build.Namespace, getErr)
		return getErr
	case pod == nil:
		glog.V(2).Infof("Did not find pod with name %s for build %s in namespace %s", podName, build.Name, build.Namespace)
		return nil
	}

	// Never delete a pod whose build label points at a different build.
	if labeledName := buildapi.GetBuildName(pod); labeledName != build.Name {
		glog.V(2).Infof("Not deleting pod %s/%s because the build label %s does not match the build name %s", pod.Namespace, podName, labeledName, build.Name)
		return nil
	}

	// NotFound during deletion means someone else already removed the pod.
	if deleteErr := bc.PodManager.DeletePod(build.Namespace, pod); deleteErr != nil && !errors.IsNotFound(deleteErr) {
		glog.V(2).Infof("Failed to delete pod %s/%s for build %s due to error: %v", build.Namespace, podName, build.Name, deleteErr)
		return deleteErr
	}
	return nil
}
示例6: runBuildPodAdmissionTest
// runBuildPodAdmissionTest creates the given build, watches for its build pod
// to appear, and returns the build embedded in the pod together with the pod
// itself. The test fails if the pod is not observed within
// buildPodAdmissionTestTimeout.
func runBuildPodAdmissionTest(t *testing.T, client *client.Client, kclientset *kclientset.Clientset, build *buildapi.Build) (*buildapi.Build, *kapi.Pod) {
	ns := testutil.Namespace()
	_, err := client.Builds(ns).Create(build)
	if err != nil {
		t.Fatalf("%v", err)
	}
	// Watch only the pod with the deterministic build-pod name.
	watchOpt := kapi.ListOptions{
		FieldSelector: fields.OneTermEqualSelector(
			"metadata.name",
			buildapi.GetBuildPodName(build),
		),
	}
	podWatch, err := kclientset.Core().Pods(ns).Watch(watchOpt)
	if err != nil {
		t.Fatalf("%v", err)
	}
	type resultObjs struct {
		build *buildapi.Build
		pod   *kapi.Pod
	}
	result := make(chan resultObjs)
	defer podWatch.Stop()
	go func() {
		for e := range podWatch.ResultChan() {
			if e.Type == watchapi.Added {
				pod, ok := e.Object.(*kapi.Pod)
				if !ok {
					// t.Fatalf (FailNow) must not be called from a goroutine
					// other than the test's own; record the failure and stop
					// watching so the test times out and fails cleanly.
					t.Errorf("unexpected object: %v", e.Object)
					return
				}
				build := (*buildtestutil.TestPod)(pod).GetBuild(t)
				result <- resultObjs{build: build, pod: pod}
			}
		}
	}()
	select {
	case <-time.After(buildPodAdmissionTestTimeout):
		t.Fatalf("timed out after %v", buildPodAdmissionTestTimeout)
	case objs := <-result:
		return objs.build, objs.pod
	}
	// Unreachable: t.Fatalf fails the test above, but the compiler still
	// requires a terminating return.
	return nil, nil
}
示例7: runBuildDeleteTest
// runBuildDeleteTest verifies that deleting a build causes its build pod to
// be deleted: it creates a build, waits for the pod to appear, deletes the
// build, and asserts that a delete event for the expected build pod follows.
func runBuildDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}
	defer buildWatch.Stop()
	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
	if err != nil {
		t.Fatalf("Couldn't create Build: %v", err)
	}
	// Start the pod watch at the build's resource version so no pod events
	// are missed between creation and subscription.
	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
	if err != nil {
		t.Fatalf("Couldn't subscribe to Pods %v", err)
	}
	defer podWatch.Stop()
	// wait for initial build event from the creation of the imagerepo with tag latest
	event := waitForWatch(t, "initial build added", buildWatch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild := event.Object.(*buildapi.Build)
	// initial pod creation for build
	event = waitForWatch(t, "build pod created", podWatch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	// Deleting the build should trigger deletion of its pod; a failed delete
	// would previously go unnoticed and make the watch below time out with a
	// misleading message, so check the error explicitly.
	if err := clusterAdminClient.Builds(testutil.Namespace()).Delete(newBuild.Name); err != nil {
		t.Fatalf("Couldn't delete Build: %v", err)
	}
	event = waitForWatchType(t, "pod deleted due to build deleted", podWatch, watchapi.Deleted)
	if e, a := watchapi.Deleted, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	pod := event.Object.(*kapi.Pod)
	if expected := buildapi.GetBuildPodName(newBuild); pod.Name != expected {
		t.Fatalf("Expected pod %s to be deleted, but pod %s was deleted", expected, pod.Name)
	}
}
示例8: TestCustomCreateBuildPod
// TestCustomCreateBuildPod verifies that the custom build strategy rejects a
// build with an empty image reference, and that a valid build produces a pod
// with the expected name, labels, node selector, container settings, volumes,
// deadline, resources, and environment (including the serialized BUILD
// payload and the standard source/output variables).
func TestCustomCreateBuildPod(t *testing.T) {
	strategy := CustomBuildStrategy{
		Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion),
	}
	// A custom build whose From image name is empty must be rejected.
	expectedBad := mockCustomBuild(false, false)
	expectedBad.Spec.Strategy.CustomStrategy.From = kapi.ObjectReference{
		Kind: "DockerImage",
		Name: "",
	}
	if _, err := strategy.CreateBuildPod(expectedBad); err == nil {
		t.Errorf("Expected error when Image is empty, got nothing")
	}
	build := mockCustomBuild(false, false)
	actual, err := strategy.CreateBuildPod(build)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Pod name must be derived from the build name.
	if expected, actual := buildapi.GetBuildPodName(build), actual.ObjectMeta.Name; expected != actual {
		t.Errorf("Expected %s, but got %s!", expected, actual)
	}
	// The pod must carry exactly the build label pointing back at the build.
	if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, actual.Labels) {
		t.Errorf("Pod Labels does not match Build Labels!")
	}
	if !reflect.DeepEqual(nodeSelector, actual.Spec.NodeSelector) {
		t.Errorf("Pod NodeSelector does not match Build NodeSelector. Expected: %v, got: %v", nodeSelector, actual.Spec.NodeSelector)
	}
	container := actual.Spec.Containers[0]
	if container.Name != "custom-build" {
		t.Errorf("Expected custom-build, but got %s!", container.Name)
	}
	if container.ImagePullPolicy != kapi.PullIfNotPresent {
		t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy)
	}
	if actual.Spec.RestartPolicy != kapi.RestartPolicyNever {
		t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy)
	}
	if len(container.VolumeMounts) != 3 {
		t.Fatalf("Expected 3 volumes in container, got %d", len(container.VolumeMounts))
	}
	if *actual.Spec.ActiveDeadlineSeconds != 60 {
		t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds)
	}
	// Mount order is significant: docker socket, push secret, source secret.
	for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, sourceSecretMountPath} {
		if container.VolumeMounts[i].MountPath != expected {
			t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath)
		}
	}
	if !kapi.Semantic.DeepEqual(container.Resources, build.Spec.Resources) {
		t.Fatalf("Expected actual=expected, %v != %v", container.Resources, build.Spec.Resources)
	}
	if len(actual.Spec.Volumes) != 3 {
		t.Fatalf("Expected 3 volumes in Build pod, got %d", len(actual.Spec.Volumes))
	}
	// The BUILD env var must hold the serialized build object at index 0.
	buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), build)
	errorCases := map[int][]string{
		0: {"BUILD", string(buildJSON)},
	}
	standardEnv := []string{"SOURCE_REPOSITORY", "SOURCE_URI", "SOURCE_CONTEXT_DIR", "SOURCE_REF", "OUTPUT_IMAGE", "OUTPUT_REGISTRY", buildapi.OriginVersion}
	for index, exp := range errorCases {
		if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] {
			t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value)
		}
	}
	// Each standard variable must be present with a non-empty value.
	for _, name := range standardEnv {
		found := false
		for _, item := range container.Env {
			if (item.Name == name) && len(item.Value) != 0 {
				found = true
			}
		}
		if !found {
			t.Errorf("Expected %s variable to be set", name)
		}
	}
}
示例9:
g.It("Source: should start a build and wait for the build failed and build pod being killed by kubelet", func() {
g.By("calling oc create source-build")
err := oc.Run("create").Args("-f", sourceFixture).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("starting the source build with --wait flag and short timeout")
br, err := exutil.StartBuildAndWait(oc, "source-build", "--wait")
o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error
g.By("verifying the build status")
o.Expect(br.BuildAttempt).To(o.BeTrue()) // the build should have been attempted
o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) // the build should have failed
g.By("verifying the build pod status")
pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(kapi.PodFailed))
o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded"))
})
})
g.Describe("oc start-build docker-build --wait", func() {
g.It("Docker: should start a build and wait for the build failed and build pod being killed by kubelet", func() {
g.By("calling oc create docker-build")
err := oc.Run("create").Args("-f", dockerFixture).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("starting the docker build with --wait flag and short timeout")
示例10: Get
// Get returns a streamer resource with the contents of the build log.
//
// It resolves the target build (optionally the previous build of the same
// config when Previous is set), waits for a New/Pending build to start
// unless NoWait is requested, rejects cancelled/errored builds, and finally
// returns a LocationStreamer pointing at the build pod's log endpoint.
func (r *REST) Get(ctx kapi.Context, name string, opts runtime.Object) (runtime.Object, error) {
	buildLogOpts, ok := opts.(*api.BuildLogOptions)
	if !ok {
		return nil, errors.NewBadRequest("did not get an expected options.")
	}
	if errs := validation.ValidateBuildLogOptions(buildLogOpts); len(errs) > 0 {
		return nil, errors.NewInvalid(api.Kind("BuildLogOptions"), "", errs)
	}
	obj, err := r.Getter.Get(ctx, name)
	if err != nil {
		return nil, err
	}
	build := obj.(*api.Build)
	if buildLogOpts.Previous {
		version := buildutil.VersionForBuild(build)
		// Use the previous version
		version--
		previousBuildName := buildutil.BuildNameForConfigVersion(buildutil.ConfigNameForBuild(build), version)
		previous, err := r.Getter.Get(ctx, previousBuildName)
		if err != nil {
			return nil, err
		}
		build = previous.(*api.Build)
	}
	switch build.Status.Phase {
	// Build has not launched, wait til it runs
	case api.BuildPhaseNew, api.BuildPhasePending:
		if buildLogOpts.NoWait {
			glog.V(4).Infof("Build %s/%s is in %s state. No logs to retrieve yet.", build.Namespace, build.Name, build.Status.Phase)
			// return empty content if not waiting for build
			return &genericrest.LocationStreamer{}, nil
		}
		glog.V(4).Infof("Build %s/%s is in %s state, waiting for Build to start", build.Namespace, build.Name, build.Status.Phase)
		latest, ok, err := registry.WaitForRunningBuild(r.Watcher, ctx, build, r.Timeout)
		if err != nil {
			return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for build %s to run: %v", build.Name, err))
		}
		// The build may have terminated while we were waiting; report those
		// outcomes rather than attempting to stream logs that never existed.
		switch latest.Status.Phase {
		case api.BuildPhaseError:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s encountered an error: %s", build.Name, buildutil.NoBuildLogsMessage))
		case api.BuildPhaseCancelled:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled: %s", build.Name, buildutil.NoBuildLogsMessage))
		}
		if !ok {
			return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for build %s to start after %s", build.Name, r.Timeout), 1)
		}
	// The build was cancelled
	case api.BuildPhaseCancelled:
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled. %s", build.Name, buildutil.NoBuildLogsMessage))
	// An error occurred launching the build, return an error
	case api.BuildPhaseError:
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s is in an error state. %s", build.Name, buildutil.NoBuildLogsMessage))
	}
	// The container should be the default build container, so setting it to blank
	buildPodName := api.GetBuildPodName(build)
	logOpts := api.BuildToPodLogOptions(buildLogOpts)
	location, transport, err := pod.LogLocation(r.PodGetter, r.ConnectionInfo, ctx, buildPodName, logOpts)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, errors.NewNotFound(kapi.Resource("pod"), buildPodName)
		}
		return nil, errors.NewBadRequest(err.Error())
	}
	return &genericrest.LocationStreamer{
		Location:        location,
		Transport:       transport,
		ContentType:     "text/plain",
		Flush:           buildLogOpts.Follow,
		ResponseChecker: genericrest.NewGenericHttpResponseChecker(kapi.Resource("pod"), buildPodName),
	}, nil
}
示例11: TestDockerCreateBuildPod
// TestDockerCreateBuildPod verifies that the docker build strategy produces a
// pod with the expected name, labels, container configuration, environment,
// volumes, deadline, and resources, and that disallowed environment
// variables from the strategy are filtered out.
func TestDockerCreateBuildPod(t *testing.T) {
	strategy := DockerBuildStrategy{
		Image: "docker-test-image",
		Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion),
	}
	expected := mockDockerBuild()
	actual, err := strategy.CreateBuildPod(expected)
	if err != nil {
		// Fatalf (not Errorf): continuing would dereference a nil pod below.
		t.Fatalf("Unexpected error: %v", err)
	}
	if expected, actual := buildapi.GetBuildPodName(expected), actual.ObjectMeta.Name; expected != actual {
		t.Errorf("Expected %s, but got %s!", expected, actual)
	}
	if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(expected.Name)}, actual.Labels) {
		t.Errorf("Pod Labels does not match Build Labels!")
	}
	container := actual.Spec.Containers[0]
	if container.Name != "docker-build" {
		t.Errorf("Expected docker-build, but got %s!", container.Name)
	}
	if container.Image != strategy.Image {
		// Message arguments ordered to match the format: expected first, actual second.
		t.Errorf("Expected %s image, got %s!", strategy.Image, container.Image)
	}
	if container.ImagePullPolicy != kapi.PullIfNotPresent {
		t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy)
	}
	if actual.Spec.RestartPolicy != kapi.RestartPolicyNever {
		t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy)
	}
	if len(container.Env) != 10 {
		// Dump the variable names to make a count mismatch diagnosable.
		var keys []string
		for _, env := range container.Env {
			keys = append(keys, env.Name)
		}
		t.Fatalf("Expected 10 elements in Env table, got %d:\n%s", len(container.Env), strings.Join(keys, ", "))
	}
	if len(container.VolumeMounts) != 4 {
		t.Fatalf("Expected 4 volumes in container, got %d", len(container.VolumeMounts))
	}
	if *actual.Spec.ActiveDeadlineSeconds != 60 {
		t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds)
	}
	// Mount order is significant: socket, push secret, pull secret, source secret.
	for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, DockerPullSecretMountPath, sourceSecretMountPath} {
		if container.VolumeMounts[i].MountPath != expected {
			t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath)
		}
	}
	if len(actual.Spec.Volumes) != 4 {
		t.Fatalf("Expected 4 volumes in Build pod, got %d", len(actual.Spec.Volumes))
	}
	if !kapi.Semantic.DeepEqual(container.Resources, expected.Spec.Resources) {
		t.Fatalf("Expected actual=expected, %v != %v", container.Resources, expected.Spec.Resources)
	}
	// Whitelisted strategy env must be propagated; non-whitelisted must not.
	found := false
	foundIllegal := false
	for _, v := range container.Env {
		if v.Name == "BUILD_LOGLEVEL" && v.Value == "bar" {
			found = true
		}
		if v.Name == "ILLEGAL" {
			foundIllegal = true
		}
	}
	if !found {
		t.Fatalf("Expected variable BUILD_LOGLEVEL be defined for the container")
	}
	if foundIllegal {
		t.Fatalf("Found illegal environment variable 'ILLEGAL' defined on container")
	}
	// The BUILD env var must hold the serialized build object at index 0.
	buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), expected)
	errorCases := map[int][]string{
		0: {"BUILD", string(buildJSON)},
	}
	for index, exp := range errorCases {
		if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] {
			t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value)
		}
	}
}
示例12: runBuildRunningPodDeleteTest
// runBuildRunningPodDeleteTest verifies that deleting a pending build's pod
// moves the build into the Error phase: it creates a build, waits for its
// pod and for the Pending phase, deletes the pod, and asserts the build is
// updated to Error.
func runBuildRunningPodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}
	defer buildWatch.Stop()
	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
	if err != nil {
		t.Fatalf("Couldn't create Build: %v", err)
	}
	// Start the pod watch at the build's resource version so no pod events
	// are missed between creation and subscription.
	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
	if err != nil {
		t.Fatalf("Couldn't subscribe to Pods %v", err)
	}
	defer podWatch.Stop()
	// wait for initial build event from the creation of the imagerepo with tag latest
	event := waitForWatch(t, "initial build added", buildWatch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild := event.Object.(*buildapi.Build)
	buildName := newBuild.Name
	// Derive the pod name through the API helper rather than hard-coding the
	// "-build" suffix, consistent with the deletion below.
	podName := buildapi.GetBuildPodName(newBuild)
	// initial pod creation for build
	for {
		event = waitForWatch(t, "build pod created", podWatch)
		newPod := event.Object.(*kapi.Pod)
		if newPod.Name == podName {
			break
		}
	}
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	// throw away events from other builds, we only care about the new build
	// we just triggered
	for {
		event = waitForWatch(t, "build updated to pending", buildWatch)
		newBuild = event.Object.(*buildapi.Build)
		if newBuild.Name == buildName {
			break
		}
	}
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	if newBuild.Status.Phase != buildapi.BuildPhasePending {
		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
	}
	// Delete the build pod out from under the running build; the controller
	// should react by marking the build as errored. A failed delete would
	// otherwise surface only as a confusing watch timeout below.
	if err := clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0)); err != nil {
		t.Fatalf("Couldn't delete build pod: %v", err)
	}
	event = waitForWatch(t, "build updated to error", buildWatch)
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	if newBuild.Status.Phase != buildapi.BuildPhaseError {
		t.Fatalf("expected build status to be marked error, but was marked %s", newBuild.Status.Phase)
	}
}
示例13: TestConcurrentBuildPodControllers
// TestConcurrentBuildPodControllers tests the lifecycle of a build pod when running multiple controllers.
func TestConcurrentBuildPodControllers(t *testing.T) {
// Start a master with multiple BuildPodControllers
osClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t)
ns := testutil.Namespace()
waitTime := ConcurrentBuildPodControllersTestWait
tests := []buildControllerPodTest{
{
Name: "running state test",
States: []buildControllerPodState{
{
PodPhase: kapi.PodRunning,
BuildPhase: buildapi.BuildPhaseRunning,
},
},
},
{
Name: "build succeeded",
States: []buildControllerPodState{
{
PodPhase: kapi.PodRunning,
BuildPhase: buildapi.BuildPhaseRunning,
},
{
PodPhase: kapi.PodSucceeded,
BuildPhase: buildapi.BuildPhaseComplete,
},
},
},
{
Name: "build failed",
States: []buildControllerPodState{
{
PodPhase: kapi.PodRunning,
BuildPhase: buildapi.BuildPhaseRunning,
},
{
PodPhase: kapi.PodFailed,
BuildPhase: buildapi.BuildPhaseFailed,
},
},
},
}
for _, test := range tests {
// Setup communications channels
podReadyChan := make(chan *kapi.Pod) // Will receive a value when a build pod is ready
errChan := make(chan error) // Will receive a value when an error occurs
stateReached := int32(0)
// Create a build
b, err := osClient.Builds(ns).Create(mockBuild())
checkErr(t, err)
// Watch build pod for transition to pending
podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
checkErr(t, err)
go func() {
for e := range podWatch.ResultChan() {
pod, ok := e.Object.(*kapi.Pod)
if !ok {
checkErr(t, fmt.Errorf("%s: unexpected object received: %#v\n", test.Name, e.Object))
}
if pod.Status.Phase == kapi.PodPending {
podReadyChan <- pod
break
}
}
}()
var pod *kapi.Pod
select {
case pod = <-podReadyChan:
if pod.Status.Phase != kapi.PodPending {
t.Errorf("Got wrong pod phase: %s", pod.Status.Phase)
podWatch.Stop()
continue
}
case <-time.After(BuildControllersWatchTimeout):
t.Errorf("Timed out waiting for build pod to be ready")
podWatch.Stop()
continue
}
podWatch.Stop()
for _, state := range test.States {
// Update pod state and verify that corresponding build state happens accordingly
pod, err := kClient.Pods(ns).Get(pod.Name)
checkErr(t, err)
pod.Status.Phase = state.PodPhase
_, err = kClient.Pods(ns).UpdateStatus(pod)
checkErr(t, err)
buildWatch, err := osClient.Builds(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", b.Name), ResourceVersion: b.ResourceVersion})
checkErr(t, err)
defer buildWatch.Stop()
go func() {
done := false
//.........这里部分代码省略.........
示例14: handle
func (h *binaryInstantiateHandler) handle(r io.Reader) (runtime.Object, error) {
h.options.Name = h.name
if err := rest.BeforeCreate(BinaryStrategy, h.ctx, h.options); err != nil {
glog.Infof("failed to validate binary: %#v", h.options)
return nil, err
}
request := &buildapi.BuildRequest{}
request.Name = h.name
if len(h.options.Commit) > 0 {
request.Revision = &buildapi.SourceRevision{
Git: &buildapi.GitSourceRevision{
Committer: buildapi.SourceControlUser{
Name: h.options.CommitterName,
Email: h.options.CommitterEmail,
},
Author: buildapi.SourceControlUser{
Name: h.options.AuthorName,
Email: h.options.AuthorEmail,
},
Message: h.options.Message,
Commit: h.options.Commit,
},
}
}
request.Binary = &buildapi.BinaryBuildSource{
AsFile: h.options.AsFile,
}
var build *buildapi.Build
start := time.Now()
if err := wait.Poll(time.Second, h.r.Timeout, func() (bool, error) {
result, err := h.r.Generator.Instantiate(h.ctx, request)
if err != nil {
if errors.IsNotFound(err) {
if s, ok := err.(errors.APIStatus); ok {
if s.Status().Kind == "imagestreamtags" {
return false, nil
}
}
}
glog.V(2).Infof("failed to instantiate: %#v", request)
return false, err
}
build = result
return true, nil
}); err != nil {
return nil, err
}
remaining := h.r.Timeout - time.Now().Sub(start)
latest, ok, err := registry.WaitForRunningBuild(h.r.Watcher, h.ctx, build, remaining)
if err != nil {
switch {
case latest.Status.Phase == buildapi.BuildPhaseError:
return nil, errors.NewBadRequest(fmt.Sprintf("build %s encountered an error: %s", build.Name, buildutil.NoBuildLogsMessage))
case latest.Status.Phase == buildapi.BuildPhaseCancelled:
return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled: %s", build.Name, buildutil.NoBuildLogsMessage))
case err == registry.ErrBuildDeleted:
return nil, errors.NewBadRequest(fmt.Sprintf("build %s was deleted before it started: %s", build.Name, buildutil.NoBuildLogsMessage))
default:
return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for build %s to run: %v", build.Name, err))
}
}
if !ok {
return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for build %s to start after %s", build.Name, h.r.Timeout), 0)
}
if latest.Status.Phase != buildapi.BuildPhaseRunning {
return nil, errors.NewBadRequest(fmt.Sprintf("cannot upload file to build %s with status %s", build.Name, latest.Status.Phase))
}
// The container should be the default build container, so setting it to blank
buildPodName := buildapi.GetBuildPodName(build)
opts := &kapi.PodAttachOptions{
Stdin: true,
}
location, transport, err := pod.AttachLocation(h.r.PodGetter, h.r.ConnectionInfo, h.ctx, buildPodName, opts)
if err != nil {
if errors.IsNotFound(err) {
return nil, errors.NewNotFound(kapi.Resource("pod"), buildPodName)
}
return nil, errors.NewBadRequest(err.Error())
}
rawTransport, ok := transport.(*http.Transport)
if !ok {
return nil, errors.NewInternalError(fmt.Errorf("unable to connect to node, unrecognized type: %v", reflect.TypeOf(transport)))
}
upgrader := spdy.NewRoundTripper(rawTransport.TLSClientConfig)
exec, err := remotecommand.NewStreamExecutor(upgrader, nil, "POST", location)
if err != nil {
return nil, errors.NewInternalError(fmt.Errorf("unable to connect to server: %v", err))
}
streamOptions := remotecommand.StreamOptions{
SupportedProtocols: kubeletremotecommand.SupportedStreamingProtocols,
Stdin: r,
}
if err := exec.Stream(streamOptions); err != nil {
return nil, errors.NewInternalError(err)
}
return latest, nil
//.........这里部分代码省略.........
示例15: testSTICreateBuildPod
func testSTICreateBuildPod(t *testing.T, rootAllowed bool) {
strategy := &SourceBuildStrategy{
Image: "sti-test-image",
Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion),
AdmissionControl: &FakeAdmissionControl{admit: rootAllowed},
}
expected := mockSTIBuild()
actual, err := strategy.CreateBuildPod(expected)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if expected, actual := buildapi.GetBuildPodName(expected), actual.ObjectMeta.Name; expected != actual {
t.Errorf("Expected %s, but got %s!", expected, actual)
}
if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(expected.Name)}, actual.Labels) {
t.Errorf("Pod Labels does not match Build Labels!")
}
container := actual.Spec.Containers[0]
if container.Name != "sti-build" {
t.Errorf("Expected sti-build, but got %s!", container.Name)
}
if container.Image != strategy.Image {
t.Errorf("Expected %s image, got %s!", container.Image, strategy.Image)
}
if container.ImagePullPolicy != kapi.PullIfNotPresent {
t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy)
}
if actual.Spec.RestartPolicy != kapi.RestartPolicyNever {
t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy)
}
// strategy ENV is whitelisted into the container environment, and not all
// the values are allowed, so only expect 10 not 11 values.
expectedEnvCount := 10
if !rootAllowed {
expectedEnvCount = 12
}
if len(container.Env) != expectedEnvCount {
var keys []string
for _, env := range container.Env {
keys = append(keys, env.Name)
}
t.Fatalf("Expected 11 elements in Env table, got %d:\n%s", len(container.Env), strings.Join(keys, ", "))
}
if len(container.VolumeMounts) != 4 {
t.Fatalf("Expected 4 volumes in container, got %d", len(container.VolumeMounts))
}
for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, DockerPullSecretMountPath, sourceSecretMountPath} {
if container.VolumeMounts[i].MountPath != expected {
t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath)
}
}
if len(actual.Spec.Volumes) != 4 {
t.Fatalf("Expected 4 volumes in Build pod, got %d", len(actual.Spec.Volumes))
}
if *actual.Spec.ActiveDeadlineSeconds != 60 {
t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds)
}
if !kapi.Semantic.DeepEqual(container.Resources, expected.Spec.Resources) {
t.Fatalf("Expected actual=expected, %v != %v", container.Resources, expected.Spec.Resources)
}
found := false
foundIllegal := false
foundAllowedUIDs := false
foundDropCaps := false
for _, v := range container.Env {
if v.Name == "BUILD_LOGLEVEL" && v.Value == "bar" {
found = true
}
if v.Name == "ILLEGAL" {
foundIllegal = true
}
if v.Name == buildapi.AllowedUIDs && v.Value == "1-" {
foundAllowedUIDs = true
}
if v.Name == buildapi.DropCapabilities && v.Value == "KILL,MKNOD,SETGID,SETUID,SYS_CHROOT" {
foundDropCaps = true
}
}
if !found {
t.Fatalf("Expected variable BUILD_LOGLEVEL be defined for the container")
}
if foundIllegal {
t.Fatalf("Found illegal environment variable 'ILLEGAL' defined on container")
}
if foundAllowedUIDs && rootAllowed {
t.Fatalf("Did not expect %s when root is allowed", buildapi.AllowedUIDs)
}
if !foundAllowedUIDs && !rootAllowed {
t.Fatalf("Expected %s when root is not allowed", buildapi.AllowedUIDs)
}
if foundDropCaps && rootAllowed {
t.Fatalf("Did not expect %s when root is allowed", buildapi.DropCapabilities)
}
if !foundDropCaps && !rootAllowed {
t.Fatalf("Expected %s when root is not allowed", buildapi.DropCapabilities)
}
buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), expected)
errorCases := map[int][]string{
//.........这里部分代码省略.........