This article collects typical usage examples of the Golang method github.com/openshift/origin/pkg/client.Client.BuildConfigs. If you are wondering what Client.BuildConfigs does, how to call it, or are looking for sample code, the curated examples below may help. You can also explore further usage of the containing type, github.com/openshift/origin/pkg/client.Client.
The sections below show 4 code examples of the Client.BuildConfigs method, sorted by popularity by default.
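Before the examples, here is a minimal usage sketch (not taken from the examples below): the wrapper function, osClient, and namespace are illustrative placeholders, and only the Create and Get calls mirror the signatures that appear in the examples.

// createAndFetchBuildConfig is a minimal sketch of the basic call pattern:
// BuildConfigs(namespace) returns a namespaced interface for BuildConfig operations.
func createAndFetchBuildConfig(osClient *client.Client, namespace string, config *buildapi.BuildConfig) (*buildapi.BuildConfig, error) {
	// Persist the BuildConfig in the given namespace.
	created, err := osClient.BuildConfigs(namespace).Create(config)
	if err != nil {
		return nil, err
	}
	// Read the object back by name, as the examples below do with Get(config.Name).
	return osClient.BuildConfigs(namespace).Get(created.Name)
}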
Example 1: runBuildConfigChangeControllerTest
// runBuildConfigChangeControllerTest verifies that creating a BuildConfig with a
// config-change trigger immediately produces a build and bumps the config's LastVersion.
func runBuildConfigChangeControllerTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
	config := configChangeBuildConfig()
	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create BuildConfig: %v", err)
	}

	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}
	defer watch.Stop()

	watch2, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to BuildConfigs %v", err)
	}
	defer watch2.Stop()

	// wait for initial build event
	event := waitForWatch(t, "config change initial build added", watch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}

	event = waitForWatch(t, "config change config updated", watch2)
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	if bc := event.Object.(*buildapi.BuildConfig); bc.Status.LastVersion == 0 {
		t.Fatalf("expected build config lastversion to be greater than zero after build")
	}
}
Example 2: runTest
// runTest creates a BuildConfig with an image-change trigger, posts an image to the
// referenced ImageStream, and verifies that builds are created with the expected base
// image, output, and labels, and that the trigger's LastTriggeredImageID is recorded.
func runTest(t *testing.T, testname string, clusterAdminClient *client.Client, imageStream *imageapi.ImageStream, imageStreamMapping *imageapi.ImageStreamMapping, config *buildapi.BuildConfig, tag string) {
	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create BuildConfig: %v", err)
	}

	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}
	defer watch.Stop()

	watch2, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), created.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to BuildConfigs %v", err)
	}
	defer watch2.Stop()

	imageStream, err = clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream)
	if err != nil {
		t.Fatalf("Couldn't create ImageStream: %v", err)
	}

	err = clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(imageStreamMapping)
	if err != nil {
		t.Fatalf("Couldn't create Image: %v", err)
	}

	// wait for initial build event from the creation of the imagerepo with tag latest
	event := <-watch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild := event.Object.(*buildapi.Build)
	switch newBuild.Spec.Strategy.Type {
	case buildapi.SourceBuildStrategyType:
		if newBuild.Spec.Strategy.SourceStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, newBuild.Spec.Strategy.SourceStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
		}
	case buildapi.DockerBuildStrategyType:
		if newBuild.Spec.Strategy.DockerStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, newBuild.Spec.Strategy.DockerStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
		}
	case buildapi.CustomBuildStrategyType:
		if newBuild.Spec.Strategy.CustomStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, newBuild.Spec.Strategy.CustomStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
		}
	}

	event = <-watch.ResultChan()
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	// Make sure the resolution of the build's docker image pushspec didn't mutate the persisted API object
	if newBuild.Spec.Output.To.Name != "test-image-trigger-repo:outputtag" {
		t.Fatalf("unexpected build output: %#v %#v", newBuild.Spec.Output.To, newBuild.Spec.Output)
	}
	if newBuild.Labels["testlabel"] != "testvalue" {
		t.Fatalf("Expected build with label %s=%s from build config got %s=%s", "testlabel", "testvalue", "testlabel", newBuild.Labels["testlabel"])
	}

	// wait for build config to be updated
	<-watch2.ResultChan()
	updatedConfig, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
	if err != nil {
		t.Fatalf("Couldn't get BuildConfig: %v", err)
	}
	// the first tag did not have an image id, so the last trigger field is the pull spec
	if updatedConfig.Spec.Triggers[0].ImageChange.LastTriggeredImageID != "registry:8080/openshift/test-image-trigger:"+tag {
		t.Errorf("Expected imageID equal to pull spec, got %#v", updatedConfig.Spec.Triggers[0].ImageChange)
	}

	// trigger a build by posting a new image
	if err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(&imageapi.ImageStreamMapping{
		ObjectMeta: kapi.ObjectMeta{
			Namespace: testutil.Namespace(),
			Name:      imageStream.Name,
		},
		Tag: tag,
		Image: imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: "ref-2-random",
			},
			DockerImageReference: "registry:8080/openshift/test-image-trigger:ref-2-random",
		},
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	event = <-watch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	//......... remainder of the code omitted .........
Example 3: ValidationApplicationItemName
// ValidationApplicationItemName checks that every resource referenced by the application
// item list exists; it returns false with a message for the first missing resource.
func ValidationApplicationItemName(namespace string, items applicationapi.ItemList, oClient *oclient.Client, kClient *kclient.Client) (bool, string) {
	for _, item := range items {
		switch item.Kind {
		case "ServiceBroker":
			if _, err := oClient.ServiceBrokers().Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "BackingServiceInstance":
			if _, err := oClient.BackingServiceInstances(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "Build":
			if _, err := oClient.Builds(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "BuildConfig":
			if _, err := oClient.BuildConfigs(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "DeploymentConfig":
			if _, err := oClient.DeploymentConfigs(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "ImageStream":
			if _, err := oClient.ImageStreams(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "ReplicationController":
			if _, err := kClient.ReplicationControllers(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "Node":
			if _, err := kClient.Nodes().Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "Pod":
			if _, err := kClient.Pods(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		case "Service":
			if _, err := kClient.Services(namespace).Get(item.Name); err != nil {
				if kerrors.IsNotFound(err) {
					return false, fmt.Sprintf("resource %s=%s not found.", item.Kind, item.Name)
				}
			}
		}
	}
	return true, ""
}
Example 4: runImageChangeTriggerTest
// runImageChangeTriggerTest verifies that posting a new image to the referenced
// ImageStream triggers a build and updates the BuildConfig's LastTriggeredImageID.
func runImageChangeTriggerTest(t *testing.T, clusterAdminClient *client.Client, imageStream *imageapi.ImageStream, imageStreamMapping *imageapi.ImageStreamMapping, config *buildapi.BuildConfig, tag string) {
	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create BuildConfig: %v", err)
	}

	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}

	watch2, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
	if err != nil {
		t.Fatalf("Couldn't subscribe to BuildConfigs %v", err)
	}
	defer watch2.Stop()

	imageStream, err = clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream)
	if err != nil {
		t.Fatalf("Couldn't create ImageStream: %v", err)
	}

	err = clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(imageStreamMapping)
	if err != nil {
		t.Fatalf("Couldn't create Image: %v", err)
	}

	// wait for initial build event from the creation of the imagerepo with tag latest
	event := waitForWatch(t, "initial build added", watch)
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild := event.Object.(*buildapi.Build)
	strategy := newBuild.Spec.Strategy
	switch {
	case strategy.SourceStrategy != nil:
		if strategy.SourceStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.SourceStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
		}
	case strategy.DockerStrategy != nil:
		if strategy.DockerStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.DockerStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
		}
	case strategy.CustomStrategy != nil:
		if strategy.CustomStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.CustomStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
		}
	}

	// Wait for an update on the specific build that was added
	watch3, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", newBuild.Name), ResourceVersion: newBuild.ResourceVersion})
	defer watch3.Stop()
	if err != nil {
		t.Fatalf("Couldn't subscribe to Builds %v", err)
	}
	event = waitForWatch(t, "initial build update", watch3)
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newBuild = event.Object.(*buildapi.Build)
	// Make sure the resolution of the build's docker image pushspec didn't mutate the persisted API object
	if newBuild.Spec.Output.To.Name != "test-image-trigger-repo:outputtag" {
		t.Fatalf("unexpected build output: %#v %#v", newBuild.Spec.Output.To, newBuild.Spec.Output)
	}
	if newBuild.Labels["testlabel"] != "testvalue" {
		t.Fatalf("Expected build with label %s=%s from build config got %s=%s", "testlabel", "testvalue", "testlabel", newBuild.Labels["testlabel"])
	}

	// wait for build config to be updated
WaitLoop:
	for {
		select {
		case e := <-watch2.ResultChan():
			event = &e
			continue
		case <-time.After(BuildControllersWatchTimeout):
			break WaitLoop
		}
	}
	updatedConfig := event.Object.(*buildapi.BuildConfig)
	if err != nil {
		t.Fatalf("Couldn't get BuildConfig: %v", err)
	}
	// the first tag did not have an image id, so the last trigger field is the pull spec
	if updatedConfig.Spec.Triggers[0].ImageChange.LastTriggeredImageID != "registry:8080/openshift/test-image-trigger:"+tag {
		t.Fatalf("Expected imageID equal to pull spec, got %#v", updatedConfig.Spec.Triggers[0].ImageChange)
	}

	// clear out the build/buildconfig watches before triggering a new build
WaitLoop2:
	for {
		select {
		case <-watch.ResultChan():
			continue
		case <-watch2.ResultChan():
			//......... remainder of the code omitted .........