This article collects typical usage examples of the Go function github.com/openshift/origin/test/util.WaitForPolicyUpdate. If you have been wondering what WaitForPolicyUpdate does, how to call it, or what working uses of it look like, the curated examples below should help.
The sections that follow show 15 code examples of the WaitForPolicyUpdate function, ordered roughly by popularity.
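Before the examples, here is a minimal sketch of the call pattern they all share. The signature in the comment is an assumption inferred from the calls below (older tests pass the resource as a plain string such as "pods", newer ones pass a group resource built with buildapi.Resource or kapi.Resource); it is not a definitive statement of the library's API. The snippet itself mirrors a call taken from the examples:

// Assumed shape of the helper, inferred from the examples below (not verified against the source):
//
//     func WaitForPolicyUpdate(c *client.Client, namespace, verb string,
//         resource unversioned.GroupResource, allowedResponse bool) error
//
// Typical pattern: modify a role or rolebinding, then block until the authorizer
// reports the expected decision before exercising the permission in the test.
if err := testutil.WaitForPolicyUpdate(projectEditorClient, testutil.Namespace(),
    "create", buildapi.Resource(authorizationapi.DockerBuildResource), true); err != nil {
    t.Fatal(err)
}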
Example 1: removeBuildStrategyRoleResources
func removeBuildStrategyRoleResources(t *testing.T, clusterAdminClient, projectAdminClient, projectEditorClient *client.Client) {
    // remove resources from role so that certain build strategies are forbidden
    for _, role := range []string{bootstrappolicy.BuildStrategyCustomRoleName, bootstrappolicy.BuildStrategyDockerRoleName, bootstrappolicy.BuildStrategySourceRoleName, bootstrappolicy.BuildStrategyJenkinsPipelineRoleName} {
        options := &policy.RoleModificationOptions{
            RoleNamespace: "",
            RoleName: role,
            RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(clusterAdminClient),
            Groups: []string{"system:authenticated"},
        }
        if err := options.RemoveRole(); err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
    }

    if err := testutil.WaitForPolicyUpdate(projectEditorClient, testutil.Namespace(), "create", buildapi.Resource(authorizationapi.DockerBuildResource), false); err != nil {
        t.Fatal(err)
    }
    if err := testutil.WaitForPolicyUpdate(projectEditorClient, testutil.Namespace(), "create", buildapi.Resource(authorizationapi.SourceBuildResource), false); err != nil {
        t.Fatal(err)
    }
    if err := testutil.WaitForPolicyUpdate(projectEditorClient, testutil.Namespace(), "create", buildapi.Resource(authorizationapi.CustomBuildResource), false); err != nil {
        t.Fatal(err)
    }
    if err := testutil.WaitForPolicyUpdate(projectEditorClient, testutil.Namespace(), "create", buildapi.Resource(authorizationapi.JenkinsPipelineBuildResource), false); err != nil {
        t.Fatal(err)
    }
}
Example 2: removeBuildStrategyRoleResources
func removeBuildStrategyRoleResources(t *testing.T, clusterAdminClient, projectAdminClient, projectEditorClient *client.Client) {
    // remove resources from role so that certain build strategies are forbidden
    removeBuildStrategyPrivileges(t, clusterAdminClient.ClusterRoles(), bootstrappolicy.EditRoleName)
    if err := testutil.WaitForPolicyUpdate(projectEditorClient, testutil.Namespace(), "create", authorizationapi.DockerBuildResource, false); err != nil {
        t.Error(err)
    }

    removeBuildStrategyPrivileges(t, clusterAdminClient.ClusterRoles(), bootstrappolicy.AdminRoleName)
    if err := testutil.WaitForPolicyUpdate(projectAdminClient, testutil.Namespace(), "create", authorizationapi.DockerBuildResource, false); err != nil {
        t.Error(err)
    }
}
Example 3: TestSimpleImageChangeBuildTriggerFromImageStreamTagCustomWithConfigChange
func TestSimpleImageChangeBuildTriggerFromImageStreamTagCustomWithConfigChange(t *testing.T) {
    defer testutil.DumpEtcdOnFailure(t)
    projectAdminClient, _ := setup(t)

    clusterAdminClient, err := testutil.GetClusterAdminClient(testutil.GetBaseDir() + "/openshift.local.config/master/admin.kubeconfig")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    clusterRoleBindingAccessor := policy.NewClusterRoleBindingAccessor(clusterAdminClient)
    subjects := []kapi.ObjectReference{
        {
            Kind: authorizationapi.SystemGroupKind,
            Name: bootstrappolicy.AuthenticatedGroup,
        },
    }
    options := policy.RoleModificationOptions{
        RoleNamespace: testutil.Namespace(),
        RoleName: bootstrappolicy.BuildStrategyCustomRoleName,
        RoleBindingAccessor: clusterRoleBindingAccessor,
        Subjects: subjects,
    }
    options.AddRole()
    if err := testutil.WaitForPolicyUpdate(projectAdminClient, testutil.Namespace(), "create", buildapi.Resource(authorizationapi.CustomBuildResource), true); err != nil {
        t.Fatal(err)
    }

    imageStream := mockImageStream2(tag)
    imageStreamMapping := mockImageStreamMapping(imageStream.Name, "someimage", tag, "registry:8080/openshift/test-image-trigger:"+tag)
    strategy := customStrategy("ImageStreamTag", streamName+":"+tag)
    config := imageChangeBuildConfigWithConfigChange("custom-imagestreamtag", strategy)
    runTest(t, "SimpleImageChangeBuildTriggerFromImageStreamTagCustom", projectAdminClient, imageStream, imageStreamMapping, config, tag)
}
Example 4: setupBuildStrategyTest
func setupBuildStrategyTest(t *testing.T) (clusterAdminClient, projectAdminClient, projectEditorClient *client.Client) {
    namespace := testutil.Namespace()

    _, clusterAdminKubeConfig, err := testserver.StartTestMaster()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClient, err = testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    projectAdminClient, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "harold")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    projectEditorClient, _, _, err = testutil.GetClientForUser(*clusterAdminClientConfig, "joe")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    addJoe := &policy.RoleModificationOptions{
        RoleNamespace: "",
        RoleName: bootstrappolicy.EditRoleName,
        RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(namespace, projectAdminClient),
        Users: []string{"joe"},
    }
    if err := addJoe.AddRole(); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if err := testutil.WaitForPolicyUpdate(projectEditorClient, namespace, "create", authorizationapi.DockerBuildResource, true); err != nil {
        t.Fatalf(err.Error())
    }

    // Create builder image stream and tag
    imageStream := &imageapi.ImageStream{}
    imageStream.Name = "builderimage"
    _, err = clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream)
    if err != nil {
        t.Fatalf("Couldn't create ImageStream: %v", err)
    }

    // Create image stream mapping
    imageStreamMapping := &imageapi.ImageStreamMapping{}
    imageStreamMapping.Name = "builderimage"
    imageStreamMapping.Tag = "latest"
    imageStreamMapping.Image.Name = "image-id"
    imageStreamMapping.Image.DockerImageReference = "test/builderimage:latest"
    err = clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(imageStreamMapping)
    if err != nil {
        t.Fatalf("Couldn't create ImageStreamMapping: %v", err)
    }

    return
}
Example 5: grantRestrictedBuildStrategyRoleResources
func grantRestrictedBuildStrategyRoleResources(t *testing.T, clusterAdminClient, projectAdminClient, projectEditorClient *client.Client) {
    // grant resources to role so that restricted build strategies are available
    for _, role := range []string{bootstrappolicy.BuildStrategyCustomRoleName} {
        options := &policy.RoleModificationOptions{
            RoleNamespace: "",
            RoleName: role,
            RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(clusterAdminClient),
            Groups: []string{"system:authenticated"},
        }
        if err := options.AddRole(); err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
    }

    if err := testutil.WaitForPolicyUpdate(projectEditorClient, testutil.Namespace(), "create", buildapi.Resource(authorizationapi.CustomBuildResource), true); err != nil {
        t.Fatal(err)
    }
}
Example 6: setupBuildStrategyTest
func setupBuildStrategyTest(t *testing.T, includeControllers bool) (clusterAdminClient, projectAdminClient, projectEditorClient *client.Client) {
    testutil.RequireEtcd(t)
    namespace := testutil.Namespace()

    var clusterAdminKubeConfig string
    var err error
    if includeControllers {
        _, clusterAdminKubeConfig, err = testserver.StartTestMaster()
    } else {
        _, clusterAdminKubeConfig, err = testserver.StartTestMasterAPI()
    }
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClient, err = testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    projectAdminClient, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "harold")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    projectEditorClient, _, _, err = testutil.GetClientForUser(*clusterAdminClientConfig, "joe")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    addJoe := &policy.RoleModificationOptions{
        RoleNamespace: "",
        RoleName: bootstrappolicy.EditRoleName,
        RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(namespace, projectAdminClient),
        Users: []string{"joe"},
    }
    if err := addJoe.AddRole(); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if err := testutil.WaitForPolicyUpdate(projectEditorClient, namespace, "create", buildapi.Resource(authorizationapi.DockerBuildResource), true); err != nil {
        t.Fatalf(err.Error())
    }

    // we need a template that doesn't create service accounts or rolebindings so editors can create
    // pipeline buildconfig's successfully, so we're not using the standard jenkins template.
    // but we do need a template that creates a service named jenkins.
    template, err := testutil.GetTemplateFixture("../../examples/jenkins/master-slave/jenkins-master-template.json")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    // pipeline defaults expect to find a template named jenkins-ephemeral
    // in the openshift namespace.
    template.Name = "jenkins-ephemeral"
    template.Namespace = "openshift"
    _, err = clusterAdminClient.Templates("openshift").Create(template)
    if err != nil {
        t.Fatalf("Couldn't create jenkins template: %v", err)
    }

    return
}
Example 7: TestBasicGroupManipulation
func TestBasicGroupManipulation(t *testing.T) {
    _, clusterAdminKubeConfig, err := testutil.StartTestMaster()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    valerieOpenshiftClient, _, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "valerie")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    theGroup := &userapi.Group{}
    theGroup.Name = "thegroup"
    theGroup.Users = append(theGroup.Users, "valerie", "victor")
    _, err = clusterAdminClient.Groups().Create(theGroup)
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    _, err = valerieOpenshiftClient.Projects().Get("empty")
    if err == nil {
        t.Fatalf("expected error")
    }

    emptyProject := &projectapi.Project{}
    emptyProject.Name = "empty"
    _, err = clusterAdminClient.Projects().Create(emptyProject)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    roleBinding := &authorizationapi.RoleBinding{}
    roleBinding.Name = "admins"
    roleBinding.RoleRef.Name = "admin"
    roleBinding.Groups = util.NewStringSet(theGroup.Name)
    _, err = clusterAdminClient.RoleBindings("empty").Create(roleBinding)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    if err := testutil.WaitForPolicyUpdate(valerieOpenshiftClient, "empty", "get", "pods", true); err != nil {
        t.Error(err)
    }

    // make sure that user groups are respected for policy
    _, err = valerieOpenshiftClient.Projects().Get("empty")
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    victorOpenshiftClient, _, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "victor")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    _, err = victorOpenshiftClient.Projects().Get("empty")
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
}
Example 8: TestExtensionsAPIDeletion
func TestExtensionsAPIDeletion(t *testing.T) {
    const projName = "ext-deletion-proj"

    testutil.RequireEtcd(t)
    defer testutil.DumpEtcdOnFailure(t)
    _, clusterAdminKubeConfig, err := testserver.StartTestMaster()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    // create the containing project
    if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projName, "admin"); err != nil {
        t.Fatalf("unexpected error creating the project: %v", err)
    }
    projectAdminClient, projectAdminKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "admin")
    if err != nil {
        t.Fatalf("unexpected error getting project admin client: %v", err)
    }
    if err := testutil.WaitForPolicyUpdate(projectAdminClient, projName, "get", expapi.Resource("horizontalpodautoscalers"), true); err != nil {
        t.Fatalf("unexpected error waiting for policy update: %v", err)
    }

    // create the extensions resources as the project admin
    percent := int32(10)
    hpa := autoscaling.HorizontalPodAutoscaler{
        ObjectMeta: kapi.ObjectMeta{Name: "test-hpa"},
        Spec: autoscaling.HorizontalPodAutoscalerSpec{
            ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "DeploymentConfig", Name: "frontend", APIVersion: "v1"},
            MaxReplicas: 10,
            TargetCPUUtilizationPercentage: &percent,
        },
    }
    if _, err := projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(projName).Create(&hpa); err != nil {
        t.Fatalf("unexpected error creating the HPA object: %v", err)
    }

    job := batch.Job{
        ObjectMeta: kapi.ObjectMeta{Name: "test-job"},
        Spec: batch.JobSpec{
            Template: kapi.PodTemplateSpec{
                ObjectMeta: kapi.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
                Spec: kapi.PodSpec{
                    Containers: []kapi.Container{{Name: "baz", Image: "run"}},
                    RestartPolicy: kapi.RestartPolicyOnFailure,
                },
            },
        },
    }
    if _, err := projectAdminKubeClient.Extensions().Jobs(projName).Create(&job); err != nil {
        t.Fatalf("unexpected error creating the job object: %v", err)
    }

    if err := clusterAdminClient.Projects().Delete(projName); err != nil {
        t.Fatalf("unexpected error deleting the project: %v", err)
    }
    err = wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
        _, err := clusterAdminKubeClient.Namespaces().Get(projName)
        if errors.IsNotFound(err) {
            return true, nil
        }
        return false, err
    })
    if err != nil {
        t.Fatalf("unexpected error while waiting for project to delete: %v", err)
    }

    if _, err := clusterAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(projName).Get(hpa.Name); err == nil {
        t.Fatalf("HPA object was still present after project was deleted!")
    } else if !errors.IsNotFound(err) {
        t.Fatalf("Error trying to get deleted HPA object (not a not-found error): %v", err)
    }
    if _, err := clusterAdminKubeClient.Extensions().Jobs(projName).Get(job.Name); err == nil {
        t.Fatalf("Job object was still present after project was deleted!")
    } else if !errors.IsNotFound(err) {
        t.Fatalf("Error trying to get deleted Job object (not a not-found error): %v", err)
    }
}
Example 9: TestBasicUserBasedGroupManipulation
func TestBasicUserBasedGroupManipulation(t *testing.T) {
    testutil.RequireEtcd(t)
    defer testutil.DumpEtcdOnFailure(t)
    _, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    valerieOpenshiftClient, _, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "valerie")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    // make sure we don't get back system groups
    firstValerie, err := clusterAdminClient.Users().Get("valerie")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if len(firstValerie.Groups) != 0 {
        t.Errorf("unexpected groups: %v", firstValerie.Groups)
    }

    // make sure that user/~ returns groups for unbacked users
    expectedClusterAdminGroups := []string{"system:cluster-admins"}
    clusterAdminUser, err := clusterAdminClient.Users().Get("~")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if !reflect.DeepEqual(clusterAdminUser.Groups, expectedClusterAdminGroups) {
        t.Errorf("expected %v, got %v", clusterAdminUser.Groups, expectedClusterAdminGroups)
    }

    valerieGroups := []string{"theGroup"}
    firstValerie.Groups = append(firstValerie.Groups, valerieGroups...)
    _, err = clusterAdminClient.Users().Update(firstValerie)
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    // make sure that user/~ doesn't get back system groups when it merges
    secondValerie, err := valerieOpenshiftClient.Users().Get("~")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if !reflect.DeepEqual(secondValerie.Groups, valerieGroups) {
        t.Errorf("expected %v, got %v", secondValerie.Groups, valerieGroups)
    }

    _, err = valerieOpenshiftClient.Projects().Get("empty")
    if err == nil {
        t.Fatalf("expected error")
    }

    emptyProject := &projectapi.Project{}
    emptyProject.Name = "empty"
    _, err = clusterAdminClient.Projects().Create(emptyProject)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    roleBinding := &authorizationapi.RoleBinding{}
    roleBinding.Name = "admins"
    roleBinding.RoleRef.Name = "admin"
    roleBinding.Subjects = authorizationapi.BuildSubjects([]string{}, valerieGroups, uservalidation.ValidateUserName, uservalidation.ValidateGroupName)
    _, err = clusterAdminClient.RoleBindings("empty").Create(roleBinding)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    if err := testutil.WaitForPolicyUpdate(valerieOpenshiftClient, "empty", "get", kapi.Resource("pods"), true); err != nil {
        t.Error(err)
    }

    // make sure that user groups are respected for policy
    _, err = valerieOpenshiftClient.Projects().Get("empty")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}
Example 10: runStorageTest
func runStorageTest(t *testing.T, ns string, autoscalingVersion, batchVersion, extensionsVersion unversioned.GroupVersion) {
    etcdServer := testutil.RequireEtcd(t)
    masterConfig, err := testserver.DefaultMasterOptions()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    keys := etcd.NewKeysAPI(etcdServer.Client)
    getGVKFromEtcd := func(prefix, name string) (*unversioned.GroupVersionKind, error) {
        key := path.Join(masterConfig.EtcdStorageConfig.KubernetesStoragePrefix, prefix, ns, name)
        resp, err := keys.Get(context.TODO(), key, nil)
        if err != nil {
            return nil, err
        }
        _, gvk, err := runtime.UnstructuredJSONScheme.Decode([]byte(resp.Node.Value), nil, nil)
        return gvk, err
    }

    // TODO: Set storage versions for API groups
    // masterConfig.EtcdStorageConfig.StorageVersions[autoscaling.GroupName] = autoscalingVersion.String()
    // masterConfig.EtcdStorageConfig.StorageVersions[batch.GroupName] = batchVersion.String()
    // masterConfig.EtcdStorageConfig.StorageVersions[extensions.GroupName] = extensionsVersion.String()

    clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    // create the containing project
    if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, ns, "admin"); err != nil {
        t.Fatalf("unexpected error creating the project: %v", err)
    }
    projectAdminClient, projectAdminKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "admin")
    if err != nil {
        t.Fatalf("unexpected error getting project admin client: %v", err)
    }
    if err := testutil.WaitForPolicyUpdate(projectAdminClient, ns, "get", extensions.Resource("horizontalpodautoscalers"), true); err != nil {
        t.Fatalf("unexpected error waiting for policy update: %v", err)
    }

    jobTestcases := map[string]struct {
        creator kclient.JobInterface
    }{
        "batch": {creator: projectAdminKubeClient.Batch().Jobs(ns)},
    }
    for name, testcase := range jobTestcases {
        job := batch.Job{
            ObjectMeta: kapi.ObjectMeta{Name: name + "-job"},
            Spec: batch.JobSpec{
                Template: kapi.PodTemplateSpec{
                    Spec: kapi.PodSpec{
                        RestartPolicy: kapi.RestartPolicyNever,
                        Containers: []kapi.Container{{Name: "containername", Image: "containerimage"}},
                    },
                },
            },
        }

        // Create a Job
        if _, err := testcase.creator.Create(&job); err != nil {
            t.Fatalf("%s: unexpected error creating Job: %v", name, err)
        }

        // Ensure it is persisted correctly
        if gvk, err := getGVKFromEtcd("jobs", job.Name); err != nil {
            t.Fatalf("%s: unexpected error reading Job: %v", name, err)
        } else if *gvk != batchVersion.WithKind("Job") {
            t.Fatalf("%s: expected api version %s in etcd, got %s reading Job", name, batchVersion, gvk)
        }

        // Ensure it is accessible from both APIs
        if _, err := projectAdminKubeClient.Batch().Jobs(ns).Get(job.Name); err != nil {
            t.Errorf("%s: Error reading Job from the batch client: %#v", name, err)
        }
        if _, err := projectAdminKubeClient.Extensions().Jobs(ns).Get(job.Name); err != nil {
            t.Errorf("%s: Error reading Job from the extensions client: %#v", name, err)
        }
    }

    hpaTestcases := map[string]struct {
        creator kclient.HorizontalPodAutoscalerInterface
    }{
        "autoscaling": {creator: projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(ns)},
        "extensions": {creator: projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(ns)},
    }
    for name, testcase := range hpaTestcases {
        hpa := extensions.HorizontalPodAutoscaler{
            ObjectMeta: kapi.ObjectMeta{Name: name + "-hpa"},
            Spec: extensions.HorizontalPodAutoscalerSpec{
                MaxReplicas: 1,
                ScaleRef: extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: "scale"},
                //......... the rest of this example is omitted .........
Example 11: TestPolicyBasedRestrictionOfBuildStrategies
func TestPolicyBasedRestrictionOfBuildStrategies(t *testing.T) {
    const namespace = "hammer"

    _, clusterAdminKubeConfig, err := testutil.StartTestMaster()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    haroldClient, err := testutil.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "harold")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    joeClient, err := testutil.GetClientForUser(*clusterAdminClientConfig, "joe")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    addJoe := &policy.RoleModificationOptions{
        RoleNamespace: "",
        RoleName: bootstrappolicy.EditRoleName,
        RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(namespace, haroldClient),
        Users: []string{"joe"},
    }
    if err := addJoe.AddRole(); err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if err := testutil.WaitForPolicyUpdate(joeClient, namespace, "create", authorizationapi.DockerBuildResource, true); err != nil {
        t.Error(err)
    }

    // by default admins and editors can create all type of builds
    _, err = createDockerBuild(t, haroldClient.Builds(namespace))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    _, err = createDockerBuild(t, joeClient.Builds(namespace))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    _, err = createSourceBuild(t, haroldClient.Builds(namespace))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    _, err = createSourceBuild(t, joeClient.Builds(namespace))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    _, err = createCustomBuild(t, haroldClient.Builds(namespace))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    _, err = createCustomBuild(t, joeClient.Builds(namespace))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    // remove resources from role so that certain build strategies are forbidden
    removeBuildStrategyPrivileges(t, clusterAdminClient.ClusterRoles(), bootstrappolicy.EditRoleName)
    if err := testutil.WaitForPolicyUpdate(joeClient, namespace, "create", authorizationapi.DockerBuildResource, false); err != nil {
        t.Error(err)
    }
    removeBuildStrategyPrivileges(t, clusterAdminClient.ClusterRoles(), bootstrappolicy.AdminRoleName)
    if err := testutil.WaitForPolicyUpdate(haroldClient, namespace, "create", authorizationapi.DockerBuildResource, false); err != nil {
        t.Error(err)
    }

    // make sure builds are rejected
    if _, err = createDockerBuild(t, haroldClient.Builds(namespace)); !kapierror.IsForbidden(err) {
        t.Errorf("expected forbidden, got %v", err)
    }
    if _, err = createDockerBuild(t, joeClient.Builds(namespace)); !kapierror.IsForbidden(err) {
        t.Errorf("expected forbidden, got %v", err)
    }
    if _, err = createSourceBuild(t, haroldClient.Builds(namespace)); !kapierror.IsForbidden(err) {
        t.Errorf("expected forbidden, got %v", err)
    }
    if _, err = createSourceBuild(t, joeClient.Builds(namespace)); !kapierror.IsForbidden(err) {
        t.Errorf("expected forbidden, got %v", err)
    }
    if _, err = createCustomBuild(t, haroldClient.Builds(namespace)); !kapierror.IsForbidden(err) {
        t.Errorf("expected forbidden, got %v", err)
    }
    if _, err = createCustomBuild(t, joeClient.Builds(namespace)); !kapierror.IsForbidden(err) {
        t.Errorf("expected forbidden, got %v", err)
    }
}
Example 12: setupBuildStrategyTest
func setupBuildStrategyTest(t *testing.T, includeControllers bool) (clusterAdminClient, projectAdminClient, projectEditorClient *client.Client) {
    testutil.RequireEtcd(t)
    namespace := testutil.Namespace()

    var clusterAdminKubeConfig string
    var err error
    if includeControllers {
        _, clusterAdminKubeConfig, err = testserver.StartTestMaster()
    } else {
        _, clusterAdminKubeConfig, err = testserver.StartTestMasterAPI()
    }
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClient, err = testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    projectAdminClient, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "harold")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    projectEditorClient, _, _, err = testutil.GetClientForUser(*clusterAdminClientConfig, "joe")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    addJoe := &policy.RoleModificationOptions{
        RoleNamespace: "",
        RoleName: bootstrappolicy.EditRoleName,
        RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(namespace, projectAdminClient),
        Users: []string{"joe"},
    }
    if err := addJoe.AddRole(); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if err := testutil.WaitForPolicyUpdate(projectEditorClient, namespace, "create", buildapi.Resource(authorizationapi.DockerBuildResource), true); err != nil {
        t.Fatalf(err.Error())
    }

    // Create builder image stream and tag
    imageStream := &imageapi.ImageStream{}
    imageStream.Name = "builderimage"
    _, err = clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream)
    if err != nil {
        t.Fatalf("Couldn't create ImageStream: %v", err)
    }

    // Create image stream mapping
    imageStreamMapping := &imageapi.ImageStreamMapping{}
    imageStreamMapping.Name = "builderimage"
    imageStreamMapping.Tag = "latest"
    imageStreamMapping.Image.Name = "image-id"
    imageStreamMapping.Image.DockerImageReference = "test/builderimage:latest"
    err = clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(imageStreamMapping)
    if err != nil {
        t.Fatalf("Couldn't create ImageStreamMapping: %v", err)
    }

    template, err := testutil.GetTemplateFixture("../../examples/jenkins/jenkins-ephemeral-template.json")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    template.Name = "jenkins"
    template.Namespace = "openshift"
    _, err = clusterAdminClient.Templates("openshift").Create(template)
    if err != nil {
        t.Fatalf("Couldn't create jenkins template: %v", err)
    }

    return
}
Example 13: TestExtensionsAPIDisabled
func TestExtensionsAPIDisabled(t *testing.T) {
    const projName = "ext-disabled-proj"

    masterConfig, err := testserver.DefaultMasterOptions()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    // Disable all extensions API versions
    masterConfig.KubernetesMasterConfig.DisabledAPIGroupVersions = map[string][]string{"extensions": {"*"}}

    clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    // create the containing project
    if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projName, "admin"); err != nil {
        t.Fatalf("unexpected error creating the project: %v", err)
    }
    projectAdminClient, projectAdminKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "admin")
    if err != nil {
        t.Fatalf("unexpected error getting project admin client: %v", err)
    }
    // TODO: switch to checking "list","horizontalpodautoscalers" once SAR supports API groups
    if err := testutil.WaitForPolicyUpdate(projectAdminClient, projName, "get", "pods", true); err != nil {
        t.Fatalf("unexpected error waiting for policy update: %v", err)
    }

    // make sure extensions API objects cannot be listed or created
    if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(projName).List(kapi.ListOptions{}); !errors.IsNotFound(err) {
        t.Fatalf("expected NotFound error listing HPA, got %v", err)
    }
    if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(projName).Create(&expapi.HorizontalPodAutoscaler{}); !errors.IsNotFound(err) {
        t.Fatalf("expected NotFound error creating HPA, got %v", err)
    }
    if _, err := projectAdminKubeClient.Extensions().Jobs(projName).List(kapi.ListOptions{}); !errors.IsNotFound(err) {
        t.Fatalf("expected NotFound error listing jobs, got %v", err)
    }
    if _, err := projectAdminKubeClient.Extensions().Jobs(projName).Create(&expapi.Job{}); !errors.IsNotFound(err) {
        t.Fatalf("expected NotFound error creating job, got %v", err)
    }

    if err := clusterAdminClient.Projects().Delete(projName); err != nil {
        t.Fatalf("unexpected error deleting the project: %v", err)
    }
    err = wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
        _, err := clusterAdminKubeClient.Namespaces().Get(projName)
        if errors.IsNotFound(err) {
            return true, nil
        }
        return false, err
    })
    if err != nil {
        t.Fatalf("unexpected error while waiting for project to delete: %v", err)
    }
}
Example 14: TestBasicGroupManipulation
func TestBasicGroupManipulation(t *testing.T) {
    _, clusterAdminKubeConfig, err := testutil.StartTestMaster()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    valerieClientConfig := *clusterAdminClientConfig
    valerieClientConfig.Username = ""
    valerieClientConfig.Password = ""
    valerieClientConfig.BearerToken = ""
    valerieClientConfig.CertFile = ""
    valerieClientConfig.KeyFile = ""
    valerieClientConfig.CertData = nil
    valerieClientConfig.KeyData = nil

    accessToken, err := tokencmd.RequestToken(&valerieClientConfig, nil, "valerie", "security!")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    valerieClientConfig.BearerToken = accessToken
    valerieOpenshiftClient, err := client.New(&valerieClientConfig)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    // make sure we don't get back system groups
    firstValerie, err := clusterAdminClient.Users().Get("valerie")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if len(firstValerie.Groups) != 0 {
        t.Errorf("unexpected groups: %v", firstValerie.Groups)
    }

    // make sure that user/~ returns groups for unbacked users
    expectedClusterAdminGroups := []string{"system:cluster-admins"}
    clusterAdminUser, err := clusterAdminClient.Users().Get("~")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if !reflect.DeepEqual(clusterAdminUser.Groups, expectedClusterAdminGroups) {
        t.Errorf("expected %v, got %v", clusterAdminUser.Groups, expectedClusterAdminGroups)
    }

    valerieGroups := []string{"thegroup"}
    firstValerie.Groups = append(firstValerie.Groups, valerieGroups...)
    _, err = clusterAdminClient.Users().Update(firstValerie)
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }

    // make sure that user/~ doesn't get back system groups when it merges
    secondValerie, err := valerieOpenshiftClient.Users().Get("~")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if !reflect.DeepEqual(secondValerie.Groups, valerieGroups) {
        t.Errorf("expected %v, got %v", secondValerie.Groups, valerieGroups)
    }

    _, err = valerieOpenshiftClient.Projects().Get("empty")
    if err == nil {
        t.Fatalf("expected error")
    }

    emptyProject := &projectapi.Project{}
    emptyProject.Name = "empty"
    _, err = clusterAdminClient.Projects().Create(emptyProject)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    roleBinding := &authorizationapi.RoleBinding{}
    roleBinding.Name = "admins"
    roleBinding.RoleRef.Name = "admin"
    roleBinding.Groups = util.NewStringSet(valerieGroups...)
    _, err = clusterAdminClient.RoleBindings("empty").Create(roleBinding)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    if err := testutil.WaitForPolicyUpdate(valerieOpenshiftClient, "empty", "get", "pods", true); err != nil {
        t.Error(err)
    }

    // make sure that user groups are respected for policy
    _, err = valerieOpenshiftClient.Projects().Get("empty")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    //......... the rest of this example is omitted .........
Example 15: replicationTestFactory
func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
    return func() {
        oc.SetOutputDir(exutil.TestContext.OutputDir)
        defer cleanup(oc)

        _, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
        o.Expect(err).NotTo(o.HaveOccurred())

        err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
        o.Expect(err).NotTo(o.HaveOccurred())

        exutil.CheckOpenShiftNamespaceImageStreams(oc)
        err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
        o.Expect(err).NotTo(o.HaveOccurred())

        err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
        o.Expect(err).NotTo(o.HaveOccurred())

        // oc.KubeFramework().WaitForAnEndpoint currently will wait forever; for now, prefacing with our WaitForADeploymentToComplete,
        // which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
        g.By("waiting for the deployment to complete")
        err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
        o.Expect(err).NotTo(o.HaveOccurred())

        g.By("waiting for an endpoint")
        err = oc.KubeFramework().WaitForAnEndpoint(helperName)
        o.Expect(err).NotTo(o.HaveOccurred())

        tableCounter := 0
        assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
            tableCounter++
            table := fmt.Sprintf("table_%0.2d", tableCounter)

            g.By("creating replication helpers")
            master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
            o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
            o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())

            // Test if we can query as root
            g.By("wait for mysql-master endpoint")
            oc.KubeFramework().WaitForAnEndpoint("mysql-master")
            err := helper.TestRemoteLogin(oc, "mysql-master")
            o.Expect(err).NotTo(o.HaveOccurred())

            // Create a new table with random name
            g.By("create new table")
            _, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
            o.Expect(err).NotTo(o.HaveOccurred())

            // Write new data to the table through master
            _, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
            o.Expect(err).NotTo(o.HaveOccurred())

            // Make sure data is present on master
            err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
            o.Expect(err).NotTo(o.HaveOccurred())

            // Make sure data was replicated to all slaves
            for _, slave := range slaves {
                err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
                o.Expect(err).NotTo(o.HaveOccurred())
            }

            return master, slaves, helper
        }

        g.By("after initial deployment")
        master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)

        if tc.SkipReplication {
            return
        }

        g.By("after master is restarted by changing the Deployment Config")
        err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
        o.Expect(err).NotTo(o.HaveOccurred())
        err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
        master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

        g.By("after master is restarted by deleting the pod")
        err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
        o.Expect(err).NotTo(o.HaveOccurred())
        err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
        o.Expect(err).NotTo(o.HaveOccurred())
        _, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

        g.By("after slave is restarted by deleting the pod")
        err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
        o.Expect(err).NotTo(o.HaveOccurred())
        err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
        o.Expect(err).NotTo(o.HaveOccurred())
        assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)

        pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
        o.Expect(err).NotTo(o.HaveOccurred())
        o.Expect(len(pods.Items)).To(o.Equal(1))

        // NOTE: Commented out, current template does not support multiple replicas.
        /*
            g.By("after slave is scaled to 0 and then back to 4 replicas")
        //......... the rest of this example is omitted .........