This page collects typical usage examples of the NewUUID function from the Golang package k8s.io/kubernetes/pkg/util/uuid. If you have been asking yourself what NewUUID does, how to call it, and what it looks like in real code, the hand-picked examples below should help.
The 15 code examples of NewUUID shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
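Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share (an illustration written for this page, not taken from the examples below): NewUUID returns a types.UID, which callers typically convert to string when building object names and labels.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/uuid"
)

func main() {
	// NewUUID returns a types.UID; convert it to string to embed it
	// in object names and labels, as the examples below do.
	podName := "pod-" + string(uuid.NewUUID())
	fmt.Println(podName)
}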
Example 1: Stop
func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	ds, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// We set the nodeSelector to a random label. This label is nearly guaranteed
	// to not be set on any node so the DaemonSetController will start deleting
	// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
	// the DaemonSet.
	ds.Spec.Template.Spec.NodeSelector = map[string]string{
		string(uuid.NewUUID()): string(uuid.NewUUID()),
	}
	// force update to avoid version conflict
	ds.ResourceVersion = ""

	if ds, err = reaper.client.DaemonSets(namespace).Update(ds); err != nil {
		return err
	}

	// Wait for the daemon set controller to kill all the daemon pods.
	if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
		updatedDS, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
	}); err != nil {
		return err
	}

	return reaper.client.DaemonSets(namespace).Delete(name, nil)
}
Example 2: makePodToVerifyCgroups
// makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups.
func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod {
	// convert the names to their literal cgroupfs forms...
	cgroupFsNames := []string{}
	for _, cgroupName := range cgroupNames {
		if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
			cgroupFsNames = append(cgroupFsNames, cm.ConvertCgroupNameToSystemd(cgroupName, true))
		} else {
			cgroupFsNames = append(cgroupFsNames, string(cgroupName))
		}
	}

	// build the pod command that verifies each cgroup exists
	command := ""
	for _, cgroupFsName := range cgroupFsNames {
		localCommand := "if [ ! -d /tmp/memory/" + cgroupFsName + " ] || [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 1; fi; "
		command += localCommand
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "pod" + string(uuid.NewUUID()),
		},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Image:   "gcr.io/google_containers/busybox:1.24",
					Name:    "container" + string(uuid.NewUUID()),
					Command: []string{"sh", "-c", command},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "sysfscgroup",
							MountPath: "/tmp",
						},
					},
				},
			},
			Volumes: []api.Volume{
				{
					Name: "sysfscgroup",
					VolumeSource: api.VolumeSource{
						HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
					},
				},
			},
		},
	}
	return pod
}
Example 3: TestOverlappingRCs
func TestOverlappingRCs(t *testing.T) {
	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
		manager.podStoreSynced = alwaysReady

		// Create 9 RCs (j = 1..9) with staggered creation timestamps, shuffle
		// them randomly, and insert them into the rc manager's store.
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(uuid.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Indexer.Add(shuffledControllers[j])
		}

		// Add a pod and make sure only the oldest rc is synced.
		pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
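The shuffle helper called above is not part of this excerpt (Example 5 calls one with the same name for ReplicaSets, so the real helper is presumably defined per test file). A minimal hypothetical stand-in built on math/rand could look like this:

// Hypothetical stand-in for the shuffle helper referenced above;
// rand.Perm (from math/rand) yields a random permutation of indices.
func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
	shuffled := make([]*api.ReplicationController, len(controllers))
	for i, j := range rand.Perm(len(controllers)) {
		shuffled[j] = controllers[i]
	}
	return shuffled
}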
Example 4: newTestPods
func newTestPods(numPods int, imageName, podType string) []*api.Pod {
	var pods []*api.Pod
	for i := 0; i < numPods; i++ {
		podName := "test-" + string(uuid.NewUUID())
		labels := map[string]string{
			"type": podType,
			"name": podName,
		}
		pods = append(pods,
			&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:   podName,
					Labels: labels,
				},
				Spec: api.PodSpec{
					// TODO: restart policy defaults to Always;
					// check whether pods restart at the end of tests.
					Containers: []api.Container{
						{
							Image: imageName,
							Name:  podName,
						},
					},
				},
			})
	}
	return pods
}
Example 5: TestOverlappingRSs
func TestOverlappingRSs(t *testing.T) {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	labelMap := map[string]string{"foo": "bar"}

	for i := 0; i < 5; i++ {
		manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0)
		manager.podStoreSynced = alwaysReady

		// Create 9 ReplicaSets (j = 1..9) with staggered creation timestamps,
		// shuffle them randomly, and insert them into the ReplicaSet controller's store.
		var controllers []*extensions.ReplicaSet
		for j := 1; j < 10; j++ {
			rsSpec := newReplicaSet(1, labelMap)
			rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			rsSpec.Name = string(uuid.NewUUID())
			controllers = append(controllers, rsSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rsStore.Store.Add(shuffledControllers[j])
		}

		// Add a pod and make sure only the oldest ReplicaSet is synced.
		pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
		rsKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRS, _ := manager.queue.Get()
		if queueRS != rsKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
		}
	}
}
Example 6: Provision
// Provision creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: r.options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): r.options.Capacity,
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
Example 7: scTestPod
func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
	podName := "security-context-" + string(uuid.NewUUID())
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:        podName,
			Labels:      map[string]string{"name": podName},
			Annotations: map[string]string{},
		},
		Spec: api.PodSpec{
			SecurityContext: &api.PodSecurityContext{
				HostIPC: hostIPC,
				HostPID: hostPID,
			},
			Containers: []api.Container{
				{
					Name:  "test-container",
					Image: "gcr.io/google_containers/busybox:1.24",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}

	return pod
}
Example 8: StartPods
// StartPods is a simplified version of RunRC that creates plain pods instead of
// a replication controller. It optionally waits for the pods to start running
// (if waitForRunning == true). The number of replicas must be non-zero.
func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string,
	pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
	// A non-positive replica count is a caller bug.
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Core().Pods(namespace).Create(&pod)
		if err != nil {
			return err
		}
	}
	if waitForRunning {
		logFunc("Waiting for running...")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		if err != nil {
			return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
		}
	}
	return nil
}
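A hypothetical call site, assuming an e2e framework client and a template pod are already in hand. Note that the loop above writes into pod.ObjectMeta.Labels and pod.Spec.Containers[0], so the template must carry a non-nil Labels map and at least one container:

// Hypothetical usage; "template" must have ObjectMeta.Labels initialized
// and Spec.Containers non-empty, or StartPods will panic.
if err := StartPods(client, 3, "default", "demo", template, true, framework.Logf); err != nil {
	framework.Failf("starting pods: %v", err)
}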
Example 9: createPodWithPodAffinity
func createPodWithPodAffinity(f *framework.Framework, topologyKey string) *api.Pod {
	return createPausePod(f, pausePodConfig{
		Name: "with-podantiaffinity-" + string(uuid.NewUUID()),
		Affinity: `{
			"podAffinity": {
				"requiredDuringSchedulingIgnoredDuringExecution": [{
					"labelSelector": {
						"matchExpressions": [{
							"key": "security",
							"operator": "In",
							"values": ["S1"]
						}]
					},
					"topologyKey": "` + topologyKey + `"
				}]
			},
			"podAntiAffinity": {
				"requiredDuringSchedulingIgnoredDuringExecution": [{
					"labelSelector": {
						"matchExpressions": [{
							"key": "security",
							"operator": "In",
							"values": ["S2"]
						}]
					},
					"topologyKey": "` + topologyKey + `"
				}]
			}
		}`,
	})
}
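Affinity is a raw JSON string here because, in the Kubernetes release this snippet targets, pod affinity was expressed through an alpha annotation rather than a typed field. Assuming createPausePod applies the config that way (its body is not shown here), the relevant step would resemble:

// Hypothetical sketch, not the actual createPausePod body: in that era the
// JSON spec was attached under the alpha scheduler annotation.
pod.Annotations["scheduler.alpha.kubernetes.io/affinity"] = conf.Affinity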
Example 10: Provision
func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	pv := &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: fc.Options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "fakeplugin-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
			AccessModes:                   fc.Options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, nil
}
Example 11: newPod
func newPod(t *testing.T, name string) (*api.Pod, string) {
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
		ObjectMeta: api.ObjectMeta{
			UID:             uuid.NewUUID(),
			Annotations:     make(map[string]string),
			Name:            name,
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: api.PodSpec{},
		Status: api.PodStatus{
			Conditions: []api.PodCondition{
				{Type: api.PodReady, Status: api.ConditionTrue},
			},
		},
	}

	podName, err := controller.KeyFunc(pod)
	if err != nil {
		t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
	}
	return pod, podName
}
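The fooBar label helper is referenced but not defined in this excerpt; judging from its use as the Labels value, a minimal hypothetical version would be:

// Hypothetical stand-in for the fooBar helper referenced above.
func fooBar() map[string]string {
	return map[string]string{"foo": "bar"}
}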
Example 12: setup
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(uuid.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting node addresses")
	framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
	config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.externalAddrs) < 2 {
		// fall back to legacy IPs
		config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), "At least two nodes with an external IP or LegacyHostIP are required")
	config.nodes = nodeList.Items

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPods()
}
Example 13: TestDeleter
func TestDeleter(t *testing.T) {
	// Deleter has a hard-coded regex for "/tmp".
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", uuid.NewUUID())
	defer os.RemoveAll(tempPath)
	err := os.MkdirAll(tempPath, 0750)
	if err != nil {
		t.Fatalf("Failed to create tmp directory for deleter: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */))

	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if deleter.GetPath() != tempPath {
		t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Mock Recycler expected to return nil but got %s", err)
	}
	// Check the path the test actually created, not a literal "foo".
	if exists, _ := util.FileExists(tempPath); exists {
		t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
	}
}
Example 14: Provision
// Provision creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

	capacity := r.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: r.options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): capacity,
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}
	if len(r.options.PVC.Spec.AccessModes) == 0 {
		pv.Spec.AccessModes = r.plugin.GetAccessModes()
	}

	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
Example 15: newTestPods
// newTestPods creates a list of pods (specification) for test.
func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
	var pods []*v1.Pod
	for i := 0; i < numPods; i++ {
		podName := "test-" + string(uuid.NewUUID())
		labels := map[string]string{
			"type": podType,
			"name": podName,
		}
		pods = append(pods,
			&v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:   podName,
					Labels: labels,
				},
				Spec: v1.PodSpec{
					// Restart policy is Always (the default).
					Containers: []v1.Container{
						{
							Image: imageName,
							Name:  podName,
						},
					},
				},
			})
	}
	return pods
}