

Golang util.NewUUID Function Code Examples

This article collects typical usage examples of the Go function NewUUID from k8s.io/kubernetes/pkg/util. If you have been wondering what NewUUID does, how to call it, or what it looks like in real code, the curated examples below should help.


The sections below present 15 code examples of the NewUUID function, sorted by popularity by default.
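Before the examples, here is a minimal, hedged sketch of the call pattern they all share: NewUUID returns a types.UID (a random UUID), which callers convert to a string to build unique names, labels, or paths. The import path matches the older Kubernetes source tree used by these examples; later releases relocated the helper, so treat this as an illustrative sketch rather than the current API.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	// NewUUID returns a types.UID; callers convert it to a string when
	// building unique object names, as every example below does.
	podName := "demo-pod-" + string(util.NewUUID())
	fmt.Println(podName)
}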

Example 1: Stop

func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	ds, err := reaper.Extensions().DaemonSets(namespace).Get(name)
	if err != nil {
		return err
	}

	// We set the nodeSelector to a random label. This label is nearly guaranteed
	// to not be set on any node so the DaemonSetController will start deleting
	// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
	// the DaemonSet.
	ds.Spec.Template.Spec.NodeSelector = map[string]string{
		string(util.NewUUID()): string(util.NewUUID()),
	}
	// force update to avoid version conflict
	ds.ResourceVersion = ""

	if ds, err = reaper.Extensions().DaemonSets(namespace).Update(ds); err != nil {
		return err
	}

	// Wait for the daemon set controller to kill all the daemon pods.
	if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
		updatedDS, err := reaper.Extensions().DaemonSets(namespace).Get(name)
		if err != nil {
			return false, nil
		}
		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
	}); err != nil {
		return err
	}

	return reaper.Extensions().DaemonSets(namespace).Delete(name)
}
Developer: Clarifai, Project: kubernetes, Lines: 33, Source: stop.go

Example 2: Create

// Create a LimitRange object
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	limitRange, ok := obj.(*api.LimitRange)
	if !ok {
		return nil, fmt.Errorf("invalid object type")
	}

	if !api.ValidNamespace(ctx, &limitRange.ObjectMeta) {
		return nil, errors.NewConflict("limitRange", limitRange.Namespace, fmt.Errorf("LimitRange.Namespace does not match the provided context"))
	}

	if len(limitRange.Name) == 0 {
		limitRange.Name = string(util.NewUUID())
	}

	if errs := validation.ValidateLimitRange(limitRange); len(errs) > 0 {
		return nil, errors.NewInvalid("limitRange", limitRange.Name, errs)
	}
	api.FillObjectMetaSystemFields(ctx, &limitRange.ObjectMeta)

	err := rs.registry.CreateWithName(ctx, limitRange.Name, limitRange)
	if err != nil {
		return nil, err
	}
	return rs.registry.Get(ctx, limitRange.Name)
}
Developer: keithtobin, Project: kubernetes, Lines: 26, Source: rest.go

Example 3: Provision

// Provision for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: r.options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "hostpath-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): r.options.Capacity,
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
}
Developer: RyanBinfeng, Project: kubernetes, Lines: 28, Source: host_path.go

Example 4: scTestPod

func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
	podName := "security-context-" + string(util.NewUUID())
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:        podName,
			Labels:      map[string]string{"name": podName},
			Annotations: map[string]string{},
		},
		Spec: api.PodSpec{
			SecurityContext: &api.PodSecurityContext{
				HostIPC: hostIPC,
				HostPID: hostPID,
			},
			Containers: []api.Container{
				{
					Name:  "test-container",
					Image: "gcr.io/google_containers/busybox:1.24",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}

	return pod
}
Developer: FlyWings, Project: kubernetes, Lines: 25, Source: security_context.go

Example 5: setup

func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting two nodes")
	nodeList, err := config.f.Client.Nodes().List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err))
	config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
	if len(config.externalAddrs) < 2 {
		// fall back to legacy IPs
		config.externalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
	}
	Expect(len(config.externalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
	config.nodes = nodeList.Items

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPods()
}
Developer: yghannam, Project: kubernetes, Lines: 33, Source: kubeproxy.go

Example 6: CreateVolume

func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, err error) {
	cloud, err := getCloudProvider()
	if err != nil {
		return "", 0, err
	}

	name := fmt.Sprintf("kube-dynamic-%s", util.NewUUID())
	requestBytes := c.options.Capacity.Value()
	// GCE works with gigabytes, convert to GiB with rounding up
	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)

	// The disk will be created in the zone in which this code is currently running
	// TODO: We should support auto-provisioning volumes in multiple/specified zones
	zone, err := cloud.GetZone()
	if err != nil {
		glog.V(2).Infof("error getting zone information from GCE: %v", err)
		return "", 0, err
	}

	err = cloud.CreateDisk(name, zone.FailureDomain, int64(requestGB), *c.options.CloudTags)
	if err != nil {
		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
		return "", 0, err
	}
	glog.V(2).Infof("Successfully created GCE PD volume %s", name)
	return name, int(requestGB), nil
}
Developer: richm, Project: origin, Lines: 27, Source: gce_util.go

Example 7: generateTestingImageList

// generateTestingImageList generates a randomly populated image list and the corresponding expectedImageList.
func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) {
	// imageList is a randomly generated image list
	var imageList []kubecontainer.Image
	for ; count > 0; count-- {
		imageItem := kubecontainer.Image{
			ID:       string(util.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		}
		imageList = append(imageList, imageItem)
	}

	// expectedImageList is generated from imageList according to size and maxImagesInNodeStatus
	// 1. sort the imageList by size
	sort.Sort(byImageSize(imageList))
	// 2. convert sorted imageList to api.ContainerImage list
	var expectedImageList []api.ContainerImage
	for _, kubeImage := range imageList {
		apiImage := api.ContainerImage{
			Names:     kubeImage.RepoTags,
			SizeBytes: kubeImage.Size,
		}

		expectedImageList = append(expectedImageList, apiImage)
	}
	// 3. only returns the top maxImagesInNodeStatus images in expectedImageList
	return imageList, expectedImageList[0:maxImagesInNodeStatus]
}
Developer: anish, Project: kubernetes, Lines: 29, Source: kubelet_node_status_test.go

Example 8: TestDeleter

func TestDeleter(t *testing.T) {
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", util.NewUUID())
	defer os.RemoveAll(tempPath)
	err := os.MkdirAll(tempPath, 0750)
	if err != nil {
		t.Fatalf("Failed to create tmp directory for deleter: %v", err)
	}

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if deleter.GetPath() != tempPath {
		t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Mock Recycler expected to return nil but got %s", err)
	}
	if exists, _ := util.FileExists(tempPath); exists {
		t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
	}
}
Developer: johndmulhausen, Project: kubernetes, Lines: 30, Source: host_path_test.go

Example 9: TestOverlappingRSs

func TestOverlappingRSs(t *testing.T) {
	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	labelMap := map[string]string{"foo": "bar"}

	for i := 0; i < 5; i++ {
		manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady

		// Create ReplicaSets, shuffle them randomly, and insert them into the ReplicaSet controller's store
		var controllers []*extensions.ReplicaSet
		for j := 1; j < 10; j++ {
			rsSpec := newReplicaSet(1, labelMap)
			rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			rsSpec.Name = string(util.NewUUID())
			controllers = append(controllers, rsSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rsStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest ReplicaSet is synced
		pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0])
		rsKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRS, _ := manager.queue.Get()
		if queueRS != rsKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
		}
	}
}
Developer: Victorliautaud, Project: kubernetes, Lines: 31, Source: replica_set_test.go

Example 10: makeContainerLogMount

func makeContainerLogMount(opts *kubecontainer.RunContainerOptions, container *api.Container) (*kubecontainer.Mount, error) {
	if opts.PodContainerDir == "" || container.TerminationMessagePath == "" {
		return nil, nil
	}

	// In docker runtime, the container log path contains the container ID.
	// However, for rkt runtime, we cannot get the container ID before the
	// container is launched, so here we generate a random UUID to enable
	// us to map a container's termination message path to a unique log file
	// on the disk.
	randomUID := util.NewUUID()
	containerLogPath := path.Join(opts.PodContainerDir, string(randomUID))
	fs, err := os.Create(containerLogPath)
	if err != nil {
		return nil, err
	}

	if err := fs.Close(); err != nil {
		return nil, err
	}

	mnt := &kubecontainer.Mount{
		// Use a random name for the termination message mount, so that
		// when a container restarts, it will not overwrite the old termination
		// message.
		Name:          fmt.Sprintf("termination-message-%s", randomUID),
		ContainerPath: container.TerminationMessagePath,
		HostPath:      containerLogPath,
		ReadOnly:      false,
	}
	opts.Mounts = append(opts.Mounts, *mnt)

	return mnt, nil
}
Developer: sjtud, Project: kubernetes, Lines: 34, Source: rkt.go

Example 11: Create

func (cc *ConformanceContainer) Create() error {
	cc.podName = cc.Container.Name + string(util.NewUUID())
	imagePullSecrets := []api.LocalObjectReference{}
	for _, s := range cc.ImagePullSecrets {
		imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s})
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      cc.podName,
			Namespace: cc.Namespace,
		},
		Spec: api.PodSpec{
			NodeName:      cc.NodeName,
			RestartPolicy: cc.RestartPolicy,
			Containers: []api.Container{
				cc.Container,
			},
			Volumes:          cc.Volumes,
			ImagePullSecrets: imagePullSecrets,
		},
	}

	_, err := cc.Client.Pods(cc.Namespace).Create(pod)
	return err
}
Developer: openshift, Project: kubernetes, Lines: 25, Source: container.go

Example 12: Provision

func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())

	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: fc.Options.PVName,
			Annotations: map[string]string{
				"kubernetes.io/createdby": "fakeplugin-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
			AccessModes:                   fc.Options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): fc.Options.Capacity,
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}

	return pv, nil
}
Developer: FlyWings, Project: kubernetes, Lines: 26, Source: testing.go

Example 13: createPD

func createPD() (string, error) {
	if testContext.Provider == "gce" || testContext.Provider == "gke" {
		pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))

		gceCloud, err := getGCECloud()
		if err != nil {
			return "", err
		}

		tags := map[string]string{}
		err = gceCloud.CreateDisk(pdName, testContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else {
		volumes, ok := testContext.CloudConfig.Provider.(awscloud.Volumes)
		if !ok {
			return "", fmt.Errorf("Provider does not support volumes")
		}
		volumeOptions := &awscloud.VolumeOptions{}
		volumeOptions.CapacityGB = 10
		return volumes.CreateDisk(volumeOptions)
	}
}
Developer: ethernetdan, Project: kubernetes, Lines: 25, Source: pd.go

Example 14: testPodWithHostVol

//TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())

	return &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: "gcr.io/google_containers/mounttest:0.2",
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes:       mount(source),
		},
	}
}
Developer: nvnvrts, Project: kubernetes, Lines: 30, Source: host_path.go

Example 15: setup

func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}

	By("Getting ssh-able hosts")
	hosts, err := NodeSSHHosts(config.f.Client)
	Expect(err).NotTo(HaveOccurred())
	if len(hosts) == 0 {
		Failf("No ssh-able nodes")
	}
	config.nodes = hosts

	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}

	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector, testContext.CloudConfig.NumNodes)

	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)

	By("Creating test pods")
	config.createTestPod()
}
Developer: chiefy, Project: kubernetes, Lines: 30, Source: kubeproxy.go


Note: The k8s.io/kubernetes/pkg/util.NewUUID examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and the source code copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not republish without permission.