This page collects typical usage examples of the NewUUID function from the Golang package vulcan/kubernetes/pkg/util. If you are unsure what NewUUID does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of the NewUUID function, sorted by popularity by default.
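Before the individual examples, here is a minimal sketch of the calling pattern they all share. It assumes this vulcan/kubernetes fork matches upstream Kubernetes of the same era, where NewUUID returns a types.UID (a string alias) rather than a plain string, which is why the examples below wrap the result in string() when building unique object names:

package main

import (
	"fmt"

	"vulcan/kubernetes/pkg/util"
)

func main() {
	// NewUUID returns a types.UID (a string alias in upstream Kubernetes),
	// so callers convert it to string when composing unique object names.
	uid := util.NewUUID()
	podName := "pod-" + string(uid)
	fmt.Println(podName) // e.g. "pod-0a8fa7cd-6f07-11e5-8d1c-42010af00002"
}

Because each call yields a fresh UUID, this pattern gives collision-free names for test pods, services, and volumes, which is exactly how the examples below use it.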
Example 1: TestDeleter
func TestDeleter(t *testing.T) {
	tempPath := fmt.Sprintf("/tmp/hostpath/%s", util.NewUUID())
	defer os.RemoveAll(tempPath)
	err := os.MkdirAll(tempPath, 0750)
	if err != nil {
		t.Fatalf("Failed to create tmp directory for deleter: %v", err)
	}
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	deleter, err := plug.NewDeleter(spec)
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if deleter.GetPath() != tempPath {
		t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath())
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Mock Recycler expected to return nil but got %s", err)
	}
	if exists, _ := util.FileExists(tempPath); exists {
		t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath)
	}
}
Example 2: TestOverlappingRCs
func TestOverlappingRCs(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()})
	for i := 0; i < 5; i++ {
		manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady
		// Create 9 rcs, shuffle them randomly, and insert them into the rc manager's store
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest rc is synced
		pods := newPodList(nil, 1, api.PodPending, controllers[0])
		rcKey := getKey(controllers[0], t)
		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
Example 3: testPodWithVolume
func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())
	return &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.GroupOrDie("").Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: image,
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes: []api.Volume{
				{
					Name: volumeName,
					VolumeSource: api.VolumeSource{
						EmptyDir: source,
					},
				},
			},
		},
	}
}
Example 4: Create
// Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
// This Creater is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
func (r *hostPathCreater) Create() (*api.PersistentVolume, error) {
	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", util.NewUUID())
	err := os.MkdirAll(fullpath, 0750)
	if err != nil {
		return nil, err
	}
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pv-hostpath-",
			Labels: map[string]string{
				"createdby": "hostpath dynamic provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   r.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", r.options.CapacityMB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: fullpath,
				},
			},
		},
	}, nil
}
Example 5: setup
func (config *KubeProxyTestConfig) setup() {
	By("creating a selector")
	selectorName := "selector-" + string(util.NewUUID())
	serviceSelector := map[string]string{
		selectorName: "true",
	}
	By("Getting ssh-able hosts")
	hosts, err := NodeSSHHosts(config.f.Client)
	Expect(err).NotTo(HaveOccurred())
	config.nodes = make([]string, 0, len(hosts))
	for _, h := range hosts {
		config.nodes = append(config.nodes, strings.TrimSuffix(h, ":22"))
	}
	if enableLoadBalancerTest {
		By("Creating the LoadBalancer Service on top of the pods in kubernetes")
		config.createLoadBalancerService(serviceSelector)
	}
	By("Creating the service pods in kubernetes")
	podName := "netserver"
	config.endpointPods = config.createNetProxyPods(podName, serviceSelector, testContext.CloudConfig.NumNodes)
	By("Creating the service on top of the pods in kubernetes")
	config.createNodePortService(serviceSelector)
	By("Creating test pods")
	config.createTestPod()
}
Example 6: getService
func getService(servicePorts []api.ServicePort) *api.Service {
	return &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      string(util.NewUUID()),
			Namespace: ns,
		},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
		},
	}
}
Example 7: Stop
func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
	ds, err := reaper.Extensions().DaemonSets(namespace).Get(name)
	if err != nil {
		return "", err
	}
	// We set the nodeSelector to a random label. This label is nearly guaranteed
	// to not be set on any node so the DaemonSetController will start deleting
	// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
	// the DaemonSet.
	ds.Spec.Template.Spec.NodeSelector = map[string]string{
		string(util.NewUUID()): string(util.NewUUID()),
	}
	// force update to avoid version conflict
	ds.ResourceVersion = ""
	if ds, err = reaper.Extensions().DaemonSets(namespace).Update(ds); err != nil {
		return "", err
	}
	// Wait for the daemon set controller to kill all the daemon pods.
	if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) {
		updatedDS, err := reaper.Extensions().DaemonSets(namespace).Get(name)
		if err != nil {
			return false, nil
		}
		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
	}); err != nil {
		return "", err
	}
	if err := reaper.Extensions().DaemonSets(namespace).Delete(name); err != nil {
		return "", err
	}
	return fmt.Sprintf("%s stopped", name), nil
}
Example 8: makePodSpec
func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "test-webserver-" + string(util.NewUUID())},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:           "test-webserver",
					Image:          "gcr.io/google_containers/test-webserver",
					LivenessProbe:  livenessProbe,
					ReadinessProbe: readinessProbe,
				},
			},
		},
	}
	return pod
}
Example 9: runResourceTrackingTest
func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, resourceMonitor *resourceMonitor) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and waiting until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))
	// TODO: Use a more realistic workload
	Expect(RunRC(RCConfig{
		Client:    framework.Client,
		Name:      rcName,
		Namespace: framework.Namespace.Name,
		Image:     "gcr.io/google_containers/pause:go",
		Replicas:  totalPods,
	})).NotTo(HaveOccurred())
	// Log once and flush the stats.
	resourceMonitor.LogLatest()
	resourceMonitor.Reset()
	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling resourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(framework.Client, nodeNames.List())
	}
	By("Reporting overall resource usage")
	logPodsOnNodes(framework.Client, nodeNames.List())
	resourceMonitor.LogCPUSummary()
	resourceMonitor.LogLatest()
	By("Deleting the RC")
	DeleteRC(framework.Client, framework.Namespace.Name, rcName)
}
Example 10: NewWebserverTest
func NewWebserverTest(client *client.Client, namespace string, serviceName string) *WebserverTest {
	t := &WebserverTest{}
	t.Client = client
	t.Namespace = namespace
	t.ServiceName = serviceName
	t.TestId = t.ServiceName + "-" + string(util.NewUUID())
	t.Labels = map[string]string{
		"testid": t.TestId,
	}
	t.rcs = make(map[string]bool)
	t.services = make(map[string]bool)
	t.name = "webserver"
	t.image = "gcr.io/google_containers/test-webserver"
	return t
}
Example 11: entrypointTestPod
// Return a prototypical entrypoint test pod
func entrypointTestPod() *api.Pod {
	podName := "client-containers-" + string(util.NewUUID())
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  testContainerName,
					Image: "gcr.io/google_containers/eptest:0.1",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
}
Example 12: createTerminatingPod
func createTerminatingPod(f *Framework) (*api.Pod, error) {
	uuid := util.NewUUID()
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: string(uuid),
		},
		Spec: api.PodSpec{
			NodeName: "nonexistent-node",
			Containers: []api.Container{
				{
					Name:  string(uuid),
					Image: "beta.gcr.io/google_containers/busybox",
				},
			},
		},
	}
	return f.Client.Pods(f.Namespace.Name).Create(pod)
}
Example 13: createPD
func createPD() (string, error) {
	if testContext.Provider == "gce" || testContext.Provider == "gke" {
		pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))
		zone := testContext.CloudConfig.Zone
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		err := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else {
		volumes, ok := testContext.CloudConfig.Provider.(aws_cloud.Volumes)
		if !ok {
			return "", fmt.Errorf("Provider does not support volumes")
		}
		volumeOptions := &aws_cloud.VolumeOptions{}
		volumeOptions.CapacityMB = 10 * 1024
		return volumes.CreateVolume(volumeOptions)
	}
}
Example 14: buildTestLoadBalancer
// buildTestLoadBalancer builds a common loadBalancerController to be used
// in the tests to verify the generated HAProxy configuration file
func buildTestLoadBalancer(lbDefAlgorithm string) *loadBalancerController {
	endpointAddresses := []api.EndpointAddress{
		{IP: "1.2.3.4"},
		{IP: "5.6.7.8"},
	}
	ports := []int{80, 443}
	endpointPorts := []api.EndpointPort{
		{Port: ports[0], Protocol: "TCP"},
		{Port: ports[1], Protocol: "HTTP"},
	}
	servicePorts := []api.ServicePort{
		{Port: ports[0], TargetPort: util.NewIntOrStringFromInt(ports[0])},
		{Port: ports[1], TargetPort: util.NewIntOrStringFromInt(ports[1])},
	}
	svc1 := getService(servicePorts)
	svc1.ObjectMeta.Name = "svc-1"
	svc2 := getService(servicePorts)
	svc2.ObjectMeta.Name = "svc-2"
	endpoints := []*api.Endpoints{
		getEndpoints(svc1, endpointAddresses, endpointPorts),
		getEndpoints(svc2, endpointAddresses, endpointPorts),
	}
	flb := newFakeLoadBalancerController(endpoints, []*api.Service{svc1, svc2})
	cfg, _ := filepath.Abs("./test-samples/loadbalancer_test.json")
	// The caller may not provide an algorithm, so fall back to a default.
	if lbDefAlgorithm == "" {
		lbDefAlgorithm = "roundrobin"
	}
	flb.cfg = parseCfg(cfg, lbDefAlgorithm)
	cfgFile, _ := filepath.Abs("test-" + string(util.NewUUID()))
	flb.cfg.Config = cfgFile
	flb.tcpServices = map[string]int{
		svc1.Name: 20,
	}
	return flb
}
Example 15: newReplicationController
func newReplicationController(replicas int) *api.ReplicationController {
	rc := &api.ReplicationController{
		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.Version()},
		ObjectMeta: api.ObjectMeta{
			UID:             util.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"name": "foo",
						"type": "production",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Image: "foo/bar",
							TerminationMessagePath: api.TerminationMessagePathDefault,
							ImagePullPolicy:        api.PullIfNotPresent,
							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
						},
					},
					RestartPolicy: api.RestartPolicyAlways,
					DNSPolicy:     api.DNSDefault,
					NodeSelector: map[string]string{
						"baz": "blah",
					},
				},
			},
		},
	}
	return rc
}