This article collects typical usage examples of the Golang UID function from github.com/ttysteale/kubernetes-api/types. If you are wondering what the UID function does or how to use it, the curated examples below should help.
In total, 15 code examples of the UID function are shown below, sorted by popularity by default.
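Before the examples, one detail makes them easier to read: in upstream Kubernetes, types.UID is declared as a named string type (type UID string), so every types.UID(...) call below is a plain type conversion rather than a constructor. A minimal, self-contained sketch, assuming the ttysteale fork keeps the same definition as upstream:

package main

import (
    "fmt"

    "github.com/ttysteale/kubernetes-api/types"
)

func main() {
    // types.UID is a named string type, so this conversion performs no
    // validation or allocation; it only changes the static type.
    uid := types.UID("11111111-2222-3333-4444-555555555555")
    fmt.Printf("pod UID: %s\n", uid)
}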
Example 1: createEvent
func createEvent(eventType eventType, selfUID string, owners []string) event {
    var ownerReferences []api.OwnerReference
    for i := 0; i < len(owners); i++ {
        ownerReferences = append(ownerReferences, api.OwnerReference{UID: types.UID(owners[i])})
    }
    return event{
        eventType: eventType,
        obj: &api.Pod{
            ObjectMeta: api.ObjectMeta{
                UID:             types.UID(selfUID),
                OwnerReferences: ownerReferences,
            },
        },
    }
}
Example 2: TestReasonCache
func TestReasonCache(t *testing.T) {
    // Create test sync result
    syncResult := kubecontainer.PodSyncResult{}
    results := []*kubecontainer.SyncResult{
        // reason cache should be set for SyncResult with StartContainer action and error
        kubecontainer.NewSyncResult(kubecontainer.StartContainer, "container_1"),
        // reason cache should not be set for SyncResult with StartContainer action but without error
        kubecontainer.NewSyncResult(kubecontainer.StartContainer, "container_2"),
        // reason cache should not be set for SyncResult with other actions
        kubecontainer.NewSyncResult(kubecontainer.KillContainer, "container_3"),
    }
    results[0].Fail(kubecontainer.ErrRunContainer, "message_1")
    results[2].Fail(kubecontainer.ErrKillContainer, "message_3")
    syncResult.AddSyncResult(results...)
    uid := types.UID("pod_1")
    reasonCache := NewReasonCache()
    reasonCache.Update(uid, syncResult)
    assertReasonInfo(t, reasonCache, uid, results[0], true)
    assertReasonInfo(t, reasonCache, uid, results[1], false)
    assertReasonInfo(t, reasonCache, uid, results[2], false)
    reasonCache.Remove(uid, results[0].Target.(string))
    assertReasonInfo(t, reasonCache, uid, results[0], false)
}
Example 3: TestMounterAndUnmounterTypeAssert
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
    plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    spec := &api.Volume{
        Name: "vol1",
        VolumeSource: api.VolumeSource{
            AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
                VolumeID: "pd",
                FSType:   "ext4",
            },
        },
    }
    mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
    if _, ok := mounter.(volume.Unmounter); ok {
        t.Errorf("Volume Mounter can be type-asserted to an Unmounter")
    }
    unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
    if _, ok := unmounter.(volume.Mounter); ok {
        t.Errorf("Volume Unmounter can be type-asserted to a Mounter")
    }
}
Example 4: getContainerInfoFromLabel
func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo {
    var err error
    containerInfo := &labelledContainerInfo{
        PodName:                getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
        PodNamespace:           getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
        PodUID:                 kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
        Name:                   getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
        Hash:                   getStringValueFromLabel(labels, kubernetesContainerHashLabel),
        TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
    }
    if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
        logError(containerInfo, kubernetesContainerRestartCountLabel, err)
    }
    if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodDeletionGracePeriodLabel); err != nil {
        logError(containerInfo, kubernetesPodDeletionGracePeriodLabel, err)
    }
    if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil {
        logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err)
    }
    preStopHandler := &api.Handler{}
    if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil {
        logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err)
    } else if found {
        containerInfo.PreStopHandler = preStopHandler
    }
    supplyContainerInfoWithOldLabel(labels, containerInfo)
    return containerInfo
}
Example 5: testDeleteWithUID
// This tests the fast-fail path. We test that the precondition gets verified
// again before deleting the object in tests of pkg/storage/etcd.
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) {
    ctx := t.TestContext()
    foo := copyOrDie(obj)
    t.setObjectMeta(foo, t.namer(1))
    objectMeta := t.getObjectMetaOrFail(foo)
    objectMeta.UID = types.UID("UID0000")
    if err := createFn(ctx, foo); err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID1111"))
    if err == nil || !errors.IsConflict(err) {
        t.Errorf("unexpected error: %v", err)
    }
    obj, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID0000"))
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if !t.returnDeletedObject {
        if status, ok := obj.(*unversioned.Status); !ok {
            t.Errorf("expected status of delete, got %v", status)
        } else if status.Status != unversioned.StatusSuccess {
            t.Errorf("expected success, got: %v", status.Status)
        }
    }
    _, err = getFn(ctx, foo)
    if err == nil || !isNotFoundFn(err) {
        t.Errorf("unexpected error: %v", err)
    }
}
Example 6: TestPluginBackCompat
func TestPluginBackCompat(t *testing.T) {
    basePath, err := utiltesting.MkTmpdir("emptydirTest")
    if err != nil {
        t.Fatalf("can't make a temp dir: %v", err)
    }
    defer os.RemoveAll(basePath)
    plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, "" /* rootContext */)
    spec := &api.Volume{
        Name: "vol1",
    }
    pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
    mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }
    volPath := mounter.GetPath()
    if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
        t.Errorf("Got unexpected path: %s", volPath)
    }
}
Example 7: TestDependentsRace
// TestDependentsRace relies on Go's data race detector to check whether there
// is a data race in the dependents field.
func TestDependentsRace(t *testing.T) {
    clientPool := dynamic.NewClientPool(&restclient.Config{}, dynamic.LegacyAPIPathResolverFunc)
    podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
    gc, err := NewGarbageCollector(clientPool, podResource)
    if err != nil {
        t.Fatal(err)
    }
    const updates = 100
    owner := &node{dependentsLock: &sync.RWMutex{}, dependents: make(map[*node]struct{})}
    ownerUID := types.UID("owner")
    gc.propagator.uidToNode.Write(owner)
    go func() {
        for i := 0; i < updates; i++ {
            dependent := &node{}
            gc.propagator.addDependentToOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
            gc.propagator.removeDependentFromOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
        }
    }()
    go func() {
        gc.orphanQueue.Add(owner)
        for i := 0; i < updates; i++ {
            gc.orphanFinalizer()
        }
    }()
}
Example 8: testUpdateWithWrongUID
func (t *Tester) testUpdateWithWrongUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
    ctx := t.TestContext()
    foo := copyOrDie(obj)
    t.setObjectMeta(foo, t.namer(5))
    objectMeta := t.getObjectMetaOrFail(foo)
    objectMeta.UID = types.UID("UID0000")
    if err := createFn(ctx, foo); err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    objectMeta.UID = types.UID("UID1111")
    obj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.Name, rest.DefaultUpdatedObjectInfo(foo, api.Scheme))
    if created || obj != nil {
        t.Errorf("expected nil object and no creation for object: %v", foo)
    }
    if err == nil || !errors.IsConflict(err) {
        t.Errorf("unexpected error: %v", err)
    }
}
Example 9: recordNodeEvent
func (nc *NodeController) recordNodeEvent(nodeName, eventtype, reason, event string) {
    ref := &api.ObjectReference{
        Kind:      "Node",
        Name:      nodeName,
        UID:       types.UID(nodeName),
        Namespace: "",
    }
    glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
    nc.recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
Example 10: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("iscsi_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    pv := &api.PersistentVolume{
        ObjectMeta: api.ObjectMeta{
            Name: "pvA",
        },
        Spec: api.PersistentVolumeSpec{
            PersistentVolumeSource: api.PersistentVolumeSource{
                ISCSI: &api.ISCSIVolumeSource{
                    TargetPortal: "127.0.0.1:3260",
                    IQN:          "iqn.2014-12.server:storage.target01",
                    FSType:       "ext4",
                    Lun:          0,
                },
            },
            ClaimRef: &api.ObjectReference{
                Name: "claimA",
            },
        },
    }
    claim := &api.PersistentVolumeClaim{
        ObjectMeta: api.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: api.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: api.PersistentVolumeClaimStatus{
            Phase: api.ClaimBound,
        },
    }
    client := fake.NewSimpleClientset(pv, claim)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
    plug, _ := plugMgr.FindPluginByName(iscsiPluginName)
    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}
Example 11: recordNodeStatusChange
func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status string) {
    ref := &api.ObjectReference{
        Kind:      "Node",
        Name:      node.Name,
        UID:       types.UID(node.Name),
        Namespace: "",
    }
    glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
    // TODO: This requires a transaction, either both node status is updated
    // and event is recorded or neither should happen, see issue #6055.
    nc.recorder.Eventf(ref, api.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
}
Example 12: newPetSetWithVolumes
func newPetSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.PetSet {
    mounts := append(petMounts, podMounts...)
    claims := []api.PersistentVolumeClaim{}
    for _, m := range petMounts {
        claims = append(claims, newPVC(m.Name))
    }
    vols := []api.Volume{}
    for _, m := range podMounts {
        vols = append(vols, api.Volume{
            Name: m.Name,
            VolumeSource: api.VolumeSource{
                HostPath: &api.HostPathVolumeSource{
                    Path: fmt.Sprintf("/tmp/%v", m.Name),
                },
            },
        })
    }
    return &apps.PetSet{
        TypeMeta: unversioned.TypeMeta{
            Kind:       "PetSet",
            APIVersion: "apps/v1beta1",
        },
        ObjectMeta: api.ObjectMeta{
            Name:      name,
            Namespace: api.NamespaceDefault,
            UID:       types.UID("test"),
        },
        Spec: apps.PetSetSpec{
            Selector: &unversioned.LabelSelector{
                MatchLabels: map[string]string{"foo": "bar"},
            },
            Replicas: replicas,
            Template: api.PodTemplateSpec{
                Spec: api.PodSpec{
                    Containers: []api.Container{
                        {
                            Name:         "nginx",
                            Image:        "nginx",
                            VolumeMounts: mounts,
                        },
                    },
                    Volumes: vols,
                },
            },
            VolumeClaimTemplates: claims,
            ServiceName:          "governingsvc",
        },
    }
}
Example 13: TestGetWork
func TestGetWork(t *testing.T) {
    q, clock := newTestBasicWorkQueue()
    q.Enqueue(types.UID("foo1"), -1*time.Minute)
    q.Enqueue(types.UID("foo2"), -1*time.Minute)
    q.Enqueue(types.UID("foo3"), 1*time.Minute)
    q.Enqueue(types.UID("foo4"), 1*time.Minute)
    expected := []types.UID{types.UID("foo1"), types.UID("foo2")}
    compareResults(t, expected, q.GetWork())
    compareResults(t, []types.UID{}, q.GetWork())
    // Advance the fake clock 1 hour ahead so the delayed items become ready.
    clock.Step(time.Hour)
    expected = []types.UID{types.UID("foo3"), types.UID("foo4")}
    compareResults(t, expected, q.GetWork())
    compareResults(t, []types.UID{}, q.GetWork())
}
Example 14: TestPatchWithVersionConflictThenAdmissionFailure
func TestPatchWithVersionConflictThenAdmissionFailure(t *testing.T) {
    namespace := "bar"
    name := "foo"
    uid := types.UID("uid")
    fifteen := int64(15)
    thirty := int64(30)
    seen := false
    tc := &patchTestCase{
        name: "TestPatchWithVersionConflictThenAdmissionFailure",
        admit: func(updatedObject runtime.Object, currentObject runtime.Object) error {
            if seen {
                return errors.New("admission failure")
            }
            seen = true
            return nil
        },
        startingPod:   &api.Pod{},
        changedPod:    &api.Pod{},
        updatePod:     &api.Pod{},
        expectedError: "admission failure",
    }
    tc.startingPod.Name = name
    tc.startingPod.Namespace = namespace
    tc.startingPod.UID = uid
    tc.startingPod.ResourceVersion = "1"
    tc.startingPod.APIVersion = "v1"
    tc.startingPod.Spec.ActiveDeadlineSeconds = &fifteen
    tc.changedPod.Name = name
    tc.changedPod.Namespace = namespace
    tc.changedPod.UID = uid
    tc.changedPod.ResourceVersion = "1"
    tc.changedPod.APIVersion = "v1"
    tc.changedPod.Spec.ActiveDeadlineSeconds = &thirty
    tc.updatePod.Name = name
    tc.updatePod.Namespace = namespace
    tc.updatePod.UID = uid
    tc.updatePod.ResourceVersion = "2"
    tc.updatePod.APIVersion = "v1"
    tc.updatePod.Spec.ActiveDeadlineSeconds = &fifteen
    tc.updatePod.Spec.NodeName = "anywhere"
    tc.Run(t)
}
Example 15: NewUUID
func NewUUID() types.UID {
    uuidLock.Lock()
    defer uuidLock.Unlock()
    result := uuid.NewUUID()
    // The UUID package is naive and can generate identical UUIDs if the
    // time interval is quick enough.
    // The UUID uses 100 ns increments, so it's short enough to actively
    // wait for a new value.
    for uuid.Equal(lastUUID, result) {
        result = uuid.NewUUID()
    }
    lastUUID = result
    return types.UID(result.String())
}
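To close, a hedged usage sketch for Example 15. Because NewUUID serializes generation behind a package-level lock and busy-waits until the time-based UUID advances, each call within a process should return a distinct types.UID. The import path below is an assumption: upstream Kubernetes exports this function from pkg/util, and the ttysteale fork presumably mirrors that layout.

package main

import (
    "fmt"

    "github.com/ttysteale/kubernetes-api/util" // hypothetical fork path; upstream: k8s.io/kubernetes/pkg/util
)

func main() {
    // Even back-to-back calls should differ, since NewUUID waits out the
    // 100 ns resolution of the underlying time-based UUID generator.
    a := util.NewUUID()
    b := util.NewUUID()
    fmt.Println(a)
    fmt.Println(b)
    fmt.Println("distinct:", a != b) // expected: true
}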