This article collects and summarizes typical usage examples of the Golang function k8s.io/kubernetes/pkg/client/cache.MetaNamespaceKeyFunc. If you have been wondering what exactly MetaNamespaceKeyFunc does, how to use it, and where to find usage examples, the hand-picked code examples below should help.
The following 15 code examples of the MetaNamespaceKeyFunc function are ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
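Before diving in, it helps to know what the function actually returns: MetaNamespaceKeyFunc maps any Kubernetes API object to the string key "<namespace>/<name>" (just "<name>" for cluster-scoped objects), which is the key format used by the cache stores and informers throughout the examples below. A minimal sketch of that behavior, assuming the modern k8s.io/client-go import paths rather than the older k8s.io/kubernetes/pkg/client/cache path these examples import:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // Any object carrying ObjectMeta works; the key is "<namespace>/<name>".
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
    key, err := cache.MetaNamespaceKeyFunc(pod)
    if err != nil {
        panic(err)
    }
    fmt.Println(key) // prints "default/web-0"
}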
Example 1: GetUID
// implements Unique
func (p *Pod) GetUID() string {
if id, err := cache.MetaNamespaceKeyFunc(p.Pod); err != nil {
panic(fmt.Sprintf("failed to determine pod id for '%+v'", p.Pod))
} else {
return id
}
}
Example 2: newHeadlessService
// Generates skydns records for a headless service.
func (ks *kube2sky) newHeadlessService(subdomain string, service *kapi.Service) error {
// Create an A record for every pod in the service.
// This record must be periodically updated.
// Format is as follows:
// For a service x, with pods a and b create DNS records,
// a.x.ns.domain. and, b.x.ns.domain.
ks.mlock.Lock()
defer ks.mlock.Unlock()
key, err := kcache.MetaNamespaceKeyFunc(service)
if err != nil {
return err
}
e, exists, err := ks.endpointsStore.GetByKey(key)
if err != nil {
return fmt.Errorf("failed to get endpoints object from endpoints store - %v", err)
}
if !exists {
glog.V(1).Infof("Could not find endpoints for service %q in namespace %q. DNS records will be created once endpoints show up.", service.Name, service.Namespace)
return nil
}
if e, ok := e.(*kapi.Endpoints); ok {
return ks.generateRecordsForHeadlessService(subdomain, e, service)
}
return nil
}
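The GetByKey lookup above is the canonical companion to MetaNamespaceKeyFunc: the endpoints store was built with the same key function, so a key computed from one object (the Service) retrieves the sibling object (the Endpoints) that shares its namespace and name. A minimal sketch of the pattern, assuming the modern k8s.io/client-go packages and illustrative object names:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // A store keyed by MetaNamespaceKeyFunc, as informer stores are internally.
    endpointsStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
    endpointsStore.Add(&v1.Endpoints{
        ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "nginx"},
    })

    // A Service and its Endpoints share namespace and name, so the
    // service's key finds the matching endpoints object.
    svc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "nginx"}}
    key, _ := cache.MetaNamespaceKeyFunc(svc)
    _, exists, _ := endpointsStore.GetByKey(key)
    fmt.Println(key, exists) // prints "kube-system/nginx true"
}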
Example 3: findAndRemoveDeletedPods
// Iterate through all pods in desired state of world, and remove if they no
// longer exist in the informer
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
for dswPodUID, dswPodToAdd := range dswp.desiredStateOfWorld.GetPodToAdd() {
dswPodKey, err := kcache.MetaNamespaceKeyFunc(dswPodToAdd.Pod)
if err != nil {
glog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err)
continue
}
// retrieve the pod object from pod informer with the namespace key
informerPodObj, exists, err := dswp.podInformer.GetStore().GetByKey(dswPodKey)
if err != nil || informerPodObj == nil {
glog.Errorf("podInformer GetByKey failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
continue
}
if exists {
informerPod, ok := informerPodObj.(*api.Pod)
if !ok {
glog.Errorf("Failed to cast obj %#v to pod object for pod %q (UID %q)", informerPod, dswPodKey, dswPodUID)
continue
}
informerPodUID := volumehelper.GetUniquePodName(informerPod)
// Check whether the unique identifier of the pod from dsw matches the one retrieved from the pod informer
if informerPodUID == dswPodUID {
glog.V(10).Infof(
"Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
continue
}
}
// the pod from dsw does not exist in the pod informer, or it does not match the unique identifier
// retrieved from the informer, so delete it from dsw
glog.V(1).Infof(
"Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)
}
}
Example 4: Handle
// Handle ensures an image stream is checked for scheduling and then runs a direct import
func (b *scheduled) Handle(obj interface{}) error {
stream := obj.(*api.ImageStream)
if b.enabled && needsScheduling(stream) {
key, _ := cache.MetaNamespaceKeyFunc(stream)
b.scheduler.Add(key, uniqueItem{uid: string(stream.UID), resourceVersion: stream.ResourceVersion})
}
return b.controller.Next(stream, b)
}
Example 5: Importing
// Importing is invoked when the controller decides to import a stream in order to push back
// the next schedule time.
func (b *scheduled) Importing(stream *api.ImageStream) {
if !b.enabled {
return
}
glog.V(5).Infof("DEBUG: stream %s was just imported", stream.Name)
// Push the current key back to the end of the queue because it's just been imported
key, _ := cache.MetaNamespaceKeyFunc(stream)
b.scheduler.Delay(key)
}
Example 6: Schedule
// Schedule implements the Scheduler interface of Kubernetes.
// It returns the selectedMachine's name and error (if there's any).
func (k *kubeScheduler) Schedule(pod *api.Pod, unused algorithm.MinionLister) (string, error) {
log.Infof("Try to schedule pod %v\n", pod.Name)
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := podtask.MakePodKey(ctx, pod.Name)
if err != nil {
return "", err
}
k.api.Lock()
defer k.api.Unlock()
switch task, state := k.api.tasks().ForPod(podKey); state {
case podtask.StateUnknown:
// There's a bit of a potential race here, a pod could have been yielded() and
// then before we get *here* it could be deleted.
// We use meta to index the pod in the store since that's what k8s reflector does.
podName, err := cache.MetaNamespaceKeyFunc(pod)
if err != nil {
log.Warningf("aborting Schedule, unable to understand pod object %+v", pod)
return "", noSuchPodErr
}
if deleted := k.podUpdates.Poll(podName, queue.DELETE_EVENT); deleted {
// avoid scheduling a pod that's been deleted between yieldPod() and Schedule()
log.Infof("aborting Schedule, pod has been deleted %+v", pod)
return "", noSuchPodErr
}
return k.doSchedule(k.api.tasks().Register(k.api.createPodTask(ctx, pod)))
//TODO(jdef) it's possible that the pod state has diverged from what
//we knew previously, we should probably update the task.Pod state here
//before proceeding with scheduling
case podtask.StatePending:
if pod.UID != task.Pod.UID {
// we're dealing with a brand new pod spec here, so the old one must have been
// deleted -- and so our task store is out of sync w/ respect to reality
//TODO(jdef) reconcile task
return "", fmt.Errorf("task %v spec is out of sync with pod %v spec, aborting schedule", task.ID, pod.Name)
} else if task.Has(podtask.Launched) {
// task has been marked as "launched" but the pod binding creation may have failed in k8s,
// but we're going to let someone else handle it, probably the mesos task error handler
return "", fmt.Errorf("task %s has already been launched, aborting schedule", task.ID)
} else {
return k.doSchedule(task, nil)
}
default:
return "", fmt.Errorf("task %s is not pending, nothing to schedule", task.ID)
}
}
Example 7: GetImageReferenceForObjectReference
// GetImageReferenceForObjectReference returns corresponding image reference for the given object
// reference representing either an image stream image or image stream tag or docker image.
func GetImageReferenceForObjectReference(namespace string, objRef *kapi.ObjectReference) (string, error) {
switch objRef.Kind {
case "ImageStreamImage", "DockerImage":
res, err := imageapi.ParseDockerImageReference(objRef.Name)
if err != nil {
return "", err
}
if objRef.Kind == "ImageStreamImage" {
if res.Namespace == "" {
res.Namespace = objRef.Namespace
}
if res.Namespace == "" {
res.Namespace = namespace
}
if len(res.ID) == 0 {
return "", fmt.Errorf("missing id in ImageStreamImage reference %q", objRef.Name)
}
} else {
// objRef.Kind == "DockerImage"
res = res.DockerClientDefaults()
}
// docker image reference
return res.DaemonMinimal().Exact(), nil
case "ImageStreamTag":
isName, tag, err := imageapi.ParseImageStreamTagName(objRef.Name)
if err != nil {
return "", err
}
ns := namespace
if len(objRef.Namespace) > 0 {
ns = objRef.Namespace
}
// <namespace>/<isname>:<tag>
return cache.MetaNamespaceKeyFunc(&kapi.ObjectMeta{
Namespace: ns,
Name: imageapi.JoinImageStreamTag(isName, tag),
})
}
return "", fmt.Errorf("unsupported object reference kind %s", objRef.Kind)
}
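Note the trick in the ImageStreamTag branch above: MetaNamespaceKeyFunc does not need a full API object, only something the meta accessor understands, so a bare ObjectMeta is enough to format the "<namespace>/<name>" string. A minimal sketch of the same idea, assuming the modern metav1 and client-go packages (the names are illustrative):

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // A bare ObjectMeta suffices: MetaNamespaceKeyFunc only reads the
    // namespace and name through the meta accessor interface.
    key, err := cache.MetaNamespaceKeyFunc(&metav1.ObjectMeta{
        Namespace: "myproject",
        Name:      "ruby:2.3",
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(key) // prints "myproject/ruby:2.3"
}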
Example 8: getServiceFromEndpoints
func (ks *kube2sky) getServiceFromEndpoints(e *kapi.Endpoints) (*kapi.Service, error) {
key, err := kcache.MetaNamespaceKeyFunc(e)
if err != nil {
return nil, err
}
obj, exists, err := ks.servicesStore.GetByKey(key)
if err != nil {
return nil, fmt.Errorf("failed to get service object from services store - %v", err)
}
if !exists {
glog.V(1).Infof("could not find service for endpoint %q in namespace %q", e.Name, e.Namespace)
return nil, nil
}
if svc, ok := obj.(*kapi.Service); ok {
return svc, nil
}
return nil, fmt.Errorf("got a non service object in services store %v", obj)
}
Example 9: getServiceFromEndpoints
func (kd *KubeDNS) getServiceFromEndpoints(e *v1.Endpoints) (*v1.Service, error) {
key, err := kcache.MetaNamespaceKeyFunc(e)
if err != nil {
return nil, err
}
obj, exists, err := kd.servicesStore.GetByKey(key)
if err != nil {
return nil, fmt.Errorf("failed to get service object from services store - %v", err)
}
if !exists {
glog.V(3).Infof("No service for endpoint %q in namespace %q",
e.Name, e.Namespace)
return nil, nil
}
if svc, ok := assertIsService(obj); ok {
return svc, nil
}
return nil, fmt.Errorf("got a non service object in services store %v", obj)
}
Example 10: retryFunc
// retryFunc returns a function to retry a controller event
func retryFunc(kind string, isFatal func(err error) bool) controller.RetryFunc {
return func(obj interface{}, err error, retries controller.Retry) bool {
name, keyErr := cache.MetaNamespaceKeyFunc(obj)
if keyErr != nil {
name = "Unknown"
}
if isFatal != nil && isFatal(err) {
glog.V(3).Infof("Will not retry fatal error for %s %s: %v", kind, name, err)
utilruntime.HandleError(err)
return false
}
if retries.Count > maxRetries {
glog.V(3).Infof("Giving up retrying %s %s: %v", kind, name, err)
utilruntime.HandleError(err)
return false
}
glog.V(4).Infof("Retrying %s %s: %v", kind, name, err)
return true
}
}
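MetaNamespaceKeyFunc serves pure log cosmetics here: it turns an opaque queue item into a readable "<namespace>/<name>" label, falling back to "Unknown" when the object carries no metadata. The controller.RetryFunc type is OpenShift-internal, so the sketch below uses hypothetical stand-in types to show how a driving loop would typically consult such a predicate:

package main

import (
    "errors"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
)

// Hypothetical stand-ins for the OpenShift-internal controller types.
type Retry struct{ Count int }

type RetryFunc func(obj interface{}, err error, retries Retry) bool

// handleWithRetries drives a handler, consulting the retry predicate
// after every failure, mirroring how a controller would use retryFunc.
func handleWithRetries(obj interface{}, handle func(interface{}) error, shouldRetry RetryFunc) {
    for retries := (Retry{}); ; retries.Count++ {
        if err := handle(obj); err == nil || !shouldRetry(obj, err, retries) {
            return
        }
    }
}

func main() {
    obj := &metav1.ObjectMeta{Namespace: "default", Name: "demo"}
    shouldRetry := func(obj interface{}, err error, retries Retry) bool {
        name, keyErr := cache.MetaNamespaceKeyFunc(obj)
        if keyErr != nil {
            name = "Unknown"
        }
        if retries.Count > 2 {
            fmt.Printf("giving up on %s: %v\n", name, err)
            return false
        }
        fmt.Printf("retrying %s: %v\n", name, err)
        return true
    }
    handleWithRetries(obj, func(interface{}) error { return errors.New("transient failure") }, shouldRetry)
}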
Example 11: extractFromFile
func (s *sourceFile) extractFromFile(filename string) (pod *v1.Pod, err error) {
glog.V(3).Infof("Reading config file %q", filename)
defer func() {
if err == nil && pod != nil {
objKey, keyErr := cache.MetaNamespaceKeyFunc(pod)
if keyErr != nil {
err = keyErr
return
}
s.fileKeyMapping[filename] = objKey
}
}()
file, err := os.Open(filename)
if err != nil {
return pod, err
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
return pod, err
}
defaultFn := func(pod *api.Pod) error {
return s.applyDefaults(pod, filename)
}
parsed, pod, podErr := tryDecodeSinglePod(data, defaultFn)
if parsed {
if podErr != nil {
return pod, podErr
}
return pod, nil
}
return pod, fmt.Errorf("%v: read '%v', but couldn't parse as pod(%v).\n",
filename, string(data), podErr)
}
Example 12: getServiceFromEndpoints
func (k2c *Kube2Consul) getServiceFromEndpoints(eps *kapi.Endpoints) (*kapi.Service, error) {
key, err := kcache.MetaNamespaceKeyFunc(eps)
if err != nil {
return nil, err
}
obj, exist, err := k2c.servicesStore.GetByKey(key)
if err != nil {
return nil, fmt.Errorf("faild to get service from service store: %v", err)
}
if !exist {
glog.Infof("can't find service for endpoint %s in namespace %s.", eps.Name, eps.Namespace)
return nil, nil
}
if svc, ok := assertIsService(obj); ok {
return svc, nil
}
return nil, fmt.Errorf("a none service object in service store: %v", obj)
}
Example 13: serviceWithClusterIPHasEndpoints
// Returns true if the service corresponding to the given message has endpoints.
// Note: Works only for services with ClusterIP. Will return an error for headless service (service without a clusterIP).
// Important: Assumes that we already hold the cacheLock. It is the caller's responsibility to acquire it.
// This is because the code will panic if we try to acquire the lock again while already holding it.
func (kd *KubeDNS) serviceWithClusterIPHasEndpoints(msg *skymsg.Service) (bool, error) {
svc, ok := kd.clusterIPServiceMap[msg.Host]
if !ok {
// It is a headless service.
return false, fmt.Errorf("method not expected to be called for headless service")
}
key, err := kcache.MetaNamespaceKeyFunc(svc)
if err != nil {
return false, err
}
e, exists, err := kd.endpointsStore.GetByKey(key)
if err != nil {
return false, fmt.Errorf("failed to get endpoints object from endpoints store - %v", err)
}
if !exists {
return false, nil
}
if e, ok := e.(*v1.Endpoints); ok {
return len(e.Subsets) > 0, nil
}
return false, fmt.Errorf("unexpected: found non-endpoint object in endpoint store: %v", e)
}
Example 14: yield
// implementation of scheduling plugin's NextPod func; see k8s plugin/pkg/scheduler
func (q *queuer) yield() *api.Pod {
log.V(2).Info("attempting to yield a pod")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here to short intervals so that we don't block the
// enqueuer Run() routine for very long
kpod := q.podQueue.Await(yieldPopTimeout)
if kpod == nil {
signalled := runtime.After(q.unscheduledCond.Wait)
// lock is yielded at this point and we're going to wait for either
// a timeout, or a signal that there's data
select {
case <-time.After(yieldWaitTimeout):
q.unscheduledCond.Broadcast() // abort Wait()
<-signalled // wait for the go-routine, and the lock
log.V(4).Infoln("timed out waiting for a pod to yield")
case <-signalled:
// we have acquired the lock, and there
// may be a pod for us to pop now
}
continue
}
pod := kpod.(*Pod).Pod
if podName, err := cache.MetaNamespaceKeyFunc(pod); err != nil {
log.Warningf("yield unable to understand pod object %+v, will skip: %v", pod, err)
} else if !q.podUpdates.Poll(podName, queue.POP_EVENT) {
log.V(1).Infof("yield popped a transitioning pod, skipping: %+v", pod)
} else if annotatedForExecutor(pod) {
// should never happen if enqueuePods is filtering properly
log.Warningf("yield popped an already-scheduled pod, skipping: %+v", pod)
} else {
return pod
}
}
}
Example 15: getServiceFromEndpoints
func getServiceFromEndpoints(serviceStore kubeCache.Store, e *kubeAPI.Endpoints) (*kubeAPI.Service, error) {
var (
err error
key string
obj interface{}
exists bool
ok bool
svc *kubeAPI.Service
)
if key, err = kubeCache.MetaNamespaceKeyFunc(e); err != nil {
return nil, err
}
if obj, exists, err = serviceStore.GetByKey(key); err != nil {
return nil, fmt.Errorf("Error getting service object from services store - %v", err)
}
if !exists {
log.WithFields(log.Fields{"name": e.Name, "namespace": e.Namespace}).Warn("Unable to find service for endpoint")
return nil, nil
}
if svc, ok = obj.(*kubeAPI.Service); !ok {
return nil, fmt.Errorf("got a non service object in services store %v", obj)
}
return svc, nil
}