This article collects typical usage examples of the Client.Events method from the Golang package k8s.io/kubernetes/pkg/client/unversioned. If you are wondering what Client.Events does, how to call it, or simply want to see it used in context, the curated snippets below should help. You can also explore the other methods of the containing type, k8s.io/kubernetes/pkg/client/unversioned.Client.
Fifteen code examples of the Client.Events method are shown below, sorted by popularity by default.
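Before diving in, here is a minimal sketch of the basic call pattern, assuming an already-configured *client.Client (construction is omitted) and a namespace ns; both names are placeholders. It mirrors the list calls used throughout the examples below; note that the exact List signature varies across the Kubernetes releases these examples were taken from (some releases take selectors directly, others take an api.ListOptions):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
)

// listNamespaceEvents lists every event in ns via Client.Events and prints a
// one-line summary of each. c is assumed to be configured elsewhere.
func listNamespaceEvents(c *client.Client, ns string) error {
	events, err := c.Events(ns).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return err
	}
	for _, e := range events.Items {
		fmt.Printf("%s\t%s\t%s\n", e.InvolvedObject.Name, e.Reason, e.Message)
	}
	return nil
}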
Example 1: verifyResult
func verifyResult(c *client.Client, podName string, ns string, oldNotScheduled int) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	scheduledPods, notScheduledPods := getPodsScheduled(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector())
	expectNoError(err)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		}
		return ""
	}

	Expect(len(notScheduledPods)).To(Equal(1+oldNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
Example 2: NewPetSetController
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
	pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}

	psc := &PetSetController{
		kubeClient:       kubeClient,
		blockingPetStore: newUnHealthyPetTracker(pc),
		newSyncer: func(blockingPet *pcb) *petSyncer {
			return &petSyncer{pc, blockingPet}
		},
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
	}

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		// lookup the petset and enqueue
		AddFunc: psc.addPod,
		// lookup current and old petset if labels changed
		UpdateFunc: psc.updatePod,
		// lookup petset accounting for deletion tombstones
		DeleteFunc: psc.deletePod,
	})
	psc.podStore.Indexer = podInformer.GetIndexer()
	psc.podController = podInformer.GetController()

	psc.psStore.Store, psc.psController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options)
			},
		},
		&apps.PetSet{},
		petSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: psc.enqueuePetSet,
			UpdateFunc: func(old, cur interface{}) {
				oldPS := old.(*apps.PetSet)
				curPS := cur.(*apps.PetSet)
				if oldPS.Status.Replicas != curPS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
				}
				psc.enqueuePetSet(cur)
			},
			DeleteFunc: psc.enqueuePetSet,
		},
	)
	// TODO: Watch volumes
	psc.podStoreSynced = psc.podController.HasSynced
	psc.syncHandler = psc.Sync
	return psc
}
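The broadcaster wiring above is the standard controller pattern: Client.Events("") supplies the event sink, and the recorder is what the controller code actually calls. As a hedged illustration (the object ps and the reason, message, and petName are made up for this sketch), emitting an event through the recorder looks like:

// Hypothetical use of the recorder created above; ps is assumed to be an
// *apps.PetSet being reconciled, and petName is an illustrative variable.
recorder.Eventf(ps, api.EventTypeNormal, "SuccessfulCreate", "created pet %q", petName)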
Example 3: NewKubeEvents
func NewKubeEvents(client *kubeclient.Client, ec cache.EventsCache) api.Source {
	// Buffered channel to send/receive events from
	eventsChan := make(chan eventsUpdate, 1024)
	errorChan := make(chan error)
	es := &eventsSourceImpl{
		Client:        client,
		eventsChannel: eventsChan,
		errorChannel:  errorChan,
		ec:            ec,
	}
	go es.watchLoop(client.Events(kubeapi.NamespaceAll), eventsChan, errorChan)
	// TODO: Inject Namespace Store in here to get namespace IDs for events.
	return es
}
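watchLoop is defined elsewhere in that project, so its body is not shown here. A plausible general shape for such a loop over the per-namespace events interface (an assumption, not the project's actual code; the Watch signature follows the api.ListOptions form used in Examples 13 and 15, while older releases took selectors directly) is:

// watchEvents is a sketch of a generic event watch loop: it opens a watch on
// the given events interface and forwards each event until the watch closes.
// EventInterface is the interface returned by Client.Events(namespace).
func watchEvents(ei kubeclient.EventInterface, out chan<- *kubeapi.Event, errCh chan<- error) {
	w, err := ei.Watch(kubeapi.ListOptions{})
	if err != nil {
		errCh <- err
		return
	}
	defer w.Stop()
	for e := range w.ResultChan() {
		if ev, ok := e.Object.(*kubeapi.Event); ok {
			out <- ev
		}
	}
}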
Example 4: checkNoUnexpectedEvents
// checkNoUnexpectedEvents checks that no unexpected events occurred.
// Currently only "UnexpectedJob" is checked.
func checkNoUnexpectedEvents(c *client.Client, ns, scheduledJobName string) error {
	sj, err := c.Batch().ScheduledJobs(ns).Get(scheduledJobName)
	if err != nil {
		return fmt.Errorf("error in getting scheduledjob %s/%s: %v", ns, scheduledJobName, err)
	}
	events, err := c.Events(ns).Search(sj)
	if err != nil {
		return fmt.Errorf("error in listing events: %s", err)
	}
	for _, e := range events.Items {
		if e.Reason == "UnexpectedJob" {
			return fmt.Errorf("found unexpected event: %#v", e)
		}
	}
	return nil
}
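Events(ns).Search(sj) saves building a selector by hand: it derives a field selector from the given object's metadata. Roughly (a sketch, not the client library's exact logic), it is equivalent to:

// Approximate hand-rolled equivalent of c.Events(ns).Search(sj); the real
// implementation derives the involvedObject terms from the object reference.
sel := fields.Set{
	"involvedObject.kind":      "ScheduledJob",
	"involvedObject.name":      sj.Name,
	"involvedObject.namespace": sj.Namespace,
	"involvedObject.uid":       string(sj.UID),
}.AsSelector()
events, err := c.Events(ns).List(api.ListOptions{FieldSelector: sel})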
Example 5: GetReplicationControllerPodsEvents
// GetReplicationControllerPodsEvents gets the events associated with the pods
// in a replication controller.
func GetReplicationControllerPodsEvents(client *client.Client, namespace, replicationControllerName string) ([]api.Event, error) {
	replicationController, err := client.ReplicationControllers(namespace).Get(replicationControllerName)
	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(api.ListOptions{
		LabelSelector: labels.SelectorFromSet(replicationController.Spec.Selector),
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	eventList, err := client.Events(namespace).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	events := filterEventsByPodsUID(eventList.Items, pods.Items)
	return events, nil
}
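filterEventsByPodsUID is a project helper whose body is not shown; a plausible sketch (an assumption: it keeps events whose involved object UID belongs to one of the pods, using types.UID from k8s.io/kubernetes/pkg/types) is:

// filterEventsByPodsUID is a sketch of the helper used above: it returns the
// events whose InvolvedObject.UID matches one of the given pods.
func filterEventsByPodsUID(events []api.Event, pods []api.Pod) []api.Event {
	uids := make(map[types.UID]struct{}, len(pods))
	for _, pod := range pods {
		uids[pod.UID] = struct{}{}
	}
	result := make([]api.Event, 0)
	for _, event := range events {
		if _, ok := uids[event.InvolvedObject.UID]; ok {
			result = append(result, event)
		}
	}
	return result
}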
Example 6: verifyResult
func verifyResult(c *client.Client, podName string, ns string) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
	expectNoError(err)
	scheduledPods, notScheduledPods := getPodsScheduled(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector(),
		unversioned.ListOptions{})
	expectNoError(err)
	// If we failed to find an event whose reason starts with a capital letter,
	// try looking for one starting with a lowercase letter, for backward
	// compatibility. If we don't, we end up in #15806.
	// TODO: remove this block when we don't care about supporting v1.0 too much.
	if len(schedEvents.Items) == 0 {
		schedEvents, err = c.Events(ns).List(
			labels.Everything(),
			fields.Set{
				"involvedObject.kind":      "Pod",
				"involvedObject.name":      podName,
				"involvedObject.namespace": ns,
				"source":                   "scheduler",
				"reason":                   "failedScheduling",
			}.AsSelector(),
			unversioned.ListOptions{})
		expectNoError(err)
	}

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		}
		return ""
	}

	Expect(len(notScheduledPods)).To(Equal(1), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
Example 7: GetReplicationControllerList
// GetReplicationControllerList returns a list of all Replication Controllers in the cluster.
func GetReplicationControllerList(client *client.Client) (*ReplicationControllerList, error) {
	log.Printf("Getting list of all replication controllers in the cluster")

	listEverything := api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	}

	replicationControllers, err := client.ReplicationControllers(api.NamespaceAll).List(listEverything)
	if err != nil {
		return nil, err
	}

	services, err := client.Services(api.NamespaceAll).List(listEverything)
	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(api.NamespaceAll).List(listEverything)
	if err != nil {
		return nil, err
	}

	eventsList, err := client.Events(api.NamespaceAll).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	// Anonymous callback function to get pod warnings. It fulfils the
	// GetPodsEventWarningsFunc type contract: given a list of api pods, it
	// returns the pod-related warning events.
	getPodsEventWarningsFn := func(pods []api.Pod) []Event {
		return GetPodsEventWarnings(eventsList, pods)
	}

	// Anonymous callback function to get nodes by their names.
	getNodeFn := func(nodeName string) (*api.Node, error) {
		return client.Nodes().Get(nodeName)
	}

	result, err := getReplicationControllerList(replicationControllers.Items, services.Items,
		pods.Items, getPodsEventWarningsFn, getNodeFn)
	if err != nil {
		return nil, err
	}

	return result, nil
}
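GetPodsEventWarnings is likewise defined elsewhere; conceptually it keeps only warning-type events that involve one of the given pods and maps them to the package's own Event type. A simplified sketch over api.Event (an assumption; the real helper's filtering and return type differ in detail):

// podWarningEvents is a sketch of a pod-warning filter: keep events of type
// Warning whose involved object is one of the given pods.
func podWarningEvents(events []api.Event, pods []api.Pod) []api.Event {
	names := make(map[string]struct{}, len(pods))
	for _, p := range pods {
		names[p.Name] = struct{}{}
	}
	var warnings []api.Event
	for _, e := range events {
		if e.Type != api.EventTypeWarning {
			continue
		}
		if _, ok := names[e.InvolvedObject.Name]; ok {
			warnings = append(warnings, e)
		}
	}
	return warnings
}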
Example 8: NewScheduledJobController
func NewScheduledJobController(kubeClient *client.Client) *ScheduledJobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	if kubeClient != nil && kubeClient.GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("scheduledjob_controller", kubeClient.GetRateLimiter())
	}

	jm := &ScheduledJobController{
		kubeClient: kubeClient,
		jobControl: realJobControl{KubeClient: kubeClient},
		sjControl:  &realSJControl{KubeClient: kubeClient},
		recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduled-job-controller"}),
	}

	return jm
}
Example 9: GetReplicationControllerEvents
// GetReplicationControllerEvents gets the events associated with a replication controller.
func GetReplicationControllerEvents(client *client.Client, namespace, replicationControllerName string) ([]api.Event, error) {
	fieldSelector, err := fields.ParseSelector("involvedObject.name=" + replicationControllerName)
	if err != nil {
		return nil, err
	}

	list, err := client.Events(namespace).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fieldSelector,
	})
	if err != nil {
		return nil, err
	}

	return list.Items, nil
}
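Note that a bare involvedObject.name selector also matches events for any other object that happens to share the name. To narrow the query, several terms can be combined with the fields.Set form used in the verifyResult examples above (a sketch using this example's variables):

// Sketch: restrict the selector to events about this exact replication
// controller rather than anything with the same name.
fieldSelector := fields.Set{
	"involvedObject.kind":      "ReplicationController",
	"involvedObject.name":      replicationControllerName,
	"involvedObject.namespace": namespace,
}.AsSelector()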
Example 10: GetReplicaSetEvents
// GetReplicaSetEvents gets the events associated with a replica set.
func GetReplicaSetEvents(client *client.Client, namespace, replicaSetName string) ([]api.Event, error) {
	fieldSelector, err := fields.ParseSelector("involvedObject.name=" + replicaSetName)
	if err != nil {
		return nil, err
	}

	list, err := client.Events(namespace).List(unversioned.ListOptions{
		LabelSelector: unversioned.LabelSelector{labels.Everything()},
		FieldSelector: unversioned.FieldSelector{fieldSelector},
	})
	if err != nil {
		return nil, err
	}

	return list.Items, nil
}
Example 11: GetReplicaSetPodsEvents
// GetReplicaSetPodsEvents gets the events associated with the pods in a replica set.
func GetReplicaSetPodsEvents(client *client.Client, namespace, replicaSetName string) ([]api.Event, error) {
	replicaSet, err := client.ReplicationControllers(namespace).Get(replicaSetName)
	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(unversioned.ListOptions{
		LabelSelector: unversioned.LabelSelector{labels.SelectorFromSet(replicaSet.Spec.Selector)},
		FieldSelector: unversioned.FieldSelector{fields.Everything()},
	})
	if err != nil {
		return nil, err
	}

	events := make([]api.Event, 0)
	for _, pod := range pods.Items {
		fieldSelector, err := fields.ParseSelector("involvedObject.name=" + pod.Name)
		if err != nil {
			return nil, err
		}

		list, err := client.Events(namespace).List(unversioned.ListOptions{
			LabelSelector: unversioned.LabelSelector{labels.Everything()},
			FieldSelector: unversioned.FieldSelector{fieldSelector},
		})
		if err != nil {
			return nil, err
		}

		events = append(events, list.Items...)
	}

	return events, nil
}
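Note the design trade-off versus Example 5: this version issues one Events.List call per pod, while GetReplicationControllerPodsEvents lists the namespace's events once and filters client-side, which is cheaper when a replica set has many pods.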
Example 12:
config := RCConfig{Client: c,
	Image:                "gcr.io/google_containers/pause:2.0",
	Name:                 RCName,
	Namespace:            ns,
	PollInterval:         itArg.interval,
	PodStatusFile:        fileHndl,
	Replicas:             totalPods,
	MaxContainerFailures: &MaxContainerFailures,
}

// Create a listener for events.
events := make([](*api.Event), 0)
_, controller := controllerframework.NewInformer(
	&cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return c.Events(ns).List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
		},
		WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
			return c.Events(ns).Watch(options)
		},
	},
	&api.Event{},
	0,
	controllerframework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			events = append(events, obj.(*api.Event))
		},
	},
)
stop := make(chan struct{})
go controller.Run(stop)
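Once the measurement is finished, the listener is shut down by closing the stop channel, after which the collected events can be inspected (a sketch; note that Example 13 below is the same listener with a mutex added, since AddFunc runs on the informer's goroutine while the test may read events concurrently):

// Sketch of tearing down the listener started above and summarizing results.
close(stop)
Logf("Observed %d events in namespace %s", len(events), ns)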
Example 13:
	Name:                 RCName,
	Namespace:            ns,
	PollInterval:         itArg.interval,
	PodStatusFile:        fileHndl,
	Replicas:             totalPods,
	MaxContainerFailures: &MaxContainerFailures,
}

// Create a listener for events.
// eLock protects the events slice, which the informer goroutine appends to.
var eLock sync.Mutex
events := make([](*api.Event), 0)
_, controller := controllerframework.NewInformer(
	&cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return c.Events(ns).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return c.Events(ns).Watch(options)
		},
	},
	&api.Event{},
	0,
	controllerframework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			eLock.Lock()
			defer eLock.Unlock()
			events = append(events, obj.(*api.Event))
		},
	},
)
Example 14: NewLoadBalancerController
// NewLoadBalancerController creates a controller for gce loadbalancers.
// - kubeClient: A kubernetes REST client.
// - clusterManager: A ClusterManager capable of creating all cloud resources
//   required for L7 loadbalancing.
// - resyncPeriod: Watchers relist from the Kubernetes API server this often.
func NewLoadBalancerController(kubeClient *client.Client, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	lbc := LoadBalancerController{
		client:              kubeClient,
		CloudClusterManager: clusterManager,
		stopCh:              make(chan struct{}),
		recorder: eventBroadcaster.NewRecorder(
			api.EventSource{Component: "loadbalancer-controller"}),
	}
	lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
	lbc.ingQueue = NewTaskQueue(lbc.sync)
	lbc.hasSynced = lbc.storesSynced

	// Ingress watch handlers
	pathHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			if !isGCEIngress(addIng) {
				glog.Infof("Ignoring add for ingress %v based on annotation %v", addIng.Name, ingressClassKey)
				return
			}
			lbc.recorder.Eventf(addIng, api.EventTypeNormal, "ADD", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name))
			lbc.ingQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			delIng := obj.(*extensions.Ingress)
			if !isGCEIngress(delIng) {
				glog.Infof("Ignoring delete for ingress %v based on annotation %v", delIng.Name, ingressClassKey)
				return
			}
			glog.Infof("Delete notification received for Ingress %v/%v", delIng.Namespace, delIng.Name)
			lbc.ingQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			curIng := cur.(*extensions.Ingress)
			if !isGCEIngress(curIng) {
				return
			}
			if !reflect.DeepEqual(old, cur) {
				glog.V(3).Infof("Ingress %v changed, syncing", curIng.Name)
			}
			lbc.ingQueue.enqueue(cur)
		},
	}
	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, pathHandlers)

	// Service watch handlers
	svcHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: lbc.enqueueIngressForService,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.enqueueIngressForService(cur)
			}
		},
		// Ingress deletes matter, service deletes don't.
	}
	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, svcHandlers)

	lbc.podLister.Indexer, lbc.podController = framework.NewIndexerInformer(
		cache.NewListWatchFromClient(lbc.client, "pods", namespace, fields.Everything()),
		&api.Pod{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	nodeHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    lbc.nodeQueue.enqueue,
		DeleteFunc: lbc.nodeQueue.enqueue,
		// Nodes are updated every 10s and we don't care, so no update handler.
	}

	// Node watch handlers
	lbc.nodeLister.Store, lbc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
				return lbc.client.Get().
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Do().
					Get()
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return lbc.client.Get().
// ... remainder of this example omitted ...
Example 15: runLatencyTest
func runLatencyTest(nodeCount int, c *client.Client, ns string) {
	var (
		nodes              = make(map[string]string, 0)           // pod name -> node name
		createTimestamps   = make(map[string]unversioned.Time, 0) // pod name -> create time
		scheduleTimestamps = make(map[string]unversioned.Time, 0) // pod name -> schedule time
		startTimestamps    = make(map[string]unversioned.Time, 0) // pod name -> time to run
		watchTimestamps    = make(map[string]unversioned.Time, 0) // pod name -> time to read from informer

		additionalPodsPrefix = "latency-pod-" + string(util.NewUUID())
	)
	var mutex sync.Mutex
	readPodInfo := func(p *api.Pod) {
		mutex.Lock()
		defer mutex.Unlock()
		defer GinkgoRecover()

		if p.Status.Phase == api.PodRunning {
			if _, found := watchTimestamps[p.Name]; !found {
				watchTimestamps[p.Name] = unversioned.Now()
				createTimestamps[p.Name] = p.CreationTimestamp
				nodes[p.Name] = p.Spec.NodeName
				var startTimestamp unversioned.Time
				for _, cs := range p.Status.ContainerStatuses {
					if cs.State.Running != nil {
						if startTimestamp.Before(cs.State.Running.StartedAt) {
							startTimestamp = cs.State.Running.StartedAt
						}
					}
				}
				if startTimestamp != unversioned.NewTime(time.Time{}) {
					startTimestamps[p.Name] = startTimestamp
				} else {
					Failf("Pod %v is reported to be running, but none of its containers are", p.Name)
				}
			}
		}
	}

	// Create an informer to read timestamps for each pod.
	stopCh := make(chan struct{})
	_, informer := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
				return c.Pods(ns).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
				return c.Pods(ns).Watch(options)
			},
		},
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				p, ok := obj.(*api.Pod)
				Expect(ok).To(Equal(true))
				go readPodInfo(p)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				p, ok := newObj.(*api.Pod)
				Expect(ok).To(Equal(true))
				go readPodInfo(p)
			},
		},
	)
	go informer.Run(stopCh)

	// Create additional pods with throughput ~5 pods/sec.
	var wg sync.WaitGroup
	wg.Add(nodeCount)
	podLabels := map[string]string{
		"name": additionalPodsPrefix,
	}
	for i := 1; i <= nodeCount; i++ {
		name := additionalPodsPrefix + "-" + strconv.Itoa(i)
		go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels)
		time.Sleep(200 * time.Millisecond)
	}
	wg.Wait()

	Logf("Waiting for all Pods to be observed by the watch...")
	for start := time.Now(); len(watchTimestamps) < nodeCount; time.Sleep(10 * time.Second) {
		if time.Since(start) > timeout {
			Failf("Timeout reached waiting for all Pods to be observed by the watch.")
		}
	}
	close(stopCh)

	// Read the schedule timestamp by checking the scheduler event for each pod.
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.namespace": ns,
		"source":                   "scheduler",
	}.AsSelector()
	options := api.ListOptions{FieldSelector: selector}
	schedEvents, err := c.Events(ns).List(options)
	expectNoError(err)
	for k := range createTimestamps {
// ... remainder of this example omitted ...