This page collects typical usage examples of the Interface.Events method from the Go package k8s.io/kubernetes/pkg/client. If you have been wondering what Interface.Events does, how to call it, or what it looks like in real code, the curated examples below may help. They also illustrate the broader usage of the containing type, k8s.io/kubernetes/pkg/client.Interface.
The following presents 6 code examples of the Interface.Events method, sorted by popularity.
Example 1: deleteEvents
func deleteEvents(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.Events(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Events(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
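A minimal usage sketch (not part of the original example): it assumes a client.Interface has already been constructed elsewhere and simply reuses deleteEvents for each namespace in a hypothetical list.

// cleanupEvents is a hypothetical helper that clears the events in several
// namespaces by calling the deleteEvents function shown above.
func cleanupEvents(kubeClient client.Interface, namespaces []string) error {
	for _, ns := range namespaces {
		if err := deleteEvents(kubeClient, ns); err != nil {
			return err
		}
	}
	return nil
}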
Example 2: New
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})
	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
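A rough sketch of how this constructor might be wired up; the cloud provider and client values below are placeholders, not part of the original example.

// Hypothetical wiring: cloudProvider and kubeClient are assumed to have been
// created elsewhere (e.g. from cloud provider config and a client config).
serviceController := New(cloudProvider, kubeClient, "my-cluster")
_ = serviceController // in practice the controller manager starts its sync loop next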
Example 3: NewNodeController
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	podEvictor *PodEvictor,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	if allocateNodeCIDRs && clusterCIDR == nil {
		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
	}
	return &NodeController{
		cloud:                  cloud,
		kubeClient:             kubeClient,
		recorder:               recorder,
		registerRetryCount:     registerRetryCount,
		podEvictionTimeout:     podEvictionTimeout,
		podEvictor:             podEvictor,
		nodeStatusMap:          make(map[string]nodeStatusData),
		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
		nodeMonitorPeriod:      nodeMonitorPeriod,
		nodeStartupGracePeriod: nodeStartupGracePeriod,
		lookupIP:               net.LookupIP,
		now:                    util.Now,
		clusterCIDR:            clusterCIDR,
		allocateNodeCIDRs:      allocateNodeCIDRs,
	}
}
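A sketch of a call site using the parameter order from the signature above; every value here is an illustrative placeholder rather than a recommended setting.

// All arguments below are placeholder values chosen only to illustrate the
// parameter order of NewNodeController shown above; cloudProvider, kubeClient
// and podEvictor are assumed to have been created elsewhere.
_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
nodeController := NewNodeController(
	cloudProvider,  // cloudprovider.Interface
	kubeClient,     // client.Interface
	10,             // registerRetryCount
	5*time.Minute,  // podEvictionTimeout
	podEvictor,     // *PodEvictor
	40*time.Second, // nodeMonitorGracePeriod
	60*time.Second, // nodeStartupGracePeriod
	5*time.Second,  // nodeMonitorPeriod
	clusterCIDR,
	true, // allocateNodeCIDRs
)
_ = nodeController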
Example 4: NewDeploymentConfigDescriber
// NewDeploymentConfigDescriber returns a new DeploymentConfigDescriber
func NewDeploymentConfigDescriber(client client.Interface, kclient kclient.Interface) *DeploymentConfigDescriber {
	return &DeploymentConfigDescriber{
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return client.DeploymentConfigs(namespace).Get(name)
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return kclient.ReplicationControllers(namespace).Get(name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return kclient.ReplicationControllers(namespace).List(selector)
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return kclient.Pods(namespace).List(selector, fields.Everything())
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(deploymentConfig.Namespace).Search(deploymentConfig)
			},
		},
	}
}
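A brief construction sketch; osClient and kubeClient are assumed, pre-built OpenShift and Kubernetes clients, and only the constructor call shown on this page is illustrated.

// osClient (client.Interface) and kubeClient (kclient.Interface) are assumed
// to have been created elsewhere.
describer := NewDeploymentConfigDescriber(osClient, kubeClient)
_ = describer // typically handed to the CLI's describe machinery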
Example 5: NewDeploymentConfigDescriberForConfig
// NewDeploymentConfigDescriberForConfig returns a new DeploymentConfigDescriber
// for a DeploymentConfig
func NewDeploymentConfigDescriberForConfig(client client.Interface, kclient kclient.Interface, config *deployapi.DeploymentConfig) *DeploymentConfigDescriber {
	return &DeploymentConfigDescriber{
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return config, nil
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return nil, kerrors.NewNotFound("ReplicationController", name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return nil, kerrors.NewNotFound("ReplicationControllerList", fmt.Sprintf("%v", selector))
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return nil, kerrors.NewNotFound("PodList", fmt.Sprintf("%v", selector))
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(config.Namespace).Search(config)
			},
		},
	}
}
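Both describer constructors ultimately resolve events through Events(namespace).Search, as in this standalone sketch (config and kclient are assumed to exist, as in the example above).

// Search returns the events whose involved object matches the given object,
// here a DeploymentConfig; events is a *kapi.EventList.
events, err := kclient.Events(config.Namespace).Search(config)
if err == nil {
	for _, e := range events.Items {
		fmt.Printf("%s: %s\n", e.Reason, e.Message)
	}
}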
Example 6: NewReplicationManager
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}

	rm.rcStore.Store, rm.rcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ReplicationController{},
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				// We only really need to do this when spec changes, but for correctness it is safer to
				// periodically double check. It is overkill for 2 reasons:
				// 1. Status.Replica updates will cause a sync
				// 2. Every 30s we will get a full resync (this will happen anyway every 5 minutes when pods relist)
				// However, it shouldn't be that bad as rcs that haven't met expectations won't sync, and all
				// the listing is done using local stores.
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
	)

	rm.podStore.Store, rm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.addPod,
			// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
			// the most frequent pod update is status, and the associated rc will only list from local storage, so
			// it should be ok.
			UpdateFunc: rm.updatePod,
			DeleteFunc: rm.deletePod,
		},
	)

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	return rm
}
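A minimal construction sketch, assuming kubeClient already exists; the burst value is arbitrary and not taken from the original example.

// 500 is an arbitrary burst-replica limit chosen for this sketch; the real
// controller manager wires in its own configured value.
rm := NewReplicationManager(kubeClient, 500)
_ = rm // normally the manager's worker loop would be started next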