This article collects typical usage examples of the Events function from the Go package k8s.io/kubernetes/pkg/client/unversioned. If you are wondering what the Events function does, how to call it, or what it looks like in real code, the curated examples here should help.
Five code examples of the Events function are shown below, sorted by popularity by default. A short, hedged usage sketch comes first; the complete examples follow.
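Before the full examples, here is a minimal, self-contained sketch of the two ways Events appears below: calling Events(namespace).Create directly, and handing Events("") to an event broadcaster as its sink. This sketch is not taken from any of the projects below; the apiserver address, the event and object names, and the "example-component" source are placeholders, and it assumes that client.New fills in default API-version settings for a config that only sets Host (if it does not in your tree, set ContentConfig explicitly, as the test examples below do).

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
	// Build an unversioned client; Host is a placeholder for a reachable apiserver.
	// client.New applies Kubernetes defaults (API path, group version) to the config.
	c, err := client.New(&restclient.Config{Host: "http://127.0.0.1:8080"})
	if err != nil {
		fmt.Printf("could not create client: %v\n", err)
		return
	}

	// Events(namespace) returns an event interface scoped to that namespace;
	// here we create one Event directly, as Example 2 does.
	ev, err := c.Events(api.NamespaceDefault).Create(&api.Event{
		ObjectMeta:     api.ObjectMeta{Name: "example-event", Namespace: api.NamespaceDefault},
		InvolvedObject: api.ObjectReference{Name: "example-pod", Namespace: api.NamespaceDefault},
		Reason:         "Example",
	})
	if err != nil {
		fmt.Printf("could not create event: %v\n", err)
		return
	}
	fmt.Printf("created event %q at resourceVersion %s\n", ev.Name, ev.ResourceVersion)

	// Events("") is also commonly used as the sink of an event broadcaster,
	// as the scheduler and kube-proxy examples below do. The recorder would
	// normally be handed to the component that emits events.
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "example-component"})
	broadcaster.StartRecordingToSink(c.Events(""))
	_ = recorder
}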
Example 1: TestMultiWatch
func TestMultiWatch(t *testing.T) {
	// Disable this test as long as it demonstrates a problem.
	// TODO: Reenable this test when we get #6059 resolved.
	return
	const watcherCount = 50
	rt.GOMAXPROCS(watcherCount)
	framework.DeleteAllEtcdKeys()
	defer framework.DeleteAllEtcdKeys()
	_, s := framework.RunAMaster(t)
	defer s.Close()
	ns := api.NamespaceDefault
	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	dummyEvent := func(i int) *api.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns,
			},
			InvolvedObject: api.ObjectReference{
				Name:      name,
				Namespace: ns,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}
	type timePair struct {
		t    time.Time
		name string
	}
	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}
	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "pause",
					Image: e2e.GetPauseImageName(client),
				}},
			},
		})
		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}
		go func(name, rv string) {
			options := api.ListOptions{
				LabelSelector:   labels.Set{"watchlabel": name}.AsSelector(),
				ResourceVersion: rv,
			}
			w, err := client.Pods(ns).Watch(options)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)
	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()
	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)
	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
//......... (remainder of this example omitted) .........
Example 2: TestSingleWatch
func TestSingleWatch(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()
	ns := "blargh"
	deleteAllEtcdKeys()
	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	mkEvent := func(i int) *api.Event {
		name := fmt.Sprintf("event-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Namespace: ns,
				Name:      name,
			},
			InvolvedObject: api.ObjectReference{
				Namespace: ns,
				Name:      name,
			},
			Reason: fmt.Sprintf("event %v", i),
		}
	}
	rv1 := ""
	for i := 0; i < 10; i++ {
		event := mkEvent(i)
		got, err := client.Events(ns).Create(event)
		if err != nil {
			t.Fatalf("Failed creating event %#q: %v", event, err)
		}
		if rv1 == "" {
			rv1 = got.ResourceVersion
			if rv1 == "" {
				t.Fatal("did not get a resource version.")
			}
		}
		t.Logf("Created event %#v", got.ObjectMeta)
	}
	w, err := client.Get().
		Prefix("watch").
		NamespaceIfScoped(ns, len(ns) > 0).
		Resource("events").
		Name("event-9").
		Param("resourceVersion", rv1).
		Watch()
	if err != nil {
		t.Fatalf("Failed watch: %v", err)
	}
	defer w.Stop()
	select {
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("watch took longer than %s", wait.ForeverTestTimeout.String())
	case got, ok := <-w.ResultChan():
		if !ok {
			t.Fatal("Watch channel closed unexpectedly.")
		}
		// We expect to see an ADD of event-9 and only event-9. (This
		// catches a bug where all the events would have been sent down
		// the channel.)
		if e, a := watch.Added, got.Type; e != a {
			t.Errorf("Wanted %v, got %v", e, a)
		}
		switch o := got.Object.(type) {
		case *api.Event:
			if e, a := "event-9", o.Name; e != a {
				t.Errorf("Wanted %v, got %v", e, a)
			}
		default:
			t.Fatalf("Unexpected watch event containing object %#q", got)
		}
	}
}
Example 3: bootstrap
//......... (beginning of this example omitted) .........
	// downgrade allocation strategy if user disables "account-for-pod-resources"
	if !s.accountForPodResources {
		as = podschedulers.NewAllocationStrategy(
			podtask.DefaultMinimalPredicate,
			podtask.DefaultMinimalProcurement)
	}
	// mirror all nodes into the nodeStore
	nodesClient, err := s.createAPIServerClient()
	if err != nil {
		log.Fatalf("Cannot create client to watch nodes: %v", err)
	}
	nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	nodeLW := cache.NewListWatchFromClient(nodesClient, "nodes", api.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &api.Node{}, nodeStore, s.nodeRelistPeriod).Run()
	lookupNode := func(hostName string) *api.Node {
		n, _, _ := nodeStore.GetByKey(hostName) // ignore error and return nil then
		if n == nil {
			return nil
		}
		return n.(*api.Node)
	}
	fcfs := podschedulers.NewFCFSPodScheduler(as, lookupNode)
	framework := framework.New(framework.Config{
		SchedulerConfig:   *sc,
		Executor:          executor,
		Client:            client,
		FailoverTimeout:   s.failoverTimeout,
		ReconcileInterval: s.reconcileInterval,
		ReconcileCooldown: s.reconcileCooldown,
		LookupNode:        lookupNode,
		StoreFrameworkId: func(id string) {
			// TODO(jdef): port FrameworkId store to generic Kubernetes config store as soon as available
			_, err := etcdClient.Set(meta.FrameworkIDKey, id, uint64(s.failoverTimeout))
			if err != nil {
				log.Errorf("failed to renew frameworkId TTL: %v", err)
			}
		},
	})
	masterUri := s.mesosMaster
	info, cred, err := s.buildFrameworkInfo()
	if err != nil {
		log.Fatalf("Misconfigured mesos framework: %v", err)
	}
	schedulerProcess := ha.New(framework)
	dconfig := &bindings.DriverConfig{
		Scheduler:        schedulerProcess,
		Framework:        info,
		Master:           masterUri,
		Credential:       cred,
		BindingAddress:   s.address,
		BindingPort:      uint16(s.driverPort),
		HostnameOverride: s.hostnameOverride,
		WithAuthContext: func(ctx context.Context) context.Context {
			ctx = auth.WithLoginProvider(ctx, s.mesosAuthProvider)
			ctx = sasl.WithBindingAddress(ctx, s.address)
			return ctx
		},
	}
	// create event recorder sending events to the "" namespace of the apiserver
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	broadcaster.StartRecordingToSink(client.Events(""))
	// create scheduler core with all components arranged around it
	lw := cache.NewListWatchFromClient(client, "pods", api.NamespaceAll, fields.Everything())
	sched := components.New(sc, framework, fcfs, client, recorder, schedulerProcess.Terminal(), s.mux, lw)
	runtime.On(framework.Registration(), func() { sched.Run(schedulerProcess.Terminal()) })
	runtime.On(framework.Registration(), s.newServiceWriter(schedulerProcess.Terminal()))
	driverFactory := ha.DriverFactory(func() (drv bindings.SchedulerDriver, err error) {
		log.V(1).Infoln("performing deferred initialization")
		if err = framework.Init(sched, schedulerProcess.Master(), s.mux); err != nil {
			return nil, fmt.Errorf("failed to initialize pod scheduler: %v", err)
		}
		log.V(1).Infoln("deferred init complete")
		// defer obtaining framework ID to prevent multiple schedulers
		// from overwriting each other's framework IDs
		dconfig.Framework.Id, err = s.fetchFrameworkID(etcdClient)
		if err != nil {
			return nil, fmt.Errorf("failed to fetch framework ID from etcd: %v", err)
		}
		log.V(1).Infoln("constructing mesos scheduler driver")
		drv, err = bindings.NewMesosSchedulerDriver(*dconfig)
		if err != nil {
			return nil, fmt.Errorf("failed to construct scheduler driver: %v", err)
		}
		log.V(1).Infoln("constructed mesos scheduler driver:", drv)
		s.setDriver(drv)
		return drv, nil
	})
	return schedulerProcess, driverFactory, etcdClient, eid
}
Example 4: Run
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run(_ []string) error {
	protocol := utiliptables.ProtocolIpv4
	if s.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}
	// remove iptables rules and exit
	if s.CleanupAndExit {
		execer := exec.New()
		ipt := utiliptables.New(execer, protocol)
		encounteredError := userspace.CleanupLeftovers(ipt)
		encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
		if encounteredError {
			return errors.New("Encountered an error while tearing down rules.")
		}
		return nil
	}
	// TODO(vmarmol): Use container config for this.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.V(2).Info(err)
	}
	// Run in its own container.
	if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
		glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
	} else {
		glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
	}
	// define api config source
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	client, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	// Add event recorder
	Hostname := nodeutil.GetHostname(s.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	s.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: Hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))
	s.nodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      Hostname,
		UID:       types.UID(Hostname),
		Namespace: "",
	}
	// Birth Cry
	s.birthCry()
	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()
	var proxier proxy.ProxyProvider
	var endpointsHandler config.EndpointsConfigHandler
	// guaranteed false on error, error only necessary for debugging
	shouldUseIptables, err := iptables.ShouldUseIptablesProxier()
	if err != nil {
		glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err)
	}
	if !s.ForceUserspaceProxy && shouldUseIptables {
		glog.V(2).Info("Using iptables Proxier.")
		execer := exec.New()
		ipt := utiliptables.New(execer, protocol)
		proxierIptables, err := iptables.NewProxier(ipt, execer, s.SyncPeriod, s.MasqueradeAll)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
		userspace.CleanupLeftovers(ipt)
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer
//......... (remainder of this example omitted) .........
Example 5: bootstrap
//......... (beginning of this example omitted) .........
	}
	framework := framework.New(framework.Config{
		SchedulerConfig:   *sc,
		Client:            client,
		FailoverTimeout:   s.failoverTimeout,
		ReconcileInterval: s.reconcileInterval,
		ReconcileCooldown: s.reconcileCooldown,
		LookupNode:        lookupNode,
		StoreFrameworkId:  frameworkIDStorage.Set,
		ExecutorId:        eiPrototype.GetExecutorId(),
	})
	masterUri := s.mesosMaster
	info, cred, err := s.buildFrameworkInfo()
	if err != nil {
		log.Fatalf("Misconfigured mesos framework: %v", err)
	}
	schedulerProcess := ha.New(framework)
	dconfig := &bindings.DriverConfig{
		Scheduler:        schedulerProcess,
		Framework:        info,
		Master:           masterUri,
		Credential:       cred,
		BindingAddress:   s.address,
		BindingPort:      uint16(s.driverPort),
		HostnameOverride: s.hostnameOverride,
		WithAuthContext: func(ctx context.Context) context.Context {
			ctx = auth.WithLoginProvider(ctx, s.mesosAuthProvider)
			ctx = sasl.WithBindingAddress(ctx, s.address)
			return ctx
		},
	}
	// create event recorder sending events to the "" namespace of the apiserver
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
	broadcaster.StartLogging(log.Infof)
	broadcaster.StartRecordingToSink(client.Events(""))
	// create scheduler core with all components arranged around it
	lw := cache.NewListWatchFromClient(client.LegacyClient, "pods", api.NamespaceAll, fields.Everything())
	sched := components.New(
		sc,
		framework,
		fcfs,
		client,
		recorder,
		schedulerProcess.Terminal(),
		s.mux,
		lw,
		eiPrototype,
		s.frameworkRoles,
		s.defaultPodRoles,
		s.defaultContainerCPULimit,
		s.defaultContainerMemLimit,
	)
	runtime.On(framework.Registration(), func() { sched.Run(schedulerProcess.Terminal()) })
	runtime.On(framework.Registration(), s.newServiceWriter(schedulerProcess.Terminal()))
	runtime.On(framework.Registration(), func() { nodeCtl.Run(schedulerProcess.Terminal()) })
	driverFactory := ha.DriverFactory(func() (drv bindings.SchedulerDriver, err error) {
		log.V(1).Infoln("performing deferred initialization")
		if err = framework.Init(sched, schedulerProcess.Master(), s.mux); err != nil {
			return nil, fmt.Errorf("failed to initialize pod scheduler: %v", err)
		}
		log.V(1).Infoln("deferred init complete")
		if s.failoverTimeout > 0 {
			// defer obtaining framework ID to prevent multiple schedulers
			// from overwriting each other's framework IDs
			var frameworkID string
			frameworkID, err = frameworkIDStorage.Get(context.TODO())
			if err != nil {
				return nil, fmt.Errorf("failed to fetch framework ID from storage: %v", err)
			}
			if frameworkID != "" {
				log.Infof("configuring FrameworkInfo with ID found in storage: %q", frameworkID)
				dconfig.Framework.Id = &mesos.FrameworkID{Value: &frameworkID}
			} else {
				log.V(1).Infof("did not find framework ID in storage")
			}
		} else {
			// TODO(jdef) this is a hack, really for development, to simplify clean up of old framework IDs
			frameworkIDStorage.Remove(context.TODO())
		}
		log.V(1).Infoln("constructing mesos scheduler driver")
		drv, err = bindings.NewMesosSchedulerDriver(*dconfig)
		if err != nil {
			return nil, fmt.Errorf("failed to construct scheduler driver: %v", err)
		}
		log.V(1).Infoln("constructed mesos scheduler driver:", drv)
		s.setDriver(drv)
		return drv, nil
	})
	return schedulerProcess, driverFactory, etcdClient, eiPrototype.GetExecutorId()
}