This article collects typical usage examples of the Golang NewBroadcaster function from the package github.com/GoogleCloudPlatform/kubernetes/pkg/client/record. If you are wondering what NewBroadcaster does, how to use it, or what it looks like in real code, the curated examples below should help.
The sections below show 15 code examples of the NewBroadcaster function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
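Before the individual examples, here is a minimal sketch of the pattern they all share: create a broadcaster, optionally start logging and recording to the API server, then derive a recorder that a component uses to emit events. It only rearranges calls that appear in the examples below; the wrapper function setupEventRecorder, the "demo" component name, and the Eventf call in the trailing comment are illustrative assumptions, not taken from any example.

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
	"github.com/golang/glog"
)

// setupEventRecorder wires a broadcaster to the log and to the API server and
// returns a recorder that a component can use to emit events.
func setupEventRecorder(kubeClient client.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	// Mirror every event to the glog output.
	eventBroadcaster.StartLogging(glog.Infof)
	// Also persist events through the API server's events endpoint.
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	// Each event is stamped with this source; "demo" is an illustrative component name.
	return eventBroadcaster.NewRecorder(api.EventSource{Component: "demo"})
}

// A caller would then emit events through the recorder, for example:
//   recorder.Eventf(pod, "started", "Started container %s", containerName)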
Example 1: createPodConfigTester
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) {
	eventBroadcaster := record.NewBroadcaster()
	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
	channel := config.Channel(TestSource)
	ch := config.Updates()
	return channel, ch, config
}
Example 2: Create
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildController := &buildcontroller.BuildController{
		BuildUpdater:      factory.BuildUpdater,
		ImageStreamClient: client,
		PodManager:        client,
		BuildStrategy: &typeBasedFactoryStrategy{
			DockerBuildStrategy: factory.DockerBuildStrategy,
			SourceBuildStrategy: factory.SourceBuildStrategy,
			CustomBuildStrategy: factory.CustomBuildStrategy,
		},
		Recorder:         eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
		OpenshiftEnabled: factory.OpenshiftEnabled,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			build := obj.(*buildapi.Build)
			return buildController.HandleBuild(build)
		},
	}
}
Example 3: Run
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	config.Cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
Example 4: RunKubelet
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
//   1 Integration tests
//   2 Kubelet binary
//   3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)

	if len(kcfg.NodeName) == 0 {
		// Query the cloud provider for our node name, default to Hostname
		nodeName := kcfg.Hostname
		if kcfg.Cloud != nil {
			var err error
			instances, ok := kcfg.Cloud.Instances()
			if !ok {
				return fmt.Errorf("failed to get instances from cloud provider")
			}
			nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
			if err != nil {
				return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
			}
			glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
		}
		kcfg.NodeName = nodeName
	}

	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.NodeName})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}

	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)
	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}

	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
Example 5: Create
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
Example 6: NewPluginConfig
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux,
	podsWatcher *cache.ListWatch) *PluginConfig {

	// Watch and queue pods that need scheduling.
	updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
	reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

	// lock that guards critical sections that involve transferring pods from
	// the store (cache) to the scheduling queue; its purpose is to maintain
	// an ordering (vs interleaving) of operations that's easier to reason about.
	kapi := &k8smScheduler{internal: k}
	q := newQueuer(podUpdates)
	podDeleter := &deleter{
		api: kapi,
		qr:  q,
	}
	eh := &errorHandler{
		api:     kapi,
		backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
		qr:      q,
	}
	startLatch := make(chan struct{})
	eventBroadcaster := record.NewBroadcaster()
	runtime.On(startLatch, func() {
		eventBroadcaster.StartRecordingToSink(k.client.Events(""))
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(updates, terminate)
		q.Run(terminate)
		q.installDebugHandlers(mux)
		podtask.InstallDebugHandlers(k.taskRegistry, mux)
	})
	return &PluginConfig{
		Config: &plugin.Config{
			MinionLister: nil,
			Algorithm: &kubeScheduler{
				api:                      kapi,
				podUpdates:               podUpdates,
				defaultContainerCPULimit: k.defaultContainerCPULimit,
				defaultContainerMemLimit: k.defaultContainerMemLimit,
			},
			Binder:   &binder{api: kapi},
			NextPod:  q.yield,
			Error:    eh.handleSchedulingError,
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		},
		api:      kapi,
		client:   k.client,
		qr:       q,
		deleter:  podDeleter,
		starting: startLatch,
	}
}
Example 7: RunScheduler
// RunScheduler starts the Kubernetes scheduler
func (c *MasterConfig) RunScheduler() {
	config, err := c.createSchedulerConfig()
	if err != nil {
		glog.Fatalf("Unable to start scheduler: %v", err)
	}
	eventcast := record.NewBroadcaster()
	config.Recorder = eventcast.NewRecorder(kapi.EventSource{Component: "scheduler"})
	eventcast.StartRecordingToSink(c.KubeClient.Events(""))

	s := scheduler.New(config)
	s.Run()

	glog.Infof("Started Kubernetes Scheduler")
}
Example 8: New
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
	}
}
Example 9: NewReplicationManager
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: RealPodControl{
			kubeClient: kubeClient,
			recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
	}
	rm.syncHandler = rm.syncReplicationController
	return rm
}
Example 10: TestUnschedulableNodes
func TestUnschedulableNodes(t *testing.T) {
	helper, err := framework.NewHelper()
	if err != nil {
		t.Fatalf("Couldn't create etcd helper: %v", err)
	}
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:            helper,
		KubeletClient:         client.FakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		// Enable v1beta3 if we are testing that version.
		EnableV1Beta3:    testapi.Version() == "v1beta3",
		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admit.NewAlwaysAdmit(),
	})

	restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	schedulerConfigFactory := factory.NewConfigFactory(restClient)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()
	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
Example 11: NewNodeController
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	podEvictor *PodEvictor,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}

	if allocateNodeCIDRs && clusterCIDR == nil {
		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
	}

	return &NodeController{
		cloud:                  cloud,
		kubeClient:             kubeClient,
		recorder:               recorder,
		registerRetryCount:     registerRetryCount,
		podEvictionTimeout:     podEvictionTimeout,
		podEvictor:             podEvictor,
		nodeStatusMap:          make(map[string]nodeStatusData),
		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
		nodeMonitorPeriod:      nodeMonitorPeriod,
		nodeStartupGracePeriod: nodeStartupGracePeriod,
		lookupIP:               net.LookupIP,
		now:                    util.Now,
		clusterCIDR:            clusterCIDR,
		allocateNodeCIDRs:      allocateNodeCIDRs,
	}
}
Example 12: NewNodeController
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	matchRE string,
	nodes []string,
	staticResources *api.NodeResources,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	deletingPodsRateLimiter util.RateLimiter,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterName string) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}

	return &NodeController{
		cloud:                   cloud,
		matchRE:                 matchRE,
		nodes:                   nodes,
		staticResources:         staticResources,
		kubeClient:              kubeClient,
		recorder:                recorder,
		registerRetryCount:      registerRetryCount,
		podEvictionTimeout:      podEvictionTimeout,
		deletingPodsRateLimiter: deletingPodsRateLimiter,
		nodeStatusMap:           make(map[string]nodeStatusData),
		nodeMonitorGracePeriod:  nodeMonitorGracePeriod,
		nodeMonitorPeriod:       nodeMonitorPeriod,
		nodeStartupGracePeriod:  nodeStartupGracePeriod,
		lookupIP:                net.LookupIP,
		now:                     util.Now,
		clusterName:             clusterName,
	}
}
Example 13: RunKubelet
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
//   1 Integration tests
//   2 Kubelet binary
//   3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	kcfg.Hostname = util.GetHostname(kcfg.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.Hostname})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}

	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)
	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitKubelet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = kubecontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}

	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, kcfg)
		glog.Infof("Started kubelet")
	}
	return nil
}
Example 14: startComponents
func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (string, string) {
	// Setup
	servers := []string{}
	glog.Infof("Creating etcd client pointing to %v", servers)

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * sleep
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion})

	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix())
	if err != nil {
		glog.Fatalf("Unable to get etcd storage: %v", err)
	}
	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix())
	if err != nil {
		glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
	}

	// Master
	host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}
	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("no public address for %s", host)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		DatabaseStorage:       etcdStorage,
		ExpDatabaseStorage:    expEtcdStorage,
		KubeletClient:         fakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableProfiling:       true,
		APIPrefix:             "/api",
		ExpAPIPrefix:          "/experimental",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		ReadWritePort:         portNumber,
		PublicAddress:         publicAddress,
		CacheTimeout:          2 * time.Second,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()

	endpoints := endpointcontroller.NewEndpointController(cl)
	// ensure the service endpoints are sync'd several times within the window that the integration tests wait
	go endpoints.Run(3, util.NeverStop)

	controllerManager := replicationControllerPkg.NewReplicationManager(cl, replicationControllerPkg.BurstReplicas)
	// TODO: Write an integration test for the replication controllers watch.
	go controllerManager.Run(3, util.NeverStop)

	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, nodecontroller.NewPodEvictor(util.NewFakeRateLimiter()),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	//......... the rest of the code is omitted here .........
Example 15: TestScheduler
func TestScheduler(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()
	errS := errors.New("scheduler")
	errB := errors.New("binder")

	table := []struct {
		injectBindError  error
		sendPod          *api.Pod
		algo             scheduler.Scheduler
		expectErrorPod   *api.Pod
		expectAssumedPod *api.Pod
		expectError      error
		expectBind       *api.Binding
		eventReason      string
	}{
		{
			sendPod:          podWithID("foo", ""),
			algo:             mockScheduler{"machine1", nil},
			expectBind:       &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			expectAssumedPod: podWithID("foo", "machine1"),
			eventReason:      "scheduled",
		}, {
			sendPod:        podWithID("foo", ""),
			algo:           mockScheduler{"machine1", errS},
			expectError:    errS,
			expectErrorPod: podWithID("foo", ""),
			eventReason:    "failedScheduling",
		}, {
			sendPod:         podWithID("foo", ""),
			algo:            mockScheduler{"machine1", nil},
			expectBind:      &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			injectBindError: errB,
			expectError:     errB,
			expectErrorPod:  podWithID("foo", ""),
			eventReason:     "failedScheduling",
		},
	}

	for i, item := range table {
		var gotError error
		var gotPod *api.Pod
		var gotAssumedPod *api.Pod
		var gotBinding *api.Binding
		c := &Config{
			Modeler: &FakeModeler{
				AssumePodFunc: func(pod *api.Pod) {
					gotAssumedPod = pod
				},
			},
			MinionLister: scheduler.FakeMinionLister(
				api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
			),
			Algorithm: item.algo,
			Binder: fakeBinder{func(b *api.Binding) error {
				gotBinding = b
				return item.injectBindError
			}},
			Error: func(p *api.Pod, err error) {
				gotPod = p
				gotError = err
			},
			NextPod: func() *api.Pod {
				return item.sendPod
			},
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		}
		s := New(c)
		called := make(chan struct{})
		events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
			if e, a := item.eventReason, e.Reason; e != a {
				t.Errorf("%v: expected %v, got %v", i, e, a)
			}
			close(called)
		})
		s.scheduleOne()
		if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: %s", i, util.ObjectDiff(e, a))
		}
		<-called
		events.Stop()
	}
}