本文整理汇总了Golang中k8s/io/kubernetes/pkg/client/unversioned.NewInCluster函数的典型用法代码示例。如果您正苦于以下问题:Golang NewInCluster函数的具体用法?Golang NewInCluster怎么用?Golang NewInCluster使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewInCluster函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: main
// main boots the load-balancer controller: it builds an in-cluster
// Kubernetes client, resolves the default and custom-error backend
// services, then runs the controller until the pod is deleted.
func main() {
	flags.Parse(os.Args)

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend")
	}

	kc, err := unversioned.NewInCluster()
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	// Load-balancer details; the error is deliberately discarded here.
	lbInfo, _ := getLBDetails(kc)
	defaultBackend := getService(kc, *defaultSvc)
	errorBackend := getService(kc, *customErrorSvc)

	// Start loadbalancer controller
	controller, err := NewLoadBalancerController(kc, *resyncPeriod, defaultBackend, errorBackend, *watchNamespace, lbInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	controller.Run()

	// Run normally never returns; if it does, idle until the pod is torn down.
	for {
		glog.Infof("Handled quit, awaiting pod deletion.")
		time.Sleep(30 * time.Second)
	}
}
示例2: main
// main starts the haproxy load-balancer controller. It builds a Kubernetes
// client (in-cluster or from the local kubeconfig), optionally starts a
// syslog server for haproxy logs, and then runs the controller loop.
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)
	cfg := parseCfg(*config, *lbDefAlgorithm)

	var kubeClient *unversioned.Client
	var err error

	defErrorPage := newStaticPageHandler(*errorPage, defaultErrorPage)
	if defErrorPage == nil {
		glog.Fatalf("Failed to load the default error page")
	}
	go registerHandlers(defErrorPage)

	var tcpSvcs map[string]int
	if *tcpServices != "" {
		tcpSvcs = parseTCPServices(*tcpServices)
	} else {
		glog.Infof("No tcp/https services specified")
	}

	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		// Bug fix: the result of unversioned.New was previously assigned to
		// the shadowed inner err and never checked, so a bad kubeconfig
		// produced a nil client that crashed later.
		if kubeClient, err = unversioned.New(config); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}

	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = api.NamespaceAll
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace, tcpSvcs)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
示例3: main
// main sanity-checks in-cluster API access: it lists the nodes and the
// services in the default namespace, logs them, then serves /healthz.
func main() {
	c, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}

	everything := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}

	nodeList, err := c.Nodes().List(everything)
	if err != nil {
		log.Fatalf("Failed to list nodes: %v", err)
	}
	log.Printf("Nodes:")
	for _, n := range nodeList.Items {
		log.Printf("\t%v", n.Name)
	}

	svcList, err := c.Services(api.NamespaceDefault).List(everything)
	if err != nil {
		log.Fatalf("Failed to list services: %v", err)
	}
	log.Printf("Services:")
	for _, s := range svcList.Items {
		log.Printf("\t%v", s.Name)
	}

	log.Printf("Success")
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Ok")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
示例4: Run
// Run runs the git-receive hook. This func is effectively the main for the git-receive hook,
// although it is called from the main in boot.go.
func Run(conf *Config, fs sys.FS, env sys.Env, storageDriver storagedriver.StorageDriver) error {
log.Debug("Running git hook")
builderKey, err := builderconf.GetBuilderKey()
if err != nil {
return err
}
kubeClient, err := client.NewInCluster()
if err != nil {
return fmt.Errorf("couldn't reach the api server (%s)", err)
}
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
line := scanner.Text()
oldRev, newRev, refName, err := readLine(line)
if err != nil {
return fmt.Errorf("reading STDIN (%s)", err)
}
log.Debug("read [%s,%s,%s]", oldRev, newRev, refName)
// if we're processing a receive-pack on an existing repo, run a build
if strings.HasPrefix(conf.SSHOriginalCommand, "git-receive-pack") {
if err := build(conf, storageDriver, kubeClient, fs, env, builderKey, newRev); err != nil {
return err
}
}
}
return scanner.Err()
}
示例5: main
func main() {
var ingClient client.IngressInterface
if kubeClient, err := client.NewInCluster(); err != nil {
log.Fatalf("Failed to create client: %v.", err)
} else {
ingClient = kubeClient.Extensions().Ingress(os.Getenv("INGRESS_NAMESPACE"))
}
tmpl, _ := template.New("haproxy").Parse(haproxyConf)
rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
known := &extensions.IngressList{}
// Controller loop
shellOut("haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy-private.pid")
for {
rateLimiter.Accept()
ingresses, err := ingClient.List(api.ListOptions{})
if err != nil {
log.Printf("Error retrieving ingresses: %v", err)
continue
}
if reflect.DeepEqual(ingresses.Items, known.Items) {
log.Printf("Nothing Has Changed")
continue
}
known = ingresses
if w, err := os.Create("/etc/haproxy/haproxy.cfg"); err != nil {
log.Fatalf("Failed to open %v: %v", haproxyConf, err)
defer w.Close()
} else if err := tmpl.Execute(w, ingresses); err != nil {
log.Fatalf("Failed to write template %v", err)
}
restartHaproxy("haproxy_reload")
}
}
示例6: contactOthers
// Find all sibling pods in the service and post to their /write handler.
func contactOthers(state *State) {
	const waitTimeout = 2 * time.Minute
	defer state.doneContactingPeers()

	// Named c instead of client so the client package is not shadowed.
	c, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Unable to create client; error: %v\n", err)
	}
	// Double check that that worked by getting the server version.
	if v, err := c.Discovery().ServerVersion(); err != nil {
		log.Fatalf("Unable to get server version: %v\n", err)
	} else {
		log.Printf("Server version: %#v\n", v)
	}

	// Wait (up to waitTimeout) until the expected number of peer endpoints
	// shows up in the service.
	for begin := time.Now(); time.Since(begin) < waitTimeout; time.Sleep(5 * time.Second) {
		eps := getWebserverEndpoints(c)
		if eps.Len() >= *peerCount {
			break
		}
		state.Logf("%v/%v has %v endpoints, which is less than %v as expected. Waiting for all endpoints to come up.", *namespace, *service, len(eps), *peerCount)
	}

	// Do this repeatedly, in case there's some propagation delay with getting
	// newly started pods into the endpoints list.
	for attempt := 0; attempt < 15; attempt++ {
		for ep := range getWebserverEndpoints(c) {
			state.Logf("Attempting to contact %s", ep)
			contactSingle(ep, state)
		}
		time.Sleep(5 * time.Second)
	}
}
示例7: main
func main() {
var ingClient client.IngressInterface
if kubeClient, err := client.NewInCluster(); err != nil {
log.Fatalf("Failed to create client: %v.", err)
} else {
ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
}
tmpl, _ := template.New("nginx").Parse(nginxConf)
rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
known := &extensions.IngressList{}
// Controller loop
shellOut("nginx")
for {
rateLimiter.Accept()
ingresses, err := ingClient.List(labels.Everything(), fields.Everything())
if err != nil || reflect.DeepEqual(ingresses.Items, known.Items) {
continue
}
known = ingresses
if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
log.Fatalf("Failed to open %v: %v", nginxConf, err)
} else if err := tmpl.Execute(w, ingresses); err != nil {
log.Fatalf("Failed to write template %v", err)
}
shellOut("nginx -s reload")
}
}
示例8: main
// main runs the router: it polls the cluster, rebuilds the router model,
// and rewrites certs/config and reloads nginx whenever anything changed.
func main() {
	nginx.Start()

	kubeClient, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	}

	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	lastConfig := &model.RouterConfig{}

	// Main loop
	for {
		rateLimiter.Accept()

		nextConfig, err := model.Build(kubeClient)
		if err != nil {
			log.Printf("Error building model; not modifying certs or configuration: %v.", err)
			continue
		}
		if reflect.DeepEqual(nextConfig, lastConfig) {
			continue
		}
		log.Println("INFO: Router configuration has changed in k8s.")

		if err := nginx.WriteCerts(nextConfig, "/opt/nginx/ssl"); err != nil {
			log.Printf("Failed to write certs; continuing with existing certs and configuration: %v", err)
			continue
		}
		if err := nginx.WriteConfig(nextConfig, "/opt/nginx/conf/nginx.conf"); err != nil {
			log.Printf("Failed to write new nginx configuration; continuing with existing configuration: %v", err)
			continue
		}
		nginx.Reload()
		lastConfig = nextConfig
	}
}
示例9: runJoinCluster
// runJoinCluster assembles the influxd startup options this node needs to
// join the cluster and writes them to the environment-variable file.
func runJoinCluster(cmd *cobra.Command, args []string) error {
	kubeClient, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("unable to connect k8s api server: %v", err)
	}

	labelSelector, err := labels.Parse(influxSelectors)
	if err != nil {
		return fmt.Errorf("unable to parse labels: %v", err)
	}

	// Discover sibling influxdb pod IPs by label.
	podIPs, err := podIps(kubeClient, labelSelector, fields.Everything())
	if err != nil {
		return err
	}

	hostIP, err := externalIP()
	if err != nil {
		return err
	}

	opts := influxdOpts(hostIP, influxdbPeers(hostIP, podIPs))
	return ioutil.WriteFile(envVarFile, []byte(opts), 0644)
}
示例10: checksyncstatus
// checksyncstatus polls every running minio-sync pod until each one reports
// that the given path has been synced, backing off with an increasing sleep,
// and returns an error if any pod never catches up.
//
// Bug fixes vs. original: goroutines wrote the shared nosync map while the
// launching loop was still ranging over it (a map read/write race), and
// several goroutines wrote the unguarded allsync flag concurrently. The
// pending IPs are now snapshotted before fan-out, map writes stay behind the
// lock, and completion is computed only after wg.Wait().
func (fs Filesystem) checksyncstatus(path string) error {
	// Translate the minio home path into the sync mount path.
	path = strings.TrimPrefix(path, "/home/minio")
	path = "mnt/minio/data" + path

	var lock sync.RWMutex
	nosync := make(map[string]bool) // pod IP -> synced?

	kubeClient, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("unable to create client")
	}
	pclient := kubeClient.Pods("default")
	selector, _ := labels.Parse("app=minio-sync")
	list, err := pclient.List(selector, nil)
	if err != nil {
		return fmt.Errorf("list pods failed")
	}
	for _, pod := range list.Items {
		fmt.Println(pod.Status.PodIP)
		if pod.Status.Phase == "Running" {
			nosync[pod.Status.PodIP] = false
		}
	}

	for duration := 1; duration < 60; duration++ {
		timeperiod := time.Second * time.Duration(duration)
		fmt.Println(timeperiod)
		time.Sleep(timeperiod)

		// Snapshot the not-yet-synced IPs so the goroutines below never
		// iterate the map while another goroutine is writing to it.
		var pending []string
		for ip, synced := range nosync {
			if !synced {
				pending = append(pending, ip)
			}
		}

		var wg sync.WaitGroup
		wg.Add(len(pending))
		for _, ip := range pending {
			go func(ip string) {
				defer wg.Done()
				if doCurl("http://" + ip + ":3000/" + path) {
					lock.Lock()
					nosync[ip] = true
					lock.Unlock()
				}
			}(ip)
		}
		wg.Wait()

		// All workers are done; reading the map here is race-free.
		allsync := true
		for _, synced := range nosync {
			if !synced {
				allsync = false
				break
			}
		}
		if allsync {
			break
		}
	}

	for _, synced := range nosync {
		if !synced {
			return fmt.Errorf("sync failed took more time ")
		}
	}
	return nil
}
示例11: main
func main() {
var ingClient client.IngressInterface
var secretsClient client.SecretsInterface
/* Anon http client
config := client.Config{
Host: "http://localhost:8080",
Username: "admin",
Password: "admin",
}
kubeClient, err := client.New(&config)
*/
kubeClient, err := client.NewInCluster()
if err != nil {
log.Fatalf("Failed to create client: %v.", err)
} else {
ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
secretsClient = kubeClient.Secrets(api.NamespaceAll)
}
tmpl := template.New("nginx.tmpl").Funcs(template.FuncMap{"hasprefix": hasPrefix, "hassuffix": hasSuffix})
if _, err := tmpl.ParseFiles("./nginx.tmpl"); err != nil {
log.Fatalf("Failed to parse template %v", err)
}
rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
known := &extensions.IngressList{}
known_secrets := &api.SecretList{}
// Controller loop
shellOut("nginx")
for {
rateLimiter.Accept()
ingresses, err := ingClient.List(api.ListOptions{})
if err != nil {
log.Printf("Error retrieving ingresses: %v", err)
continue
}
secrets, err := secretsClient.List(api.ListOptions{})
if err != nil {
log.Printf("Error retrieving secrets: %v", err)
continue
}
if reflect.DeepEqual(ingresses.Items, known.Items) && reflect.DeepEqual(secrets.Items, known_secrets.Items) {
continue
}
// Process SSL context
// old values
known = ingresses
known_secrets = secrets
// context variable
context := &Context{Ingress: ingresses, Secrets: secrets}
if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
log.Fatalf("Failed to open %v: %v", err)
} else if err := tmpl.Execute(w, context); err != nil {
log.Fatalf("Failed to write template %v", err)
}
shellOut("nginx -s reload")
}
}
示例12: main
// main starts the NGINX ingress controller, building the Kubernetes client
// either in-cluster or from the local kubeconfig.
func main() {
	var kubeClient *unversioned.Client
	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	glog.Infof("Using build: %v - %v", gitRepo, version)

	if *buildCfg {
		fmt.Printf("Example of ConfigMap to customize NGINX configuration:\n%v", nginx.ConfigMapAsString())
		os.Exit(0)
	}

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	var err error
	if *inCluster {
		kubeClient, err = unversioned.NewInCluster()
	} else {
		config, connErr := clientConfig.ClientConfig()
		if connErr != nil {
			// Bug fix: the original logged the outer (still-nil) err here
			// instead of connErr, hiding the actual connection problem.
			glog.Fatalf("error connecting to the client: %v", connErr)
		}
		kubeClient, err = unversioned.New(config)
	}
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}

	// Outside a pod we cannot query pod details; default to loopback.
	runtimePodInfo := &podInfo{NodeIP: "127.0.0.1"}
	if *inCluster {
		runtimePodInfo, err = getPodDetails(kubeClient)
		if err != nil {
			glog.Fatalf("unexpected error getting runtime information: %v", err)
		}
	}

	if err := isValidService(kubeClient, *defaultSvc); err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("Validated %v as the default backend", *defaultSvc)

	lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod, *defaultSvc, *watchNamespace, *nxgConfigMap, *tcpConfigMapName, *udpConfigMapName, runtimePodInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}

	go registerHandlers(lbc)
	go handleSigterm(lbc)

	lbc.Run()

	// Run normally never returns; if it does, idle until the pod is deleted.
	for {
		glog.Infof("Handled quit, awaiting pod deletion")
		time.Sleep(30 * time.Second)
	}
}
示例13: main
// main discovers Elasticsearch logging endpoints in the cluster and prints
// a discovery.zen.ping.unicast.hosts line for the elasticsearch config.
func main() {
	flag.Parse()
	glog.Info("Kubernetes Elasticsearch logging discovery")

	c, err := client.NewInCluster()
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	// Default to the system namespace; allow an override via $NAMESPACE,
	// but only if that namespace actually exists.
	namespace := api.NamespaceSystem
	if envNamespace := os.Getenv("NAMESPACE"); envNamespace != "" {
		if _, err := c.Namespaces().Get(envNamespace); err != nil {
			glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
		}
		namespace = envNamespace
	}

	// Look for endpoints associated with the Elasticsearch logging service.
	// First wait (up to 5 minutes) for the service to become available.
	var elasticsearch *api.Service
	for start := time.Now(); time.Since(start) < 5*time.Minute; time.Sleep(10 * time.Second) {
		if elasticsearch, err = c.Services(namespace).Get("elasticsearch-logging"); err == nil {
			break
		}
	}
	// If we did not find an elasticsearch logging service then log a warning
	// and return without adding any unicast hosts.
	if elasticsearch == nil {
		glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
		return
	}

	// Wait for some endpoints: stop once the address count is non-zero and
	// unchanged between two successive polls.
	var endpoints *api.Endpoints
	addrs := []string{}
	previous := 0
	for start := time.Now(); time.Since(start) < 5*time.Minute; time.Sleep(10 * time.Second) {
		endpoints, err = c.Endpoints(namespace).Get("elasticsearch-logging")
		if err != nil {
			continue
		}
		addrs = flattenSubsets(endpoints.Subsets)
		glog.Infof("Found %s", addrs)
		if len(addrs) > 0 && len(addrs) == previous {
			break
		}
		previous = len(addrs)
	}
	// If there was an error finding endpoints then log a warning and quit.
	if err != nil {
		glog.Warningf("Error finding endpoints: %v", err)
		return
	}

	glog.Infof("Endpoints = %s", addrs)
	fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
}
示例14: main
// main starts the load-balancer controller, preferring an in-cluster client
// and falling back to the local kubeconfig when in-cluster creation fails.
func main() {
	flags.AddGoFlagSet(flag.CommandLine)
	flags.Parse(os.Args)
	clientConfig := kubectl_util.DefaultClientConfig(flags)

	glog.Infof("Using build: %v - %v", gitRepo, version)

	if *defaultSvc == "" {
		glog.Fatalf("Please specify --default-backend-service")
	}

	kubeClient, err := unversioned.NewInCluster()
	if err != nil {
		cfg, cfgErr := clientConfig.ClientConfig()
		if cfgErr != nil {
			glog.Fatalf("error configuring the client: %v", cfgErr)
		}
		if kubeClient, err = unversioned.New(cfg); err != nil {
			glog.Fatalf("failed to create client: %v", err)
		}
	}

	runtimePodInfo, err := getPodDetails(kubeClient)
	if err != nil {
		// Outside a pod we cannot query runtime details; warn and default.
		runtimePodInfo = &podInfo{NodeIP: "127.0.0.1"}
		glog.Warningf("unexpected error getting runtime information: %v", err)
	}

	if err := isValidService(kubeClient, *defaultSvc); err != nil {
		glog.Fatalf("no service with name %v found: %v", *defaultSvc, err)
	}
	glog.Infof("Validated %v as the default backend", *defaultSvc)

	if *nxgConfigMap != "" {
		if _, _, err = parseNsName(*nxgConfigMap); err != nil {
			glog.Fatalf("configmap error: %v", err)
		}
	}

	lbc, err := newLoadBalancerController(kubeClient, *resyncPeriod,
		*defaultSvc, *watchNamespace, *nxgConfigMap, *tcpConfigMapName,
		*udpConfigMapName, *defSSLCertificate, *defHealthzURL, runtimePodInfo)
	if err != nil {
		glog.Fatalf("%v", err)
	}

	go registerHandlers(lbc)
	go handleSigterm(lbc)

	lbc.Run()

	// Run normally never returns; if it does, idle until the pod is deleted.
	for {
		glog.Infof("Handled quit, awaiting pod deletion")
		time.Sleep(30 * time.Second)
	}
}
示例15: main
// main configures IPVS/keepalived from Kubernetes services: it builds a
// client (in-cluster or from kubeconfig), prepares the kernel (IPVS module,
// sysctls, table reset), and runs the keepalived-based VIP controller.
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)

	var err error
	var kubeClient *unversioned.Client

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		// Bug fix: the error from unversioned.New was assigned to a shadowed
		// err and never checked, so a nil client could slip through.
		if kubeClient, err = unversioned.New(config); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}

	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = ""
	}

	if err = loadIPVModule(); err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}
	if err = changeSysctl(); err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}
	if err = resetIPVS(); err != nil {
		glog.Fatalf("Terminating execution: %v", err)
	}

	glog.Info("starting LVS configuration")
	if *useUnicast {
		glog.Info("keepalived will use unicast to sync the nodes")
	}

	ipvsc := newIPVSController(kubeClient, namespace, *useUnicast, *password)
	go ipvsc.epController.Run(wait.NeverStop)
	go ipvsc.svcController.Run(wait.NeverStop)
	go wait.Until(ipvsc.worker, time.Second, wait.NeverStop)

	// Give the controllers a moment to sync before announcing VIPs.
	time.Sleep(5 * time.Second)

	glog.Info("starting keepalived to announce VIPs")
	ipvsc.keepalived.Start()
}