This article collects typical usage examples of the Infof function from the Golang package github.com/golang/glog. If you are wondering how Infof works, how to call it, or what it looks like in real code, the curated examples here should help.
Fifteen code examples of Infof are shown below, ordered by popularity by default.
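Before the examples, a minimal sketch of how glog.Infof is usually wired up may help (this program is illustrative, not drawn from the examples below). glog registers its command-line flags, such as -logtostderr and -v, with the standard flag package, formats messages with fmt-style verbs, and buffers INFO output, so a typical program parses flags before logging and flushes before exiting:

package main

import (
    "flag"

    "github.com/golang/glog"
)

func main() {
    flag.Parse()       // glog registers -logtostderr, -v, etc. with the flag package
    defer glog.Flush() // glog buffers output; flush it on the way out

    name := "world"
    glog.Infof("hello, %s", name)                  // fmt-style formatting, INFO severity
    glog.V(2).Infof("details for %s follow", name) // logged only when run with -v=2 or higher
}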
Example 1: OnUpdate
// OnUpdate updates the registered endpoints with the new
// endpoint information, and removes registered endpoints
// that are no longer present in the provided set.
func (impl LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
    tmp := make(map[string]bool)
    impl.lock.Lock()
    defer impl.lock.Unlock()
    // First update / add all new endpoints for services.
    for _, value := range endpoints {
        existingEndpoints, exists := impl.endpointsMap[value.Name]
        validEndpoints := impl.filterValidEndpoints(value.Endpoints)
        if !exists || !reflect.DeepEqual(existingEndpoints, validEndpoints) {
            glog.Infof("LoadBalancerRR: Setting endpoints for %s to %+v", value.Name, value.Endpoints)
            impl.endpointsMap[value.Name] = validEndpoints
            // Start RR from the beginning if added or updated.
            impl.rrIndex[value.Name] = 0
        }
        tmp[value.Name] = true
    }
    // Then remove any endpoints no longer relevant.
    for key, value := range impl.endpointsMap {
        if _, exists := tmp[key]; !exists {
            glog.Infof("LoadBalancerRR: Removing endpoints for %s -> %+v", key, value)
            delete(impl.endpointsMap, key)
        }
    }
}
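Note the format verbs in the two Infof calls above: glog formats its arguments with the fmt package, so %s prints the service name and %+v prints the endpoints value with struct field names included, which keeps the logged state readable. The same verbs recur throughout the examples below.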
Example 2: Open
// Open must be called before sending requests to QueryEngine.
func (qe *QueryEngine) Open(dbconfigs *dbconfigs.DBConfigs, schemaOverrides []SchemaOverride) {
    qe.dbconfigs = dbconfigs
    appParams := dbconfigs.App.ConnParams
    // Create dba params based on App connection params
    // and Dba credentials.
    dbaParams := dbconfigs.App.ConnParams
    if dbconfigs.Dba.Uname != "" {
        dbaParams.Uname = dbconfigs.Dba.Uname
        dbaParams.Pass = dbconfigs.Dba.Pass
    }
    strictMode := false
    if qe.strictMode.Get() != 0 {
        strictMode = true
    }
    if !strictMode && dbconfigs.App.EnableRowcache {
        panic(NewTabletError(ErrFatal, vtrpc.ErrorCode_INTERNAL_ERROR, "Rowcache cannot be enabled when queryserver-config-strict-mode is false"))
    }
    if dbconfigs.App.EnableRowcache {
        qe.cachePool.Open()
        log.Infof("rowcache is enabled")
    } else {
        log.Infof("rowcache is not enabled")
    }
    start := time.Now()
    // schemaInfo depends on cachePool. Every table that has a rowcache
    // points to the cachePool.
    qe.schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, qe.cachePool, strictMode)
    log.Infof("Time taken to load the schema: %v", time.Since(start))
    qe.connPool.Open(&appParams, &dbaParams)
    qe.streamConnPool.Open(&appParams, &dbaParams)
    qe.txPool.Open(&appParams, &dbaParams)
}
Example 3: StartPods
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
    start := time.Now()
    defer func() {
        glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
    }()
    hostField := fields.OneTermEqualSelector(client.PodHost, host)
    pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
    if err != nil || len(pods.Items) == numPods {
        return err
    }
    glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
    // For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
    controller := RCFromManifest(TestRCManifest)
    // Make the rc unique to the given host.
    controller.Spec.Replicas = numPods
    controller.Spec.Template.Spec.NodeName = host
    controller.Name = controller.Name + host
    controller.Spec.Selector["host"] = host
    controller.Spec.Template.Labels["host"] = host
    rc, err := StartRC(controller, restClient)
    if err != nil {
        return err
    }
    // Delete the rc, otherwise when we restart master components for the next benchmark
    // the rc controller will race with the pods controller in the rc manager.
    return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
}
Example 4: RenderLocal
func (_ *UserTask) RenderLocal(t *local.LocalTarget, a, e, changes *UserTask) error {
    if a == nil {
        args := buildUseraddArgs(e)
        glog.Infof("Creating user %q", e.Name)
        cmd := exec.Command("useradd", args...)
        output, err := cmd.CombinedOutput()
        if err != nil {
            return fmt.Errorf("error creating user: %v\nOutput: %s", err, output)
        }
    } else {
        var args []string
        if changes.Shell != "" {
            args = append(args, "-s", e.Shell)
        }
        if changes.Home != "" {
            args = append(args, "-d", e.Home)
        }
        if len(args) != 0 {
            args = append(args, e.Name)
            glog.Infof("Reconfiguring user %q", e.Name)
            cmd := exec.Command("usermod", args...)
            output, err := cmd.CombinedOutput()
            if err != nil {
                return fmt.Errorf("error reconfiguring user: %v\nOutput: %s", err, output)
            }
        }
    }
    return nil
}
Example 5: Add
// Add adds a healthcheck if one for the same port doesn't already exist.
func (h *HealthChecks) Add(port int64, path string) error {
    hc, _ := h.Get(port)
    name := h.namer.BeName(port)
    if path == "" {
        path = h.defaultPath
    }
    if hc == nil {
        glog.Infof("Creating health check %v", name)
        if err := h.cloud.CreateHttpHealthCheck(
            &compute.HttpHealthCheck{
                Name:        name,
                Port:        port,
                RequestPath: path,
                Description: "Default kubernetes L7 Loadbalancing health check.",
                // How often to health check.
                CheckIntervalSec: 1,
                // How long to wait before claiming failure of a health check.
                TimeoutSec: 1,
                // Number of healthchecks to pass for a vm to be deemed healthy.
                HealthyThreshold: 1,
                // Number of healthchecks to fail before the vm is deemed unhealthy.
                UnhealthyThreshold: 10,
            }); err != nil {
            return err
        }
    } else {
        // TODO: Does this health check need an edge hop?
        glog.Infof("Health check %v already exists", hc.Name)
    }
    return nil
}
Example 6: nodeSyncLoop
// nodeSyncLoop handles updating the hosts pointed to by all load
// balancers whenever the set of nodes in the cluster changes.
func (s *ServiceController) nodeSyncLoop(period time.Duration) {
    var prevHosts []string
    var servicesToUpdate []*cachedService
    for range time.Tick(period) {
        nodes, err := s.nodeLister.NodeCondition(getNodeConditionPredicate()).List()
        if err != nil {
            glog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
            continue
        }
        newHosts := hostsFromNodeSlice(nodes)
        if stringSlicesEqual(newHosts, prevHosts) {
            // The set of nodes in the cluster hasn't changed, but we can retry
            // updating any services that we failed to update last time around.
            servicesToUpdate = s.updateLoadBalancerHosts(servicesToUpdate, newHosts)
            continue
        }
        glog.Infof("Detected change in list of current cluster nodes. New node set: %v", newHosts)
        // Try updating all services, and save the ones that fail to try again next
        // round.
        servicesToUpdate = s.cache.allServices()
        numServices := len(servicesToUpdate)
        servicesToUpdate = s.updateLoadBalancerHosts(servicesToUpdate, newHosts)
        glog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
            numServices-len(servicesToUpdate), numServices)
        prevHosts = newHosts
    }
}
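A side note on the loop construct: for range time.Tick(period) blocks on a channel that fires once per period. Since time.Tick never stops its underlying Ticker, it leaks if the loop can exit; that is acceptable here only because nodeSyncLoop is expected to run for the life of the process.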
Example 7: terminateHealthChecks
// terminateHealthChecks is called when we enter lame duck mode.
// We will clean up our state, and shut down query service.
// We only do something if we are in targetTabletType state, and then
// we just go to spare.
func (agent *ActionAgent) terminateHealthChecks(targetTabletType pbt.TabletType) {
    agent.actionMutex.Lock()
    defer agent.actionMutex.Unlock()
    log.Info("agent.terminateHealthChecks is starting")
    // read the current tablet record
    tablet := agent.Tablet()
    if tablet.Type != targetTabletType {
        log.Infof("Tablet in state %v, not changing it", tablet.Type)
        return
    }
    // Change the Type to spare, update the health. Note we pass in a map
    // that's not nil, meaning we will clear it.
    if err := topotools.ChangeType(agent.batchCtx, agent.TopoServer, tablet.Alias, pbt.TabletType_SPARE, make(map[string]string)); err != nil {
        log.Infof("Error updating tablet record: %v", err)
        return
    }
    // Update the serving graph in our cell, only if we're dealing with
    // a serving type
    if err := agent.updateServingGraph(tablet, targetTabletType); err != nil {
        log.Warningf("updateServingGraph failed (will still run post action callbacks, serving graph might be out of date): %v", err)
    }
    // We've already rebuilt the shard, which is the only reason we registered
    // ourself as OnTermSync (synchronous). The rest can be done asynchronously.
    go func() {
        // Run the post action callbacks (let them shutdown the query service)
        if err := agent.refreshTablet(agent.batchCtx, "terminatehealthcheck"); err != nil {
            log.Warningf("refreshTablet failed: %v", err)
        }
    }()
}
Example 8: Start
// Start launches a master. It returns an error when it can, but some background
// processes may still be running and the process should exit after it finishes.
func (m *Master) Start() error {
    // Allow privileged containers
    // TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
    capabilities.Initialize(capabilities.Capabilities{
        AllowPrivileged: true,
        PrivilegedSources: capabilities.PrivilegedSources{
            HostNetworkSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
            HostPIDSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
            HostIPCSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
        },
    })
    openshiftConfig, err := origin.BuildMasterConfig(*m.config)
    if err != nil {
        return err
    }
    kubeMasterConfig, err := BuildKubernetesMasterConfig(openshiftConfig)
    if err != nil {
        return err
    }
    switch {
    case m.api:
        glog.Infof("Starting master on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
        glog.Infof("Public master address is %s", m.config.AssetConfig.MasterPublicURL)
        if len(m.config.DisabledFeatures) > 0 {
            glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
        }
        glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))
        if err := StartAPI(openshiftConfig, kubeMasterConfig); err != nil {
            return err
        }
    case m.controllers:
        glog.Infof("Starting controllers on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
        if len(m.config.DisabledFeatures) > 0 {
            glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
        }
        glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))
        if err := startHealth(openshiftConfig); err != nil {
            return err
        }
    }
    if m.controllers {
        // run controllers asynchronously (not required to be "ready")
        go func() {
            if err := startControllers(openshiftConfig, kubeMasterConfig); err != nil {
                glog.Fatal(err)
            }
            openshiftConfig.Informers.Start(utilwait.NeverStop)
        }()
    }
    return nil
}
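The glog.V(4).Infof calls above are verbosity-gated: glog.V(n) returns a guard that logs only when the process runs with -v=n or higher (or a matching -vmodule pattern), so the disabled-features lines stay silent under a default configuration while the plain Infof lines always log.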
Example 9: runOnce
// runOnce runs a given set of pods and returns their status.
func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
    ch := make(chan RunPodResult)
    admitted := []*api.Pod{}
    for _, pod := range pods {
        // Check if we can admit the pod.
        if ok, reason, message := kl.canAdmitPod(append(admitted, pod), pod); !ok {
            kl.rejectPod(pod, reason, message)
        } else {
            admitted = append(admitted, pod)
        }
        go func(pod *api.Pod) {
            err := kl.runPod(pod, retryDelay)
            ch <- RunPodResult{pod, err}
        }(pod)
    }
    glog.Infof("waiting for %d pods", len(pods))
    failedPods := []string{}
    for i := 0; i < len(pods); i++ {
        res := <-ch
        results = append(results, res)
        if res.Err != nil {
            // TODO(proppy): report which containers failed the pod.
            glog.Infof("failed to start pod %q: %v", res.Pod.Name, res.Err)
            failedPods = append(failedPods, res.Pod.Name)
        } else {
            glog.Infof("started pod %q", res.Pod.Name)
        }
    }
    if len(failedPods) > 0 {
        return results, fmt.Errorf("error running pods: %v", failedPods)
    }
    glog.Infof("%d pods started", len(pods))
    return results, err
}
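One detail worth noting: runOnce passes pod into each goroutine as an argument rather than capturing the loop variable in the closure. In the Go versions this code targets (before Go 1.22 changed loop-variable scoping), every iteration shared a single pod variable, so a direct capture would have sent most goroutines the wrong pod.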
Example 10: initHealthCheck
func (agent *ActionAgent) initHealthCheck() {
    if !agent.IsRunningHealthCheck() {
        log.Infof("No target_tablet_type specified, disabling any health check")
        return
    }
    tt, err := topoproto.ParseTabletType(*targetTabletType)
    if err != nil {
        log.Fatalf("Invalid target tablet type %v: %v", *targetTabletType, err)
    }
    log.Infof("Starting periodic health check every %v with target_tablet_type=%v", *healthCheckInterval, *targetTabletType)
    t := timer.NewTimer(*healthCheckInterval)
    servenv.OnTermSync(func() {
        // When we enter lameduck mode, we want to not call
        // the health check any more. After this returns, we
        // are guaranteed to not call it.
        log.Info("Stopping periodic health check timer")
        t.Stop()
        // Now we can finish up and force ourselves to not healthy.
        agent.terminateHealthChecks(tt)
    })
    t.Start(func() {
        agent.runHealthCheck(tt)
    })
    t.Trigger()
}
Example 11: Run
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    glog.Infof("Starting HPA Controller")
    go a.controller.Run(stopCh)
    <-stopCh
    glog.Infof("Shutting down HPA Controller")
}
Example 12: openNodePort
func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
    // TODO: Do we want to allow containers to access public services? Probably yes.
    // TODO: We could refactor this to be the same code as portal, but with IP == nil
    err := proxier.claimNodePort(nil, nodePort, protocol, name)
    if err != nil {
        return err
    }
    // Handle traffic from containers.
    args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
    existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...)
    if err != nil {
        glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name)
        return err
    }
    if !existed {
        glog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort)
    }
    // Handle traffic from the host.
    args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
    existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...)
    if err != nil {
        glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name)
        return err
    }
    if !existed {
        glog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort)
    }
    return nil
}
Example 13: fetchNextState
// fetchNextState fetches the key (or waits for a change to a key) and then returns
// the nextIndex to read. It will watch no longer than s.waitDuration and then return
func (s *SourceEtcd) fetchNextState(fromIndex uint64) (nextIndex uint64, err error) {
    var response *etcd.Response
    if fromIndex == 0 {
        response, err = s.client.Get(s.key, true, false)
    } else {
        response, err = s.client.Watch(s.key, fromIndex, false, nil, stopChannel(s.waitDuration))
        if tools.IsEtcdWatchStoppedByUser(err) {
            return fromIndex, nil
        }
    }
    if err != nil {
        return fromIndex, err
    }
    pods, err := responseToPods(response)
    if err != nil {
        glog.Infof("Response was in error: %#v", response)
        return 0, fmt.Errorf("error parsing response: %#v", err)
    }
    glog.Infof("Got state from etcd: %+v", pods)
    s.updates <- kubelet.PodUpdate{pods, kubelet.SET}
    return response.Node.ModifiedIndex + 1, nil
}
Example 14: main
func main() {
    flag.Parse()
    storageDriver, err := NewStorageDriver(*argDbDriver)
    if err != nil {
        glog.Fatalf("Failed to connect to database: %s", err)
    }
    containerManager, err := manager.New(storageDriver)
    if err != nil {
        glog.Fatalf("Failed to create a Container Manager: %s", err)
    }
    // Register Docker.
    if err := docker.Register(containerManager); err != nil {
        glog.Errorf("Docker registration failed: %v.", err)
    }
    // Register the raw driver.
    if err := raw.Register(containerManager); err != nil {
        glog.Fatalf("raw registration failed: %v.", err)
    }
    // Handler for static content.
    http.HandleFunc(static.StaticResource, func(w http.ResponseWriter, r *http.Request) {
        err := static.HandleRequest(w, r.URL)
        if err != nil {
            fmt.Fprintf(w, "%s", err)
        }
    })
    // Register API handler.
    if err := api.RegisterHandlers(containerManager); err != nil {
        glog.Fatalf("failed to register API handlers: %s", err)
    }
    // Redirect / to containers page.
    http.Handle("/", http.RedirectHandler(pages.ContainersPage, http.StatusTemporaryRedirect))
    // Register the handler for the containers page.
    http.HandleFunc(pages.ContainersPage, func(w http.ResponseWriter, r *http.Request) {
        err := pages.ServerContainersPage(containerManager, w, r.URL)
        if err != nil {
            fmt.Fprintf(w, "%s", err)
        }
    })
    defer glog.Flush()
    go func() {
        glog.Fatal(containerManager.Start())
    }()
    glog.Infof("Starting cAdvisor version: %q", info.VERSION)
    glog.Infof("About to serve on port %d", *argPort)
    addr := fmt.Sprintf(":%v", *argPort)
    glog.Fatal(http.ListenAndServe(addr, nil))
}
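Two glog details here are easy to miss: defer glog.Flush() is needed because glog buffers INFO output in memory, and glog.Fatal flushes all log files itself before terminating the process with os.Exit, which is also why no deferred function runs after it fires.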
Example 15: checkProxy
func (l *L7) checkProxy() (err error) {
    if l.um == nil {
        return fmt.Errorf("Cannot create proxy without urlmap.")
    }
    proxyName := truncate(fmt.Sprintf("%v-%v", targetProxyPrefix, l.Name))
    proxy, _ := l.cloud.GetTargetHttpProxy(proxyName)
    if proxy == nil {
        glog.Infof("Creating new http proxy for urlmap %v", l.um.Name)
        proxy, err = l.cloud.CreateTargetHttpProxy(l.um, proxyName)
        if err != nil {
            return err
        }
        l.tp = proxy
        return nil
    }
    if !compareLinks(proxy.UrlMap, l.um.SelfLink) {
        glog.Infof("Proxy %v has the wrong url map, setting %v overwriting %v",
            proxy.Name, l.um, proxy.UrlMap)
        if err := l.cloud.SetUrlMapForTargetHttpProxy(proxy, l.um); err != nil {
            return err
        }
    }
    l.tp = proxy
    return nil
}