This article collects typical usage examples of the Warningf function from the Golang package github.com/golang/glog. If you are asking: how exactly is Warningf used in Go? What does it do? Where can I find real examples? Then the curated code samples below may help.
Below are 15 code examples of the Warningf function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
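Before the examples, a minimal self-contained sketch of calling glog.Warningf directly: it takes a Printf-style format string, and glog expects flag.Parse to run before logging (flags such as -logtostderr control where output goes).

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse() // glog registers flags such as -logtostderr and -v on the default FlagSet
	defer glog.Flush()

	// Warningf formats like fmt.Printf and writes to the WARNING log (and below).
	glog.Warningf("cache miss for key %q, falling back to origin", "user:42")
}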
Example 1: Warningf
// Warningf logs a formatted warning, prepending trace information from ctx when present.
func Warningf(ctx context.Context, format string, args ...interface{}) {
if ctx == nil || !hasTraceKey(ctx) {
glog.Warningf(format, args...)
return
}
glog.Warningf(prependFormat(format), prependParam(args, ctx)...)
}
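The helpers hasTraceKey, prependFormat, and prependParam are not shown in this example. A minimal sketch of the same pattern, assuming they prepend a trace ID carried in the context (traceKey and the "[trace ...]" prefix below are illustrative, not the example's actual API):

package main

import (
	"context"
	"flag"

	"github.com/golang/glog"
)

type traceKey struct{}

// Warningf mirrors the example above: fall back to plain glog when ctx
// carries no trace, otherwise prefix the message with the trace ID.
func Warningf(ctx context.Context, format string, args ...interface{}) {
	var id string
	var ok bool
	if ctx != nil {
		id, ok = ctx.Value(traceKey{}).(string)
	}
	if !ok {
		glog.Warningf(format, args...)
		return
	}
	glog.Warningf("[trace %s] "+format, append([]interface{}{id}, args...)...)
}

func main() {
	flag.Parse()
	defer glog.Flush()
	ctx := context.WithValue(context.Background(), traceKey{}, "req-1234")
	Warningf(ctx, "retrying RPC to %s (attempt %d)", "db-primary", 3)
}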
Example 2: terminateHealthChecks
// terminateHealthChecks is called when we enter lame duck mode.
// We clean up our state and shut down the query service.
// We only act if we are in the targetTabletType state, and then
// we just go to spare.
func (agent *ActionAgent) terminateHealthChecks(targetTabletType pbt.TabletType) {
agent.actionMutex.Lock()
defer agent.actionMutex.Unlock()
log.Info("agent.terminateHealthChecks is starting")
// read the current tablet record
tablet := agent.Tablet()
if tablet.Type != targetTabletType {
log.Infof("Tablet in state %v, not changing it", tablet.Type)
return
}
// Change the Type to spare, update the health. Note we pass in a map
// that's not nil, meaning we will clear it.
if err := topotools.ChangeType(agent.batchCtx, agent.TopoServer, tablet.Alias, pbt.TabletType_SPARE, make(map[string]string)); err != nil {
log.Infof("Error updating tablet record: %v", err)
return
}
// Update the serving graph in our cell, only if we're dealing with
// a serving type
if err := agent.updateServingGraph(tablet, targetTabletType); err != nil {
log.Warningf("updateServingGraph failed (will still run post action callbacks, serving graph might be out of date): %v", err)
}
// We've already rebuilt the shard, which is the only reason we registered
// ourselves as OnTermSync (synchronous). The rest can be done asynchronously.
go func() {
// Run the post action callbacks (let them shutdown the query service)
if err := agent.refreshTablet(agent.batchCtx, "terminatehealthcheck"); err != nil {
log.Warningf("refreshTablet failed: %v", err)
}
}()
}
Example 3: UnmountPath
// UnmountPath is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
func UnmountPath(mountPath string, mounter mount.Interface) error {
if pathExists, pathErr := PathExists(mountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
if err != nil {
return err
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
// Unmount the mount path
if err := mounter.Unmount(mountPath); err != nil {
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
if mntErr != nil {
return mntErr
}
if notMnt {
glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return nil
}
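For comparison, the same skip-if-missing, unmount, then remove-directory flow can be sketched with only the standard library (Linux-only, and omitting the IsLikelyNotMountPoint heuristic that mount.Interface provides):

//go:build linux

package mountutil

import (
	"fmt"
	"os"
	"syscall"
)

// unmountPath unmounts mountPath and removes the leftover directory,
// treating a missing path as already cleaned up.
func unmountPath(mountPath string) error {
	if _, err := os.Stat(mountPath); os.IsNotExist(err) {
		fmt.Printf("unmount skipped, path does not exist: %v\n", mountPath)
		return nil
	} else if err != nil {
		return fmt.Errorf("error checking if path exists: %v", err)
	}
	if err := syscall.Unmount(mountPath, 0); err != nil {
		return err
	}
	return os.Remove(mountPath)
}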
Example 4: warnNoRsync
func warnNoRsync() {
if isWindows() {
glog.Warningf(noRsyncWindowsWarning)
return
}
glog.Warningf(noRsyncUnixWarning)
}
Example 5: generateEvents
// generateEvents is a helper function that generates some container
// life cycle events for containers in a pod.
func (r *runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, failure error) {
// Set up container references.
for _, c := range runtimePod.Containers {
containerID := string(c.ID)
id, err := parseContainerID(containerID)
if err != nil {
glog.Warningf("Invalid container ID %q", containerID)
continue
}
ref, ok := r.containerRefManager.GetRef(containerID)
if !ok {
glog.Warningf("No ref for container %q", containerID)
continue
}
// Note that 'rkt id' is the pod id.
uuid := util.ShortenString(id.uuid, 8)
switch reason {
case "Created":
r.recorder.Eventf(ref, "Created", "Created with rkt id %v", uuid)
case "Started":
r.recorder.Eventf(ref, "Started", "Started with rkt id %v", uuid)
case "Failed":
r.recorder.Eventf(ref, "Failed", "Failed to start with rkt id %v with error %v", uuid, failure)
case "Killing":
r.recorder.Eventf(ref, "Killing", "Killing with rkt id %v", uuid)
default:
glog.Errorf("rkt: Unexpected event %q", reason)
}
}
}
Example 6: SendAppMessage
// SendAppMessage publishes amsg to the user's channel and dispatches it to
// every connected client; it returns false when no route or client is found.
func SendAppMessage(amsg *AppMessage, uid int64) bool {
channel := GetChannel(uid)
channel.Publish(amsg)
route := app_route.FindRoute(amsg.appid)
if route == nil {
log.Warningf("can't dispatch app message, appid:%d uid:%d cmd:%s", amsg.appid, amsg.receiver, Command(amsg.msg.cmd))
return false
}
clients := route.FindClientSet(uid)
if len(clients) == 0 {
log.Warningf("can't dispatch app message, appid:%d uid:%d cmd:%s", amsg.appid, amsg.receiver, Command(amsg.msg.cmd))
return false
}
for c := range clients {
if amsg.msgid > 0 {
c.ewt <- &EMessage{msgid: amsg.msgid, msg: amsg.msg}
} else {
c.wt <- amsg.msg
}
}
return true
}
Example 7: setKeyspaceShardingInfo
// setKeyspaceShardingInfo updates the sharding column name and type for a
// keyspace, refusing to change existing values unless force is set.
func (wr *Wrangler) setKeyspaceShardingInfo(keyspace, shardingColumnName string, shardingColumnType key.KeyspaceIdType, force bool) error {
ki, err := wr.ts.GetKeyspace(keyspace)
if err != nil {
return err
}
if ki.ShardingColumnName != "" && ki.ShardingColumnName != shardingColumnName {
if force {
log.Warningf("Forcing keyspace ShardingColumnName change from %v to %v", ki.ShardingColumnName, shardingColumnName)
} else {
return fmt.Errorf("Cannot change ShardingColumnName from %v to %v (use -force to override)", ki.ShardingColumnName, shardingColumnName)
}
}
if ki.ShardingColumnType != key.KIT_UNSET && ki.ShardingColumnType != shardingColumnType {
if force {
log.Warningf("Forcing keyspace ShardingColumnType change from %v to %v", ki.ShardingColumnType, shardingColumnType)
} else {
return fmt.Errorf("Cannot change ShardingColumnType from %v to %v (use -force to override)", ki.ShardingColumnType, shardingColumnType)
}
}
ki.ShardingColumnName = shardingColumnName
ki.ShardingColumnType = shardingColumnType
return topo.UpdateKeyspace(wr.ts, ki)
}
Example 8: executorRefs
// executorRefs returns a slice of references to the running executors known to this framework
func (k *framework) executorRefs() []executorRef {
slaves := k.slaveHostNames.SlaveIDs()
refs := make([]executorRef, 0, len(slaves))
for _, slaveID := range slaves {
hostname := k.slaveHostNames.HostName(slaveID)
if hostname == "" {
log.Warningf("hostname lookup for slaveID %q failed", slaveID)
continue
}
node := k.lookupNode(hostname)
if node == nil {
log.Warningf("node lookup for slaveID %q failed", slaveID)
continue
}
eid, ok := node.Annotations[meta.ExecutorIdKey]
if !ok {
log.Warningf("unable to find %q annotation for node %v", meta.ExecutorIdKey, node)
continue
}
refs = append(refs, executorRef{
executorID: mutil.NewExecutorID(eid),
slaveID: mutil.NewSlaveID(slaveID),
})
}
return refs
}
Example 9: ListRuntimeInfo
// ListRuntimeInfo lists L7RuntimeInfo as understood by the loadbalancer module.
func (lbc *LoadBalancerController) ListRuntimeInfo() (lbs []*loadbalancers.L7RuntimeInfo, err error) {
ingList, err := lbc.ingLister.List()
if err != nil {
return lbs, err
}
for _, ing := range ingList.Items {
k, err := keyFunc(&ing)
if err != nil {
glog.Warningf("Cannot get key for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
continue
}
tls, err := lbc.tlsLoader.load(&ing)
if err != nil {
glog.Warningf("Cannot get certs for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
}
annotations := ingAnnotations(ing.ObjectMeta.Annotations)
lbs = append(lbs, &loadbalancers.L7RuntimeInfo{
Name: k,
TLS: tls,
AllowHTTP: annotations.allowHTTP(),
StaticIPName: annotations.staticIPName(),
})
}
return lbs, nil
}
Example 10: RegisterTemp
// RegisterTemp creates an ephemeral node and watches it; if the node is dropped, it sends a SIGQUIT to the current process.
func RegisterTemp(conn *zk.Conn, fpath, data string) error {
tpath, err := conn.Create(path.Join(fpath)+"/", []byte(data), zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
if err != nil {
glog.Errorf("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, data, err)
return err
}
glog.V(1).Infof("create a zookeeper node:%s", tpath)
// watch self
go func() {
for {
glog.Infof("zk path: \"%s\" set a watch", tpath)
exist, _, watch, err := conn.ExistsW(tpath)
if err != nil {
glog.Errorf("zk.ExistsW(\"%s\") error(%v)", tpath, err)
glog.Warningf("zk path: \"%s\" set watch failed, kill itself", tpath)
killSelf()
return
}
if !exist {
glog.Warningf("zk path: \"%s\" not exist, kill itself", tpath)
killSelf()
return
}
event := <-watch
glog.Infof("zk path: \"%s\" receive a event %v", tpath, event)
}
}()
return nil
}
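A hypothetical call site for RegisterTemp; the example's imports are not shown, so the assumption here is that zk is github.com/samuel/go-zookeeper/zk, and the ensemble address and node path below are placeholders:

package main

import (
	"flag"
	"time"

	"github.com/golang/glog"
	"github.com/samuel/go-zookeeper/zk"
)

func main() {
	flag.Parse()
	defer glog.Flush()
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 5*time.Second)
	if err != nil {
		glog.Fatalf("zk.Connect error(%v)", err)
	}
	defer conn.Close()
	// RegisterTemp (from the example above) registers this instance under an
	// ephemeral sequential node and watches it in a background goroutine.
	if err := RegisterTemp(conn, "/myapp/nodes", "10.0.0.1:8080"); err != nil {
		glog.Fatalf("RegisterTemp error(%v)", err)
	}
	select {} // keep the process alive so the watch goroutine can run
}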
Example 11: Start
// Start kicks off two background loops: one that periodically detects images
// for garbage collection, and one that refreshes the image cache.
func (im *realImageGCManager) Start() error {
go wait.Until(func() {
// On the initial detection, leave ts at the zero value so the detected time reads as "unknown" (in the past).
var ts time.Time
if im.initialized {
ts = time.Now()
}
err := im.detectImages(ts)
if err != nil {
glog.Warningf("[imageGCManager] Failed to monitor images: %v", err)
} else {
im.initialized = true
}
}, 5*time.Minute, wait.NeverStop)
// Start a goroutine that periodically updates the image cache.
// TODO(random-liu): Merge this with the previous loop.
go wait.Until(func() {
images, err := im.runtime.ListImages()
if err != nil {
glog.Warningf("[imageGCManager] Failed to update image list: %v", err)
} else {
im.imageCache.set(images)
}
}, 30*time.Second, wait.NeverStop)
return nil
}
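Both loops depend on wait.Until (from the kubernetes codebase), which runs the function once immediately and then on every period until the stop channel closes. A standard-library-only stand-in that mimics that contract:

package main

import (
	"fmt"
	"time"
)

// until runs f immediately, then once per period, until stop is closed.
func until(f func(), period time.Duration, stop <-chan struct{}) {
	for {
		f()
		select {
		case <-stop:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stop := make(chan struct{})
	go until(func() { fmt.Println("refreshing image cache") }, 50*time.Millisecond, stop)
	time.Sleep(220 * time.Millisecond)
	close(stop)
	time.Sleep(50 * time.Millisecond) // give the goroutine time to observe stop
}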
Example 12: handleQReq
// PDNS will query for "ANY" no matter what record type the client
// has asked for. Thus, we need to return data for all record
// types. PDNS will then filter for what the client needs. PDNS is
// sensitive to the order in which records are returned. If you
// return a CNAME first, it returns the CNAME for all queries.
// The DNS spec says you should not have conflicts between
// CNAME/SRV records, so this really shouldn't be an issue.
func (pd *pdns) handleQReq(req *pdnsReq) (lines []string, err error) {
// The default search is for "ANY", however we do not need to
// explicitly search for CNAME since that is implicitly handled in
// an A request.
qtypes := []string{"SOA", "SRV", "A"}
if req.qtype != "ANY" {
qtypes = []string{req.qtype}
}
lines = make([]string, 0, 16)
for _, qtype := range qtypes {
replies, err := pd.zr.getResult(qtype, req.qname)
if err != nil {
// If we aren't even the authority, we might as well give up.
if qtype == "SOA" {
return nil, err
}
log.Warningf("query failed %v %v: %v", qtype, req.qname, err)
continue
}
for _, reply := range replies {
lines = append(lines, reply.fmtReply())
}
}
if len(lines) == 0 {
emptyCount.Add(1)
log.Warningf("no results for %v %v", req.qtype, req.qname)
}
return lines, nil
}
Example 13: effectiveHairpinMode
// effectiveHairpinMode determines the effective hairpin mode given the
// configured mode, container runtime, and whether cbr0 should be configured.
func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, configureCBR0 bool, networkPlugin string) (componentconfig.HairpinMode, error) {
// The hairpin mode setting doesn't matter if:
// - We're not using a bridge network. This is hard to check because we might
// be using a plugin. It matters if --configure-cbr0=true, and we currently
// don't pipe it down to any plugins.
// - It's set to hairpin-veth for a container runtime that doesn't know how
// to set the hairpin flag on the veth's of containers. Currently the
// docker runtime is the only one that understands this.
// - It's set to "none".
if hairpinMode == componentconfig.PromiscuousBridge || hairpinMode == componentconfig.HairpinVeth {
// Only on docker.
if containerRuntime != "docker" {
glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime)
return componentconfig.HairpinNone, nil
}
if hairpinMode == componentconfig.PromiscuousBridge && !configureCBR0 && networkPlugin != "kubenet" {
// This is not a valid combination. Users might be using the
// default values (from before the hairpin-mode flag existed) and we
// should keep the old behavior.
glog.Warningf("Hairpin mode set to %q but configureCBR0 is false, falling back to %q", hairpinMode, componentconfig.HairpinVeth)
return componentconfig.HairpinVeth, nil
}
} else if hairpinMode == componentconfig.HairpinNone {
if configureCBR0 {
glog.Warningf("Hairpin mode set to %q and configureCBR0 is true, this might result in loss of hairpin packets", hairpinMode)
}
} else {
return "", fmt.Errorf("unknown value: %q", hairpinMode)
}
return hairpinMode, nil
}
Example 14: volumeTestCleanup
// volumeTestCleanup cleans up both the server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.prefix))
defer GinkgoRecover()
client := f.Client
podClient := client.Pods(config.namespace)
err := podClient.Delete(config.prefix+"-client", nil)
if err != nil {
// Log the error before failing test: if the test has already failed,
// framework.ExpectNoError() won't print anything to logs!
glog.Warningf("Failed to delete client pod: %v", err)
framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
}
if config.serverImage != "" {
if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// See issue #24100.
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
By("sleeping a bit so client can stop and unmount")
time.Sleep(20 * time.Second)
err = podClient.Delete(config.prefix+"-server", nil)
if err != nil {
glog.Warningf("Failed to delete server pod: %v", err)
framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
}
}
}
Example 15: LoadVSchema
// LoadVSchema loads the VSchema from the topo. The function does
// not return an error. It instead logs warnings on failure.
func (plr *Planner) LoadVSchema(ctx context.Context) {
formal := &vindexes.VSchemaFormal{
Keyspaces: make(map[string]vindexes.KeyspaceFormal),
}
keyspaces, err := plr.serv.GetSrvKeyspaceNames(ctx, plr.cell)
if err != nil {
log.Warningf("Error loading vschema: could not read keyspaces: %v", err)
return
}
for _, keyspace := range keyspaces {
formal.Keyspaces[keyspace] = vindexes.KeyspaceFormal{}
kschema, err := plr.serv.GetVSchema(ctx, keyspace)
if err != nil {
log.Warningf("Error loading vschema for keyspace: %s: %v", keyspace, err)
continue
}
var kformal vindexes.KeyspaceFormal
err = json.Unmarshal([]byte(kschema), &kformal)
if err != nil {
log.Warningf("Error unmarshalling vschema for keyspace: %s: %v", keyspace, err)
continue
}
formal.Keyspaces[keyspace] = kformal
}
vschema, err := vindexes.BuildVSchema(formal)
if err != nil {
log.Warningf("Error creating VSchema: %v", err)
return
}
plr.mu.Lock()
plr.vschema = vschema
plr.mu.Unlock()
}