This article collects typical usage examples of the Warningf function from the Golang package github.com/zenoss/glog. If you are wondering what Warningf does, how to call it, or what real-world uses look like, the curated examples below should help.
Fifteen code examples of the Warningf function are shown below, sorted by popularity by default.
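All of the examples rely on glog's printf-style API. Here is a minimal, hedged sketch of the basics, assuming the zenoss/glog fork keeps the same surface as the stock golang/glog package it derives from:

package main

import (
	"flag"

	"github.com/zenoss/glog"
)

func main() {
	flag.Parse() // glog registers its flags (e.g. -logtostderr, -v) in init
	// Warningf formats like fmt.Printf and logs at WARNING severity.
	glog.Warningf("disk usage at %d%% on %s", 91, "/var/lib")
	// V(2).Infof only emits when verbosity is 2 or higher (-v=2).
	glog.V(2).Infof("verbose detail: %v", []string{"a", "b"})
	glog.Flush() // glog buffers output; flush before exit
}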
Example 1: DFSVolumeMonitorPollUpdateFunc
// DFSVolumeMonitorPollUpdateFunc restarts nfs based on status of monitored remotes
func (m *Monitor) DFSVolumeMonitorPollUpdateFunc(mountpoint, remoteIP string, hasUpdatedFile bool) {
	// monitor dfs; log warnings each cycle; restart dfs if needed
	if hasUpdatedFile {
		return
	} else if len(m.getMonitorRemoteHosts()) == 0 {
		return
	}
	glog.Warningf("DFS NFS volume %s is not seen by remoteIP:%s - further action may be needed, e.g., restart nfs", mountpoint, remoteIP)
	now := time.Now()
	since := now.Sub(m.previousRestart)
	if !m.shouldRestart {
		glog.Warningf("Not restarting DFS NFS service due to configuration setting: SERVICED_MONITOR_DFS_MASTER_RESTART=0")
		return
	} else if since < m.monitorInterval {
		glog.Warningf("Not restarting DFS NFS service - have not surpassed interval: %s since last restart", m.monitorInterval)
		return
	}
	m.previousRestart = now
	if err := m.driver.Restart(); err != nil {
		glog.Errorf("Error restarting DFS NFS service: %s", err)
	}
}
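The restart guard in this example is a small rate-limit pattern that can be distilled on its own; a hedged sketch (the helper name is hypothetical):

// shouldRestartNow reports whether a restart is both enabled and due;
// a hypothetical distillation of the guard used above.
func shouldRestartNow(previous time.Time, interval time.Duration, enabled bool) bool {
	if !enabled {
		return false // e.g. SERVICED_MONITOR_DFS_MASTER_RESTART=0
	}
	return time.Since(previous) >= interval
}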
Example 2: remove
func (svc *IService) remove(notify chan<- int) {
	defer close(notify)
	ctr, err := docker.FindContainer(svc.name())
	if err == docker.ErrNoSuchContainer {
		return
	} else if err != nil {
		glog.Errorf("Could not get isvc container %s", svc.Name)
		return
	}
	// report the log output
	if output, err := exec.Command("docker", "logs", "--tail", "1000", ctr.ID).CombinedOutput(); err != nil {
		glog.Warningf("Could not get logs for container %s", ctr.Name)
	} else {
		glog.V(1).Infof("Exited isvc %s:\n %s", svc.Name, string(output))
	}
	// kill the container if it is running
	if ctr.IsRunning() {
		glog.Warningf("isvc %s is still running; killing", svc.Name)
		ctr.Kill()
	}
	// get the exit code
	rc, _ := ctr.Wait(time.Second)
	defer func() { notify <- rc }()
	// delete the container
	if err := ctr.Delete(true); err != nil {
		glog.Errorf("Could not remove isvc %s: %s", ctr.Name, err)
	}
}
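Since remove closes notify after sending the exit code (or without sending one when the container lookup fails), a caller can simply block on the channel; a hedged usage sketch:

// removeAndWait is a hypothetical caller: it launches remove and blocks
// until the exit code arrives or the channel closes without one.
func removeAndWait(svc *IService) (rc int, ok bool) {
	notify := make(chan int, 1)
	go svc.remove(notify)
	rc, ok = <-notify // ok is false if remove returned before ctr.Wait
	return rc, ok
}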
Example 3: addTemplates
func (d *daemon) addTemplates() {
	root := utils.LocalDir("templates")
	glog.V(1).Infof("Adding templates from %s", root)
	// Don't block startup for this. It's merely a convenience.
	go func() {
		err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info == nil || !strings.HasSuffix(info.Name(), ".json") {
				return nil
			}
			if info.IsDir() {
				return filepath.SkipDir
			}
			var reader io.ReadCloser
			if reader, err = os.Open(path); err != nil {
				glog.Warningf("Unable to open template %s", path)
				return nil
			}
			defer reader.Close()
			st := servicetemplate.ServiceTemplate{}
			if err := json.NewDecoder(reader).Decode(&st); err != nil {
				glog.Warningf("Unable to parse template file %s", path)
				return nil
			}
			glog.V(1).Infof("Adding service template %s", path)
			d.facade.AddServiceTemplate(d.dsContext, st)
			return nil
		})
		if err != nil {
			glog.Warningf("Not loading templates from %s: %s", root, err)
		}
	}()
}
Example 4: Post
// Post sends the list of stats to the TSDB.
func Post(destination string, stats []Sample) error {
	payload := map[string][]Sample{"metrics": stats}
	data, err := json.Marshal(payload)
	if err != nil {
		glog.Warningf("Couldn't marshal stats: %s", err)
		return err
	}
	statsreq, err := http.NewRequest("POST", destination, bytes.NewBuffer(data))
	if err != nil {
		glog.Warningf("Couldn't create stats request: %s", err)
		return err
	}
	statsreq.Header["User-Agent"] = []string{"Zenoss Metric Publisher"}
	statsreq.Header["Content-Type"] = []string{"application/json"}
	resp, reqerr := http.DefaultClient.Do(statsreq)
	if reqerr != nil {
		glog.Warningf("Couldn't post stats: %s", reqerr)
		return reqerr
	}
	defer resp.Body.Close()
	if !strings.Contains(resp.Status, "200 OK") {
		glog.Warningf("Couldn't post stats: %s", resp.Status)
		return nil
	}
	return nil
}
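A hedged usage sketch; the Sample field names below (Metric, Value, Timestamp, Tags) are assumptions, since the type's definition is not part of this excerpt:

// postOneSample is hypothetical; the destination points at an
// OpenTSDB-style HTTP endpoint.
func postOneSample() {
	samples := []Sample{{
		Metric:    "cpu.user",
		Value:     "42",
		Timestamp: time.Now().Unix(),
		Tags:      map[string]string{"host": "node-1"},
	}}
	if err := Post("http://localhost:4242/api/put", samples); err != nil {
		glog.Warningf("stats post failed: %s", err)
	}
}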
Example 5: UpdateRemoteMonitorFile
// UpdateRemoteMonitorFile is used by remote clients to write a tiny file to the DFS volume at the given cycle
func UpdateRemoteMonitorFile(localPath string, writeInterval time.Duration, ipAddr string, shutdown <-chan interface{}) {
	monitorPath := path.Join(localPath, monitorSubDir)
	remoteFile := path.Join(localPath, monitorSubDir, ipAddr)
	glog.Infof("updating DFS volume monitor file %s at write interval: %s", remoteFile, writeInterval)
	for {
		glog.V(2).Infof("checking DFS monitor path %s", monitorPath)
		_, err := os.Stat(monitorPath)
		if err != nil {
			glog.V(2).Infof("unable to stat DFS monitor path: %s %s", monitorPath, err)
			if err := os.MkdirAll(monitorPath, 0755); err != nil {
				glog.Warningf("unable to create DFS volume monitor path %s: %s", monitorPath, err)
			} else {
				glog.Infof("created DFS volume monitor path %s", monitorPath)
			}
		}
		glog.V(2).Infof("writing DFS file %s", remoteFile)
		if err := ioutil.WriteFile(remoteFile, []byte(ipAddr), 0600); err != nil {
			glog.Warningf("unable to write DFS file %s: %s", remoteFile, err)
		}
		// wait for next cycle or shutdown
		select {
		case <-time.After(writeInterval):
		case <-shutdown:
			glog.Infof("no longer writing remote monitor status for DFS volume %s to %s", localPath, remoteFile)
			return
		}
	}
}
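A hedged sketch of driving the writer and shutting it down; the path and IP are placeholders:

func runMonitorWriter() {
	shutdown := make(chan interface{})
	go UpdateRemoteMonitorFile("/exports/serviced_volumes", 30*time.Second, "10.0.0.12", shutdown)
	// ... later, when the agent stops:
	close(shutdown) // the writer logs a final message and returns
}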
Example 6: start
// start schedules the given service instances with the provided instance IDs.
func (l *ServiceListener) start(svc *service.Service, instanceIDs []int) int {
	var i, id int
	for i, id = range instanceIDs {
		if success := func(instanceID int) bool {
			glog.V(2).Infof("Waiting to acquire scheduler lock for service %s (%s)", svc.Name, svc.ID)
			// only one service instance can be scheduled at a time
			l.Lock()
			defer l.Unlock()
			// If the service lock is enabled, do not try to start the service instance
			glog.V(2).Infof("Scheduler lock acquired for service %s (%s); checking service lock", svc.Name, svc.ID)
			if locked, err := IsServiceLocked(l.conn); err != nil {
				glog.Errorf("Could not check service lock: %s", err)
				return false
			} else if locked {
				glog.Warningf("Could not start instance %d; service %s (%s) is locked", instanceID, svc.Name, svc.ID)
				return false
			}
			glog.V(2).Infof("Service is not locked, selecting a host for service %s (%s) #%d", svc.Name, svc.ID, id)
			host, err := l.handler.SelectHost(svc)
			if err != nil {
				glog.Warningf("Could not assign a host to service %s (%s): %s", svc.Name, svc.ID, err)
				return false
			}
			glog.V(2).Infof("Host %s found, building service instance %d for %s (%s)", host.ID, id, svc.Name, svc.ID)
			state, err := servicestate.BuildFromService(svc, host.ID)
			if err != nil {
				glog.Warningf("Error creating service state for service %s (%s): %s", svc.Name, svc.ID, err)
				return false
			}
			state.HostIP = host.IPAddr
			state.InstanceID = instanceID
			if err := addInstance(l.conn, *state); err != nil {
				glog.Warningf("Could not add service instance %s for service %s (%s): %s", state.ID, svc.Name, svc.ID, err)
				return false
			}
			glog.V(2).Infof("Starting service instance %s for service %s (%s) on host %s", state.ID, svc.Name, svc.ID, host.ID)
			return true
		}(id); !success {
			// 'i' is the index of the instance that failed, which equals the
			// number of instances started successfully: if two instances
			// started and the third attempt failed, i == 2.
			glog.Warningf("Started %d of %d service instances for %s (%s)", i, len(instanceIDs), svc.Name, svc.ID)
			return i
		}
	}
	// add 1 because the index of the last instance 'i' would be len(instanceIDs) - 1
	return i + 1
}
Example 7: Start
// Start starts a group of listeners that are governed by a master listener.
// When the master exits, it shuts down all of the child listeners and waits
// for all of the subprocesses to exit
func Start(shutdown <-chan interface{}, conn client.Connection, master Listener, listeners ...Listener) {
	// shutdown the parent and child listeners
	_shutdown := make(chan interface{})
	// start the master
	masterDone := make(chan struct{})
	defer func() { <-masterDone }()
	masterReady := make(chan error, 1)
	go func() {
		defer close(masterDone)
		Listen(_shutdown, masterReady, conn, master)
	}()
	// wait for the master to be ready and then start the slaves
	var childDone chan struct{}
	select {
	case err := <-masterReady:
		if err != nil {
			glog.Errorf("master listener at %s failed to start: %s", master.GetPath(), err)
			return
		}
		childDone = make(chan struct{})
		defer func() { <-childDone }()
		go func() {
			defer close(childDone)
			// this handles restarts; retryLimit to reduce flapping
			for i := 0; i <= retryLimit; i++ {
				start(_shutdown, conn, listeners...)
				select {
				case <-_shutdown:
					return
				default:
					glog.Warningf("Restarting child listeners for master at %s", master.GetPath())
				}
			}
			glog.Warningf("Shutting down master listener at %s; child listeners exceeded retry limit", master.GetPath())
		}()
	case <-masterDone:
	case <-shutdown:
	}
	defer close(_shutdown)
	select {
	case <-masterDone:
		glog.Warningf("Master listener at %s died prematurely; shutting down", master.GetPath())
	case <-childDone:
		glog.Warningf("Child listeners for master %s died prematurely; shutting down", master.GetPath())
	case <-shutdown:
		glog.Infof("Received signal to shutdown for master listener %s", master.GetPath())
	}
}
Example 8: addEndpoint
// addEndpoint adds a mapping to the defined application. If a mapping does not
// exist, this method creates the list and adds the first element.
func (a *HostAgent) addEndpoint(key string, endpoint dao.ApplicationEndpoint, endpoints map[string][]dao.ApplicationEndpoint) {
	if _, ok := endpoints[key]; !ok {
		endpoints[key] = make([]dao.ApplicationEndpoint, 0)
	} else {
		if len(endpoints[key]) > 0 {
			glog.Warningf("Service %s has duplicate internal endpoint for key %s len(endpointList)=%d", endpoint.ServiceID, key, len(endpoints[key]))
			for _, ep := range endpoints[key] {
				glog.Warningf(" %+v", ep)
			}
		}
	}
	endpoints[key] = append(endpoints[key], endpoint)
}
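Because append on a nil slice allocates automatically, the explicit make only matters for distinguishing the duplicate-warning branch; a hedged, behavior-preserving condensation (minus the per-endpoint dump):

// addEndpointSketch is a hypothetical condensed variant.
func addEndpointSketch(key string, ep dao.ApplicationEndpoint, endpoints map[string][]dao.ApplicationEndpoint) {
	if n := len(endpoints[key]); n > 0 {
		glog.Warningf("Service %s has duplicate internal endpoint for key %s (%d existing)", ep.ServiceID, key, n)
	}
	endpoints[key] = append(endpoints[key], ep) // append handles the nil slice
}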
Example 9: UpdateService
func (this *ControlPlaneDao) UpdateService(svc service.Service, unused *int) error {
	if err := this.facade.UpdateService(datastore.Get(), svc); err != nil {
		return err
	}
	// Create the tenant volume
	if tenantID, err := this.facade.GetTenantID(datastore.Get(), svc.ID); err != nil {
		glog.Warningf("Could not get tenant for service %s: %s", svc.ID, err)
	} else if _, err := this.dfs.GetVolume(tenantID); err != nil {
		glog.Warningf("Could not create volume for tenant %s: %s", tenantID, err)
	}
	return nil
}
Example 10: desynchronize
func (dfs *DistributedFilesystem) desynchronize(image *docker.Image) error {
	// inspect the image
	dImg, err := image.Inspect()
	if err != nil {
		glog.Errorf("Could not inspect image %s (%s): %s", image.ID, image.UUID, err)
		return err
	}
	// look up services for that tenant
	svcs, err := dfs.facade.GetServices(datastore.Get(), dao.ServiceRequest{TenantID: image.ID.User})
	if err != nil {
		glog.Errorf("Could not get services for tenant %s from %s (%s): %s", image.ID.User, image.ID, image.UUID, err)
		return err
	}
	for _, svc := range svcs {
		// figure out which services are using the provided image
		svcImageID, err := commons.ParseImageID(svc.ImageID)
		if err != nil {
			glog.Warningf("Could not parse image %s for %s (%s): %s", svc.ImageID, svc.Name, svc.ID, err)
			continue
		} else if !svcImageID.Equals(image.ID) {
			continue
		}
		// TODO: we need to switch to using dao.ControlPlane
		conn, err := zzk.GetLocalConnection(zzk.GeneratePoolPath(svc.PoolID))
		if err != nil {
			glog.Warningf("Could not acquire connection to the coordinator (%s): %s", svc.PoolID, err)
			continue
		}
		states, err := zkservice.GetServiceStates(conn, svc.ID)
		if err != nil {
			glog.Warningf("Could not get running services for %s (%s): %s", svc.Name, svc.ID, err)
			continue
		}
		for _, state := range states {
			// check if the instance has been running since before the commit
			if state.IsRunning() && state.Started.Before(dImg.Created) {
				state.InSync = false
				if err := zkservice.UpdateServiceState(conn, &state); err != nil {
					glog.Warningf("Could not update service state %s for %s (%s) as out of sync: %s", state.ID, svc.Name, svc.ID, err)
					continue
				}
			}
		}
	}
	return nil
}
Example 11: connect
// connect returns a connection object or times out trying
func (zconn *zconn) connect(timeout time.Duration) (client.Connection, error) {
	connC := make(chan client.Connection, 1)
	zconn.connC <- connC
	select {
	case conn := <-connC:
		return conn, nil
	case <-time.After(timeout):
		glog.Warningf("timed out waiting for connection")
		return nil, ErrTimeout
	case <-zconn.shutdownC:
		glog.Warningf("received signal to shutdown")
		return nil, ErrShutdown
	}
}
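A hedged usage sketch; ErrTimeout and ErrShutdown are the sentinel errors returned above, and the Children call is just a liveness probe:

// useConnection is hypothetical: acquire a connection with a deadline,
// probe it, and release it.
func useConnection(zc *zconn) error {
	conn, err := zc.connect(10 * time.Second)
	if err != nil {
		return err // ErrTimeout or ErrShutdown
	}
	defer conn.Close()
	_, err = conn.Children("/")
	return err
}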
Example 12: monitor
// monitor checks for changes in a path-based connection
func (zconn *zconn) monitor(path string) {
	var (
		connC chan<- client.Connection
		conn  client.Connection
		err   error
	)
	defer func() {
		if conn != nil {
			conn.Close()
		}
	}()
	for {
		// wait for someone to request a connection, or shutdown
		select {
		case connC = <-zconn.connC:
		case <-zconn.shutdownC:
			return
		}
	retry:
		// create a connection if it doesn't exist or ping the existing connection
		if conn == nil {
			conn, err = zconn.client.GetCustomConnection(path)
			if err != nil {
				glog.Warningf("Could not obtain a connection to %s: %s", path, err)
			}
		} else if _, err := conn.Children("/"); err == client.ErrConnectionClosed {
			glog.Warningf("Could not ping connection to %s: %s", path, err)
			conn = nil
		}
		// send the connection back
		if conn != nil {
			connC <- conn
			continue
		}
		// if conn is nil, try to create a new connection
		select {
		case <-time.After(time.Second):
			glog.Infof("Refreshing connection to zookeeper")
			goto retry
		case <-zconn.shutdownC:
			return
		}
	}
}
Example 13: startRemote
func (s *scheduler) startRemote(cancel <-chan struct{}, remote, local client.Connection) <-chan interface{} {
	var (
		shutdown = make(chan interface{})
		done     = make(chan interface{})
	)
	// wait to receive a cancel channel or a done channel and shutdown
	go func() {
		defer close(shutdown)
		select {
		case <-cancel:
		case <-done:
		}
	}()
	// start the listeners and wait for shutdown or for something to break
	go func() {
		defer close(done)
		glog.Infof("Remote connection established; synchronizing")
		zzk.Start(shutdown, remote, nil, s.getPoolSynchronizer(), s.getEndpointSynchronizer(local))
		glog.Warningf("Running in disconnected mode")
	}()
	// indicate when the listeners are finished
	return done
}
Example 14: StopServiceInstance
// StopServiceInstance stops a host state instance
func StopServiceInstance(conn client.Connection, hostID, stateID string) error {
	// verify that the host is active
	var isActive bool
	hostIDs, err := GetActiveHosts(conn)
	if err != nil {
		glog.Warningf("Could not verify if host %s is active: %s", hostID, err)
		isActive = false
	} else {
		for _, hid := range hostIDs {
			if isActive = hid == hostID; isActive {
				break
			}
		}
	}
	if isActive {
		// try to stop the instance nicely
		return updateInstance(conn, hostID, stateID, func(hsdata *HostState, _ *ss.ServiceState) {
			glog.V(2).Infof("Stopping service instance via %s host %s", stateID, hostID)
			hsdata.DesiredState = int(service.SVCStop)
		})
	} else {
		// if the host isn't active, then remove the instance
		var hs HostState
		if err := conn.Get(hostpath(hostID, stateID), &hs); err != nil {
			glog.Errorf("Could not look up host instance %s on host %s: %s", stateID, hostID, err)
			return err
		}
		return removeInstance(conn, hs.ServiceID, hs.HostID, hs.ServiceStateID)
	}
}
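A hedged usage sketch; the host and state IDs are placeholders:

// stopOne is hypothetical: request a graceful stop (or removal, if the
// host is inactive) for a single instance.
func stopOne(conn client.Connection) {
	if err := StopServiceInstance(conn, "hostid-1", "stateid-1"); err != nil {
		glog.Errorf("could not stop instance: %s", err)
	}
}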
Example 15: acceptor
func (mux *TCPMux) acceptor(listener net.Listener, closing chan chan struct{}) {
	defer func() {
		close(mux.connections)
	}()
	for {
		conn, err := mux.listener.Accept()
		if err != nil {
			if strings.Contains(err.Error(), "too many open files") {
				glog.Warningf("error accepting connections, retrying in 50 ms: %s", err)
				select {
				case <-closing:
					glog.V(5).Info("shutting down acceptor")
					return
				case <-time.After(time.Millisecond * 50):
					continue
				}
			}
			glog.Errorf("shutting down acceptor: %s", err)
			return
		}
		glog.V(5).Infof("accepted connection: %v", conn)
		select {
		case <-closing:
			glog.V(5).Info("shutting down acceptor")
			conn.Close()
			return
		case mux.connections <- conn:
		}
	}
}
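The "too many open files" branch above retries on a fixed 50 ms timer; exponential backoff is a common alternative. A hedged standalone sketch (not what serviced itself does):

// acceptWithBackoff retries transient "too many open files" errors with
// exponential backoff, capped at one second.
func acceptWithBackoff(l net.Listener) (net.Conn, error) {
	delay := 50 * time.Millisecond
	for {
		conn, err := l.Accept()
		if err != nil && strings.Contains(err.Error(), "too many open files") {
			glog.Warningf("accept failed, retrying in %s: %s", delay, err)
			time.Sleep(delay)
			if delay < time.Second {
				delay *= 2
			}
			continue
		}
		return conn, err
	}
}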