This article collects typical code examples of the Errorf function from the Golang package github.com/golang/glog. If you are wondering what Golang's Errorf function does, how to call it, or what real-world uses of Errorf look like, the hand-picked examples below should help.
Fifteen code examples of the Errorf function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
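Before the project examples, here is a minimal, self-contained sketch of how glog.Errorf is typically wired up (this snippet is illustrative and not taken from any of the projects below; the doWork helper and its error message are invented for demonstration): glog registers its flags with the standard flag package, so flag.Parse should run before the first log call, Errorf takes a Printf-style format string, and Flush writes any buffered entries before exit.

package main

import (
    "errors"
    "flag"

    "github.com/golang/glog"
)

// doWork is a hypothetical helper that always fails, so the example has something to log.
func doWork() error {
    return errors.New("example failure")
}

func main() {
    // glog registers its command-line flags (e.g. -logtostderr, -v) with the
    // standard flag package, so flag.Parse must be called before logging.
    flag.Parse()
    // Flush any buffered log entries before the program exits.
    defer glog.Flush()

    // Errorf formats its arguments like fmt.Printf and logs at ERROR severity.
    if err := doWork(); err != nil {
        glog.Errorf("doWork failed: %v", err)
    }
}

Run it with -logtostderr=true to see the message on standard error instead of in log files.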
Example 1: cleanup
func (c *glusterfsCleaner) cleanup(dir string) error {
    mountpoint, err := c.mounter.IsMountPoint(dir)
    if err != nil {
        glog.Errorf("Glusterfs: Error checking IsMountPoint: %v", err)
        return err
    }
    if !mountpoint {
        return os.RemoveAll(dir)
    }
    if err := c.mounter.Unmount(dir); err != nil {
        glog.Errorf("Glusterfs: Unmounting failed: %v", err)
        return err
    }
    mountpoint, mntErr := c.mounter.IsMountPoint(dir)
    if mntErr != nil {
        glog.Errorf("Glusterfs: IsMountpoint check failed: %v", mntErr)
        return mntErr
    }
    if !mountpoint {
        if err := os.RemoveAll(dir); err != nil {
            return err
        }
    }
    return nil
}
Example 2: getServersToValidate
func (m *Master) getServersToValidate(c *Config) map[string]apiserver.Server {
    serversToValidate := map[string]apiserver.Server{
        "controller-manager": {Addr: "127.0.0.1", Port: ports.ControllerManagerPort, Path: "/healthz"},
        "scheduler":          {Addr: "127.0.0.1", Port: ports.SchedulerPort, Path: "/healthz"},
    }
    for ix, machine := range c.DatabaseStorage.Backends() {
        etcdUrl, err := url.Parse(machine)
        if err != nil {
            glog.Errorf("Failed to parse etcd url for validation: %v", err)
            continue
        }
        var port int
        var addr string
        if strings.Contains(etcdUrl.Host, ":") {
            var portString string
            addr, portString, err = net.SplitHostPort(etcdUrl.Host)
            if err != nil {
                glog.Errorf("Failed to split host/port: %s (%v)", etcdUrl.Host, err)
                continue
            }
            port, _ = strconv.Atoi(portString)
        } else {
            addr = etcdUrl.Host
            port = 4001
        }
        serversToValidate[fmt.Sprintf("etcd-%d", ix)] = apiserver.Server{Addr: addr, Port: port, Path: "/health", Validate: etcdstorage.EtcdHealthCheck}
    }
    return serversToValidate
}
Example 3: Attach
// Attach checks with the GCE cloud provider if the specified volume is already
// attached to the node with the specified Name.
// If the volume is attached, it succeeds (returns nil).
// If it is not, Attach issues a call to the GCE cloud provider to attach it.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
    volumeSource, readOnly, err := getVolumeSource(spec)
    if err != nil {
        return "", err
    }
    pdName := volumeSource.PDName
    attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName)
    if err != nil {
        // Log error and continue with attach
        glog.Errorf(
            "Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v",
            pdName, nodeName, err)
    }
    if err == nil && attached {
        // Volume is already attached to node.
        glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
    } else {
        if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
            glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
            return "", err
        }
    }
    return path.Join(diskByIdPath, diskGooglePrefix+pdName), nil
}
Example 4: TearDownAt
func (c *nfsCleaner) TearDownAt(dir string) error {
    mountpoint, err := c.mounter.IsMountPoint(dir)
    if err != nil {
        glog.Errorf("Error checking IsMountPoint: %v", err)
        return err
    }
    if !mountpoint {
        return os.Remove(dir)
    }
    if err := c.mounter.Unmount(dir); err != nil {
        glog.Errorf("Unmounting failed: %v", err)
        return err
    }
    mountpoint, mntErr := c.mounter.IsMountPoint(dir)
    if mntErr != nil {
        glog.Errorf("IsMountpoint check failed: %v", mntErr)
        return mntErr
    }
    if !mountpoint {
        if err := os.Remove(dir); err != nil {
            return err
        }
    }
    return nil
}
Example 5: deleteLocalSubnetRoute
func deleteLocalSubnetRoute(device, localSubnetCIDR string) {
    const (
        timeInterval = 100 * time.Millisecond
        maxIntervals = 20
    )
    for i := 0; i < maxIntervals; i++ {
        itx := ipcmd.NewTransaction(device)
        routes, err := itx.GetRoutes()
        if err != nil {
            glog.Errorf("Could not get routes for dev %s: %v", device, err)
            return
        }
        for _, route := range routes {
            if strings.Contains(route, localSubnetCIDR) {
                itx.DeleteRoute(localSubnetCIDR)
                err = itx.EndTransaction()
                if err != nil {
                    glog.Errorf("Could not delete subnet route %s from dev %s: %v", localSubnetCIDR, device, err)
                }
                return
            }
        }
        time.Sleep(timeInterval)
    }
    glog.Errorf("Timed out looking for %s route for dev %s; if it appears later it will not be deleted.", localSubnetCIDR, device)
}
Example 6: updateClaim
// updateClaim is callback from framework.Controller watching PersistentVolumeClaim
// events.
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
    // Store the new claim version in the cache and do not process it if this is
    // an old version.
    new, err := storeObjectUpdate(ctrl.claims, newObj, "claim")
    if err != nil {
        glog.Errorf("%v", err)
    }
    if !new {
        return
    }
    newClaim, ok := newObj.(*api.PersistentVolumeClaim)
    if !ok {
        glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj)
        return
    }
    if err := ctrl.syncClaim(newClaim); err != nil {
        if errors.IsConflict(err) {
            // Version conflict error happens quite often and the controller
            // recovers from it easily.
            glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
        } else {
            glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
        }
    }
}
Example 7: initializeCaches
// initializeCaches fills all controller caches with initial data from etcd in
// order to have the caches already filled when first addClaim/addVolume to
// perform initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) {
    volumeListObj, err := volumeSource.List(api.ListOptions{})
    if err != nil {
        glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
        return
    }
    volumeList, ok := volumeListObj.(*api.List)
    if !ok {
        glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %+v", volumeListObj)
        return
    }
    for _, volume := range volumeList.Items {
        // Ignore template volumes from kubernetes 1.2
        deleted := ctrl.upgradeVolumeFrom1_2(volume.(*api.PersistentVolume))
        if !deleted {
            storeObjectUpdate(ctrl.volumes.store, volume, "volume")
        }
    }
    claimListObj, err := claimSource.List(api.ListOptions{})
    if err != nil {
        glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
        return
    }
    claimList, ok := claimListObj.(*api.List)
    if !ok {
        glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %+v", claimListObj)
        return
    }
    for _, claim := range claimList.Items {
        storeObjectUpdate(ctrl.claims, claim, "claim")
    }
    glog.V(4).Infof("controller initialized")
}
Example 8: StoreEvents
// Stores events into the backend.
func (sink *influxdbSink) StoreEvents(events []kube_api.Event) error {
    dataPoints := []*influxdb.Series{}
    if events == nil || len(events) <= 0 {
        return nil
    }
    if !sink.c.avoidColumns {
        dataPoint, err := sink.storeEventsColumns(events)
        if err != nil {
            glog.Errorf("failed to parse events: %v", err)
            return err
        }
        dataPoints = append(dataPoints, dataPoint)
    } else {
        for _, event := range events {
            dataPoint, err := sink.storeEventNoColumns(event)
            if err != nil {
                glog.Errorf("failed to parse events: %v", err)
                return err
            }
            dataPoints = append(dataPoints, dataPoint)
        }
    }
    err := sink.client.WriteSeriesWithTimePrecision(dataPoints, influxdb.Millisecond)
    if err != nil {
        glog.Errorf("failed to write events to influxDB - %s", err)
        sink.recordWriteFailure()
    } else {
        glog.V(1).Info("Successfully flushed events to influxDB")
    }
    return err
}
Example 9: attach
// Attach exposes a volume on the host.
func (u *flexVolumeUtil) attach(f *flexVolumeMounter) (string, error) {
    execPath := f.execPath
    var options string
    if f.options != nil {
        out, err := json.Marshal(f.options)
        if err != nil {
            glog.Errorf("Failed to marshal plugin options, error: %s", err.Error())
            return "", err
        }
        if len(out) != 0 {
            options = string(out)
        } else {
            options = ""
        }
    }
    cmd := f.runner.Command(execPath, attachCmd, options)
    output, err := cmd.CombinedOutput()
    if err != nil {
        glog.Errorf("Failed to attach volume %s, output: %s, error: %s", f.volName, output, err.Error())
        _, err := handleCmdResponse(attachCmd, output)
        return "", err
    }
    status, err := handleCmdResponse(attachCmd, output)
    if err != nil {
        return "", err
    }
    glog.Infof("Successfully attached volume %s on device: %s", f.volName, status.Device)
    return status.Device, nil
}
Example 10: deletePod
// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
func (rsc *ReplicaSetController) deletePod(obj interface{}) {
    pod, ok := obj.(*api.Pod)
    // When a delete is dropped, the relist will notice a pod in the store not
    // in the list, leading to the insertion of a tombstone object which contains
    // the deleted key/value. Note that this value might be stale. If the pod
    // changed labels the new ReplicaSet will not be woken up till the periodic resync.
    if !ok {
        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
        if !ok {
            glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a replica set recreates a replica", obj, controller.ExpectationsTimeout)
            return
        }
        pod, ok = tombstone.Obj.(*api.Pod)
        if !ok {
            glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before replica set recreates a replica", obj, controller.ExpectationsTimeout)
            return
        }
    }
    if rs := rsc.getPodReplicaSet(pod); rs != nil {
        rsKey, err := controller.KeyFunc(rs)
        if err != nil {
            glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)
            return
        }
        rsc.expectations.DeletionObserved(rsKey)
        rsc.enqueueReplicaSet(rs)
    }
}
Example 11: deletePod
func (dc *DisruptionController) deletePod(obj interface{}) {
    pod, ok := obj.(*api.Pod)
    // When a delete is dropped, the relist will notice a pod in the store not
    // in the list, leading to the insertion of a tombstone object which contains
    // the deleted key/value. Note that this value might be stale. If the pod
    // changed labels the new ReplicaSet will not be woken up till the periodic
    // resync.
    if !ok {
        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
        if !ok {
            glog.Errorf("Couldn't get object from tombstone %+v", obj)
            return
        }
        pod, ok = tombstone.Obj.(*api.Pod)
        if !ok {
            glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
            return
        }
    }
    glog.V(4).Infof("deletePod called on pod %q", pod.Name)
    pdb := dc.getPdbForPod(pod)
    if pdb == nil {
        glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
        return
    }
    glog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
    dc.enqueuePdb(pdb)
}
Example 12: NewVolume
// NewVolume new a volume and init it.
func NewVolume(id int32, bfile, ifile string) (v *Volume, err error) {
    v = &Volume{}
    v.Id = id
    if v.block, err = NewSuperBlock(bfile); err != nil {
        log.Errorf("init super block: \"%s\" error(%v)", bfile, err)
        return
    }
    if v.indexer, err = NewIndexer(ifile, 102400); err != nil {
        log.Errorf("init indexer: %s error(%v)", ifile, err)
        goto failed
    }
    v.needles = make(map[int64]NeedleCache)
    if err = v.init(); err != nil {
        goto failed
    }
    v.signal = make(chan uint32, volumeDelChNum)
    v.compressKeys = []int64{}
    go v.del()
    return
failed:
    v.block.Close()
    if v.indexer != nil {
        v.indexer.Close()
    }
    return
}
Example 13: main
func main() {
    var (
        config *Config
        zk     *Zookeeper
        p      *Pitchfork
        err    error
    )
    flag.Parse()
    defer log.Flush()
    log.Infof("bfs pitchfork start")
    if config, err = NewConfig(configFile); err != nil {
        log.Errorf("NewConfig(\"%s\") error(%v)", configFile, err)
        return
    }
    log.Infof("init zookeeper...")
    if zk, err = NewZookeeper(config.ZookeeperAddrs, config.ZookeeperTimeout, config.ZookeeperPitchforkRoot, config.ZookeeperStoreRoot,
        config.ZookeeperVolumeRoot); err != nil {
        log.Errorf("NewZookeeper() failed, Quit now")
        return
    }
    log.Infof("register pitchfork...")
    if p, err = NewPitchfork(zk, config); err != nil {
        log.Errorf("pitchfork NewPitchfork() failed, Quit now")
        return
    }
    log.Infof("starts probe stores...")
    go p.Probe()
    StartSignal()
    return
}
Example 14: deletePod
// When a pod is deleted, enqueue the job that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
func (jm *JobController) deletePod(obj interface{}) {
    pod, ok := obj.(*api.Pod)
    // When a delete is dropped, the relist will notice a pod in the store not
    // in the list, leading to the insertion of a tombstone object which contains
    // the deleted key/value. Note that this value might be stale. If the pod
    // changed labels the new job will not be woken up till the periodic resync.
    if !ok {
        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
        if !ok {
            glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a job recreates a pod", obj, controller.ExpectationsTimeout)
            return
        }
        pod, ok = tombstone.Obj.(*api.Pod)
        if !ok {
            glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before job recreates a pod", obj, controller.ExpectationsTimeout)
            return
        }
    }
    if job := jm.getPodJob(pod); job != nil {
        jobKey, err := controller.KeyFunc(job)
        if err != nil {
            glog.Errorf("Couldn't get key for job %#v: %v", job, err)
            return
        }
        jm.expectations.DeletionObserved(jobKey)
        jm.enqueueController(job)
    }
}
Example 15: generateSSHKey
func (m *Master) generateSSHKey(user, privateKeyfile, publicKeyfile string) error {
    private, public, err := util.GenerateKey(2048)
    if err != nil {
        return err
    }
    // If private keyfile already exists, we must have only made it halfway
    // through last time, so delete it.
    exists, err := util.FileExists(privateKeyfile)
    if err != nil {
        glog.Errorf("Error detecting if private key exists: %v", err)
    } else if exists {
        glog.Infof("Private key exists, but public key does not")
        if err := os.Remove(privateKeyfile); err != nil {
            glog.Errorf("Failed to remove stale private key: %v", err)
        }
    }
    if err := ioutil.WriteFile(privateKeyfile, util.EncodePrivateKey(private), 0600); err != nil {
        return err
    }
    publicKeyBytes, err := util.EncodePublicKey(public)
    if err != nil {
        return err
    }
    if err := ioutil.WriteFile(publicKeyfile+".tmp", publicKeyBytes, 0600); err != nil {
        return err
    }
    return os.Rename(publicKeyfile+".tmp", publicKeyfile)
}