本文整理匯總了Golang中github.com/pivotal-golang/lager.Logger.Session方法的典型用法代碼示例。如果您正苦於以下問題:Golang Logger.Session方法的具體用法?Golang Logger.Session怎麽用?Golang Logger.Session使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/pivotal-golang/lager.Logger
的用法示例。
在下文中一共展示了Logger.Session方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: resolveActualsWithMissingCells
// resolveActualsWithMissingCells returns a closure that removes an actual LRP
// whose cell has gone missing and, when the desired LRP still wants that
// index, recreates the actual LRP and records a start request.
func (db *ETCDDB) resolveActualsWithMissingCells(logger lager.Logger, desired *models.DesiredLRP, actual *models.ActualLRP, starts *startRequests) func() {
	return func() {
		sessionLogger := logger.Session("start-missing-actual", lager.Data{
			"process-guid": actual.ProcessGuid,
			"index":        actual.Index,
		})

		sessionLogger.Debug("removing-actual-lrp")
		if err := db.RemoveActualLRP(sessionLogger, actual.ActualLRPKey.ProcessGuid, actual.ActualLRPKey.Index); err != nil {
			sessionLogger.Error("failed-removing-actual-lrp", err)
			return
		}
		sessionLogger.Debug("succeeded-removing-actual-lrp")

		// Indices at or beyond the desired instance count are not recreated.
		if actual.Index >= desired.Instances {
			return
		}

		sessionLogger.Debug("creating-actual-lrp")
		if err := db.createActualLRP(sessionLogger, desired, actual.Index); err != nil {
			sessionLogger.Error("failed-creating-actual-lrp", err)
			return
		}
		sessionLogger.Debug("succeeded-creating-actual-lrp")

		starts.Add(sessionLogger, &actual.ActualLRPKey)
	}
}
示例2: TryNextPendingBuild
// TryNextPendingBuild kicks off scheduling of the job's next pending build
// in the background and returns a Waiter the caller can block on.
func (s *Scheduler) TryNextPendingBuild(logger lager.Logger, versions *algorithm.VersionsDB, job atc.JobConfig, resources atc.ResourceConfigs, resourceTypes atc.ResourceTypes) Waiter {
	logger = logger.Session("try-next-pending")

	waitGroup := new(sync.WaitGroup)
	waitGroup.Add(1)

	go func() {
		defer waitGroup.Done()

		build, found, err := s.PipelineDB.GetNextPendingBuild(job.Name)
		switch {
		case err != nil:
			logger.Error("failed-to-get-next-pending-build", err)
			return
		case !found:
			// No pending build: nothing to do.
			return
		}

		jobService, err := NewJobService(job, s.PipelineDB, s.Scanner)
		if err != nil {
			logger.Error("failed-to-get-job-service", err)
			return
		}

		s.ScheduleAndResumePendingBuild(logger, versions, build, job, resources, resourceTypes, jobService)
	}()

	return waitGroup
}
示例3: Exec
// Exec a process in a bundle using 'runc exec'.
//
// The process spec is serialized to a temporary file whose path is handed
// to 'runc exec'.
func (r *RunRunc) Exec(log lager.Logger, bundlePath, id string, spec garden.ProcessSpec, io garden.ProcessIO) (garden.Process, error) {
	log = log.Session("exec", lager.Data{"id": id, "path": spec.Path})

	log.Info("started")
	defer log.Info("finished")

	tmpFile, err := ioutil.TempFile("", "guardianprocess")
	if err != nil {
		log.Error("tempfile-failed", err)
		return nil, err
	}
	// Fix: close the descriptor on every return path — the original leaked
	// one fd per Exec call. The file itself is deliberately left on disk,
	// since runc may still be reading it by path after this function returns.
	defer tmpFile.Close()

	if err := r.writeProcessJSON(bundlePath, spec, tmpFile); err != nil {
		log.Error("encode-failed", err)
		return nil, fmt.Errorf("writeProcessJSON for container %s: %s", id, err)
	}

	cmd := r.runc.ExecCommand(id, tmpFile.Name())

	process, err := r.tracker.Run(r.pidGenerator.Generate(), cmd, io, spec.TTY)
	if err != nil {
		log.Error("run-failed", err)
		return nil, err
	}

	return process, nil
}
示例4: Create
// Create creates a bundle in the depot and starts its init process
func (c *Containerizer) Create(log lager.Logger, spec gardener.DesiredContainerSpec) error {
	log = log.Session("containerizer-create", lager.Data{"handle": spec.Handle})

	log.Info("started")
	defer log.Info("finished")

	// Write the container's bundle into the depot under its handle.
	if err := c.depot.Create(log, spec.Handle, c.bundler.Bundle(spec)); err != nil {
		log.Error("create-failed", err)
		return err
	}

	// Resolve the on-disk path of the bundle we just created.
	path, err := c.depot.Lookup(log, spec.Handle)
	if err != nil {
		log.Error("lookup-failed", err)
		return err
	}

	// Tee the init process's stdout: one copy goes to the log writer, the
	// other is piped to startChecker below.
	stdoutR, stdoutW := io.Pipe()
	_, err = c.runner.Start(log, path, spec.Handle, garden.ProcessIO{
		Stdout: io.MultiWriter(logging.Writer(log), stdoutW),
		Stderr: logging.Writer(log),
	})
	if err != nil {
		log.Error("start", err)
		return err
	}

	// Block until the start checker accepts the init process's stdout —
	// presumably it waits for a readiness marker in the output (TODO confirm
	// against startChecker's implementation).
	if err := c.startChecker.Check(log, stdoutR); err != nil {
		log.Error("check", err)
		return err
	}
	return nil
}
示例5: Resume
// Resume runs the build's step tree to completion, forwarding any signals
// received on build.signals to the running process and reporting the final
// outcome through the build delegate.
func (build *execBuild) Resume(logger lager.Logger) {
	stepFactory := build.buildStepFactory(logger, build.metadata.Plan)
	source := stepFactory.Using(&exec.NoopStep{}, exec.NewSourceRepository())
	defer source.Release()

	// Run the step tree in the background; exited yields its final error.
	process := ifrit.Background(source)
	exited := process.Wait()

	aborted := false
	var succeeded exec.Success

	for {
		select {
		case err := <-exited:
			if aborted {
				// An os.Kill was forwarded earlier: report as not succeeded.
				succeeded = false
			} else if !source.Result(&succeeded) {
				logger.Error("step-had-no-result", errors.New("step failed to provide us with a result"))
				succeeded = false
			}

			build.delegate.Finish(logger.Session("finish"), err, succeeded, aborted)
			return

		case sig := <-build.signals:
			// Forward the signal to the running process; os.Kill also marks
			// the build aborted so the exit above is reported as an abort.
			process.Signal(sig)
			if sig == os.Kill {
				aborted = true
			}
		}
	}
}
示例6: CellEvents
// CellEvents watches consul for cells disappearing and emits a
// CellDisappearedEvent for each batch of vanished cell keys.
func (db *serviceClient) CellEvents(logger lager.Logger) <-chan models.CellEvent {
	logger = logger.Session("cell-events")

	disappearanceWatcher, disappeared := locket.NewDisappearanceWatcher(logger, db.consulClient, CellSchemaRoot(), db.clock)
	watcherProcess := ifrit.Invoke(disappearanceWatcher)

	eventCh := make(chan models.CellEvent)
	go func() {
		for {
			keys, ok := <-disappeared
			if !ok {
				// Watcher channel closed: shut the watcher process down.
				watcherProcess.Signal(os.Interrupt)
				return
			}

			cellIDs := make([]string, len(keys))
			for i, key := range keys {
				cellIDs[i] = path.Base(key)
			}

			logger.Info("cell-disappeared", lager.Data{"cell_ids": cellIDs})
			eventCh <- models.NewCellDisappearedEvent(cellIDs)
		}
	}()

	return eventCh
}
示例7: getDockerRegistryServices
// getDockerRegistryServices queries the consul catalog for docker-registry
// service instances, returning ErrMissingDockerRegistry when none exist.
func getDockerRegistryServices(consulCluster string, backendLogger lager.Logger) ([]consulServiceInfo, error) {
	logger := backendLogger.Session("docker-registry-consul-services")

	resp, err := http.Get(consulCluster + "/v1/catalog/service/docker-registry")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	var services []consulServiceInfo
	if err := json.Unmarshal(payload, &services); err != nil {
		return nil, err
	}

	if len(services) == 0 {
		return nil, ErrMissingDockerRegistry
	}

	logger.Debug("docker-registry-consul-services", lager.Data{"ips": services})
	return services, nil
}
示例8: TryNextPendingBuild
// TryNextPendingBuild schedules the job's next pending build in the
// background and returns a Waiter the caller can block on.
func (s *Scheduler) TryNextPendingBuild(logger lager.Logger, job atc.JobConfig, resources atc.ResourceConfigs) Waiter {
	logger = logger.Session("try-next-pending")

	wg := new(sync.WaitGroup)
	wg.Add(1)

	go func() {
		defer wg.Done()

		build, err := s.PipelineDB.GetNextPendingBuild(job.Name)
		if err == db.ErrNoBuild {
			// No pending build is not an error; just bail out quietly.
			return
		}
		if err != nil {
			logger.Error("failed-to-get-next-pending-build", err)
			return
		}

		s.scheduleAndResumePendingBuild(logger, build, job, resources)
	}()

	return wg
}
示例9: ConvergeLRPs
// ConvergeLRPs gathers and prunes LRP state from etcd, computes the set of
// convergence changes, and resolves them, returning start requests, keys to
// retire with scheduling info, and keys to unclaim.
func (db *ETCDDB) ConvergeLRPs(logger lager.Logger, cellSet models.CellSet) ([]*auctioneer.LRPStartRequest, []*models.ActualLRPKeyWithSchedulingInfo, []*models.ActualLRPKey) {
	start := db.clock.Now()
	convergeLRPRunsCounter.Increment()

	logger = logger.Session("etcd")
	logger.Info("starting-convergence")
	defer logger.Info("finished-convergence")

	// Report how long the whole convergence pass took, whatever the outcome.
	defer func() {
		if err := convergeLRPDuration.Send(time.Since(start)); err != nil {
			logger.Error("failed-sending-converge-lrp-duration-metric", err)
		}
	}()

	logger.Debug("gathering-convergence-input")
	input, err := db.GatherAndPruneLRPs(logger, cellSet)
	if err != nil {
		logger.Error("failed-gathering-convergence-input", err)
		return nil, nil, nil
	}
	logger.Debug("succeeded-gathering-convergence-input")

	changes := CalculateConvergence(logger, db.clock, models.NewDefaultRestartCalculator(), input)
	return db.ResolveConvergence(logger, input.DesiredLRPs, changes)
}
示例10: lastOperation
// lastOperation returns an HTTP handler that reports the last operation of
// a service instance, mapping broker errors onto HTTP status codes.
func lastOperation(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		instanceID := router.Vars(req)["instance_id"]

		sessionLogger := logger.Session(lastOperationLogKey, lager.Data{
			instanceIDLogKey: instanceID,
		})

		lastOperationResponse, err := serviceBroker.LastOperation(instanceID)
		if err == nil {
			respond(w, http.StatusOK, lastOperationResponse)
			return
		}

		// 410 Gone for a missing instance, 500 for anything else.
		if err == ErrInstanceDoesNotExist {
			sessionLogger.Error(instanceMissingErrorKey, err)
			respond(w, http.StatusGone, EmptyResponse{})
			return
		}

		sessionLogger.Error(unknownErrorKey, err)
		respond(w, http.StatusInternalServerError, ErrorResponse{
			Description: err.Error(),
		})
	}
}
示例11: provision
// provision returns an HTTP handler that provisions a service instance,
// mapping broker errors onto HTTP status codes and logging each outcome.
func provision(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		instanceID := router.Vars(req)["instance_id"]
		acceptsIncomplete := req.URL.Query().Get("accepts_incomplete") == "true"

		sessionLogger := logger.Session(provisionLogKey, lager.Data{
			instanceIDLogKey: instanceID,
		})

		var details ProvisionDetails
		if err := json.NewDecoder(req.Body).Decode(&details); err != nil {
			sessionLogger.Error(invalidProvisionDetailsErrorKey, err)
			respond(w, http.StatusBadRequest, ErrorResponse{
				Description: err.Error(),
			})
			return
		}

		sessionLogger = sessionLogger.WithData(lager.Data{
			provisionDetailsLogKey: details,
		})

		provisioningResponse, asynch, err := serviceBroker.Provision(instanceID, details, acceptsIncomplete)
		if err != nil {
			// Map known broker errors to specific statuses; everything else
			// is an internal server error.
			switch err {
			case ErrInstanceAlreadyExists:
				sessionLogger.Error(instanceAlreadyExistsErrorKey, err)
				respond(w, http.StatusConflict, EmptyResponse{})
			case ErrInstanceLimitMet:
				sessionLogger.Error(instanceLimitReachedErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			case ErrAsyncRequired:
				sessionLogger.Error(instanceAsyncRequiredErrorKey, err)
				respond(w, statusUnprocessableEntity, ErrorResponse{
					Error:       "AsyncRequired",
					Description: err.Error(),
				})
			default:
				sessionLogger.Error(unknownErrorKey, err)
				respond(w, http.StatusInternalServerError, ErrorResponse{
					Description: err.Error(),
				})
			}
			return
		}

		// 202 Accepted for an asynchronous provision, 201 Created otherwise.
		status := http.StatusCreated
		if asynch {
			status = http.StatusAccepted
		}
		respond(w, status, provisioningResponse)
	}
}
示例12: bind
// bind returns an HTTP handler that binds a service instance, mapping
// broker errors onto HTTP status codes and logging each outcome.
func bind(serviceBroker ServiceBroker, router httpRouter, logger lager.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		vars := router.Vars(req)
		instanceID := vars["instance_id"]
		bindingID := vars["binding_id"]

		sessionLogger := logger.Session(bindLogKey, lager.Data{
			instanceIDLogKey: instanceID,
			bindingIDLogKey:  bindingID,
		})

		var details BindDetails
		if err := json.NewDecoder(req.Body).Decode(&details); err != nil {
			sessionLogger.Error(invalidBindDetailsErrorKey, err)
			respond(w, http.StatusBadRequest, ErrorResponse{
				Description: err.Error(),
			})
			return
		}

		sessionLogger = sessionLogger.WithData(lager.Data{
			bindDetailsLogKey: details,
		})

		bindingResponse, err := serviceBroker.Bind(instanceID, bindingID, details)
		if err == nil {
			respond(w, http.StatusCreated, bindingResponse)
			return
		}

		// Map known broker errors to specific statuses; everything else is
		// an internal server error.
		switch err {
		case ErrInstanceDoesNotExist:
			sessionLogger.Error(instanceMissingErrorKey, err)
			respond(w, http.StatusInternalServerError, ErrorResponse{
				Description: err.Error(),
			})
		case ErrBindingAlreadyExists:
			sessionLogger.Error(bindingAlreadyExistsErrorKey, err)
			respond(w, http.StatusConflict, ErrorResponse{
				Description: err.Error(),
			})
		case ErrAppGUIDRequired:
			sessionLogger.Error(bindingAppGUIDRequiredErrorKey, err)
			respond(w, statusUnprocessableEntity, ErrorResponse{
				Error:       "RequiresApp",
				Description: err.Error(),
			})
		case ErrInstanceNotBindable:
			sessionLogger.Error(instanceNotBindableErrorKey, err)
			respond(w, http.StatusInternalServerError, ErrorResponse{
				Description: err.Error(),
			})
		default:
			sessionLogger.Error(unknownErrorKey, err)
			respond(w, http.StatusInternalServerError, ErrorResponse{
				Description: err.Error(),
			})
		}
	}
}
示例13: Emit
// Emit sends the job-scheduling-duration metric, with a state derived from
// how long scheduling took (>1s = warning, >5s = critical).
func (event SchedulingJobDuration) Emit(logger lager.Logger) {
	var state string
	switch {
	case event.Duration > 5*time.Second:
		state = "critical"
	case event.Duration > time.Second:
		state = "warning"
	default:
		state = "ok"
	}

	sessionLogger := logger.Session("job-scheduling-duration", lager.Data{
		"pipeline": event.PipelineName,
		"job":      event.JobName,
		"duration": event.Duration.String(),
	})

	emit(sessionLogger, goryman.Event{
		Service: "scheduling: job duration (ms)",
		Metric:  ms(event.Duration),
		State:   state,
		Attributes: map[string]string{
			"pipeline": event.PipelineName,
			"job":      event.JobName,
		},
	})
}
示例14: heartbeatContinuously
// heartbeatContinuously keeps the volume's TTL alive until the volume
// expires from the database or a final TTL arrives on v.release.
//
// On each pacemaker tick it re-reads the TTL from the DB (so external TTL
// changes take effect) and heartbeats with that value; if the volume has
// disappeared from the DB it stops. A value received on v.release triggers
// one last heartbeat with that TTL (when non-nil) and then returns.
func (v *volume) heartbeatContinuously(logger lager.Logger, pacemaker clock.Ticker, initialTTL time.Duration) {
	defer v.heartbeating.Done()
	defer pacemaker.Stop()

	logger.Debug("start")
	defer logger.Debug("done")

	ttlToSet := initialTTL
	for {
		select {
		case <-pacemaker.C():
			ttl, found, err := v.db.GetVolumeTTL(v.Handle())
			if err != nil {
				// Lookup failed: keep heartbeating with the last known TTL.
				logger.Error("failed-to-lookup-ttl", err)
			} else {
				if !found {
					logger.Info("volume-expired-from-database")
					return
				}
				ttlToSet = ttl
			}

			v.heartbeat(logger.Session("tick"), ttlToSet)

		case finalTTL := <-v.release:
			if finalTTL != nil {
				v.heartbeat(logger.Session("final"), *finalTTL)
			}
			return
		}
	}
}
示例15: resolveRestartableCrashedActualLRPS
// resolveRestartableCrashedActualLRPS returns a closure that unclaims a
// crashed actual LRP and records a start request so it gets restarted.
func (db *ETCDDB) resolveRestartableCrashedActualLRPS(logger lager.Logger, actualLRP *models.ActualLRP, starts *startRequests) func() {
	return func() {
		key := actualLRP.ActualLRPKey

		sessionLogger := logger.Session("restart-crash", lager.Data{
			"process_guid": key.ProcessGuid,
			"index":        key.Index,
		})

		// Only LRPs actually in the CRASHED state may be restarted.
		if actualLRP.State != models.ActualLRPStateCrashed {
			sessionLogger.Error("failed-actual-lrp-state-is-not-crashed", nil)
			return
		}

		sessionLogger.Debug("unclaiming-actual-lrp", lager.Data{"process_guid": actualLRP.ActualLRPKey.ProcessGuid, "index": actualLRP.ActualLRPKey.Index})
		if _, err := db.unclaimActualLRP(sessionLogger, &actualLRP.ActualLRPKey, &actualLRP.ActualLRPInstanceKey); err != nil {
			sessionLogger.Error("failed-unclaiming-crash", err)
			return
		}
		sessionLogger.Debug("succeeded-unclaiming-actual-lrp")

		starts.Add(sessionLogger, &key)
	}
}