This page collects typical usage examples of the Golang function distro.Find from github.com/evergreen-ci/evergreen/model/distro. If you have been wondering what the Golang Find function does, how to call it, or what it looks like in real code, the curated examples below should help.
The following shows 10 code examples of the Find function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
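Before the full examples, a minimal sketch of the basic call pattern may be useful: distro.Find takes a query and returns a slice of distro.Distro along with an error, so callers check the error first and then iterate over the results. The helper below (listAllDistroIds is a hypothetical name written for this overview, not code from evergreen) assumes the fmt and github.com/evergreen-ci/evergreen/model/distro imports used throughout the examples; the examples below also pass narrower queries such as distro.BySpawnAllowed() and distro.ByProvider(...) in place of distro.All.
func listAllDistroIds() ([]string, error) {
	// distro.All is a query that matches every distro
	distros, err := distro.Find(distro.All)
	if err != nil {
		return nil, fmt.Errorf("error finding distros: %v", err)
	}
	// collect the id of each distro that was found
	ids := make([]string, 0, len(distros))
	for _, d := range distros {
		ids = append(ids, d.Id)
	}
	return ids, nil
}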
Example 1: listDistros
func (as *APIServer) listDistros(w http.ResponseWriter, r *http.Request) {
	distros, err := distro.Find(distro.BySpawnAllowed())
	if err != nil {
		as.LoggedError(w, r, http.StatusInternalServerError, err)
		return
	}
	distroList := []string{}
	for _, d := range distros {
		distroList = append(distroList, d.Id)
	}
	as.WriteJSON(w, http.StatusOK, spawnResponse{Distros: distroList})
}
Example 2: getDistroIds
// create a slice of all valid distro names
func getDistroIds() ([]string, error) {
	// create a slice of all known distros
	distros, err := distro.Find(distro.All)
	if err != nil {
		return nil, err
	}
	distroIds := []string{}
	for _, d := range distros {
		if !util.SliceContains(distroIds, d.Id) {
			distroIds = append(distroIds, d.Id)
		}
	}
	return distroIds, nil
}
Example 3: populateDistroIds
// populate the package-level slice of all valid distro names
func populateDistroIds() *ValidationError {
	// fetch all known distros
	distros, err := distro.Find(distro.All)
	if err != nil {
		return &ValidationError{
			Message: fmt.Sprintf("error finding distros: %v", err),
			Level:   Error,
		}
	}
	// distroIds is assigned rather than declared with :=, so this resets
	// and fills a package-level variable
	distroIds = []string{}
	for _, d := range distros {
		if !util.SliceContains(distroIds, d.Id) {
			distroIds = append(distroIds, d.Id)
		}
	}
	return nil
}
Example 4: UpdateStaticHosts
func UpdateStaticHosts(e *evergreen.Settings) error {
	distros, err := distro.Find(distro.ByProvider(static.ProviderName))
	if err != nil {
		return err
	}
	activeStaticHosts := make([]string, 0)
	settings := &static.Settings{}
	for _, d := range distros {
		err = mapstructure.Decode(d.ProviderSettings, settings)
		if err != nil {
			return fmt.Errorf("invalid static settings for '%v'", d.Id)
		}
		for _, h := range settings.Hosts {
			hostInfo, err := util.ParseSSHInfo(h.Name)
			if err != nil {
				return err
			}
			user := hostInfo.User
			if user == "" {
				user = d.User
			}
			staticHost := host.Host{
				Id:           h.Name,
				User:         user,
				Host:         h.Name,
				Distro:       d,
				CreationTime: time.Now(),
				Provider:     evergreen.HostTypeStatic,
				StartedBy:    evergreen.User,
				Status:       evergreen.HostRunning,
				Provisioned:  true,
			}
			// upsert the host
			_, err = staticHost.Upsert()
			if err != nil {
				return err
			}
			activeStaticHosts = append(activeStaticHosts, h.Name)
		}
	}
	return host.DecommissionInactiveStaticHosts(activeStaticHosts)
}
Example 5: listSpawnableDistros
func (uis *UIServer) listSpawnableDistros(w http.ResponseWriter, r *http.Request) {
	// load in the distros
	distros, err := distro.Find(distro.All)
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError, fmt.Errorf("Error loading distros: %v", err))
		return
	}
	distroList := []map[string]interface{}{}
	for _, d := range distros {
		if d.SpawnAllowed {
			distroList = append(distroList, map[string]interface{}{
				"name":             d.Id,
				"userDataFile":     d.UserData.File,
				"userDataValidate": d.UserData.Validate})
		}
	}
	uis.WriteJSON(w, http.StatusOK, distroList)
}
Example 6: distrosPage
func (uis *UIServer) distrosPage(w http.ResponseWriter, r *http.Request) {
	projCtx := MustHaveProjectContext(r)
	distros, err := distro.Find(distro.All)
	if err != nil {
		message := fmt.Sprintf("error fetching distros: %v", err)
		PushFlash(uis.CookieStore, r, w, NewErrorFlash(message))
		http.Error(w, message, http.StatusInternalServerError)
		return
	}
	sort.Sort(&sortableDistro{distros})
	uis.WriteHTML(w, http.StatusOK, struct {
		Distros     []distro.Distro
		Keys        map[string]string
		User        *user.DBUser
		ProjectData projectContext
		Flashes     []interface{}
	}{distros, uis.Settings.Keys, GetUser(r), projCtx, PopFlashes(uis.CookieStore, r, w)},
		"base", "distros.html", "base_angular.html", "menu.html")
}
Example 7: RunAllMonitoring
// run all monitoring functions
func RunAllMonitoring(settings *evergreen.Settings) error {
	// load in all of the distros
	distros, err := distro.Find(db.Q{})
	if err != nil {
		return fmt.Errorf("error finding distros: %v", err)
	}
	// fetch the project refs, which we will use to get all of the projects
	projectRefs, err := model.FindAllProjectRefs()
	if err != nil {
		return fmt.Errorf("error loading in project refs: %v", err)
	}
	// turn the project refs into a map of the project id -> project
	projects := map[string]model.Project{}
	for _, ref := range projectRefs {
		// only monitor projects that are enabled
		if !ref.Enabled {
			continue
		}
		project, err := model.FindProject("", &ref)
		// continue on error to keep the whole monitoring process from
		// being held up
		if err != nil {
			evergreen.Logger.Logf(slogger.ERROR, "error finding project %v: %v",
				ref.Identifier, err)
			continue
		}
		if project == nil {
			evergreen.Logger.Logf(slogger.ERROR, "no project entry found for"+
				" ref %v", ref.Identifier)
			continue
		}
		projects[project.Identifier] = *project
	}
	// initialize the task monitor
	taskMonitor := &TaskMonitor{
		flaggingFuncs: defaultTaskFlaggingFuncs,
	}
	// clean up any necessary tasks
	errs := taskMonitor.CleanupTasks(projects)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error cleaning up tasks: %v", err)
	}
	// initialize the host monitor
	hostMonitor := &HostMonitor{
		flaggingFuncs:   defaultHostFlaggingFuncs,
		monitoringFuncs: defaultHostMonitoringFuncs,
	}
	// clean up any necessary hosts
	errs = hostMonitor.CleanupHosts(distros, settings)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error cleaning up hosts: %v", err)
	}
	// run monitoring checks
	errs = hostMonitor.RunMonitoringChecks(settings)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error running host monitoring checks: %v", err)
	}
	// initialize the notifier
	notifier := &Notifier{
		notificationBuilders: defaultNotificationBuilders,
	}
	// send notifications
	errs = notifier.Notify(settings)
	for _, err := range errs {
		evergreen.Logger.Logf(slogger.ERROR, "Error sending notifications: %v", err)
	}
	// Do alerts for spawnhosts - collect all hosts expiring in the next 12 hours.
	// The trigger logic will filter out any hosts that aren't in a notification
	// window, or that have already had alerts sent.
	now := time.Now()
	thresholdTime := now.Add(12 * time.Hour)
	expiringSoonHosts, err := host.Find(host.ByExpiringBetween(now, thresholdTime))
	if err != nil {
		return err
	}
	for _, h := range expiringSoonHosts {
		err := alerts.RunSpawnWarningTriggers(&h)
		if err != nil {
			evergreen.Logger.Logf(slogger.ERROR, "Error queueing alert: %v", err)
		}
	}
	return nil
	//......... part of the code omitted here .........
Example 8: Schedule
// Schedule all of the tasks to be run. Works by finding all of the tasks that
// are ready to be run, splitting them by distro, prioritizing them, and saving
// the per-distro queues. Then determines the number of new hosts to spin up
// for each distro, and spins them up.
func (self *Scheduler) Schedule() error {
	// make sure the correct static hosts are in the database
	evergreen.Logger.Logf(slogger.INFO, "Updating static hosts...")
	err := model.UpdateStaticHosts(self.Settings)
	if err != nil {
		return fmt.Errorf("error updating static hosts: %v", err)
	}
	// find all tasks ready to be run
	evergreen.Logger.Logf(slogger.INFO, "Finding runnable tasks...")
	runnableTasks, err := self.FindRunnableTasks()
	if err != nil {
		return fmt.Errorf("Error finding runnable tasks: %v", err)
	}
	evergreen.Logger.Logf(slogger.INFO, "There are %v tasks ready to be run", len(runnableTasks))
	// split the tasks by distro
	tasksByDistro, taskRunDistros, err := self.splitTasksByDistro(runnableTasks)
	if err != nil {
		return fmt.Errorf("Error splitting tasks by distro to run on: %v", err)
	}
	// load in all of the distros
	distros, err := distro.Find(distro.All)
	if err != nil {
		return fmt.Errorf("Error finding distros: %v", err)
	}
	taskIdToMinQueuePos := make(map[string]int)
	// get the expected run duration of all runnable tasks
	taskExpectedDuration, err := self.GetExpectedDurations(runnableTasks)
	if err != nil {
		return fmt.Errorf("Error getting expected task durations: %v", err)
	}
	// prioritize the tasks, one distro at a time
	taskQueueItems := make(map[string][]model.TaskQueueItem)
	for _, d := range distros {
		runnableTasksForDistro := tasksByDistro[d.Id]
		evergreen.Logger.Logf(slogger.INFO, "Prioritizing %v tasks for distro %v...",
			len(runnableTasksForDistro), d.Id)
		prioritizedTasks, err := self.PrioritizeTasks(self.Settings,
			runnableTasksForDistro)
		if err != nil {
			return fmt.Errorf("Error prioritizing tasks: %v", err)
		}
		// Update the running minimums of queue position.
		// The value is 1-based primarily so that we can differentiate between
		// no value and being first in a queue
		for i, prioritizedTask := range prioritizedTasks {
			minQueuePos, ok := taskIdToMinQueuePos[prioritizedTask.Id]
			if ok {
				taskIdToMinQueuePos[prioritizedTask.Id] =
					int(math.Min(float64(minQueuePos), float64(i+1)))
			} else {
				taskIdToMinQueuePos[prioritizedTask.Id] = i + 1
			}
		}
		// persist the queue of tasks
		evergreen.Logger.Logf(slogger.INFO, "Saving task queue for distro %v...",
			d.Id)
		queuedTasks, err := self.PersistTaskQueue(d.Id, prioritizedTasks,
			taskExpectedDuration)
		if err != nil {
			return fmt.Errorf("Error saving task queue: %v", err)
		}
		// track scheduled time for prioritized tasks
		err = model.SetTasksScheduledTime(prioritizedTasks, time.Now())
		if err != nil {
			return fmt.Errorf("Error setting scheduled time for prioritized "+
				"tasks: %v", err)
		}
		taskQueueItems[d.Id] = queuedTasks
	}
	err = model.UpdateMinQueuePos(taskIdToMinQueuePos)
	if err != nil {
		return fmt.Errorf("Error updating tasks with queue positions: %v", err)
	}
	// split distros by name
	distrosByName := make(map[string]distro.Distro)
	for _, d := range distros {
		distrosByName[d.Id] = d
	}
	// fetch all hosts, split by distro
	//......... part of the code omitted here .........
Example 9: Schedule
// Schedule all of the tasks to be run. Works by finding all of the tasks that
// are ready to be run, splitting them by distro, prioritizing them, and saving
// the per-distro queues. Then determines the number of new hosts to spin up
// for each distro, and spins them up.
func (s *Scheduler) Schedule() error {
	// make sure the correct static hosts are in the database
	evergreen.Logger.Logf(slogger.INFO, "Updating static hosts...")
	err := model.UpdateStaticHosts(s.Settings)
	if err != nil {
		return fmt.Errorf("error updating static hosts: %v", err)
	}
	// find all tasks ready to be run
	evergreen.Logger.Logf(slogger.INFO, "Finding runnable tasks...")
	runnableTasks, err := s.FindRunnableTasks()
	if err != nil {
		return fmt.Errorf("Error finding runnable tasks: %v", err)
	}
	evergreen.Logger.Logf(slogger.INFO, "There are %v tasks ready to be run", len(runnableTasks))
	// split the tasks by distro
	tasksByDistro, taskRunDistros, err := s.splitTasksByDistro(runnableTasks)
	if err != nil {
		return fmt.Errorf("Error splitting tasks by distro to run on: %v", err)
	}
	// load in all of the distros
	distros, err := distro.Find(distro.All)
	if err != nil {
		return fmt.Errorf("Error finding distros: %v", err)
	}
	// get the expected run duration of all runnable tasks
	taskExpectedDuration, err := s.GetExpectedDurations(runnableTasks)
	if err != nil {
		return fmt.Errorf("Error getting expected task durations: %v", err)
	}
	distroInputChan := make(chan distroSchedulerInput, len(distros))
	// put all of the needed input for the distro scheduler into a channel to
	// be read by the distro scheduling loop
	for distroId, task := range tasksByDistro {
		distroInputChan <- distroSchedulerInput{
			distroId:               distroId,
			runnableTasksForDistro: task,
		}
	}
	// close the channel to signal that the loop reading from it can terminate
	close(distroInputChan)
	workers := runtime.NumCPU()
	wg := sync.WaitGroup{}
	wg.Add(workers)
	// make a channel to collect all of the function results from scheduling the distros
	distroSchedulerResultChan := make(chan *distroSchedulerResult)
	// for each worker, create a new goroutine
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			// read the inputs for scheduling this distro
			for d := range distroInputChan {
				// schedule the distro
				res := s.scheduleDistro(d.distroId, d.runnableTasksForDistro, taskExpectedDuration)
				if res.err != nil {
					// log the error from this distro's result, not the outer err
					evergreen.Logger.Logf(slogger.ERROR, "%v", res.err)
				}
				// write the results out to a results channel
				distroSchedulerResultChan <- res
			}
		}()
	}
	// initialize a map of scheduler events
	schedulerEvents := map[string]event.TaskQueueInfo{}
	// collect the scheduling results, one distro at a time
	taskQueueItems := make(map[string][]model.TaskQueueItem)
	var errResult error
	go func() {
		for res := range distroSchedulerResultChan {
			if res.err != nil {
				errResult = fmt.Errorf("error scheduling tasks on distro %v: %v", res.distroId, res.err)
				return
			}
			schedulerEvents[res.distroId] = res.schedulerEvent
			taskQueueItems[res.distroId] = res.taskQueueItem
		}
	}()
	//......... part of the code omitted here .........
Example 10: allTaskQueues
func (uis *UIServer) allTaskQueues(w http.ResponseWriter, r *http.Request) {
	projCtx := MustHaveProjectContext(r)
	taskQueues, err := model.FindAllTaskQueues()
	if err != nil {
		uis.LoggedError(w, r, http.StatusInternalServerError,
			fmt.Errorf("Error finding task queues: %v", err))
		return
	}
	// find all distros so that we only display task queues of distros that exist.
	allDistros, err := distro.Find(distro.All.WithFields(distro.IdKey))
	if err != nil {
		message := fmt.Sprintf("error fetching distros: %v", err)
		http.Error(w, message, http.StatusInternalServerError)
		return
	}
	distroIds := []string{}
	for _, d := range allDistros {
		distroIds = append(distroIds, d.Id)
	}
	// cached map of version id to relevant patch
	cachedPatches := map[string]*patch.Patch{}
	// convert the task queues to the ui versions
	uiTaskQueues := []uiTaskQueue{}
	for _, tQ := range taskQueues {
		asUI := uiTaskQueue{
			Distro: tQ.Distro,
			Queue:  []uiTaskQueueItem{},
		}
		if len(tQ.Queue) == 0 {
			uiTaskQueues = append(uiTaskQueues, asUI)
			continue
		}
		// convert the individual task queue items
		taskIds := []string{}
		for _, item := range tQ.Queue {
			// cache the ids, for fetching the tasks from the db
			taskIds = append(taskIds, item.Id)
			queueItemAsUI := uiTaskQueueItem{
				Id:                  item.Id,
				DisplayName:         item.DisplayName,
				BuildVariant:        item.BuildVariant,
				RevisionOrderNumber: item.RevisionOrderNumber,
				Requester:           item.Requester,
				Revision:            item.Revision,
				Project:             item.Project,
				ExpectedDuration:    item.ExpectedDuration,
				Priority:            item.Priority,
			}
			asUI.Queue = append(asUI.Queue, queueItemAsUI)
		}
		// find all the relevant tasks
		tasks, err := task.Find(task.ByIds(taskIds).WithFields(task.VersionKey, task.BuildIdKey))
		if err != nil {
			msg := fmt.Sprintf("Error finding tasks: %v", err)
			evergreen.Logger.Errorf(slogger.ERROR, msg)
			http.Error(w, msg, http.StatusInternalServerError)
			return
		}
		// store all of the version and build ids in the relevant task queue
		// items
		for _, task := range tasks {
			// this sucks, but it's because we're not guaranteed the order out
			// of the db
			for idx, queueItemAsUI := range asUI.Queue {
				if queueItemAsUI.Id == task.Id {
					queueItemAsUI.Version = task.Version
					queueItemAsUI.Build = task.BuildId
					asUI.Queue[idx] = queueItemAsUI
				}
			}
		}
		// add all of the necessary patch info into the relevant task queue
		// items
		for idx, queueItemAsUI := range asUI.Queue {
			if queueItemAsUI.Requester == evergreen.PatchVersionRequester {
				// fetch the patch, if necessary
				var p *patch.Patch
				var ok bool
				if p, ok = cachedPatches[queueItemAsUI.Version]; ok {
					queueItemAsUI.User = p.Author
					asUI.Queue[idx] = queueItemAsUI
				} else {
					p, err = patch.FindOne(
						patch.ByVersion(queueItemAsUI.Version).WithFields(patch.AuthorKey),
					)
					if err != nil {
						msg := fmt.Sprintf("Error finding patch: %v", err)
						evergreen.Logger.Errorf(slogger.ERROR, msg)
						//......... part of the code omitted here .........