本文整理汇总了Golang中go/skia/org/infra/go/util.MkdirAll函数的典型用法代码示例。如果您正苦于以下问题:Golang MkdirAll函数的具体用法?Golang MkdirAll怎么用?Golang MkdirAll使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了MkdirAll函数的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: mergeUploadCSVFiles
// mergeUploadCSVFiles downloads the per-slave benchmark output CSVs for runID
// from Google Storage, merges them into a single CSV using csv_merger.py, and
// uploads the consolidated CSV back to Google Storage.
//
// Slaves whose outputs cannot be fetched are logged and skipped (see the TODO
// below); any other failure aborts the merge and is returned to the caller.
func mergeUploadCSVFiles(runID string, gs *util.GsUtil) error {
	localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runID)
	skutil.MkdirAll(localOutputDir, 0700)
	// Copy outputs from all slaves locally.
	for i := 0; i < util.NUM_WORKERS; i++ {
		workerNum := i + 1
		workerLocalOutputPath := filepath.Join(localOutputDir, fmt.Sprintf("slave%d", workerNum)+".csv")
		workerRemoteOutputPath := filepath.Join(util.BenchmarkRunsDir, runID, fmt.Sprintf("slave%d", workerNum), "outputs", runID+".output")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			// TODO(rmistry): Should we instead return here? We can only return
			// here if all 100 slaves reliably run without any failures which they
			// really should.
			continue
		}
		out, err := os.Create(workerLocalOutputPath)
		if err != nil {
			skutil.Close(respBody)
			return fmt.Errorf("Unable to create file %s: %s", workerLocalOutputPath, err)
		}
		// The local CSVs must survive until csv_merger.py below has read them,
		// so their removal is deliberately deferred to function exit.
		defer skutil.Remove(workerLocalOutputPath)
		// Close both handles at the end of this iteration rather than via
		// defer: deferred Closes inside the loop would accumulate up to
		// NUM_WORKERS open descriptors until the function returns.
		_, copyErr := io.Copy(out, respBody)
		skutil.Close(out)
		skutil.Close(respBody)
		if copyErr != nil {
			return fmt.Errorf("Unable to copy to file %s: %s", workerLocalOutputPath, copyErr)
		}
	}
	// Call csv_merger.py to merge all results into a single results CSV.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	if err := util.ExecuteCmd("python", args, []string{}, 1*time.Hour, nil, nil); err != nil {
		return fmt.Errorf("Error running csv_merger.py: %s", err)
	}
	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(util.BenchmarkRunsDir, runID, "consolidated_outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return nil
}
示例2: mergeUploadCSVFiles
// mergeUploadCSVFiles downloads the per-slave benchmark output CSVs for runID
// from Google Storage, merges them into a single CSV using csv_merger.py, and
// uploads the consolidated CSV back to Google Storage.
//
// It returns the names of slaves whose outputs could not be fetched; those
// slaves are logged and skipped rather than failing the whole merge. Any other
// failure is returned alongside the slaves collected so far.
func mergeUploadCSVFiles(runID string, gs *util.GsUtil) ([]string, error) {
	localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runID)
	skutil.MkdirAll(localOutputDir, 0700)
	noOutputSlaves := []string{}
	// Copy outputs from all slaves locally.
	for i := 0; i < util.NumWorkers(); i++ {
		workerNum := i + 1
		workerLocalOutputPath := filepath.Join(localOutputDir, fmt.Sprintf("slave%d", workerNum)+".csv")
		workerRemoteOutputPath := filepath.Join(util.BenchmarkRunsDir, runID, fmt.Sprintf("slave%d", workerNum), "outputs", runID+".output")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			noOutputSlaves = append(noOutputSlaves, fmt.Sprintf(util.WORKER_NAME_TEMPLATE, workerNum))
			continue
		}
		out, err := os.Create(workerLocalOutputPath)
		if err != nil {
			skutil.Close(respBody)
			return noOutputSlaves, fmt.Errorf("Unable to create file %s: %s", workerLocalOutputPath, err)
		}
		// The local CSVs must survive until csv_merger.py below has read them,
		// so their removal is deliberately deferred to function exit.
		defer skutil.Remove(workerLocalOutputPath)
		// Close both handles at the end of this iteration rather than via
		// defer: deferred Closes inside the loop would accumulate one open
		// descriptor per worker until the function returns.
		_, copyErr := io.Copy(out, respBody)
		skutil.Close(out)
		skutil.Close(respBody)
		if copyErr != nil {
			return noOutputSlaves, fmt.Errorf("Unable to copy to file %s: %s", workerLocalOutputPath, copyErr)
		}
	}
	// Call csv_merger.py to merge all results into a single results CSV.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	if err := util.ExecuteCmd("python", args, []string{}, util.CSV_MERGER_TIMEOUT, nil, nil); err != nil {
		return noOutputSlaves, fmt.Errorf("Error running csv_merger.py: %s", err)
	}
	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(util.BenchmarkRunsDir, runID, "consolidated_outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return noOutputSlaves, fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return noOutputSlaves, nil
}
示例3: runRenderPictures
// runRenderPictures runs Skia's render_pictures binary over the SKPs in
// localSkpsDir, writing images and a JSON summary into localOutputDir.
// remoteOutputDir is passed through as the base GS URL for the images. When
// runGpu is true, any "--config <name>" in the extra arguments is rewritten to
// "--config gpu" so rendering happens on the GPU.
func runRenderPictures(localSkpsDir, localOutputDir, remoteOutputDir string, runGpu bool) error {
	extraArgs := *renderPicturesArgs
	if runGpu {
		glog.Info("Run with GPU has been specified. Using --config gpu.")
		// Replace whatever config was requested with the GPU config.
		configRE, _ := regexp.Compile("--config [a-zA-Z0-9]+")
		extraArgs = configRE.ReplaceAllString(extraArgs, "--config gpu")
	}
	skutil.MkdirAll(localOutputDir, 0700)
	// Base arguments followed by the space-separated extra arguments.
	args := append([]string{
		"-r", localSkpsDir,
		"-w", localOutputDir,
		"--writeJsonSummaryPath", filepath.Join(localOutputDir, "summary.json"),
		"--imageBaseGSUrl", remoteOutputDir,
	}, strings.Split(extraArgs, " ")...)
	renderPicturesBinary := filepath.Join(util.SkiaTreeDir, "out", "Release", util.BINARY_RENDER_PICTURES)
	err := util.ExecuteCmd(renderPicturesBinary, args, []string{"DISPLAY=:0"}, 15*time.Minute, nil, nil)
	if err != nil {
		return fmt.Errorf("Failure when running render_pictures: %s", err)
	}
	return nil
}
示例4: main
//.........这里部分代码省略.........
RunIDNoPatch string
RunIDWithPatch string
BenchmarkName string
BenchmarkExtraArgs string
BrowserExtraArgsNoPatch string
BrowserExtraArgsWithPatch string
RepeatBenchmark int
TargetPlatform string
}{
WorkerNum: util.WORKER_NUM_KEYWORD,
LogDir: util.GLogDir,
PagesetType: *pagesetType,
ChromiumBuildNoPatch: chromiumBuildNoPatch,
ChromiumBuildWithPatch: chromiumBuildWithPatch,
RunIDNoPatch: runIDNoPatch,
RunIDWithPatch: runIDWithPatch,
BenchmarkName: *benchmarkName,
BenchmarkExtraArgs: *benchmarkExtraArgs,
BrowserExtraArgsNoPatch: *browserExtraArgsNoPatch,
BrowserExtraArgsWithPatch: *browserExtraArgsWithPatch,
RepeatBenchmark: *repeatBenchmark,
TargetPlatform: *targetPlatform,
}); err != nil {
glog.Errorf("Failed to execute template: %s", err)
return
}
cmd := []string{
fmt.Sprintf("cd %s;", util.CtTreeDir),
"git pull;",
"make all;",
// The main command that runs run_chromium_perf on all workers.
runChromiumPerfCmdBytes.String(),
}
// Setting a 1 day timeout since it may take a while run benchmarks with many
// repeats.
if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 1*24*time.Hour); err != nil {
glog.Errorf("Error while running cmd %s: %s", cmd, err)
return
}
// If "--output-format=csv-pivot-table" was specified then merge all CSV files and upload.
if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-table") {
for _, runID := range []string{runIDNoPatch, runIDWithPatch} {
if err := mergeUploadCSVFiles(runID, gs); err != nil {
glog.Errorf("Unable to merge and upload CSV files for %s: %s", runID, err)
}
}
}
// Compare the resultant CSV files using csv_comparer.py
noPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDNoPatch, runIDNoPatch+".output")
withPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDWithPatch, runIDWithPatch+".output")
htmlOutputDir := filepath.Join(util.StorageDir, util.ChromiumPerfRunsDir, *runID, "html")
skutil.MkdirAll(htmlOutputDir, 0700)
htmlRemoteDir := filepath.Join(remoteOutputDir, "html")
htmlOutputLinkBase := util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, htmlRemoteDir) + "/"
htmlOutputLink = htmlOutputLinkBase + "index.html"
noPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, util.BenchmarkRunsDir, runIDNoPatch, "consolidated_outputs", runIDNoPatch+".output")
withPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, util.BenchmarkRunsDir, runIDWithPatch, "consolidated_outputs", runIDWithPatch+".output")
// Construct path to the csv_comparer python script.
_, currentFile, _, _ := runtime.Caller(0)
pathToPyFiles := filepath.Join(
filepath.Dir((filepath.Dir(filepath.Dir(filepath.Dir(currentFile))))),
"py")
pathToCsvComparer := filepath.Join(pathToPyFiles, "csv_comparer.py")
args := []string{
pathToCsvComparer,
"--csv_file1=" + noPatchCSVPath,
"--csv_file2=" + withPatchCSVPath,
"--output_html=" + htmlOutputDir,
"--variance_threshold=" + strconv.FormatFloat(*varianceThreshold, 'f', 2, 64),
"--discard_outliers=" + strconv.FormatFloat(*discardOutliers, 'f', 2, 64),
"--absolute_url=" + htmlOutputLinkBase,
"--requester_email=" + *emails,
"--skia_patch_link=" + skiaPatchLink,
"--blink_patch_link=" + blinkPatchLink,
"--chromium_patch_link=" + chromiumPatchLink,
"--raw_csv_nopatch=" + noPatchOutputLink,
"--raw_csv_withpatch=" + withPatchOutputLink,
"--num_repeated=" + strconv.Itoa(*repeatBenchmark),
"--target_platform=" + *targetPlatform,
"--browser_args_nopatch=" + *browserExtraArgsNoPatch,
"--browser_args_withpatch=" + *browserExtraArgsWithPatch,
"--pageset_type=" + *pagesetType,
"--chromium_hash=" + chromiumHash,
"--skia_hash=" + skiaHash,
}
if err := util.ExecuteCmd("python", args, []string{}, 2*time.Hour, nil, nil); err != nil {
glog.Errorf("Error running csv_comparer.py: %s", err)
return
}
// Copy the HTML files to Google Storage.
if err := gs.UploadDir(htmlOutputDir, htmlRemoteDir, true); err != nil {
glog.Errorf("Could not upload %s to %s: %s", htmlOutputDir, htmlRemoteDir, err)
return
}
taskCompletedSuccessfully = true
}
示例5: downloadRemoteDir
// downloadRemoteDir downloads the specified Google Storage dir to the specified
// local dir. The local dir will be emptied and recreated. Handles multiple levels
// of directories.
func (gs *GsUtil) downloadRemoteDir(localDir, gsDir string) error {
	// Empty the local dir.
	util.RemoveAll(localDir)
	// Create the local dir.
	util.MkdirAll(localDir, 0700)
	// The channel where the storage objects to be downloaded will be sent to.
	// NOTE(review): the channel is fully populated by the listing loop below
	// before any consumer goroutine is started. If a listing ever yields more
	// than MAX_CHANNEL_SIZE objects the send blocks forever — confirm the
	// buffer size is a safe upper bound on the number of objects per dir.
	chStorageObjects := make(chan filePathToStorageObject, MAX_CHANNEL_SIZE)
	req := gs.service.Objects.List(GS_BUCKET_NAME).Prefix(gsDir + "/")
	for req != nil {
		resp, err := req.Do()
		if err != nil {
			// NOTE(review): "occured" is misspelled ("occurred") in this message.
			return fmt.Errorf("Error occured while listing %s: %s", gsDir, err)
		}
		for _, result := range resp.Items {
			fileName := filepath.Base(result.Name)
			// If downloading from subdir then add it to the fileName.
			fileGsDir := filepath.Dir(result.Name)
			subDirs := strings.TrimPrefix(fileGsDir, gsDir)
			if subDirs != "" {
				// Rebuild the relative path by prepending each path token in
				// reverse order, so fileName mirrors the remote dir layout.
				dirTokens := strings.Split(subDirs, "/")
				for i := range dirTokens {
					fileName = filepath.Join(dirTokens[len(dirTokens)-i-1], fileName)
				}
				// Create the local directory.
				util.MkdirAll(filepath.Join(localDir, filepath.Dir(fileName)), 0700)
			}
			chStorageObjects <- filePathToStorageObject{storageObject: result, filePath: fileName}
		}
		// Keep paging through listing results until no NextPageToken remains.
		if len(resp.NextPageToken) > 0 {
			req.PageToken(resp.NextPageToken)
		} else {
			req = nil
		}
	}
	close(chStorageObjects)
	// Kick off goroutines to download the storage objects.
	var wg sync.WaitGroup
	for i := 0; i < GOROUTINE_POOL_SIZE; i++ {
		wg.Add(1)
		go func(goroutineNum int) {
			defer wg.Done()
			for obj := range chStorageObjects {
				result := obj.storageObject
				filePath := obj.filePath
				respBody, err := getRespBody(result, gs.client)
				if err != nil {
					// NOTE(review): a failure terminates this worker goroutine
					// entirely and is not propagated to the caller — remaining
					// queued objects are silently skipped and the function can
					// still return nil.
					glog.Errorf("Could not fetch %s: %s", result.MediaLink, err)
					return
				}
				// NOTE(review): these defers run when the goroutine exits, not
				// at the end of each iteration, so open handles accumulate
				// across all downloads handled by this worker.
				defer util.Close(respBody)
				outputFile := filepath.Join(localDir, filePath)
				out, err := os.Create(outputFile)
				if err != nil {
					glog.Errorf("Unable to create file %s: %s", outputFile, err)
					return
				}
				defer util.Close(out)
				if _, err = io.Copy(out, respBody); err != nil {
					glog.Error(err)
					return
				}
				glog.Infof("Downloaded gs://%s/%s to %s with goroutine#%d", GS_BUCKET_NAME, result.Name, outputFile, goroutineNum)
				// Sleep for a second after downloading file to avoid bombarding Cloud
				// storage.
				time.Sleep(time.Second)
			}
		}(i + 1)
	}
	// Wait for every worker goroutine to drain the channel before reporting
	// success.
	wg.Wait()
	return nil
}
示例6: main
func main() {
defer common.LogPanic()
worker_common.Init()
defer util.TimeTrack(time.Now(), "Capturing Archives")
defer glog.Flush()
// Create the task file so that the master knows this worker is still busy.
skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES))
defer util.DeleteTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES)
if *chromiumBuild == "" {
glog.Error("Must specify --chromium_build")
return
}
// Reset the local chromium checkout.
if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil {
glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err)
return
}
// Sync the local chromium checkout.
if err := util.SyncDir(util.ChromiumSrcDir); err != nil {
glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err)
return
}
// Delete and remake the local webpage archives directory.
pathToArchives := filepath.Join(util.WebArchivesDir, *pagesetType)
skutil.RemoveAll(pathToArchives)
skutil.MkdirAll(pathToArchives, 0700)
// Instantiate GsUtil object.
gs, err := util.NewGsUtil(nil)
if err != nil {
glog.Error(err)
return
}
// Download the specified chromium build if it does not exist locally.
if err := gs.DownloadChromiumBuild(*chromiumBuild); err != nil {
glog.Error(err)
return
}
// Download pagesets if they do not exist locally.
if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
glog.Error(err)
return
}
pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType)
chromiumBinary := filepath.Join(util.ChromiumBuildsDir, *chromiumBuild, util.BINARY_CHROME)
recordWprBinary := filepath.Join(util.TelemetryBinariesDir, util.BINARY_RECORD_WPR)
timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CaptureArchivesTimeoutSecs
// Loop through all pagesets.
fileInfos, err := ioutil.ReadDir(pathToPagesets)
if err != nil {
glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err)
return
}
glog.Infof("The %s fileInfos are: %s", len(fileInfos), fileInfos)
for _, fileInfo := range fileInfos {
pagesetBaseName := filepath.Base(fileInfo.Name())
if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" {
// Ignore timestamp files and .pyc files.
continue
}
// Read the pageset.
pagesetPath := filepath.Join(pathToPagesets, fileInfo.Name())
decodedPageset, err := util.ReadPageset(pagesetPath)
if err != nil {
glog.Errorf("Could not read %s: %s", pagesetPath, err)
return
}
glog.Infof("===== Processing %s =====", pagesetPath)
args := []string{
util.CAPTURE_ARCHIVES_DEFAULT_CT_BENCHMARK,
"--extra-browser-args=--disable-setuid-sandbox",
"--browser=exact",
"--browser-executable=" + chromiumBinary,
"--user-agent=" + decodedPageset.UserAgent,
"--urls-list=" + decodedPageset.UrlsList,
"--archive-data-file=" + decodedPageset.ArchiveDataFile,
}
env := []string{
fmt.Sprintf("PYTHONPATH=%s:$PYTHONPATH", pathToPagesets),
"DISPLAY=:0",
}
skutil.LogErr(util.ExecuteCmd(recordWprBinary, args, env, time.Duration(timeoutSecs)*time.Second, nil, nil))
}
// Write timestamp to the webpage archives dir.
skutil.LogErr(util.CreateTimestampFile(pathToArchives))
// Upload webpage archives dir to Google Storage.
if err := gs.UploadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil {
glog.Error(err)
return
//.........这里部分代码省略.........
示例7: main
// main generates this worker's share of pagesets: it downloads the source CSV
// for the pageset type from Google Storage, runs create_page_set.py once per
// page in the worker's assigned range, and uploads the resulting pagesets
// directory back to Google Storage. All failures are logged and abort the run
// early (deferred cleanup still fires).
func main() {
	defer common.LogPanic()
	common.Init()
	defer util.TimeTrack(time.Now(), "Creating Pagesets")
	defer glog.Flush()
	// Create the task file so that the master knows this worker is still busy.
	skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CREATING_PAGESETS))
	defer util.DeleteTaskFile(util.ACTIVITY_CREATING_PAGESETS)
	// Delete and remake the local pagesets directory.
	pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType)
	skutil.RemoveAll(pathToPagesets)
	skutil.MkdirAll(pathToPagesets, 0700)
	// Get info about the specified pageset type.
	pagesetTypeInfo := util.PagesetTypeToInfo[*pagesetType]
	csvSource := pagesetTypeInfo.CSVSource
	numPages := pagesetTypeInfo.NumPages
	userAgent := pagesetTypeInfo.UserAgent
	// Download the CSV file from Google Storage to a tmp location.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Error(err)
		return
	}
	respBody, err := gs.GetRemoteFileContents(csvSource)
	if err != nil {
		glog.Error(err)
		return
	}
	defer skutil.Close(respBody)
	csvFile := filepath.Join(os.TempDir(), filepath.Base(csvSource))
	out, err := os.Create(csvFile)
	if err != nil {
		glog.Errorf("Unable to create file %s: %s", csvFile, err)
		return
	}
	defer skutil.Close(out)
	// The temp CSV must persist through the create_page_set.py loop below;
	// its removal is deliberately deferred to function exit.
	defer skutil.Remove(csvFile)
	if _, err = io.Copy(out, respBody); err != nil {
		glog.Error(err)
		return
	}
	// Figure out which pagesets this worker should generate.
	// Workers are numbered from 1; each takes a contiguous 1-based page range.
	// NOTE(review): any remainder of numPages/NUM_WORKERS is never assigned to
	// a worker, so the last few pages are silently dropped — confirm intended.
	numPagesPerSlave := numPages / util.NUM_WORKERS
	startNum := (*workerNum-1)*numPagesPerSlave + 1
	endNum := *workerNum * numPagesPerSlave
	// Construct path to the create_page_set.py python script.
	_, currentFile, _, _ := runtime.Caller(0)
	createPageSetScript := filepath.Join(
		filepath.Dir((filepath.Dir(filepath.Dir(filepath.Dir(currentFile))))),
		"py", "create_page_set.py")
	// Execute the create_page_set.py python script.
	timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CreatePagesetsTimeoutSecs
	for currNum := startNum; currNum <= endNum; currNum++ {
		// One invocation per page: -s and -e are both set to currNum.
		args := []string{
			createPageSetScript,
			"-s", strconv.Itoa(currNum),
			"-e", strconv.Itoa(currNum),
			"-c", csvFile,
			"-p", *pagesetType,
			"-u", userAgent,
			"-o", pathToPagesets,
		}
		if err := util.ExecuteCmd("python", args, []string{}, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil {
			glog.Error(err)
			return
		}
	}
	// Write timestamp to the pagesets dir.
	skutil.LogErr(util.CreateTimestampFile(pathToPagesets))
	// Upload pagesets dir to Google Storage.
	if err := gs.UploadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}
}
示例8: CreateChromiumBuild
// runID is the unique id of the current run (typically requester + timestamp).
// targetPlatform is the platform the benchmark will run on (Android / Linux ).
// chromiumHash is the hash the checkout should be synced to. If not specified then
// Chromium's Tot hash is used.
// skiaHash is the hash the checkout should be synced to. If not specified then
// Skia's LKGR hash is used (the hash in Chromium's DEPS file).
// applyPatches if true looks for Chromium/Blink/Skia patches in the temp dir and
// runs once with the patch applied and once without the patch applied.
func CreateChromiumBuild(runID, targetPlatform, chromiumHash, skiaHash string, applyPatches bool) (string, string, error) {
// Determine which build dir and fetch target to use.
var chromiumBuildDir, fetchTarget string
if targetPlatform == "Android" {
chromiumBuildDir = filepath.Join(ChromiumBuildsDir, "android_base")
fetchTarget = "android"
} else if targetPlatform == "Linux" {
chromiumBuildDir = filepath.Join(ChromiumBuildsDir, "linux_base")
fetchTarget = "chromium"
} else {
return "", "", fmt.Errorf("Unrecognized target_platform %s", targetPlatform)
}
util.MkdirAll(chromiumBuildDir, 0700)
// Find which Chromium commit hash should be used.
var err error
if chromiumHash == "" {
chromiumHash, err = getChromiumHash()
if err != nil {
return "", "", fmt.Errorf("Error while finding Chromium's Hash: %s", err)
}
}
// Find which Skia commit hash should be used.
if skiaHash == "" {
skiaHash, err = getSkiaHash()
if err != nil {
return "", "", fmt.Errorf("Error while finding Skia's Hash: %s", err)
}
}
// Run chromium sync command using the above commit hashes.
// Construct path to the sync_skia_in_chrome python script.
_, currentFile, _, _ := runtime.Caller(0)
pathToPyFiles := filepath.Join(
filepath.Dir((filepath.Dir(filepath.Dir(currentFile)))),
"py")
syncArgs := []string{
filepath.Join(pathToPyFiles, "sync_skia_in_chrome.py"),
"--destination=" + chromiumBuildDir,
"--fetch_target=" + fetchTarget,
"--chrome_revision=" + chromiumHash,
"--skia_revision=" + skiaHash,
}
if err := ExecuteCmd("python", syncArgs, []string{}, 2*time.Hour, nil, nil); err != nil {
glog.Warning("There was an error. Deleting base directory and trying again.")
util.RemoveAll(chromiumBuildDir)
util.MkdirAll(chromiumBuildDir, 0700)
if err := ExecuteCmd("python", syncArgs, []string{}, 2*time.Hour, nil, nil); err != nil {
return "", "", fmt.Errorf("There was an error checking out chromium %s + skia %s: %s", chromiumHash, skiaHash, err)
}
}
// Make sure we are starting from a clean slate.
if err := resetChromiumCheckout(filepath.Join(chromiumBuildDir, "src")); err != nil {
return "", "", fmt.Errorf("Could not reset the chromium checkout in %s: %s", chromiumBuildDir, err)
}
googleStorageDirName := fmt.Sprintf("%s-%s-%s", getTruncatedHash(chromiumHash), getTruncatedHash(skiaHash), runID)
if runID == "" {
// Do not include the runID in the dir name if it is not specified.
googleStorageDirName = fmt.Sprintf("%s-%s", getTruncatedHash(chromiumHash), getTruncatedHash(skiaHash))
}
if applyPatches {
if err := applyRepoPatches(filepath.Join(chromiumBuildDir, "src"), runID); err != nil {
return "", "", fmt.Errorf("Could not apply patches in the chromium checkout in %s: %s", chromiumBuildDir, err)
}
// Add "try" prefix and "withpatch" suffix.
googleStorageDirName = fmt.Sprintf("try-%s-withpatch", googleStorageDirName)
}
// Build chromium.
if err := buildChromium(chromiumBuildDir, targetPlatform); err != nil {
return "", "", fmt.Errorf("There was an error building chromium %s + skia %s: %s", chromiumHash, skiaHash, err)
}
// Upload to Google Storage.
gs, err := NewGsUtil(nil)
if err != nil {
return "", "", fmt.Errorf("Could not create GS object: %s", err)
}
if err := uploadChromiumBuild(filepath.Join(chromiumBuildDir, "src", "out", "Release"), filepath.Join(CHROMIUM_BUILDS_DIR_NAME, googleStorageDirName), targetPlatform, gs); err != nil {
return "", "", fmt.Errorf("There was an error uploaded the chromium build dir %s: %s", filepath.Join(chromiumBuildDir, "src", "out", "Release"), err)
}
// Check for the applypatch flag and reset and then build again and copy to
// google storage.
if applyPatches {
// Now build chromium without the patches and upload it to Google Storage.
// Make sure we are starting from a clean slate.
if err := resetChromiumCheckout(filepath.Join(chromiumBuildDir, "src")); err != nil {
return "", "", fmt.Errorf("Could not reset the chromium checkout in %s: %s", chromiumBuildDir, err)
}
//.........这里部分代码省略.........
示例9: main
func main() {
common.Init()
defer util.CleanTmpDir()
defer util.TimeTrack(time.Now(), "Running Chromium Perf")
defer glog.Flush()
// Validate required arguments.
if *chromiumBuildNoPatch == "" {
glog.Error("Must specify --chromium_build_nopatch")
return
}
if *chromiumBuildWithPatch == "" {
glog.Error("Must specify --chromium_build_withpatch")
return
}
if *runIDNoPatch == "" {
glog.Error("Must specify --run_id_nopatch")
return
}
if *runIDWithPatch == "" {
glog.Error("Must specify --run_id_withpatch")
return
}
if *benchmarkName == "" {
glog.Error("Must specify --benchmark_name")
return
}
// Reset the local chromium checkout.
if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil {
glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err)
return
}
// Sync the local chromium checkout.
if err := util.SyncDir(util.ChromiumSrcDir); err != nil {
glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err)
return
}
// Create the task file so that the master knows this worker is still busy.
skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF))
defer util.DeleteTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF)
if *targetPlatform == util.PLATFORM_ANDROID {
if err := adb.VerifyLocalDevice(); err != nil {
// Android device missing or offline.
glog.Errorf("Could not find Android device: %s", err)
return
}
// Make sure adb shell is running as root.
skutil.LogErr(
util.ExecuteCmd(util.BINARY_ADB, []string{"root"}, []string{}, 5*time.Minute, nil, nil))
}
// Instantiate GsUtil object.
gs, err := util.NewGsUtil(nil)
if err != nil {
glog.Error(err)
return
}
// Download the specified chromium builds.
for _, chromiumBuild := range []string{*chromiumBuildNoPatch, *chromiumBuildWithPatch} {
if err := gs.DownloadChromiumBuild(chromiumBuild); err != nil {
glog.Error(err)
return
}
//Delete the chromium build to save space when we are done.
defer skutil.RemoveAll(filepath.Join(util.ChromiumBuildsDir, chromiumBuild))
}
chromiumBinaryNoPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildNoPatch, util.BINARY_CHROME)
chromiumBinaryWithPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildWithPatch, util.BINARY_CHROME)
// Download pagesets if they do not exist locally.
if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
glog.Error(err)
return
}
pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType)
// Download archives if they do not exist locally.
if err := gs.DownloadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil {
glog.Error(err)
return
}
// Establish nopatch output paths.
localOutputDirNoPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDNoPatch)
skutil.RemoveAll(localOutputDirNoPatch)
skutil.MkdirAll(localOutputDirNoPatch, 0700)
defer skutil.RemoveAll(localOutputDirNoPatch)
remoteDirNoPatch := filepath.Join(util.BenchmarkRunsDir, *runIDNoPatch)
// Establish withpatch output paths.
localOutputDirWithPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDWithPatch)
skutil.RemoveAll(localOutputDirWithPatch)
skutil.MkdirAll(localOutputDirWithPatch, 0700)
defer skutil.RemoveAll(localOutputDirWithPatch)
remoteDirWithPatch := filepath.Join(util.BenchmarkRunsDir, *runIDWithPatch)
//.........这里部分代码省略.........