This article collects typical usage examples of the C# method IConfig.GetCompositeDiagnoser. If you have been wondering how to call IConfig.GetCompositeDiagnoser in C#, the curated examples below should help; you can also explore other members of the containing type, IConfig.
Three code examples of the IConfig.GetCompositeDiagnoser method are shown below, ordered by popularity by default.
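Before the full listings, here is a minimal sketch of the lifecycle all three examples share: check whether any diagnosers are configured, obtain the composite wrapper via GetCompositeDiagnoser, attach it to an extra benchmark run, and feed it the resulting report. It only uses calls that appear in the examples below, but it targets the same older BenchmarkDotNet API, and the helper name RunDiagnosticPass is mine; treat it as an illustration, not library code.
static void RunDiagnosticPass(ILogger logger, IConfig config, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult)
{
if (!config.GetDiagnosers().Any())
return; // no diagnosers configured, nothing to do
var compositeDiagnoser = config.GetCompositeDiagnoser(); // fans out to every configured diagnoser
compositeDiagnoser.Start(benchmark); // Example 1 era API; Example 3 uses ProcessResults instead
var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, compositeDiagnoser);
var runs = executeResult.Data
.Select(line => Measurement.Parse(logger, line, 0))
.Where(m => m.IterationMode != IterationMode.Unknown)
.ToList();
compositeDiagnoser.Stop(benchmark, new BenchmarkReport(benchmark, null, null, new[] { executeResult }, runs));
compositeDiagnoser.DisplayResults(logger); // Example 2 calls this after printing the summary
}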
Example 1: Execute
private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config)
{
var executeResults = new List<ExecuteResult>();
logger.WriteLineInfo("// *** Execute ***");
var launchCount = Math.Max(1, benchmark.Job.LaunchCount.IsAuto ? 2 : benchmark.Job.LaunchCount.Value);
for (int processNumber = 0; processNumber < launchCount; processNumber++)
{
var printedProcessNumber = (benchmark.Job.LaunchCount.IsAuto && processNumber < 2) ? "" : " / " + launchCount.ToString();
logger.WriteLineInfo($"// Launch: {processNumber + 1}{printedProcessNumber}");
var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger);
if (!executeResult.FoundExecutable)
logger.WriteLineError("Executable not found");
executeResults.Add(executeResult);
var measurements = executeResults
.SelectMany(r => r.Data)
.Select(line => Measurement.Parse(logger, line, 0))
.Where(r => r.IterationMode != IterationMode.Unknown)
.ToArray();
if (!measurements.Any())
{
// Something went wrong during the benchmark, don't bother doing more runs
logger.WriteLineError($"No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
break;
}
if (benchmark.Job.LaunchCount.IsAuto && processNumber == 1)
{
var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
var percent = idleApprox / mainApprox * 100;
launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
}
}
logger.WriteLine();
// Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
if (config.GetDiagnosers().Any())
{
logger.WriteLineInfo($"// Run, Diagnostic");
config.GetCompositeDiagnoser().Start(benchmark);
var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, config.GetCompositeDiagnoser());
var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns);
config.GetCompositeDiagnoser().Stop(benchmark, report);
if (!executeResult.FoundExecutable)
logger.WriteLineError("Executable not found");
logger.WriteLine();
}
return executeResults;
}
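A note on Example 1's auto mode: after two pilot launches it compares the median idle (overhead) time against the median main time and derives the remaining launch count from the empirical formula launchCount = round(max(2, 2 + (percent - 1) / 3)), where percent = idleMedian / mainMedian * 100. The helper below, with made-up medians, just replays that arithmetic; the function name is mine, not BenchmarkDotNet's.
static int EstimateLaunchCount(double idleMedianNs, double mainMedianNs)
{
// The larger the idle overhead relative to the workload, the noisier the result,
// so more launches are scheduled (formula copied verbatim from Example 1).
var percent = idleMedianNs / mainMedianNs * 100;
return (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3));
}
// EstimateLaunchCount(50, 1000) => percent = 5 => round(max(2, 3.33)) = 3 launches
// EstimateLaunchCount(500, 1000) => percent = 50 => round(18.33) = 18 launches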
Example 2: Run
private static Summary Run(Benchmark[] benchmarks, ILogger logger, string title, IConfig config, string rootArtifactsFolderPath, Func<IJob, IToolchain> toolchainProvider)
{
logger.WriteLineHeader("// ***** BenchmarkRunner: Start *****");
logger.WriteLineInfo("// Found benchmarks:");
foreach (var benchmark in benchmarks)
logger.WriteLineInfo($"// {benchmark.ShortInfo}");
logger.WriteLine();
var validationErrors = Validate(benchmarks, logger, config);
if (validationErrors.Any(validationError => validationError.IsCritical))
{
return Summary.CreateFailed(benchmarks, title, HostEnvironmentInfo.GetCurrent(), config, GetResultsFolderPath(rootArtifactsFolderPath), validationErrors);
}
var globalChronometer = Chronometer.Start();
var reports = new List<BenchmarkReport>();
foreach (var benchmark in benchmarks)
{
var report = Run(benchmark, logger, config, rootArtifactsFolderPath, toolchainProvider);
reports.Add(report);
if (report.GetResultRuns().Any())
logger.WriteLineStatistic(report.GetResultRuns().GetStatistics().ToTimeStr());
logger.WriteLine();
}
var clockSpan = globalChronometer.Stop();
var summary = new Summary(title, reports, HostEnvironmentInfo.GetCurrent(), config, GetResultsFolderPath(rootArtifactsFolderPath), clockSpan.GetTimeSpan(), validationErrors);
logger.WriteLineHeader("// ***** BenchmarkRunner: Finish *****");
logger.WriteLine();
logger.WriteLineHeader("// * Export *");
var currentDirectory = Directory.GetCurrentDirectory();
foreach (var file in config.GetCompositeExporter().ExportToFiles(summary))
{
logger.WriteLineInfo($" {file.Replace(currentDirectory, string.Empty).Trim('/', '\\')}");
}
logger.WriteLine();
logger.WriteLineHeader("// * Detailed results *");
// TODO: make exporter
foreach (var report in reports)
{
logger.WriteLineInfo(report.Benchmark.ShortInfo);
logger.WriteLineStatistic(report.GetResultRuns().GetStatistics().ToTimeStr());
logger.WriteLine();
}
LogTotalTime(logger, clockSpan.GetTimeSpan());
logger.WriteLine();
logger.WriteLineHeader("// * Summary *");
MarkdownExporter.Console.ExportToLog(summary, logger);
// TODO: make exporter
var warnings = config.GetCompositeAnalyser().Analyse(summary).ToList();
if (warnings.Count > 0)
{
logger.WriteLine();
logger.WriteLineError("// * Warnings * ");
foreach (var warning in warnings)
logger.WriteLineError($"{warning.Message}");
}
if (config.GetDiagnosers().Any())
{
logger.WriteLine();
config.GetCompositeDiagnoser().DisplayResults(logger);
}
logger.WriteLine();
logger.WriteLineHeader("// ***** BenchmarkRunner: End *****");
return summary;
}
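Example 2 only consumes diagnosers, so for completeness here is a sketch of how they usually end up inside the IConfig, making GetDiagnosers() non-empty and giving GetCompositeDiagnoser() something to wrap. MyDiagnoser stands for any IDiagnoser implementation and is hypothetical; the ManualConfig.Add overload is assumed from the BenchmarkDotNet version these examples target.
var config = ManualConfig.Create(DefaultConfig.Instance);
config.Add(new MyDiagnoser()); // hypothetical IDiagnoser, e.g. a memory or ETW diagnoser
// With at least one diagnoser registered, the Run method above will both launch
// the extra diagnostic pass and call DisplayResults(logger) at the end.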
Example 3: Execute
private static List<ExecuteResult> Execute(ILogger logger, Benchmark benchmark, IToolchain toolchain, BuildResult buildResult, IConfig config, IResolver resolver)
{
var executeResults = new List<ExecuteResult>();
logger.WriteLineInfo("// *** Execute ***");
bool analyzeRunToRunVariance = benchmark.Job.ResolveValue(AccuracyMode.AnalyzeLaunchVarianceCharacteristic, resolver);
bool autoLaunchCount = !benchmark.Job.HasValue(RunMode.LaunchCountCharacteristic);
int defaultValue = analyzeRunToRunVariance ? 2 : 1;
int launchCount = Math.Max(1, autoLaunchCount ? defaultValue : benchmark.Job.Run.LaunchCount);
for (int launchIndex = 0; launchIndex < launchCount; launchIndex++)
{
string printedLaunchCount = (analyzeRunToRunVariance && autoLaunchCount && launchIndex < 2) ? "" : " / " + launchCount;
logger.WriteLineInfo($"// Launch: {launchIndex + 1}{printedLaunchCount}");
var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver);
if (!executeResult.FoundExecutable)
logger.WriteLineError("Executable not found");
if (executeResult.ExitCode != 0)
logger.WriteLineError("ExitCode != 0");
executeResults.Add(executeResult);
var measurements = executeResults
.SelectMany(r => r.Data)
.Select(line => Measurement.Parse(logger, line, 0))
.Where(r => r.IterationMode != IterationMode.Unknown)
.ToArray();
if (!measurements.Any())
{
// Something went wrong during the benchmark, don't bother doing more runs
logger.WriteLineError($"No more Benchmark runs will be launched as NO measurements were obtained from the previous run!");
break;
}
if (autoLaunchCount && launchIndex == 1 && analyzeRunToRunVariance)
{
// TODO: improve this logic
var idleApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.IdleTarget).Select(m => m.Nanoseconds)).Median;
var mainApprox = new Statistics(measurements.Where(m => m.IterationMode == IterationMode.MainTarget).Select(m => m.Nanoseconds)).Median;
var percent = idleApprox / mainApprox * 100;
launchCount = (int)Math.Round(Math.Max(2, 2 + (percent - 1) / 3)); // an empirical formula
}
}
logger.WriteLine();
// Do a "Diagnostic" run, but DISCARD the results, so that the overhead of Diagnostics doesn't skew the overall results
if (config.GetDiagnosers().Any())
{
logger.WriteLineInfo("// Run, Diagnostic");
var compositeDiagnoser = config.GetCompositeDiagnoser();
var executeResult = toolchain.Executor.Execute(buildResult, benchmark, logger, resolver, compositeDiagnoser);
var allRuns = executeResult.Data.Select(line => Measurement.Parse(logger, line, 0)).Where(r => r.IterationMode != IterationMode.Unknown).ToList();
var report = new BenchmarkReport(benchmark, null, null, new[] { executeResult }, allRuns);
compositeDiagnoser.ProcessResults(benchmark, report);
if (!executeResult.FoundExecutable)
logger.WriteLineError("Executable not found");
logger.WriteLine();
}
return executeResults;
}
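Comparing Examples 1 and 3 shows the diagnoser contract evolving: the older code drives Start/Stop around the extra run, while the newer code hands the finished report to a single ProcessResults call. A diagnoser written against the surface these three examples exercise would look roughly like the skeleton below; this is inferred from the calls above, and the real IDiagnoser interface differs between BenchmarkDotNet versions.
public class MyDiagnoser : IDiagnoser
{
public void Start(Benchmark benchmark) { /* hook the process before the run (Example 1) */ }
public void Stop(Benchmark benchmark, BenchmarkReport report) { /* collect after the run (Example 1) */ }
public void ProcessResults(Benchmark benchmark, BenchmarkReport report) { /* newer single entry point (Example 3) */ }
public void DisplayResults(ILogger logger) { /* print findings; called from Run in Example 2 */ }
}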