From 2033cd827f62a934958492813a043f871d8eba5d Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Thu, 5 Dec 2024 15:16:26 +0100 Subject: [PATCH 01/21] Refactor flakeguard reports --- tools/flakeguard/cmd/aggregate_results.go | 116 ++-- tools/flakeguard/cmd/check_test_owners.go | 4 +- tools/flakeguard/cmd/filter_results.go | 45 ++ tools/flakeguard/cmd/generate_report.go | 77 +++ tools/flakeguard/cmd/run.go | 23 +- tools/flakeguard/main.go | 1 + tools/flakeguard/reports/data.go | 221 ++++++++ tools/flakeguard/reports/data_test.go | 449 +++++++++++++++ tools/flakeguard/reports/io.go | 109 ++++ tools/flakeguard/reports/presentation.go | 201 +++++++ tools/flakeguard/reports/presentation_test.go | 267 +++++++++ tools/flakeguard/reports/reports.go | 536 ------------------ tools/flakeguard/reports/reports_test.go | 384 ------------- 13 files changed, 1443 insertions(+), 990 deletions(-) create mode 100644 tools/flakeguard/cmd/filter_results.go create mode 100644 tools/flakeguard/cmd/generate_report.go create mode 100644 tools/flakeguard/reports/data.go create mode 100644 tools/flakeguard/reports/data_test.go create mode 100644 tools/flakeguard/reports/io.go create mode 100644 tools/flakeguard/reports/presentation.go create mode 100644 tools/flakeguard/reports/presentation_test.go delete mode 100644 tools/flakeguard/reports/reports.go delete mode 100644 tools/flakeguard/reports/reports_test.go diff --git a/tools/flakeguard/cmd/aggregate_results.go b/tools/flakeguard/cmd/aggregate_results.go index 5a31f433e..924520abd 100644 --- a/tools/flakeguard/cmd/aggregate_results.go +++ b/tools/flakeguard/cmd/aggregate_results.go @@ -1,100 +1,90 @@ package cmd import ( - "encoding/json" - "log" - "os" - "path/filepath" + "fmt" "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" "github.com/spf13/cobra" ) -var ( - resultsFolderPath string - outputResultsPath string - outputLogsPath string - codeOwnersPath string - projectPath string - maxPassRatio float64 - filterFailed bool -) - var AggregateResultsCmd = &cobra.Command{ Use: "aggregate-results", - Short: "Aggregate test results and optionally filter failed tests based on a threshold", + Short: "Aggregate test results into a single report, with optional filtering and code owners mapping", RunE: func(cmd *cobra.Command, args []string) error { - // Read test reports from files - var testReports []*reports.TestReport - err := filepath.Walk(resultsFolderPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() && filepath.Ext(path) == ".json" { - // Read file content - data, readErr := os.ReadFile(path) - if readErr != nil { - return readErr - } - var report *reports.TestReport - if jsonErr := json.Unmarshal(data, &report); jsonErr != nil { - return jsonErr - } - testReports = append(testReports, report) - } - return nil - }) + // Get flag values + aggregateResultsPath, _ := cmd.Flags().GetString("results-path") + aggregateOutputPath, _ := cmd.Flags().GetString("output-path") + includeOutputs, _ := cmd.Flags().GetBool("include-outputs") + includePackageOutputs, _ := cmd.Flags().GetBool("include-package-outputs") + filterFailed, _ := cmd.Flags().GetBool("filter-failed") + maxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") + codeOwnersPath, _ := cmd.Flags().GetString("codeowners-path") + repoPath, _ := cmd.Flags().GetString("repo-path") + + // Load test reports from JSON files + testReports, err := 
reports.LoadReports(aggregateResultsPath) if err != nil { - log.Fatalf("Error reading test reports: %v", err) + return fmt.Errorf("error loading test reports: %w", err) } - allReport, err := reports.Aggregate(testReports...) + // Aggregate the reports + aggregatedReport, err := reports.Aggregate(testReports...) if err != nil { - log.Fatalf("Error aggregating results: %v", err) + return fmt.Errorf("error aggregating test reports: %w", err) } - // Map test results to paths - err = reports.MapTestResultsToPaths(allReport, projectPath) + // Map test results to test paths + err = reports.MapTestResultsToPaths(aggregatedReport, repoPath) if err != nil { - log.Fatalf("Error mapping test results to paths: %v", err) + return fmt.Errorf("error mapping test results to paths: %w", err) } - // Map test results to owners if CODEOWNERS path is provided + // Map test results to code owners if codeOwnersPath is provided if codeOwnersPath != "" { - err = reports.MapTestResultsToOwners(allReport, codeOwnersPath) + err = reports.MapTestResultsToOwners(aggregatedReport, codeOwnersPath) if err != nil { - log.Fatalf("Error mapping test results to owners: %v", err) + return fmt.Errorf("error mapping test results to code owners: %w", err) } } - var resultsToSave []reports.TestResult - + // Filter results if needed if filterFailed { - // Filter to only include tests that failed below the threshold - for _, result := range allReport.Results { - if result.PassRatio < maxPassRatio && !result.Skipped { - resultsToSave = append(resultsToSave, result) + aggregatedReport.Results = reports.FilterTests(aggregatedReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < maxPassRatio + }) + } + + // Process the aggregated results based on the flags + if !includeOutputs || !includePackageOutputs { + for i := range aggregatedReport.Results { + if !includeOutputs { + aggregatedReport.Results[i].Outputs = nil + } + if !includePackageOutputs { + aggregatedReport.Results[i].PackageOutputs = nil } } - } else { - resultsToSave = allReport.Results } - allReport.Results = resultsToSave - // Output results to JSON files - if len(resultsToSave) > 0 { - return reports.SaveFilteredResultsAndLogs(outputResultsPath, outputLogsPath, allReport, codeOwnersPath != "") + // Save the aggregated report + if err := reports.SaveReport(reports.OSFileSystem{}, aggregateOutputPath, *aggregatedReport); err != nil { + return fmt.Errorf("error saving aggregated report: %w", err) } + + fmt.Printf("Aggregated report saved to %s\n", aggregateOutputPath) return nil }, } func init() { - AggregateResultsCmd.Flags().StringVarP(&resultsFolderPath, "results-path", "p", "", "Path to the folder containing JSON test result files") - AggregateResultsCmd.Flags().StringVarP(&outputResultsPath, "output-results", "o", "./results", "Path to output the aggregated or filtered test results in JSON and markdown format") - AggregateResultsCmd.Flags().StringVarP(&outputLogsPath, "output-logs", "l", "", "Path to output the filtered test logs in JSON format") - AggregateResultsCmd.Flags().Float64VarP(&maxPassRatio, "max-pass-ratio", "m", 1.0, "The maximum (non-inclusive) pass ratio threshold for a test to be considered a failure. 
Any tests below this pass rate will be considered flaky.") - AggregateResultsCmd.Flags().BoolVarP(&filterFailed, "filter-failed", "f", false, "If true, filter and output only failed tests based on the max-pass-ratio threshold") - AggregateResultsCmd.Flags().StringVarP(&codeOwnersPath, "codeowners-path", "c", "", "Path to the CODEOWNERS file") - AggregateResultsCmd.Flags().StringVarP(&projectPath, "project-path", "r", ".", "The path to the Go project. Default is the current directory. Useful for subprojects") + AggregateResultsCmd.Flags().StringP("results-path", "p", "", "Path to the folder containing JSON test result files (required)") + AggregateResultsCmd.Flags().StringP("output-path", "o", "./aggregated-results.json", "Path to output the aggregated test results") + AggregateResultsCmd.Flags().Bool("include-outputs", false, "Include test outputs in the aggregated test results") + AggregateResultsCmd.Flags().Bool("include-package-outputs", false, "Include test package outputs in the aggregated test results") + AggregateResultsCmd.Flags().Bool("filter-failed", false, "If true, filter and output only failed tests based on the max-pass-ratio threshold") + AggregateResultsCmd.Flags().Float64("max-pass-ratio", 1.0, "The maximum pass ratio threshold for a test to be considered flaky. Any tests below this pass rate will be considered flaky.") + AggregateResultsCmd.Flags().String("codeowners-path", "", "Path to the CODEOWNERS file") + AggregateResultsCmd.Flags().String("repo-path", ".", "The path to the root of the repository/project") + AggregateResultsCmd.MarkFlagRequired("results-path") + AggregateResultsCmd.MarkFlagRequired("repo-path") } diff --git a/tools/flakeguard/cmd/check_test_owners.go b/tools/flakeguard/cmd/check_test_owners.go index b6c348e48..aa9199366 100644 --- a/tools/flakeguard/cmd/check_test_owners.go +++ b/tools/flakeguard/cmd/check_test_owners.go @@ -20,6 +20,8 @@ var CheckTestOwnersCmd = &cobra.Command{ Use: "check-test-owners", Short: "Check which tests in the project do not have code owners", RunE: func(cmd *cobra.Command, args []string) error { + projectPath, _ := cmd.Flags().GetString("project-path") + // Scan project for test functions testFileMap, err := reports.ScanTestFiles(projectPath) if err != nil { @@ -79,7 +81,7 @@ var CheckTestOwnersCmd = &cobra.Command{ } func init() { - CheckTestOwnersCmd.Flags().StringVarP(&projectPath, "project-path", "p", ".", "Path to the root of the project") + CheckTestOwnersCmd.Flags().StringP("project-path", "p", ".", "Path to the root of the project") CheckTestOwnersCmd.Flags().StringVarP(&codeownersPath, "codeowners-path", "c", ".github/CODEOWNERS", "Path to the CODEOWNERS file") CheckTestOwnersCmd.Flags().BoolVarP(&printTestFunctions, "print-test-functions", "t", false, "Print all test functions without owners") } diff --git a/tools/flakeguard/cmd/filter_results.go b/tools/flakeguard/cmd/filter_results.go new file mode 100644 index 000000000..286afcb0f --- /dev/null +++ b/tools/flakeguard/cmd/filter_results.go @@ -0,0 +1,45 @@ +package cmd + +import ( + "fmt" + + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" + + "github.com/spf13/cobra" +) + +var ( + filterInputPath string + filterOutputPath string + filterMaxPassRatio float64 +) + +var filterCmd = &cobra.Command{ + Use: "filter", + Short: "Filter aggregated test results based on criteria", + RunE: func(cmd *cobra.Command, args []string) error { + // Load the aggregated report + aggregatedReport, err := reports.LoadReport(filterInputPath) + if 
err != nil { + return fmt.Errorf("error loading aggregated report: %w", err) + } + + // Filter the test results + filteredReport := reports.FilterResults(aggregatedReport, filterMaxPassRatio) + + // Save the filtered report + if err := reports.SaveReport(reports.OSFileSystem{}, filterOutputPath, *filteredReport); err != nil { + return fmt.Errorf("error saving filtered report: %w", err) + } + + fmt.Printf("Filtered report saved to %s\n", filterOutputPath) + return nil + }, +} + +func init() { + filterCmd.Flags().StringVarP(&filterInputPath, "input-path", "i", "", "Path to the aggregated test results file (required)") + filterCmd.Flags().StringVarP(&filterOutputPath, "output-path", "o", "./filtered-results.json", "Path to output the filtered test results") + filterCmd.Flags().Float64VarP(&filterMaxPassRatio, "max-pass-ratio", "m", 1.0, "Maximum pass ratio threshold for filtering tests") + filterCmd.MarkFlagRequired("input-path") +} diff --git a/tools/flakeguard/cmd/generate_report.go b/tools/flakeguard/cmd/generate_report.go new file mode 100644 index 000000000..6dccda2c5 --- /dev/null +++ b/tools/flakeguard/cmd/generate_report.go @@ -0,0 +1,77 @@ +package cmd + +import ( + "fmt" + "strings" + + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" + "github.com/spf13/cobra" +) + +var ( + reportInputPath string + reportOutputPath string + reportFormat string + reportCodeOwnersPath string + reportProjectPath string +) + +var GenerateReportCmd = &cobra.Command{ + Use: "generate-report", + Short: "Generate reports from test results", + RunE: func(cmd *cobra.Command, args []string) error { + // Load the test results + testReport, err := reports.LoadReport(reportInputPath) + if err != nil { + return fmt.Errorf("error loading test report: %w", err) + } + + // Generate the report + if err := generateReport(testReport, reportFormat, reportOutputPath); err != nil { + return fmt.Errorf("error generating report: %w", err) + } + + fmt.Printf("Report generated at %s\n", reportOutputPath) + return nil + }, +} + +func init() { + GenerateReportCmd.Flags().StringVarP(&reportInputPath, "aggregated-report-path", "i", "", "Path to the aggregated test results file (required)") + GenerateReportCmd.Flags().StringVarP(&reportOutputPath, "output-path", "o", "./report", "Path to output the generated report (without extension)") + GenerateReportCmd.Flags().StringVarP(&reportFormat, "format", "f", "markdown", "Format of the report (markdown, json)") + GenerateReportCmd.MarkFlagRequired("aggregated-report-path") +} + +func generateReport(report *reports.TestReport, format, outputPath string) error { + fs := reports.OSFileSystem{} + switch strings.ToLower(format) { + case "markdown": + mdFileName := outputPath + ".md" + mdFile, err := fs.Create(mdFileName) + if err != nil { + return fmt.Errorf("error creating markdown file: %w", err) + } + defer mdFile.Close() + reports.GenerateMarkdownSummary(mdFile, report, 1.0) + fmt.Printf("Markdown report saved to %s\n", mdFileName) + case "json": + jsonFileName := outputPath + ".json" + if err := reports.SaveReportNoLogs(fs, jsonFileName, *report); err != nil { + return fmt.Errorf("error saving JSON report: %w", err) + } + fmt.Printf("JSON report saved to %s\n", jsonFileName) + default: + return fmt.Errorf("unsupported report format: %s", format) + } + + // Generate summary JSON + summaryData := reports.GenerateSummaryData(report.Results, 1.0) + summaryFileName := outputPath + "-summary.json" + if err := reports.SaveSummaryAsJSON(fs, summaryFileName, 
summaryData); err != nil { + return fmt.Errorf("error saving summary JSON: %w", err) + } + fmt.Printf("Summary JSON saved to %s\n", summaryFileName) + + return nil +} diff --git a/tools/flakeguard/cmd/run.go b/tools/flakeguard/cmd/run.go index d8530d613..acfeac47e 100644 --- a/tools/flakeguard/cmd/run.go +++ b/tools/flakeguard/cmd/run.go @@ -17,6 +17,7 @@ var RunTestsCmd = &cobra.Command{ Use: "run", Short: "Run tests to check if they are flaky", Run: func(cmd *cobra.Command, args []string) { + // Retrieve flags projectPath, _ := cmd.Flags().GetString("project-path") testPackagesJson, _ := cmd.Flags().GetString("test-packages-json") testPackagesArg, _ := cmd.Flags().GetStringSlice("test-packages") @@ -37,6 +38,7 @@ var RunTestsCmd = &cobra.Command{ log.Fatalf("Error: %v", err) } + // Determine test packages var testPackages []string if testPackagesJson != "" { if err := json.Unmarshal([]byte(testPackagesJson), &testPackages); err != nil { @@ -48,7 +50,8 @@ var RunTestsCmd = &cobra.Command{ log.Fatalf("Error: must specify either --test-packages-json or --test-packages") } - runner := runner.Runner{ + // Initialize the runner + testRunner := runner.Runner{ ProjectPath: projectPath, Verbose: true, RunCount: runCount, @@ -62,7 +65,8 @@ var RunTestsCmd = &cobra.Command{ ShuffleSeed: shuffleSeed, } - testReport, err := runner.RunTests() + // Run the tests + testReport, err := testRunner.RunTests() if err != nil { fmt.Printf("Error running tests: %v\n", err) os.Exit(1) @@ -71,7 +75,8 @@ var RunTestsCmd = &cobra.Command{ // Print all failed tests including flaky tests if printFailedTests { fmt.Printf("PassRatio threshold for flaky tests: %.2f\n", maxPassRatio) - reports.PrintResults(os.Stdout, testReport.Results, maxPassRatio, false, false) + // Use RenderResults instead of PrintResults + reports.RenderResults(os.Stdout, testReport.Results, maxPassRatio, false) } // Save the test results in JSON format @@ -80,14 +85,20 @@ var RunTestsCmd = &cobra.Command{ if err != nil { log.Fatalf("Error marshaling test results to JSON: %v", err) } - if err := os.WriteFile(outputPath, jsonData, 0644); err != nil { //nolint:gosec + if err := os.WriteFile(outputPath, jsonData, 0644); err != nil { log.Fatalf("Error writing test results to file: %v", err) } fmt.Printf("All test results saved to %s\n", outputPath) } - flakyTests := reports.FilterFlakyTests(testReport.Results, maxPassRatio) + // Filter flaky tests using FilterTests + flakyTests := reports.FilterTests(testReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < maxPassRatio + }) + if len(flakyTests) > 0 { + fmt.Printf("Found %d flaky tests below the pass ratio threshold of %.2f:\n", len(flakyTests), maxPassRatio) + reports.RenderResults(os.Stdout, flakyTests, maxPassRatio, false) // Exit with error code if there are flaky tests os.Exit(1) } else if len(testReport.Results) == 0 { @@ -112,7 +123,7 @@ func init() { RunTestsCmd.Flags().StringSlice("skip-tests", nil, "Comma-separated list of test names to skip from running") RunTestsCmd.Flags().StringSlice("select-tests", nil, "Comma-separated list of test names to specifically run") RunTestsCmd.Flags().Bool("print-failed-tests", true, "Print failed test results to the console") - RunTestsCmd.Flags().Float64("max-pass-ratio", 1.0, "The maximum (non-inclusive) pass ratio threshold for a test to be considered a failure. 
Any tests below this pass rate will be considered flaky.") + RunTestsCmd.Flags().Float64("max-pass-ratio", 1.0, "The maximum pass ratio threshold for a test to be considered flaky. Any tests below this pass rate will be considered flaky.") } func checkDependencies(projectPath string) error { diff --git a/tools/flakeguard/main.go b/tools/flakeguard/main.go index 7e3e32811..ad22e6f7a 100644 --- a/tools/flakeguard/main.go +++ b/tools/flakeguard/main.go @@ -30,6 +30,7 @@ func init() { rootCmd.AddCommand(cmd.RunTestsCmd) rootCmd.AddCommand(cmd.AggregateResultsCmd) rootCmd.AddCommand(cmd.CheckTestOwnersCmd) + rootCmd.AddCommand(cmd.GenerateReportCmd) } func main() { diff --git a/tools/flakeguard/reports/data.go b/tools/flakeguard/reports/data.go new file mode 100644 index 000000000..a34f3f2e2 --- /dev/null +++ b/tools/flakeguard/reports/data.go @@ -0,0 +1,221 @@ +package reports + +import ( + "fmt" + "math" + "sort" + "strings" + "time" +) + +// Data Structures + +type TestReport struct { + GoProject string + TestRunCount int + RaceDetection bool + ExcludedTests []string + SelectedTests []string + Results []TestResult +} + +type TestResult struct { + TestName string + TestPackage string + PackagePanic bool + Panic bool + Timeout bool + Race bool + Skipped bool + PassRatio float64 + Runs int + Failures int + Successes int + Skips int + Outputs []string + Durations []time.Duration + PackageOutputs []string + TestPath string + CodeOwners []string +} + +type SummaryData struct { + TotalTests int `json:"total_tests"` + PanickedTests int `json:"panicked_tests"` + RacedTests int `json:"raced_tests"` + FlakyTests int `json:"flaky_tests"` + FlakyTestRatio string `json:"flaky_test_ratio"` + TotalRuns int `json:"total_runs"` + PassedRuns int `json:"passed_runs"` + FailedRuns int `json:"failed_runs"` + SkippedRuns int `json:"skipped_runs"` + PassRatio string `json:"pass_ratio"` + MaxPassRatio float64 `json:"max_pass_ratio"` + AveragePassRatio float64 `json:"average_pass_ratio"` +} + +// Data Processing Functions + +func GenerateSummaryData(tests []TestResult, maxPassRatio float64) SummaryData { + var runs, passes, fails, skips, panickedTests, racedTests, flakyTests int + for _, result := range tests { + runs += result.Runs + passes += result.Successes + fails += result.Failures + skips += result.Skips + if result.Panic { + panickedTests++ + flakyTests++ + } else if result.Race { + racedTests++ + flakyTests++ + } else if result.PassRatio < maxPassRatio { + flakyTests++ + } + } + + passPercentage := 100.0 + flakePercentage := 0.0 + averagePassRatio := 1.0 + + if runs > 0 { + passPercentage = math.Round((float64(passes)/float64(runs)*100)*100) / 100 + averagePassRatio = float64(passes) / float64(runs) + } + if len(tests) > 0 { + flakePercentage = math.Round((float64(flakyTests)/float64(len(tests))*100)*100) / 100 + } + + return SummaryData{ + TotalTests: len(tests), + PanickedTests: panickedTests, + RacedTests: racedTests, + FlakyTests: flakyTests, + FlakyTestRatio: fmt.Sprintf("%.2f%%", flakePercentage), + TotalRuns: runs, + PassedRuns: passes, + FailedRuns: fails, + SkippedRuns: skips, + PassRatio: fmt.Sprintf("%.2f%%", passPercentage), + MaxPassRatio: maxPassRatio, + AveragePassRatio: averagePassRatio, + } +} + +func FilterResults(report *TestReport, maxPassRatio float64) *TestReport { + filteredResults := FilterTests(report.Results, func(tr TestResult) bool { + return !tr.Skipped && tr.PassRatio < maxPassRatio + }) + report.Results = filteredResults + return report +} + +func FilterTests(results 
[]TestResult, predicate func(TestResult) bool) []TestResult { + var filtered []TestResult + for _, result := range results { + if predicate(result) { + filtered = append(filtered, result) + } + } + return filtered +} + +func Aggregate(reports ...*TestReport) (*TestReport, error) { + testMap := make(map[string]TestResult) + fullReport := &TestReport{} + excludedTests := map[string]struct{}{} + selectedTests := map[string]struct{}{} + + for _, report := range reports { + if fullReport.GoProject == "" { + fullReport.GoProject = report.GoProject + } else if fullReport.GoProject != report.GoProject { + return nil, fmt.Errorf("reports with different Go projects found, expected %s, got %s", fullReport.GoProject, report.GoProject) + } + fullReport.TestRunCount += report.TestRunCount + fullReport.RaceDetection = report.RaceDetection && fullReport.RaceDetection + for _, test := range report.ExcludedTests { + excludedTests[test] = struct{}{} + } + for _, test := range report.SelectedTests { + selectedTests[test] = struct{}{} + } + for _, result := range report.Results { + key := result.TestName + "|" + result.TestPackage + if existing, found := testMap[key]; found { + existing = mergeTestResults(existing, result) + testMap[key] = existing + } else { + testMap[key] = result + } + } + } + + for test := range excludedTests { + fullReport.ExcludedTests = append(fullReport.ExcludedTests, test) + } + for test := range selectedTests { + fullReport.SelectedTests = append(fullReport.SelectedTests, test) + } + + var aggregatedResults []TestResult + for _, result := range testMap { + aggregatedResults = append(aggregatedResults, result) + } + + sortTestResults(aggregatedResults) + fullReport.Results = aggregatedResults + + return fullReport, nil +} + +func mergeTestResults(a, b TestResult) TestResult { + a.Runs += b.Runs + a.Durations = append(a.Durations, b.Durations...) + a.Outputs = append(a.Outputs, b.Outputs...) + a.PackageOutputs = append(a.PackageOutputs, b.PackageOutputs...) + a.Successes += b.Successes + a.Failures += b.Failures + a.Panic = a.Panic || b.Panic + a.Race = a.Race || b.Race + a.Skips += b.Skips + a.Skipped = a.Skipped && b.Skipped + + if a.Runs > 0 { + a.PassRatio = float64(a.Successes) / float64(a.Runs) + } else { + a.PassRatio = 0.0 + } + + return a +} + +func sortTestResults(results []TestResult) { + sort.Slice(results, func(i, j int) bool { + if results[i].TestPackage != results[j].TestPackage { + return results[i].TestPackage < results[j].TestPackage + } + iParts := strings.Split(results[i].TestName, "/") + jParts := strings.Split(results[j].TestName, "/") + for k := 0; k < len(iParts) && k < len(jParts); k++ { + if iParts[k] != jParts[k] { + return iParts[k] < jParts[k] + } + } + if len(iParts) != len(jParts) { + return len(iParts) < len(jParts) + } + return results[i].PassRatio < results[j].PassRatio + }) +} + +func avgDuration(durations []time.Duration) time.Duration { + if len(durations) == 0 { + return 0 + } + var total time.Duration + for _, d := range durations { + total += d + } + return total / time.Duration(len(durations)) +} diff --git a/tools/flakeguard/reports/data_test.go b/tools/flakeguard/reports/data_test.go new file mode 100644 index 000000000..d8fed49fc --- /dev/null +++ b/tools/flakeguard/reports/data_test.go @@ -0,0 +1,449 @@ +package reports + +import ( + "math" + "reflect" + "sort" + "testing" + "time" +) + +// TestGenerateSummaryData tests the GenerateSummaryData function. 
+func TestGenerateSummaryData(t *testing.T) { + tests := []struct { + name string + testResults []TestResult + maxPassRatio float64 + expected SummaryData + }{ + { + name: "All tests passed", + testResults: []TestResult{ + {PassRatio: 1.0, Runs: 10, Successes: 10}, + {PassRatio: 1.0, Runs: 5, Successes: 5}, + }, + maxPassRatio: 1.0, + expected: SummaryData{ + TotalTests: 2, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 0, + FlakyTestRatio: "0.00%", + TotalRuns: 15, + PassedRuns: 15, + FailedRuns: 0, + SkippedRuns: 0, + PassRatio: "100.00%", + MaxPassRatio: 1.0, + AveragePassRatio: 1.0, + }, + }, + { + name: "Some flaky tests", + testResults: []TestResult{ + {PassRatio: 0.8, Runs: 10, Successes: 8, Failures: 2}, + {PassRatio: 1.0, Runs: 5, Successes: 5}, + {PassRatio: 0.5, Runs: 4, Successes: 2, Failures: 2}, + }, + maxPassRatio: 0.9, + expected: SummaryData{ + TotalTests: 3, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 2, + FlakyTestRatio: "66.67%", + TotalRuns: 19, + PassedRuns: 15, + FailedRuns: 4, + SkippedRuns: 0, + PassRatio: "78.95%", + MaxPassRatio: 0.9, + AveragePassRatio: 0.7894736842105263, + }, + }, + { + name: "Tests with panics and races", + testResults: []TestResult{ + {PassRatio: 1.0, Runs: 5, Successes: 5, Panic: true}, + {PassRatio: 0.9, Runs: 10, Successes: 9, Failures: 1, Race: true}, + {PassRatio: 1.0, Runs: 3, Successes: 3}, + }, + maxPassRatio: 1.0, + expected: SummaryData{ + TotalTests: 3, + PanickedTests: 1, + RacedTests: 1, + FlakyTests: 2, + FlakyTestRatio: "66.67%", + TotalRuns: 18, + PassedRuns: 17, + FailedRuns: 1, + SkippedRuns: 0, + PassRatio: "94.44%", + MaxPassRatio: 1.0, + AveragePassRatio: 0.9444444444444444, + }, + }, + { + name: "No tests ran", + testResults: []TestResult{}, + maxPassRatio: 1.0, + expected: SummaryData{ + TotalTests: 0, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 0, + FlakyTestRatio: "0.00%", + TotalRuns: 0, + PassedRuns: 0, + FailedRuns: 0, + SkippedRuns: 0, + PassRatio: "100.00%", + MaxPassRatio: 1.0, + AveragePassRatio: 1.0, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + summary := GenerateSummaryData(tc.testResults, tc.maxPassRatio) + if !reflect.DeepEqual(summary, tc.expected) { + t.Errorf("Expected %+v, got %+v", tc.expected, summary) + } + }) + } +} + +// TestFilterTests tests the FilterTests function. 
+func TestFilterTests(t *testing.T) { + testResults := []TestResult{ + {TestName: "TestA", PassRatio: 1.0, Skipped: false}, + {TestName: "TestB", PassRatio: 0.8, Skipped: false}, + {TestName: "TestC", PassRatio: 0.7, Skipped: true}, + {TestName: "TestD", PassRatio: 0.6, Skipped: false}, + } + + // Filter tests with PassRatio < 0.9 and not skipped + filtered := FilterTests(testResults, func(tr TestResult) bool { + return !tr.Skipped && tr.PassRatio < 0.9 + }) + + expected := []TestResult{ + {TestName: "TestB", PassRatio: 0.8, Skipped: false}, + {TestName: "TestD", PassRatio: 0.6, Skipped: false}, + } + + if !reflect.DeepEqual(filtered, expected) { + t.Errorf("Expected %+v, got %+v", expected, filtered) + } +} + +func TestFilterFailedTests(t *testing.T) { + results := []TestResult{ + {TestName: "Test1", PassRatio: 0.5, Skipped: false}, + {TestName: "Test2", PassRatio: 0.9, Skipped: false}, + {TestName: "Test3", PassRatio: 0.3, Skipped: false}, + {TestName: "Test4", PassRatio: 0.8, Skipped: true}, // Skipped test + } + + failedTests := FilterTests(results, func(tr TestResult) bool { + return !tr.Skipped && tr.PassRatio < 0.6 + }) + expected := []TestResult{ + {TestName: "Test1", PassRatio: 0.5, Skipped: false}, + {TestName: "Test3", PassRatio: 0.3, Skipped: false}, + } + + if !reflect.DeepEqual(failedTests, expected) { + t.Errorf("Expected failed tests %+v, got %+v", expected, failedTests) + } +} + +func TestFilterPassedTests(t *testing.T) { + results := []TestResult{ + {TestName: "Test1", PassRatio: 0.7, Skipped: false}, + {TestName: "Test2", PassRatio: 1.0, Skipped: false}, + {TestName: "Test3", PassRatio: 0.3, Skipped: false}, + {TestName: "Test4", PassRatio: 0.8, Skipped: true}, // Skipped test + } + + passedTests := FilterTests(results, func(tr TestResult) bool { + return !tr.Skipped && tr.PassRatio >= 0.6 + }) + expected := []TestResult{ + {TestName: "Test1", PassRatio: 0.7, Skipped: false}, + {TestName: "Test2", PassRatio: 1.0, Skipped: false}, + } + + if !reflect.DeepEqual(passedTests, expected) { + t.Errorf("Expected passed tests %+v, got %+v", expected, passedTests) + } +} + +func TestFilterSkippedTests(t *testing.T) { + results := []TestResult{ + {TestName: "Test1", PassRatio: 0.7, Skipped: false}, + {TestName: "Test2", PassRatio: 1.0, Skipped: true}, + {TestName: "Test3", PassRatio: 0.3, Skipped: false}, + {TestName: "Test4", PassRatio: 0.8, Skipped: true}, + } + + skippedTests := FilterTests(results, func(tr TestResult) bool { + return tr.Skipped + }) + expected := []TestResult{ + {TestName: "Test2", PassRatio: 1.0, Skipped: true}, + {TestName: "Test4", PassRatio: 0.8, Skipped: true}, + } + + if !reflect.DeepEqual(skippedTests, expected) { + t.Errorf("Expected skipped tests %+v, got %+v", expected, skippedTests) + } +} + +// TestAggregate tests the Aggregate function. 
+func TestAggregate(t *testing.T) { + report1 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 2, + Results: []TestResult{ + { + TestName: "TestA", + TestPackage: "pkg1", + Runs: 2, + Successes: 2, + PassRatio: 1.0, + }, + { + TestName: "TestB", + TestPackage: "pkg1", + Runs: 2, + Successes: 1, + Failures: 1, + PassRatio: 0.5, + }, + }, + } + + report2 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 3, + Results: []TestResult{ + { + TestName: "TestA", + TestPackage: "pkg1", + Runs: 3, + Successes: 3, + PassRatio: 1.0, + }, + { + TestName: "TestC", + TestPackage: "pkg2", + Runs: 3, + Successes: 2, + Failures: 1, + PassRatio: 0.6667, + }, + }, + } + + aggregatedReport, err := Aggregate(report1, report2) + if err != nil { + t.Fatalf("Error aggregating reports: %v", err) + } + + expectedResults := []TestResult{ + { + TestName: "TestA", + TestPackage: "pkg1", + Runs: 5, + Successes: 5, + Failures: 0, + PassRatio: 1.0, + }, + { + TestName: "TestB", + TestPackage: "pkg1", + Runs: 2, + Successes: 1, + Failures: 1, + PassRatio: 0.5, + }, + { + TestName: "TestC", + TestPackage: "pkg2", + Runs: 3, + Successes: 2, + Failures: 1, + PassRatio: 0.6667, + }, + } + + // Sort results for comparison + sort.Slice(expectedResults, func(i, j int) bool { + return expectedResults[i].TestName < expectedResults[j].TestName + }) + sort.Slice(aggregatedReport.Results, func(i, j int) bool { + return aggregatedReport.Results[i].TestName < aggregatedReport.Results[j].TestName + }) + + for i, result := range aggregatedReport.Results { + expected := expectedResults[i] + if result.TestName != expected.TestName || + result.TestPackage != expected.TestPackage || + result.Runs != expected.Runs || + result.Successes != expected.Successes || + result.Failures != expected.Failures || + math.Abs(result.PassRatio-expected.PassRatio) > 0.0001 { + t.Errorf("Mismatch in aggregated result for test %s. Expected %+v, got %+v", expected.TestName, expected, result) + } + } +} + +// TestMergeTestResults tests the mergeTestResults function. +func TestMergeTestResults(t *testing.T) { + a := TestResult{ + TestName: "TestA", + TestPackage: "pkg1", + Runs: 2, + Successes: 2, + Failures: 0, + Skips: 0, + Durations: []time.Duration{time.Second, time.Second}, + Outputs: []string{"Output1", "Output2"}, + PackageOutputs: []string{"PkgOutput1"}, + Panic: false, + Race: false, + Skipped: false, + } + + b := TestResult{ + TestName: "TestA", + TestPackage: "pkg1", + Runs: 3, + Successes: 2, + Failures: 1, + Skips: 0, + Durations: []time.Duration{2 * time.Second, 2 * time.Second, 2 * time.Second}, + Outputs: []string{"Output3", "Output4", "Output5"}, + PackageOutputs: []string{"PkgOutput2"}, + Panic: true, + Race: false, + Skipped: false, + } + + merged := mergeTestResults(a, b) + + expected := TestResult{ + TestName: "TestA", + TestPackage: "pkg1", + Runs: 5, + Successes: 4, + Failures: 1, + Skips: 0, + Durations: []time.Duration{time.Second, time.Second, 2 * time.Second, 2 * time.Second, 2 * time.Second}, + Outputs: []string{"Output1", "Output2", "Output3", "Output4", "Output5"}, + PackageOutputs: []string{"PkgOutput1", "PkgOutput2"}, + Panic: true, + Race: false, + Skipped: false, + PassRatio: 0.8, + } + + if !reflect.DeepEqual(merged, expected) { + t.Errorf("Expected %+v, got %+v", expected, merged) + } +} + +// TestAvgDuration tests the avgDuration function. 
+func TestAvgDuration(t *testing.T) { + durations := []time.Duration{ + time.Second, + 2 * time.Second, + 3 * time.Second, + } + expected := 2 * time.Second + + avg := avgDuration(durations) + if avg != expected { + t.Errorf("Expected average duration %v, got %v", expected, avg) + } + + // Test with empty slice + avg = avgDuration([]time.Duration{}) + if avg != 0 { + t.Errorf("Expected average duration 0, got %v", avg) + } +} + +func TestAggregate_AllSkippedTests(t *testing.T) { + report1 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 3, + Results: []TestResult{ + { + TestName: "TestSkipped", + TestPackage: "pkg1", + Skipped: true, + Runs: 0, + Skips: 3, + PassRatio: 0.0, // Or set to -1 to indicate undefined + }, + }, + } + + report2 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 2, + Results: []TestResult{ + { + TestName: "TestSkipped", + TestPackage: "pkg1", + Skipped: true, + Runs: 0, + Skips: 2, + PassRatio: 0.0, + }, + }, + } + + aggregatedReport, err := Aggregate(report1, report2) + if err != nil { + t.Fatalf("Error aggregating reports: %v", err) + } + + expectedResult := TestResult{ + TestName: "TestSkipped", + TestPackage: "pkg1", + Skipped: true, + Runs: 0, + Skips: 5, + PassRatio: 0.0, + } + + if len(aggregatedReport.Results) != 1 { + t.Fatalf("Expected 1 result, got %d", len(aggregatedReport.Results)) + } + + result := aggregatedReport.Results[0] + + if result.TestName != expectedResult.TestName { + t.Errorf("Expected TestName %v, got %v", expectedResult.TestName, result.TestName) + } + if result.TestPackage != expectedResult.TestPackage { + t.Errorf("Expected TestPackage %v, got %v", expectedResult.TestPackage, result.TestPackage) + } + if result.Skipped != expectedResult.Skipped { + t.Errorf("Expected Skipped %v, got %v", expectedResult.Skipped, result.Skipped) + } + if result.Runs != expectedResult.Runs { + t.Errorf("Expected Runs %v, got %v", expectedResult.Runs, result.Runs) + } + if result.Skips != expectedResult.Skips { + t.Errorf("Expected Skips %v, got %v", expectedResult.Skips, result.Skips) + } + if result.PassRatio != expectedResult.PassRatio { + t.Errorf("Expected PassRatio %v, got %v", expectedResult.PassRatio, result.PassRatio) + } +} diff --git a/tools/flakeguard/reports/io.go b/tools/flakeguard/reports/io.go new file mode 100644 index 000000000..e38952ff6 --- /dev/null +++ b/tools/flakeguard/reports/io.go @@ -0,0 +1,109 @@ +package reports + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" +) + +// FileSystem interface and implementations + +type FileSystem interface { + MkdirAll(path string, perm os.FileMode) error + Create(name string) (io.WriteCloser, error) + WriteFile(filename string, data []byte, perm os.FileMode) error +} + +type OSFileSystem struct{} + +func (OSFileSystem) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OSFileSystem) Create(name string) (io.WriteCloser, error) { + return os.Create(name) +} + +func (OSFileSystem) WriteFile(filename string, data []byte, perm os.FileMode) error { + return os.WriteFile(filename, data, perm) +} + +// LoadReports reads JSON files from a directory and returns a slice of TestReport pointers +func LoadReports(resultsPath string) ([]*TestReport, error) { + var testReports []*TestReport + err := filepath.Walk(resultsPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error accessing path %s: %w", path, err) + } + if !info.IsDir() && filepath.Ext(path) == ".json" { + data, readErr := 
os.ReadFile(path) + if readErr != nil { + return fmt.Errorf("error reading file %s: %w", path, readErr) + } + var report TestReport + if jsonErr := json.Unmarshal(data, &report); jsonErr != nil { + return fmt.Errorf("error unmarshaling JSON from file %s: %w", path, jsonErr) + } + testReports = append(testReports, &report) + } + return nil + }) + if err != nil { + return nil, err + } + return testReports, nil +} + +// LoadReport reads a JSON file and returns a TestReport pointer +func LoadReport(filePath string) (*TestReport, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("error reading file %s: %w", filePath, err) + } + var report TestReport + if err := json.Unmarshal(data, &report); err != nil { + return nil, fmt.Errorf("error unmarshaling JSON from file %s: %w", filePath, err) + } + return &report, nil +} + +func SaveSummaryAsJSON(fs FileSystem, path string, summary SummaryData) error { + file, err := fs.Create(path) + if err != nil { + return fmt.Errorf("error creating JSON summary file: %w", err) + } + defer file.Close() + + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") + if err := encoder.Encode(summary); err != nil { + return fmt.Errorf("error writing JSON summary: %w", err) + } + return nil +} + +func SaveReportNoLogs(fs FileSystem, filePath string, report TestReport) error { + var filteredResults []TestResult + for _, r := range report.Results { + r.Outputs = nil + r.PackageOutputs = nil + filteredResults = append(filteredResults, r) + } + report.Results = filteredResults + + data, err := json.MarshalIndent(report, "", " ") + if err != nil { + return fmt.Errorf("error marshaling results: %v", err) + } + return fs.WriteFile(filePath, data, 0644) +} + +func SaveReport(fs FileSystem, filePath string, report TestReport) error { + data, err := json.MarshalIndent(report, "", " ") + if err != nil { + return fmt.Errorf("error marshaling outputs: %v", err) + } + return fs.WriteFile(filePath, data, 0644) +} diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go new file mode 100644 index 000000000..bd489a5ec --- /dev/null +++ b/tools/flakeguard/reports/presentation.go @@ -0,0 +1,201 @@ +package reports + +import ( + "bytes" + "fmt" + "io" + "strings" + + "golang.org/x/text/language" + "golang.org/x/text/message" +) + +func GenerateResultsTable( + results []TestResult, + expectedPassRatio float64, + markdown bool, +) [][]string { + p := message.NewPrinter(language.English) + sortTestResults(results) + + headers := []string{ + "Name", + "Pass Ratio", + "Panicked?", + "Timed Out?", + "Race?", + "Runs", + "Successes", + "Failures", + "Skips", + "Package", + "Package Panicked?", + "Avg Duration", + "Code Owners", + } + + if markdown { + for i, header := range headers { + headers[i] = fmt.Sprintf("**%s**", header) + } + } + + table := [][]string{headers} + for _, result := range results { + if result.PassRatio < expectedPassRatio { + row := []string{ + result.TestName, + fmt.Sprintf("%.2f%%", result.PassRatio*100), + fmt.Sprintf("%t", result.Panic), + fmt.Sprintf("%t", result.Timeout), + fmt.Sprintf("%t", result.Race), + p.Sprintf("%d", result.Runs), + p.Sprintf("%d", result.Successes), + p.Sprintf("%d", result.Failures), + p.Sprintf("%d", result.Skips), + result.TestPackage, + fmt.Sprintf("%t", result.PackagePanic), + avgDuration(result.Durations).String(), + } + + // Code owners + owners := "Unknown" + if len(result.CodeOwners) > 0 { + owners = strings.Join(result.CodeOwners, ", ") + } + row = 
append(row, owners) + + table = append(table, row) + } + } + return table +} + +func GenerateMarkdownSummary(w io.Writer, testReport *TestReport, maxPassRatio float64) { + settingsTable := buildSettingsTable(testReport, maxPassRatio) + fmt.Fprint(w, "# Flakeguard Summary\n\n") + printTable(w, settingsTable) + fmt.Fprintln(w) + + if len(testReport.Results) == 0 { + fmt.Fprintln(w, "## No tests ran :warning:") + return + } + + summary := GenerateSummaryData(testReport.Results, maxPassRatio) + if summary.AveragePassRatio < maxPassRatio { + fmt.Fprintln(w, "## Found Flaky Tests :x:") + } else { + fmt.Fprintln(w, "## No Flakes Found :white_check_mark:") + } + + RenderResults(w, testReport.Results, maxPassRatio, true) +} + +func buildSettingsTable(testReport *TestReport, maxPassRatio float64) [][]string { + rows := [][]string{ + {"**Setting**", "**Value**"}, + {"Project", testReport.GoProject}, + {"Max Pass Ratio", fmt.Sprintf("%.2f%%", maxPassRatio*100)}, + {"Test Run Count", fmt.Sprintf("%d", testReport.TestRunCount)}, + {"Race Detection", fmt.Sprintf("%t", testReport.RaceDetection)}, + } + if len(testReport.ExcludedTests) > 0 { + rows = append(rows, []string{"Excluded Tests", strings.Join(testReport.ExcludedTests, ", ")}) + } + if len(testReport.SelectedTests) > 0 { + rows = append(rows, []string{"Selected Tests", strings.Join(testReport.SelectedTests, ", ")}) + } + return rows +} + +func RenderResults( + w io.Writer, + tests []TestResult, + maxPassRatio float64, + markdown bool, +) { + resultsTable := GenerateResultsTable(tests, maxPassRatio, markdown) + summary := GenerateSummaryData(tests, maxPassRatio) + renderSummaryTable(w, summary, markdown) + renderTestResultsTable(w, resultsTable, markdown) +} + +func renderSummaryTable(w io.Writer, summary SummaryData, markdown bool) { + summaryData := [][]string{ + {"Category", "Total"}, + {"Tests", fmt.Sprintf("%d", summary.TotalTests)}, + {"Panicked Tests", fmt.Sprintf("%d", summary.PanickedTests)}, + {"Raced Tests", fmt.Sprintf("%d", summary.RacedTests)}, + {"Flaky Tests", fmt.Sprintf("%d", summary.FlakyTests)}, + {"Flaky Test Ratio", summary.FlakyTestRatio}, + {"Runs", fmt.Sprintf("%d", summary.TotalRuns)}, + {"Passes", fmt.Sprintf("%d", summary.PassedRuns)}, + {"Failures", fmt.Sprintf("%d", summary.FailedRuns)}, + {"Skips", fmt.Sprintf("%d", summary.SkippedRuns)}, + {"Pass Ratio", summary.PassRatio}, + } + if markdown { + for i, row := range summaryData { + if i == 0 { + summaryData[i] = []string{"**Category**", "**Total**"} + } else { + summaryData[i] = []string{fmt.Sprintf("**%s**", row[0]), row[1]} + } + } + } + printTable(w, summaryData) + fmt.Fprintln(w) +} + +func renderTestResultsTable(w io.Writer, table [][]string, markdown bool) { + if len(table) <= 1 { + fmt.Fprintln(w, "No tests found under the specified pass ratio threshold.") + return + } + printTable(w, table) +} + +func printTable(w io.Writer, table [][]string) { + colWidths := calculateColumnWidths(table) + separator := buildSeparator(colWidths) + + for i, row := range table { + printRow(w, row, colWidths) + if i == 0 { + fmt.Fprintln(w, separator) + } + } +} + +func calculateColumnWidths(table [][]string) []int { + colWidths := make([]int, len(table[0])) + for _, row := range table { + for i, cell := range row { + if len(cell) > colWidths[i] { + colWidths[i] = len(cell) + } + } + } + return colWidths +} + +func buildSeparator(colWidths []int) string { + var buffer bytes.Buffer + for _, width := range colWidths { + buffer.WriteString("|-") + 
buffer.WriteString(strings.Repeat("-", width)) + buffer.WriteString("-") + } + buffer.WriteString("|") + return buffer.String() +} + +func printRow(w io.Writer, row []string, colWidths []int) { + var buffer bytes.Buffer + for i, cell := range row { + buffer.WriteString(fmt.Sprintf("| %-*s ", colWidths[i], cell)) + } + buffer.WriteString("|") + fmt.Fprintln(w, buffer.String()) +} diff --git a/tools/flakeguard/reports/presentation_test.go b/tools/flakeguard/reports/presentation_test.go new file mode 100644 index 000000000..0597e863b --- /dev/null +++ b/tools/flakeguard/reports/presentation_test.go @@ -0,0 +1,267 @@ +package reports + +import ( + "bytes" + "reflect" + "strings" + "testing" + "time" +) + +// TestGenerateResultsTable tests the GenerateResultsTable function. +func TestGenerateResultsTable(t *testing.T) { + testResults := []TestResult{ + { + TestName: "TestA", + PassRatio: 0.8, + Panic: false, + Timeout: false, + Race: false, + Runs: 5, + Successes: 4, + Failures: 1, + Skips: 0, + TestPackage: "pkg1", + PackagePanic: false, + Durations: []time.Duration{time.Second, time.Second, time.Second, time.Second, time.Second}, + CodeOwners: []string{"owner1"}, + }, + { + TestName: "TestB", + PassRatio: 1.0, + Panic: false, + Timeout: false, + Race: false, + Runs: 3, + Successes: 3, + Failures: 0, + Skips: 0, + TestPackage: "pkg2", + PackagePanic: false, + Durations: []time.Duration{2 * time.Second, 2 * time.Second, 2 * time.Second}, + CodeOwners: []string{"owner2"}, + }, + } + + expectedPassRatio := 0.9 + markdown := false + + table := GenerateResultsTable(testResults, expectedPassRatio, markdown) + + // Only TestA should be included since its PassRatio is below 0.9 + if len(table) != 2 { + t.Fatalf("Expected table length 2 (headers + 1 row), got %d", len(table)) + } + + // Verify headers + headers := table[0] + expectedHeaders := []string{ + "Name", + "Pass Ratio", + "Panicked?", + "Timed Out?", + "Race?", + "Runs", + "Successes", + "Failures", + "Skips", + "Package", + "Package Panicked?", + "Avg Duration", + "Code Owners", + } + if !reflect.DeepEqual(headers, expectedHeaders) { + t.Errorf("Expected headers %+v, got %+v", expectedHeaders, headers) + } + + // Verify row data + row := table[1] + expectedRow := []string{ + "TestA", + "80.00%", + "false", + "false", + "false", + "5", + "4", + "1", + "0", + "pkg1", + "false", + "1s", + "owner1", + } + if !reflect.DeepEqual(row, expectedRow) { + t.Errorf("Expected row %+v, got %+v", expectedRow, row) + } +} + +// TestGenerateMarkdownSummary tests the GenerateMarkdownSummary function. 
+func TestGenerateMarkdownSummary(t *testing.T) { + testReport := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 3, + RaceDetection: true, + Results: []TestResult{ + { + TestName: "TestA", + PassRatio: 0.8, + Runs: 5, + Successes: 4, + Failures: 1, + TestPackage: "pkg1", + CodeOwners: []string{"owner1"}, + Durations: []time.Duration{time.Second, time.Second, time.Second, time.Second, time.Second}, + }, + { + TestName: "TestB", + PassRatio: 1.0, + Runs: 3, + Successes: 3, + Failures: 0, + TestPackage: "pkg2", + CodeOwners: []string{"owner2"}, + Durations: []time.Duration{2 * time.Second, 2 * time.Second, 2 * time.Second}, + }, + }, + } + + var buffer bytes.Buffer + maxPassRatio := 0.9 + + GenerateMarkdownSummary(&buffer, testReport, maxPassRatio) + + output := buffer.String() + + // Check that the summary includes the expected headings + if !strings.Contains(output, "# Flakeguard Summary") { + t.Error("Expected markdown summary to contain '# Flakeguard Summary'") + } + if !strings.Contains(output, "## Found Flaky Tests :x:") { + t.Error("Expected markdown summary to contain '## Found Flaky Tests :x:'") + } + if !strings.Contains(output, "| **Name**") { + t.Error("Expected markdown table headers for test results") + } + if !strings.Contains(output, "| TestA ") { + t.Error("Expected markdown table to include TestA") + } + if strings.Contains(output, "| TestB ") { + t.Error("Did not expect markdown table to include TestB since its pass ratio is above the threshold") + } +} + +// TestPrintTable tests the printTable function. +func TestPrintTable(t *testing.T) { + table := [][]string{ + {"Header1", "Header2", "Header3"}, + {"Row1Col1", "Row1Col2", "Row1Col3"}, + {"Row2Col1", "Row2Col2", "Row2Col3"}, + } + + var buffer bytes.Buffer + printTable(&buffer, table) + + output := buffer.String() + + expected := `| Header1 | Header2 | Header3 | +|----------|----------|----------| +| Row1Col1 | Row1Col2 | Row1Col3 | +| Row2Col1 | Row2Col2 | Row2Col3 | +` + + if output != expected { + t.Errorf("Expected output:\n%s\nGot:\n%s", expected, output) + } +} + +func TestRenderResults(t *testing.T) { + testcases := []struct { + name string + testResults []TestResult + maxPassRatio float64 + expectedSummary SummaryData + expectedStringsContain []string + }{ + { + name: "single flaky test", + testResults: []TestResult{ + { + TestName: "Test1", + TestPackage: "package1", + PassRatio: 0.75, + Successes: 3, + Failures: 1, + Skipped: false, + Runs: 4, + Durations: []time.Duration{ + time.Millisecond * 1200, + time.Millisecond * 900, + time.Millisecond * 1100, + time.Second, + }, + }, + }, + maxPassRatio: 0.9, + expectedSummary: SummaryData{ + TotalTests: 1, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 1, + FlakyTestRatio: "100.00%", + TotalRuns: 4, + PassedRuns: 3, + FailedRuns: 1, + SkippedRuns: 0, + PassRatio: "75.00%", + MaxPassRatio: 0.9, + AveragePassRatio: 0.75, + }, + expectedStringsContain: []string{"Test1", "package1", "75.00%", "false", "1.05s", "4", "0"}, + }, + // Add more test cases as needed + } + + for _, tc := range testcases { + tc := tc // capture range variable + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + + RenderResults(&buf, tc.testResults, tc.maxPassRatio, false) + output := buf.String() + + // Generate the summary data + summary := GenerateSummaryData(tc.testResults, tc.maxPassRatio) + + // Verify summary data + if summary.TotalTests != tc.expectedSummary.TotalTests { + t.Errorf("Expected TotalTests %v, got %v", tc.expectedSummary.TotalTests, summary.TotalTests) + } 
+ if summary.TotalRuns != tc.expectedSummary.TotalRuns { + t.Errorf("Expected TotalRuns %v, got %v", tc.expectedSummary.TotalRuns, summary.TotalRuns) + } + if summary.PassedRuns != tc.expectedSummary.PassedRuns { + t.Errorf("Expected PassedRuns %v, got %v", tc.expectedSummary.PassedRuns, summary.PassedRuns) + } + if summary.FailedRuns != tc.expectedSummary.FailedRuns { + t.Errorf("Expected FailedRuns %v, got %v", tc.expectedSummary.FailedRuns, summary.FailedRuns) + } + if summary.FlakyTests != tc.expectedSummary.FlakyTests { + t.Errorf("Expected FlakyTests %v, got %v", tc.expectedSummary.FlakyTests, summary.FlakyTests) + } + if summary.PassRatio != tc.expectedSummary.PassRatio { + t.Errorf("Expected PassRatio %v, got %v", tc.expectedSummary.PassRatio, summary.PassRatio) + } + if summary.AveragePassRatio != tc.expectedSummary.AveragePassRatio { + t.Errorf("Expected AveragePassRatio %v, got %v", tc.expectedSummary.AveragePassRatio, summary.AveragePassRatio) + } + + // Verify output content + for _, expected := range tc.expectedStringsContain { + if !strings.Contains(output, expected) { + t.Errorf("Expected output to contain %q, but it did not", expected) + } + } + }) + } +} diff --git a/tools/flakeguard/reports/reports.go b/tools/flakeguard/reports/reports.go deleted file mode 100644 index 23d622875..000000000 --- a/tools/flakeguard/reports/reports.go +++ /dev/null @@ -1,536 +0,0 @@ -package reports - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "math" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "golang.org/x/text/language" - "golang.org/x/text/message" -) - -// TestReport represents the report of all tests run through flakeguard. -type TestReport struct { - GoProject string - TestRunCount int - RaceDetection bool - ExcludedTests []string - SelectedTests []string - Results []TestResult -} - -// TestResult represents the result of a single test being run through flakeguard. -type TestResult struct { - TestName string - TestPackage string - PackagePanic bool // Indicates a package-level panic - Panic bool // Indicates a test-level panic - Timeout bool // Indicates if the test timed out - Race bool // Indicates if the test caused a data race - Skipped bool // Indicates if the test was skipped - PassRatio float64 // Pass ratio in decimal format like 0.5 - Runs int // Count of how many times the test was run - Failures int // Count of how many times the test failed - Successes int // Count of how many times the test passed - Skips int // Count of how many times the test was skipped - Outputs []string `json:"outputs,omitempty"` // Stores outputs for a test - Durations []time.Duration // Stores elapsed time for each run of the test - PackageOutputs []string `json:"package_outputs,omitempty"` // Stores package-level outputs - TestPath string // Path to the test file - CodeOwners []string // Owners of the test -} - -// FilterFailedTests returns a slice of TestResult where the pass ratio is below the specified threshold. -func FilterFailedTests(results []TestResult, maxPassRatio float64) []TestResult { - var failedTests []TestResult - for _, result := range results { - if !result.Skipped && result.PassRatio < maxPassRatio { - failedTests = append(failedTests, result) - } - } - return failedTests -} - -// FilterFlakyTests returns a slice of TestResult where the pass ratio is between the min pass ratio and the threshold. 
-func FilterFlakyTests(testResults []TestResult, maxPassRatio float64) []TestResult { - var flakyTests []TestResult - for _, test := range testResults { - if test.PassRatio < maxPassRatio && !test.Skipped { - flakyTests = append(flakyTests, test) - } - } - return flakyTests -} - -// FilterPassedTests returns a slice of TestResult where the tests passed and were not skipped. -func FilterPassedTests(results []TestResult, maxPassRatio float64) []TestResult { - var passedTests []TestResult - for _, result := range results { - if !result.Skipped && result.PassRatio >= maxPassRatio { - passedTests = append(passedTests, result) - } - } - return passedTests -} - -// FilterSkippedTests returns a slice of TestResult where the tests were skipped. -func FilterSkippedTests(results []TestResult) []TestResult { - var skippedTests []TestResult - for _, result := range results { - if result.Skipped { - skippedTests = append(skippedTests, result) - } - } - return skippedTests -} - -// Aggregate aggregates multiple test reports into a single report. -func Aggregate(reportsToAggregate ...*TestReport) (*TestReport, error) { - var ( - // Map to hold unique tests based on their TestName and TestPackage - // Key: TestName|TestPackage, Value: TestResult - testMap = make(map[string]TestResult) - fullReport = &TestReport{} - excludedTests = map[string]struct{}{} - selectedTests = map[string]struct{}{} - ) - - // Read all JSON files in the folder - for _, report := range reportsToAggregate { - if fullReport.GoProject == "" { - fullReport.GoProject = report.GoProject - } else if fullReport.GoProject != report.GoProject { - return nil, fmt.Errorf("reports with different Go projects found, expected %s, got %s", fullReport.GoProject, report.GoProject) - } - fullReport.TestRunCount += report.TestRunCount - fullReport.RaceDetection = report.RaceDetection && fullReport.RaceDetection - for _, test := range report.ExcludedTests { - excludedTests[test] = struct{}{} - } - for _, test := range report.SelectedTests { - selectedTests[test] = struct{}{} - } - // Process each test results - for _, result := range report.Results { - // Unique key for each test based on TestName and TestPackage - key := result.TestName + "|" + result.TestPackage - if existingResult, found := testMap[key]; found { - // Aggregate runs, durations, and outputs - existingResult.Runs = existingResult.Runs + result.Runs - existingResult.Durations = append(existingResult.Durations, result.Durations...) - existingResult.Outputs = append(existingResult.Outputs, result.Outputs...) - existingResult.PackageOutputs = append(existingResult.PackageOutputs, result.PackageOutputs...) 
- existingResult.Successes += result.Successes - existingResult.Failures += result.Failures - existingResult.Panic = existingResult.Panic || result.Panic - existingResult.Race = existingResult.Race || result.Race - existingResult.Skips += result.Skips - existingResult.PassRatio = 1.0 - if existingResult.Runs > 0 { - existingResult.PassRatio = float64(existingResult.Successes) / float64(existingResult.Runs) - } - - existingResult.Skipped = existingResult.Skipped && result.Skipped // Mark as skipped only if all occurrences are skipped - - // Update the map with the aggregated result - testMap[key] = existingResult - } else { - // Add new entry to the map - testMap[key] = result - } - } - } - // Aggregate - for test := range excludedTests { - fullReport.ExcludedTests = append(fullReport.ExcludedTests, test) - } - for test := range selectedTests { - fullReport.SelectedTests = append(fullReport.SelectedTests, test) - } - - var ( - aggregatedResults = make([]TestResult, 0, len(testMap)) - allSuccesses int - ) - for _, result := range testMap { - aggregatedResults = append(aggregatedResults, result) - allSuccesses += result.Successes - } - - sortTestResults(aggregatedResults) - fullReport.Results = aggregatedResults - - return fullReport, nil -} - -func TestResultsTable( - results []TestResult, - expectedPassRatio float64, - includeCodeOwners bool, - markdown bool, -) (resultsTable [][]string, runs, passes, fails, skips, panickedTests, racedTests, flakyTests int) { - p := message.NewPrinter(language.English) // For formatting numbers - sortTestResults(results) - - headers := []string{ - "Name", - "Pass Ratio", - "Panicked?", - "Timed Out?", - "Race?", - "Runs", - "Successes", - "Failures", - "Skips", - "Package", - "Package Panicked?", - "Avg Duration", - } - - if includeCodeOwners { - headers = append(headers, "Code Owners") - } - if markdown { - for i, header := range headers { - headers[i] = fmt.Sprintf("**%s**", header) - } - } - - resultsTable = [][]string{} - resultsTable = append(resultsTable, headers) - for _, result := range results { - if result.PassRatio < expectedPassRatio { - row := []string{ - result.TestName, - fmt.Sprintf("%.2f%%", result.PassRatio*100), - fmt.Sprintf("%t", result.Panic), - fmt.Sprintf("%t", result.Timeout), - fmt.Sprintf("%t", result.Race), - p.Sprintf("%d", result.Runs), - p.Sprintf("%d", result.Successes), - p.Sprintf("%d", result.Failures), - p.Sprintf("%d", result.Skips), - result.TestPackage, - fmt.Sprintf("%t", result.PackagePanic), - avgDuration(result.Durations).String(), - } - - if includeCodeOwners { - owners := "Unknown" - if len(result.CodeOwners) > 0 { - owners = strings.Join(result.CodeOwners, ", ") - } - row = append(row, owners) - } - - resultsTable = append(resultsTable, row) - } - - runs += result.Runs - passes += result.Successes - fails += result.Failures - skips += result.Skips - if result.Panic { - panickedTests++ - flakyTests++ - } else if result.Race { - racedTests++ - flakyTests++ - } else if result.PassRatio < expectedPassRatio { - flakyTests++ - } - } - return -} - -// PrintTests prints tests in a pretty format -func PrintResults( - w io.Writer, - tests []TestResult, - maxPassRatio float64, - markdown bool, - includeCodeOwners bool, // Include code owners in the output. 
Set to true if test results have code owners -) (runs, passes, fails, skips, panickedTests, racedTests, flakyTests int) { - var ( - resultsTable [][]string - passRatioStr string - flakeRatioStr string - p = message.NewPrinter(language.English) // For formatting numbers - ) - resultsTable, runs, passes, fails, skips, panickedTests, racedTests, flakyTests = TestResultsTable(tests, maxPassRatio, markdown, includeCodeOwners) - // Print out summary data - if runs == 0 || passes == runs { - passRatioStr = "100%" - flakeRatioStr = "0%" - } else { - passPercentage := float64(passes) / float64(runs) * 100 - truncatedPassPercentage := math.Floor(passPercentage*100) / 100 // Truncate to 2 decimal places - flakePercentage := float64(flakyTests) / float64(len(tests)) * 100 - truncatedFlakePercentage := math.Floor(flakePercentage*100) / 100 // Truncate to 2 decimal places - passRatioStr = fmt.Sprintf("%.2f%%", truncatedPassPercentage) - flakeRatioStr = fmt.Sprintf("%.2f%%", truncatedFlakePercentage) - } - summaryData := [][]string{ - {"Category", "Total"}, - {"Tests", p.Sprint(len(tests))}, - {"Panicked Tests", p.Sprint(panickedTests)}, - {"Raced Tests", p.Sprint(racedTests)}, - {"Flaky Tests", p.Sprint(flakyTests)}, - {"Flaky Test Ratio", flakeRatioStr}, - {"Runs", p.Sprint(runs)}, - {"Passes", p.Sprint(passes)}, - {"Failures", p.Sprint(fails)}, - {"Skips", p.Sprint(skips)}, - {"Pass Ratio", passRatioStr}, - } - if markdown { - for i, row := range summaryData { - if i == 0 { - summaryData[i] = []string{"**Category**", "**Total**"} - } else { - summaryData[i] = []string{fmt.Sprintf("**%s**", row[0]), row[1]} - } - } - } - - colWidths := make([]int, len(summaryData[0])) - - for _, row := range summaryData { - for i, cell := range row { - if len(cell) > colWidths[i] { - colWidths[i] = len(cell) - } - } - } - if len(resultsTable) <= 1 { - fmt.Fprintf(w, "No tests found under pass ratio of %.2f%%\n", maxPassRatio*100) - return - } - - printRow := func(cells []string) { - fmt.Fprintf(w, "| %-*s | %-*s |\n", colWidths[0], cells[0], colWidths[1], cells[1]) - } - printSeparator := func() { - fmt.Fprintf(w, "|-%s-|-%s-|\n", strings.Repeat("-", colWidths[0]), strings.Repeat("-", colWidths[1])) - } - printRow(summaryData[0]) - printSeparator() - for _, row := range summaryData[1:] { - printRow(row) - } - fmt.Fprintln(w) - - // Print out test data - resultsHeaders := resultsTable[0] - colWidths = make([]int, len(resultsHeaders)) - for i, header := range resultsHeaders { - colWidths[i] = len(header) - } - for rowNum := 1; rowNum < len(resultsTable); rowNum++ { - for i, cell := range resultsTable[rowNum] { - if len(cell) > colWidths[i] { - colWidths[i] = len(cell) - } - } - } - - printRow = func(cells []string) { - var buffer bytes.Buffer - for i, cell := range cells { - buffer.WriteString(fmt.Sprintf(" %-*s |", colWidths[i], cell)) - } - fmt.Fprintln(w, "|"+buffer.String()) - } - - printSeparator = func() { - var buffer bytes.Buffer - for _, width := range colWidths { - buffer.WriteString(" " + strings.Repeat("-", width) + " |") - } - fmt.Fprintln(w, "|"+buffer.String()) - } - - printRow(resultsHeaders) - printSeparator() - for rowNum := 1; rowNum < len(resultsTable); rowNum++ { - printRow(resultsTable[rowNum]) - } - return -} - -// MarkdownSummary builds a summary of test results in markdown format, handy for reporting in CI and Slack -func MarkdownSummary(w io.Writer, testReport *TestReport, maxPassRatio float64, includeCodeOwners bool) { - var ( - avgPassRatio = 1.0 - testsData = bytes.NewBuffer(nil) - tests = 
testReport.Results - ) - - rows := [][]string{ - {"**Setting**", "**Value**"}, - {"Project", testReport.GoProject}, - {"Max Pass Ratio", fmt.Sprintf("%.2f%%", maxPassRatio*100)}, - {"Test Run Count", fmt.Sprintf("%d", testReport.TestRunCount)}, - {"Race Detection", fmt.Sprintf("%t", testReport.RaceDetection)}, - } - if len(testReport.ExcludedTests) > 0 { - rows = append(rows, []string{"Excluded Tests", strings.Join(testReport.ExcludedTests, ", ")}) - } - if len(testReport.SelectedTests) > 0 { - rows = append(rows, []string{"Selected Tests", strings.Join(testReport.SelectedTests, ", ")}) - } - colWidths := make([]int, len(rows[0])) - - // Calculate column widths - for _, row := range rows { - for i, cell := range row { - if len(cell) > colWidths[i] { - colWidths[i] = len(cell) - } - } - } - - printRow := func(cells []string) { - fmt.Fprintf(w, "| %-*s | %-*s |\n", colWidths[0], cells[0], colWidths[1], cells[1]) - } - printSeparator := func() { - fmt.Fprintf(w, "|-%s-|-%s-|\n", strings.Repeat("-", colWidths[0]), strings.Repeat("-", colWidths[1])) - } - fmt.Fprint(w, "# Flakeguard Summary\n\n") - // Print settings data - printRow(rows[0]) - printSeparator() - for _, row := range rows[1:] { - printRow(row) - } - fmt.Fprintln(w) - - if len(tests) == 0 { - fmt.Fprintln(w, "## No tests ran :warning:") - return - } - - allRuns, passes, _, _, _, _, _ := PrintResults(testsData, tests, maxPassRatio, true, includeCodeOwners) - if allRuns > 0 { - avgPassRatio = float64(passes) / float64(allRuns) - } - if avgPassRatio < maxPassRatio { - fmt.Fprint(w, "## Found Flaky Tests :x:\n\n") - } else { - fmt.Fprint(w, "## No Flakes Found :white_check_mark:\n\n") - } - fmt.Fprint(w, testsData.String()) -} - -// Helper function to save filtered results and logs to specified paths -func SaveFilteredResultsAndLogs(outputResultsPath, outputLogsPath string, report *TestReport, includeCodeOwners bool) error { - if outputResultsPath != "" { - if err := os.MkdirAll(filepath.Dir(outputResultsPath), 0755); err != nil { //nolint:gosec - return fmt.Errorf("error creating output directory: %w", err) - } - jsonFileName := strings.TrimSuffix(outputResultsPath, filepath.Ext(outputResultsPath)) + ".json" - mdFileName := strings.TrimSuffix(outputResultsPath, filepath.Ext(outputResultsPath)) + ".md" - // no pointer to avoid destroying the original report - if err := saveReportNoLogs(jsonFileName, *report); err != nil { - return fmt.Errorf("error writing filtered results to file: %w", err) - } - summaryFile, err := os.Create(mdFileName) - if err != nil { - return fmt.Errorf("error creating markdown file: %w", err) - } - defer summaryFile.Close() - MarkdownSummary(summaryFile, report, 1.0, includeCodeOwners) - fmt.Printf("Test results saved to %s and summary to %s\n", jsonFileName, mdFileName) - } else { - fmt.Println("No failed tests found based on the specified threshold and min pass ratio.") - } - - if outputLogsPath != "" { - if err := os.MkdirAll(filepath.Dir(outputLogsPath), 0755); err != nil { //nolint:gosec - return fmt.Errorf("error creating output directory: %w", err) - } - // no pointer to avoid destroying the original report - if err := saveReport(outputLogsPath, *report); err != nil { - return fmt.Errorf("error writing filtered logs to file: %w", err) - } - fmt.Printf("Test logs saved to %s\n", outputLogsPath) - } - return nil -} - -// saveReportNoLogs saves the test results to JSON without logs -// as outputs can take up a lot of space and are not always needed. 
-// Outputs can be saved separately using saveTestOutputs -func saveReportNoLogs(filePath string, report TestReport) error { - var filteredResults []TestResult - for _, r := range report.Results { - filteredResult := r - filteredResult.Outputs = nil - filteredResult.PackageOutputs = nil - filteredResults = append(filteredResults, filteredResult) - } - report.Results = filteredResults - - data, err := json.MarshalIndent(report, "", " ") - if err != nil { - return fmt.Errorf("error marshaling results: %v", err) - } - return os.WriteFile(filePath, data, 0644) //nolint:gosec -} - -// saveReport saves the test results to JSON -func saveReport(filePath string, report TestReport) error { - data, err := json.MarshalIndent(report, "", " ") - if err != nil { - return fmt.Errorf("error marshaling outputs: %v", err) - } - return os.WriteFile(filePath, data, 0644) //nolint:gosec -} - -// avgDuration calculates the average duration from a slice of time.Duration -func avgDuration(durations []time.Duration) time.Duration { - if len(durations) == 0 { - return 0 - } - var total time.Duration - for _, d := range durations { - total += d - } - return total / time.Duration(len(durations)) -} - -// sortTestResults sorts results by TestPackage, TestName, and PassRatio for consistent comparison and pretty printing -func sortTestResults(results []TestResult) { - sort.Slice(results, func(i, j int) bool { - // Compare TestPackage first - if results[i].TestPackage != results[j].TestPackage { - return results[i].TestPackage < results[j].TestPackage - } - - // Split TestName into components for hierarchical comparison - iParts := strings.Split(results[i].TestName, "/") - jParts := strings.Split(results[j].TestName, "/") - - // Compare each part of the TestName hierarchically - for k := 0; k < len(iParts) && k < len(jParts); k++ { - if iParts[k] != jParts[k] { - return iParts[k] < jParts[k] - } - } - - // If all compared parts are equal, the shorter name (parent) comes first - if len(iParts) != len(jParts) { - return len(iParts) < len(jParts) - } - - // Finally, compare PassRatio if everything else is equal - return results[i].PassRatio < results[j].PassRatio - }) -} diff --git a/tools/flakeguard/reports/reports_test.go b/tools/flakeguard/reports/reports_test.go deleted file mode 100644 index 6c0695628..000000000 --- a/tools/flakeguard/reports/reports_test.go +++ /dev/null @@ -1,384 +0,0 @@ -package reports - -import ( - "bytes" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFilterFailedTests(t *testing.T) { - results := []TestResult{ - {TestName: "Test1", PassRatio: 0.5, Skipped: false}, - {TestName: "Test2", PassRatio: 0.9, Skipped: false}, - {TestName: "Test3", PassRatio: 0.3, Skipped: false}, - {TestName: "Test4", PassRatio: 0.8, Skipped: true}, // Skipped test - } - - failedTests := FilterFailedTests(results, 0.6) - expected := []string{"Test1", "Test3"} - - require.Equal(t, len(expected), len(failedTests), "not as many failed tests as expected") - - for i, test := range failedTests { - assert.Equal(t, expected[i], test.TestName, "wrong test name") - } -} - -func TestFilterPassedTests(t *testing.T) { - results := []TestResult{ - {TestName: "Test1", PassRatio: 0.7, Skipped: false}, - {TestName: "Test2", PassRatio: 1.0, Skipped: false}, - {TestName: "Test3", PassRatio: 0.3, Skipped: false}, - {TestName: "Test4", PassRatio: 0.8, Skipped: true}, // Skipped test - } - - passedTests := FilterPassedTests(results, 0.6) - expected := 
[]string{"Test1", "Test2"} - - require.Equal(t, len(expected), len(passedTests), "not as many passed tests as expected") - - for i, test := range passedTests { - assert.Equal(t, expected[i], test.TestName, "wrong test name") - } -} - -func TestFilterSkippedTests(t *testing.T) { - results := []TestResult{ - {TestName: "Test1", PassRatio: 0.7, Skipped: false}, - {TestName: "Test2", PassRatio: 1.0, Skipped: true}, - {TestName: "Test3", PassRatio: 0.3, Skipped: false}, - {TestName: "Test4", PassRatio: 0.8, Skipped: true}, - } - - skippedTests := FilterSkippedTests(results) - expected := []string{"Test2", "Test4"} - - require.Equal(t, len(expected), len(skippedTests), "not as many skipped tests as expected") - - for i, test := range skippedTests { - assert.Equal(t, expected[i], test.TestName, "wrong test name") - } -} - -func TestPrintTests(t *testing.T) { - testcases := []struct { - name string - testResults []TestResult - maxPassRatio float64 - expectedRuns int - expectedPasses int - expectedFails int - expectedSkippedTests int - expectedPanickedTests int - expectedRacedTests int - expectedFlakyTests int - expectedStringsContain []string - }{ - { - name: "single flaky test", - testResults: []TestResult{ - { - TestName: "Test1", - TestPackage: "package1", - PassRatio: 0.75, - Successes: 3, - Failures: 1, - Skipped: false, - Runs: 4, - Durations: []time.Duration{time.Millisecond * 1200, time.Millisecond * 900, time.Millisecond * 1100, time.Second}, - }, - }, - maxPassRatio: 1.0, - expectedRuns: 4, - expectedPasses: 3, - expectedFails: 1, - expectedSkippedTests: 0, - expectedPanickedTests: 0, - expectedRacedTests: 0, - expectedFlakyTests: 1, - expectedStringsContain: []string{"Test1", "package1", "75.00%", "false", "1.05s", "4", "0"}, - }, - { - name: "multiple passing tests", - testResults: []TestResult{ - { - TestName: "Test1", - TestPackage: "package1", - PassRatio: 1.0, - Skipped: false, - Successes: 4, - Runs: 4, - Durations: []time.Duration{time.Millisecond * 1200, time.Millisecond * 900, time.Millisecond * 1100, time.Second}, - }, - { - TestName: "Test2", - TestPackage: "package1", - PassRatio: 1.0, - Skipped: false, - Successes: 4, - Runs: 4, - Durations: []time.Duration{time.Millisecond * 1200, time.Millisecond * 900, time.Millisecond * 1100, time.Second}, - }, - }, - maxPassRatio: 1.0, - expectedRuns: 8, - expectedPasses: 8, - expectedFails: 0, - expectedSkippedTests: 0, - expectedPanickedTests: 0, - expectedRacedTests: 0, - expectedFlakyTests: 0, - expectedStringsContain: []string{}, - }, - } - - for _, testCase := range testcases { - tc := testCase - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer - - runs, passes, fails, skips, panickedTests, racedTests, flakyTests := PrintResults(&buf, tc.testResults, tc.maxPassRatio, false, false) - assert.Equal(t, tc.expectedRuns, runs, "wrong number of runs") - assert.Equal(t, tc.expectedPasses, passes, "wrong number of passes") - assert.Equal(t, tc.expectedFails, fails, "wrong number of failures") - assert.Equal(t, tc.expectedSkippedTests, skips, "wrong number of skips") - assert.Equal(t, tc.expectedPanickedTests, panickedTests, "wrong number of panicked tests") - assert.Equal(t, tc.expectedRacedTests, racedTests, "wrong number of raced tests") - assert.Equal(t, tc.expectedFlakyTests, flakyTests, "wrong number of flaky tests") - - // Get the output as a string - output := buf.String() - for _, expected := range tc.expectedStringsContain { - assert.Contains(t, output, expected, "output does not contain expected string") 
- } - }) - } - -} - -func TestAggregateTestResults(t *testing.T) { - // Create a temporary directory for test JSON files - tempDir, err := os.MkdirTemp("", "aggregatetestresults") - require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(tempDir) - }) - - // Test cases - testCases := []struct { - description string - inputReports []*TestReport - expectedReport *TestReport - }{ - { - description: "Unique test results", - inputReports: []*TestReport{ - { - TestRunCount: 2, // 2 runs of A and 4 runs of B will add up to 6 total runs. Not quite ideal. - GoProject: "project1", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestA", - TestPackage: "pkgA", - PassRatio: 1, - Skipped: false, - Runs: 2, - Successes: 2, - Durations: []time.Duration{time.Millisecond * 10, time.Millisecond * 20}, - Outputs: []string{"Output1", "Output2"}, - }, - }, - }, - { - TestRunCount: 4, - GoProject: "project1", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestB", - TestPackage: "pkgB", - PassRatio: 0.5, - Skipped: false, - Runs: 4, - Successes: 2, - Failures: 2, - Durations: []time.Duration{time.Millisecond * 50, time.Millisecond * 50, time.Millisecond * 50, time.Millisecond * 50}, - Outputs: []string{"Output3", "Output4", "Output5", "Output6"}, - }, - }, - }, - }, - expectedReport: &TestReport{ - TestRunCount: 6, - GoProject: "project1", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestA", - TestPackage: "pkgA", - PassRatio: 1, - Skipped: false, - Runs: 2, - Successes: 2, - Durations: []time.Duration{time.Millisecond * 10, time.Millisecond * 20}, - Outputs: []string{"Output1", "Output2"}, - }, - { - TestName: "TestB", - TestPackage: "pkgB", - PassRatio: 0.5, - Skipped: false, - Runs: 4, - Successes: 2, - Failures: 2, - Durations: []time.Duration{time.Millisecond * 50, time.Millisecond * 50, time.Millisecond * 50, time.Millisecond * 50}, - Outputs: []string{"Output3", "Output4", "Output5", "Output6"}, - }, - }, - }, - }, - { - description: "Duplicate test results with aggregation", - inputReports: []*TestReport{ - { - TestRunCount: 2, - GoProject: "project2", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestC", - TestPackage: "pkgC", - PassRatio: 1, - Skipped: false, - Runs: 2, - Successes: 2, - Durations: []time.Duration{time.Millisecond * 100, time.Millisecond * 100}, - Outputs: []string{"Output7", "Output8"}, - }, - }, - }, - { - TestRunCount: 2, - GoProject: "project2", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestC", - TestPackage: "pkgC", - PassRatio: 1, - Skipped: false, - Runs: 0, - Skips: 2, - Durations: []time.Duration{time.Millisecond * 200, time.Millisecond * 200}, - Outputs: []string{"Output9", "Output10"}, - }, - }, - }, - }, - expectedReport: &TestReport{ - TestRunCount: 4, - GoProject: "project2", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestC", - TestPackage: "pkgC", - PassRatio: 1.0, - Skipped: false, - Runs: 2, - Successes: 2, - Durations: []time.Duration{time.Millisecond * 100, time.Millisecond * 100, time.Millisecond * 200, time.Millisecond * 200}, - Outputs: []string{"Output7", "Output8", "Output9", "Output10"}, - }, - }, - }, - }, - { - description: "All Skipped test results", - inputReports: []*TestReport{ - { - TestRunCount: 3, - GoProject: "project3", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestD", - TestPackage: "pkgD", - PassRatio: 1, - Skipped: true, - Runs: 0, - Durations: []time.Duration{time.Millisecond * 100, 
time.Millisecond * 200, time.Millisecond * 100}, - Outputs: []string{"Output11", "Output12", "Output13"}, - }, - }, - }, - { - TestRunCount: 2, - GoProject: "project3", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestD", - TestPackage: "pkgD", - PassRatio: 1, - Skipped: true, - Runs: 0, - Durations: []time.Duration{time.Millisecond * 150, time.Millisecond * 150}, - Outputs: []string{"Output14", "Output15"}, - }, - }, - }, - }, - expectedReport: &TestReport{ - TestRunCount: 5, - GoProject: "project3", - RaceDetection: false, - Results: []TestResult{ - { - TestName: "TestD", - TestPackage: "pkgD", - PassRatio: 1, - Skipped: true, // Should remain true as all runs are skipped - Runs: 0, - Durations: []time.Duration{time.Millisecond * 100, time.Millisecond * 200, time.Millisecond * 100, time.Millisecond * 150, time.Millisecond * 150}, - Outputs: []string{"Output11", "Output12", "Output13", "Output14", "Output15"}, - }, - }, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - finalReport, err := Aggregate(tc.inputReports...) - if err != nil { - t.Fatalf("AggregateTestResults failed: %v", err) - } - - sortTestResults(finalReport.Results) - sortTestResults(tc.expectedReport.Results) - - assert.Equal(t, tc.expectedReport.TestRunCount, finalReport.TestRunCount, "TestRunCount mismatch") - assert.Equal(t, tc.expectedReport.GoProject, finalReport.GoProject, "GoProject mismatch") - assert.Equal(t, tc.expectedReport.RaceDetection, finalReport.RaceDetection, "RaceDetection mismatch") - - require.Equal(t, len(tc.expectedReport.Results), len(finalReport.Results), "number of results mismatch") - for i, expected := range tc.expectedReport.Results { - got := finalReport.Results[i] - assert.Equal(t, expected.TestName, got.TestName, "TestName mismatch") - assert.Equal(t, expected.TestPackage, got.TestPackage, "TestPackage mismatch") - assert.Equal(t, expected.Runs, got.Runs, "Runs mismatch") - assert.Equal(t, expected.Skipped, got.Skipped, "Skipped mismatch") - assert.Equal(t, expected.PassRatio, got.PassRatio, "PassRatio mismatch") - assert.Equal(t, len(expected.Durations), len(got.Durations), "Durations mismatch") - assert.Equal(t, len(expected.Outputs), len(got.Outputs), "Outputs mismatch") - } - }) - } -} From 5edc8bb87b1b9697951910eeaf72905c8b7b27f2 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Thu, 5 Dec 2024 16:34:23 +0100 Subject: [PATCH 02/21] Use single report cmd --- tools/flakeguard/cmd/generate_report.go | 77 -------- tools/flakeguard/cmd/report.go | 238 ++++++++++++++++++++++++ tools/flakeguard/go.mod | 6 + tools/flakeguard/go.sum | 13 ++ tools/flakeguard/main.go | 3 +- 5 files changed, 258 insertions(+), 79 deletions(-) delete mode 100644 tools/flakeguard/cmd/generate_report.go create mode 100644 tools/flakeguard/cmd/report.go diff --git a/tools/flakeguard/cmd/generate_report.go b/tools/flakeguard/cmd/generate_report.go deleted file mode 100644 index 6dccda2c5..000000000 --- a/tools/flakeguard/cmd/generate_report.go +++ /dev/null @@ -1,77 +0,0 @@ -package cmd - -import ( - "fmt" - "strings" - - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" - "github.com/spf13/cobra" -) - -var ( - reportInputPath string - reportOutputPath string - reportFormat string - reportCodeOwnersPath string - reportProjectPath string -) - -var GenerateReportCmd = &cobra.Command{ - Use: "generate-report", - Short: "Generate reports from test results", - RunE: func(cmd 
*cobra.Command, args []string) error { - // Load the test results - testReport, err := reports.LoadReport(reportInputPath) - if err != nil { - return fmt.Errorf("error loading test report: %w", err) - } - - // Generate the report - if err := generateReport(testReport, reportFormat, reportOutputPath); err != nil { - return fmt.Errorf("error generating report: %w", err) - } - - fmt.Printf("Report generated at %s\n", reportOutputPath) - return nil - }, -} - -func init() { - GenerateReportCmd.Flags().StringVarP(&reportInputPath, "aggregated-report-path", "i", "", "Path to the aggregated test results file (required)") - GenerateReportCmd.Flags().StringVarP(&reportOutputPath, "output-path", "o", "./report", "Path to output the generated report (without extension)") - GenerateReportCmd.Flags().StringVarP(&reportFormat, "format", "f", "markdown", "Format of the report (markdown, json)") - GenerateReportCmd.MarkFlagRequired("aggregated-report-path") -} - -func generateReport(report *reports.TestReport, format, outputPath string) error { - fs := reports.OSFileSystem{} - switch strings.ToLower(format) { - case "markdown": - mdFileName := outputPath + ".md" - mdFile, err := fs.Create(mdFileName) - if err != nil { - return fmt.Errorf("error creating markdown file: %w", err) - } - defer mdFile.Close() - reports.GenerateMarkdownSummary(mdFile, report, 1.0) - fmt.Printf("Markdown report saved to %s\n", mdFileName) - case "json": - jsonFileName := outputPath + ".json" - if err := reports.SaveReportNoLogs(fs, jsonFileName, *report); err != nil { - return fmt.Errorf("error saving JSON report: %w", err) - } - fmt.Printf("JSON report saved to %s\n", jsonFileName) - default: - return fmt.Errorf("unsupported report format: %s", format) - } - - // Generate summary JSON - summaryData := reports.GenerateSummaryData(report.Results, 1.0) - summaryFileName := outputPath + "-summary.json" - if err := reports.SaveSummaryAsJSON(fs, summaryFileName, summaryData); err != nil { - return fmt.Errorf("error saving summary JSON: %w", err) - } - fmt.Printf("Summary JSON saved to %s\n", summaryFileName) - - return nil -} diff --git a/tools/flakeguard/cmd/report.go b/tools/flakeguard/cmd/report.go new file mode 100644 index 000000000..f19d83100 --- /dev/null +++ b/tools/flakeguard/cmd/report.go @@ -0,0 +1,238 @@ +package cmd + +import ( + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/briandowns/spinner" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" + "github.com/spf13/cobra" +) + +var ReportCmd = &cobra.Command{ + Use: "report", + Short: "Aggregate test results and generate reports", + RunE: func(cmd *cobra.Command, args []string) error { + fs := reports.OSFileSystem{} + + // Get flag values directly using cmd.Flags().Get* methods + reportResultsPath, _ := cmd.Flags().GetString("results-path") + reportOutputPath, _ := cmd.Flags().GetString("output-path") + reportFormats, _ := cmd.Flags().GetString("format") + reportMaxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") + reportCodeOwnersPath, _ := cmd.Flags().GetString("codeowners-path") + reportRepoPath, _ := cmd.Flags().GetString("repo-path") + + // Split the formats into a slice + formats := strings.Split(reportFormats, ",") + + // Start spinner for loading test reports + s := spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Loading test reports..." 
+ s.Start() + + // Load test reports from JSON files + testReports, err := reports.LoadReports(reportResultsPath) + if err != nil { + s.Stop() + return fmt.Errorf("error loading test reports: %w", err) + } + s.Stop() + fmt.Println("Test reports loaded successfully.") + + // Start spinner for aggregating reports + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Aggregating test reports..." + s.Start() + + // Aggregate the reports + aggregatedReport, err := reports.Aggregate(testReports...) + if err != nil { + s.Stop() + return fmt.Errorf("error aggregating test reports: %w", err) + } + s.Stop() + fmt.Println("Test reports aggregated successfully.") + + // Start spinner for mapping test results to paths + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Mapping test results to paths..." + s.Start() + + // Map test results to test paths + err = reports.MapTestResultsToPaths(aggregatedReport, reportRepoPath) + if err != nil { + s.Stop() + return fmt.Errorf("error mapping test results to paths: %w", err) + } + s.Stop() + fmt.Println("Test results mapped to paths successfully.") + + // Map test results to code owners if codeOwnersPath is provided + if reportCodeOwnersPath != "" { + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Mapping test results to code owners..." + s.Start() + + err = reports.MapTestResultsToOwners(aggregatedReport, reportCodeOwnersPath) + if err != nil { + s.Stop() + return fmt.Errorf("error mapping test results to code owners: %w", err) + } + s.Stop() + fmt.Println("Test results mapped to code owners successfully.") + } + + // Exclude outputs and package outputs from the aggregated report of all tests + for i := range aggregatedReport.Results { + aggregatedReport.Results[i].Outputs = nil + aggregatedReport.Results[i].PackageOutputs = nil + } + + // Create output directory if it doesn't exist + outputDir := reportOutputPath + if err := fs.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("error creating output directory: %w", err) + } + + // Save the aggregated report (all tests) + allTestsReportPath := filepath.Join(outputDir, "all-tests-report.json") + if err := reports.SaveReport(fs, allTestsReportPath, *aggregatedReport); err != nil { + return fmt.Errorf("error saving all tests report: %w", err) + } + fmt.Printf("All tests report saved to %s\n", allTestsReportPath) + + // Generate and save the reports (all tests) in specified formats + for _, format := range formats { + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = fmt.Sprintf(" Generating all tests report in format %s...", format) + s.Start() + + if err := generateReport(aggregatedReport, format, filepath.Join(outputDir, "all-tests")); err != nil { + s.Stop() + return fmt.Errorf("error generating all tests report in format %s: %w", format, err) + } + s.Stop() + fmt.Printf("All tests report in format %s generated successfully.\n", format) + } + + // Filter failed tests (PassRatio < maxPassRatio and not skipped) + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Filtering failed tests..." 
+ s.Start() + + failedTests := reports.FilterTests(aggregatedReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < reportMaxPassRatio + }) + s.Stop() + fmt.Println("Failed tests filtered successfully.") + + // For failed tests, include outputs and package outputs + for i := range failedTests { + // Retrieve outputs and package outputs from original reports + failedTests[i].Outputs = getOriginalOutputs(testReports, failedTests[i].TestName, failedTests[i].TestPackage) + failedTests[i].PackageOutputs = getOriginalPackageOutputs(testReports, failedTests[i].TestName, failedTests[i].TestPackage) + } + + // Create a new report for failed tests + failedReport := &reports.TestReport{ + GoProject: aggregatedReport.GoProject, + TestRunCount: aggregatedReport.TestRunCount, + RaceDetection: aggregatedReport.RaceDetection, + ExcludedTests: aggregatedReport.ExcludedTests, + SelectedTests: aggregatedReport.SelectedTests, + Results: failedTests, + } + + // Save the failed tests report + failedTestsReportPath := filepath.Join(outputDir, "failed-tests-report.json") + if err := reports.SaveReport(fs, failedTestsReportPath, *failedReport); err != nil { + return fmt.Errorf("error saving failed tests report: %w", err) + } + fmt.Printf("Failed tests report saved to %s\n", failedTestsReportPath) + + // Generate and save the reports for failed tests in specified formats + for _, format := range formats { + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = fmt.Sprintf(" Generating failed tests report in format %s...", format) + s.Start() + + if err := generateReport(failedReport, format, filepath.Join(outputDir, "failed-tests")); err != nil { + s.Stop() + return fmt.Errorf("error generating failed tests report in format %s: %w", format, err) + } + s.Stop() + fmt.Printf("Failed tests report in format %s generated successfully.\n", format) + } + + fmt.Printf("Reports generated at: %s\n", reportOutputPath) + + return nil + }, +} + +func init() { + ReportCmd.Flags().StringP("results-path", "p", "", "Path to the folder containing JSON test result files (required)") + ReportCmd.Flags().StringP("output-path", "o", "./report", "Path to output the generated report files") + ReportCmd.Flags().StringP("format", "f", "markdown,json", "Comma-separated list of report formats (markdown,json)") + ReportCmd.Flags().Float64P("max-pass-ratio", "", 1.0, "The maximum pass ratio threshold for a test to be considered flaky") + ReportCmd.Flags().StringP("codeowners-path", "", "", "Path to the CODEOWNERS file") + ReportCmd.Flags().StringP("repo-path", "", ".", "The path to the root of the repository/project") + ReportCmd.MarkFlagRequired("results-path") +} + +func generateReport(report *reports.TestReport, format, outputPath string) error { + fs := reports.OSFileSystem{} + format = strings.ToLower(strings.TrimSpace(format)) + switch format { + case "markdown": + mdFileName := outputPath + ".md" + mdFile, err := fs.Create(mdFileName) + if err != nil { + return fmt.Errorf("error creating markdown file: %w", err) + } + defer mdFile.Close() + reports.GenerateMarkdownSummary(mdFile, report, 1.0) + case "json": + jsonFileName := outputPath + ".json" + if err := reports.SaveReportNoLogs(fs, jsonFileName, *report); err != nil { + return fmt.Errorf("error saving JSON report: %w", err) + } + default: + return fmt.Errorf("unsupported report format: %s", format) + } + + // Generate summary JSON + summaryData := reports.GenerateSummaryData(report.Results, 1.0) + summaryFileName := outputPath + 
"-summary.json" + if err := reports.SaveSummaryAsJSON(fs, summaryFileName, summaryData); err != nil { + return fmt.Errorf("error saving summary JSON: %w", err) + } + + return nil +} + +// Helper functions to retrieve original outputs and package outputs +func getOriginalOutputs(reports []*reports.TestReport, testName, testPackage string) []string { + for _, report := range reports { + for _, result := range report.Results { + if result.TestName == testName && result.TestPackage == testPackage { + return result.Outputs + } + } + } + return nil +} + +func getOriginalPackageOutputs(reports []*reports.TestReport, testName, testPackage string) []string { + for _, report := range reports { + for _, result := range report.Results { + if result.TestName == testName && result.TestPackage == testPackage { + return result.PackageOutputs + } + } + } + return nil +} diff --git a/tools/flakeguard/go.mod b/tools/flakeguard/go.mod index 2f118e1ee..b32f49f09 100644 --- a/tools/flakeguard/go.mod +++ b/tools/flakeguard/go.mod @@ -8,8 +8,14 @@ require ( ) require ( + github.com/briandowns/spinner v1.23.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 // indirect + github.com/mattn/go-colorable v0.1.2 // indirect + github.com/mattn/go-isatty v0.0.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/term v0.1.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/flakeguard/go.sum b/tools/flakeguard/go.sum index 464b7d866..958472b10 100644 --- a/tools/flakeguard/go.sum +++ b/tools/flakeguard/go.sum @@ -1,8 +1,16 @@ +github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= +github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -12,6 +20,11 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/tools/flakeguard/main.go b/tools/flakeguard/main.go index ad22e6f7a..a22b34ff4 100644 --- a/tools/flakeguard/main.go +++ b/tools/flakeguard/main.go @@ -28,9 +28,8 @@ func init() { rootCmd.AddCommand(cmd.FindTestsCmd) rootCmd.AddCommand(cmd.RunTestsCmd) - rootCmd.AddCommand(cmd.AggregateResultsCmd) + rootCmd.AddCommand(cmd.ReportCmd) rootCmd.AddCommand(cmd.CheckTestOwnersCmd) - rootCmd.AddCommand(cmd.GenerateReportCmd) } func main() { From 12b3779a43ee58ad0dbc6ef8f21f82534ca02ae3 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Thu, 5 Dec 2024 16:55:36 +0100 Subject: [PATCH 03/21] Fix --- tools/flakeguard/reports/data.go | 14 ++- tools/flakeguard/reports/data_test.go | 6 +- tools/flakeguard/reports/presentation.go | 22 ++++- tools/flakeguard/reports/presentation_test.go | 95 +++++++------------ 4 files changed, 64 insertions(+), 73 deletions(-) diff --git a/tools/flakeguard/reports/data.go b/tools/flakeguard/reports/data.go index a34f3f2e2..6174b916d 100644 --- a/tools/flakeguard/reports/data.go +++ b/tools/flakeguard/reports/data.go @@ -63,13 +63,15 @@ func GenerateSummaryData(tests []TestResult, maxPassRatio float64) SummaryData { passes += result.Successes fails += result.Failures skips += result.Skips + if result.Panic { panickedTests++ flakyTests++ } else if result.Race { racedTests++ flakyTests++ - } else if result.PassRatio < maxPassRatio { + } else if !result.Skipped && result.Runs > 0 && result.PassRatio < maxPassRatio { + // Exclude skipped tests and tests with no runs flakyTests++ } } @@ -82,12 +84,14 @@ func GenerateSummaryData(tests []TestResult, maxPassRatio float64) SummaryData { passPercentage = math.Round((float64(passes)/float64(runs)*100)*100) / 100 averagePassRatio = float64(passes) / float64(runs) } - if len(tests) > 0 { - flakePercentage = math.Round((float64(flakyTests)/float64(len(tests))*100)*100) / 100 + totalTests := len(tests) // Include skipped tests in total tests + totalExecutedTests := totalTests - skips // Tests that were actually executed + if totalExecutedTests > 0 { + flakePercentage = math.Round((float64(flakyTests)/float64(totalExecutedTests)*100)*100) / 100 } return SummaryData{ - TotalTests: len(tests), + TotalTests: totalTests, PanickedTests: panickedTests, RacedTests: racedTests, FlakyTests: flakyTests, @@ -184,7 +188,7 @@ func mergeTestResults(a, b TestResult) TestResult { if a.Runs > 0 { a.PassRatio = float64(a.Successes) / float64(a.Runs) } else { - a.PassRatio = 0.0 + a.PassRatio = -1.0 // Indicate undefined pass ratio for skipped tests } return a diff --git a/tools/flakeguard/reports/data_test.go b/tools/flakeguard/reports/data_test.go index d8fed49fc..f556b73c6 100644 --- a/tools/flakeguard/reports/data_test.go +++ b/tools/flakeguard/reports/data_test.go @@ -388,7 +388,7 @@ func TestAggregate_AllSkippedTests(t *testing.T) { Skipped: 
true, Runs: 0, Skips: 3, - PassRatio: 0.0, // Or set to -1 to indicate undefined + PassRatio: -1, // 1 indicate undefined }, }, } @@ -403,7 +403,7 @@ func TestAggregate_AllSkippedTests(t *testing.T) { Skipped: true, Runs: 0, Skips: 2, - PassRatio: 0.0, + PassRatio: -1, }, }, } @@ -419,7 +419,7 @@ func TestAggregate_AllSkippedTests(t *testing.T) { Skipped: true, Runs: 0, Skips: 5, - PassRatio: 0.0, + PassRatio: -1, } if len(aggregatedReport.Results) != 1 { diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go index bd489a5ec..26e8503a4 100644 --- a/tools/flakeguard/reports/presentation.go +++ b/tools/flakeguard/reports/presentation.go @@ -10,7 +10,7 @@ import ( "golang.org/x/text/message" ) -func GenerateResultsTable( +func GenerateFlakyTestsTable( results []TestResult, expectedPassRatio float64, markdown bool, @@ -18,6 +18,7 @@ func GenerateResultsTable( p := message.NewPrinter(language.English) sortTestResults(results) + // Headers in the requested order headers := []string{ "Name", "Pass Ratio", @@ -34,18 +35,22 @@ func GenerateResultsTable( "Code Owners", } + // Format headers for Markdown if needed if markdown { for i, header := range headers { headers[i] = fmt.Sprintf("**%s**", header) } } + // Initialize the table with headers table := [][]string{headers} + for _, result := range results { - if result.PassRatio < expectedPassRatio { + // Exclude skipped tests and only include tests below the expected pass ratio + if !result.Skipped && result.PassRatio < expectedPassRatio { row := []string{ result.TestName, - fmt.Sprintf("%.2f%%", result.PassRatio*100), + formatPassRatio(result.PassRatio), fmt.Sprintf("%t", result.Panic), fmt.Sprintf("%t", result.Timeout), fmt.Sprintf("%t", result.Race), @@ -58,7 +63,7 @@ func GenerateResultsTable( avgDuration(result.Durations).String(), } - // Code owners + // Add code owners owners := "Unknown" if len(result.CodeOwners) > 0 { owners = strings.Join(result.CodeOwners, ", ") @@ -71,6 +76,13 @@ func GenerateResultsTable( return table } +func formatPassRatio(passRatio float64) string { + if passRatio < 0 { + return "N/A" // Handle undefined pass ratios (e.g., skipped tests) + } + return fmt.Sprintf("%.2f%%", passRatio*100) +} + func GenerateMarkdownSummary(w io.Writer, testReport *TestReport, maxPassRatio float64) { settingsTable := buildSettingsTable(testReport, maxPassRatio) fmt.Fprint(w, "# Flakeguard Summary\n\n") @@ -115,7 +127,7 @@ func RenderResults( maxPassRatio float64, markdown bool, ) { - resultsTable := GenerateResultsTable(tests, maxPassRatio, markdown) + resultsTable := GenerateFlakyTestsTable(tests, maxPassRatio, markdown) summary := GenerateSummaryData(tests, maxPassRatio) renderSummaryTable(w, summary, markdown) renderTestResultsTable(w, resultsTable, markdown) diff --git a/tools/flakeguard/reports/presentation_test.go b/tools/flakeguard/reports/presentation_test.go index 0597e863b..52a7bb5b9 100644 --- a/tools/flakeguard/reports/presentation_test.go +++ b/tools/flakeguard/reports/presentation_test.go @@ -8,91 +8,66 @@ import ( "time" ) -// TestGenerateResultsTable tests the GenerateResultsTable function. 
-func TestGenerateResultsTable(t *testing.T) { - testResults := []TestResult{ +func TestGenerateFlakyTestsTable(t *testing.T) { + results := []TestResult{ { - TestName: "TestA", - PassRatio: 0.8, - Panic: false, - Timeout: false, - Race: false, - Runs: 5, - Successes: 4, - Failures: 1, - Skips: 0, - TestPackage: "pkg1", - PackagePanic: false, - Durations: []time.Duration{time.Second, time.Second, time.Second, time.Second, time.Second}, - CodeOwners: []string{"owner1"}, + TestName: "TestFlaky", + PassRatio: 0.5, + Skipped: false, + Runs: 2, + Successes: 1, + Failures: 1, + TestPackage: "pkg1", + CodeOwners: []string{"owner1"}, }, { - TestName: "TestB", - PassRatio: 1.0, - Panic: false, - Timeout: false, - Race: false, - Runs: 3, - Successes: 3, - Failures: 0, - Skips: 0, - TestPackage: "pkg2", - PackagePanic: false, - Durations: []time.Duration{2 * time.Second, 2 * time.Second, 2 * time.Second}, - CodeOwners: []string{"owner2"}, + TestName: "TestSkipped", + PassRatio: -1.0, + Skipped: true, + Runs: 0, + Skips: 1, + TestPackage: "pkg2", + CodeOwners: []string{"owner2"}, }, } expectedPassRatio := 0.9 markdown := false - table := GenerateResultsTable(testResults, expectedPassRatio, markdown) - - // Only TestA should be included since its PassRatio is below 0.9 - if len(table) != 2 { - t.Fatalf("Expected table length 2 (headers + 1 row), got %d", len(table)) - } + table := GenerateFlakyTestsTable(results, expectedPassRatio, markdown) // Verify headers - headers := table[0] expectedHeaders := []string{ - "Name", - "Pass Ratio", - "Panicked?", - "Timed Out?", - "Race?", - "Runs", - "Successes", - "Failures", - "Skips", - "Package", - "Package Panicked?", - "Avg Duration", - "Code Owners", + "Name", "Pass Ratio", "Panicked?", "Timed Out?", "Race?", "Runs", + "Successes", "Failures", "Skips", "Package", "Package Panicked?", + "Avg Duration", "Code Owners", } - if !reflect.DeepEqual(headers, expectedHeaders) { - t.Errorf("Expected headers %+v, got %+v", expectedHeaders, headers) + if !reflect.DeepEqual(table[0], expectedHeaders) { + t.Errorf("Expected headers %+v, got %+v", expectedHeaders, table[0]) + } + + // Verify rows (only TestFlaky should appear) + if len(table) != 2 { // 1 header row + 1 data row + t.Fatalf("Expected table length 2 (headers + 1 row), got %d", len(table)) } - // Verify row data - row := table[1] expectedRow := []string{ - "TestA", - "80.00%", + "TestFlaky", + "50.00%", "false", "false", "false", - "5", - "4", + "2", + "1", "1", "0", "pkg1", "false", - "1s", + "0s", "owner1", } - if !reflect.DeepEqual(row, expectedRow) { - t.Errorf("Expected row %+v, got %+v", expectedRow, row) + if !reflect.DeepEqual(table[1], expectedRow) { + t.Errorf("Expected row %+v, got %+v", expectedRow, table[1]) } } From acfb773d0ad65be3f88a683028344422ac02ba57 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:15:12 +0100 Subject: [PATCH 04/21] Fix report files --- tools/flakeguard/cmd/report.go | 48 ++++++++++++---------------------- 1 file changed, 16 insertions(+), 32 deletions(-) diff --git a/tools/flakeguard/cmd/report.go b/tools/flakeguard/cmd/report.go index f19d83100..baaf0d4dd 100644 --- a/tools/flakeguard/cmd/report.go +++ b/tools/flakeguard/cmd/report.go @@ -17,7 +17,7 @@ var ReportCmd = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { fs := reports.OSFileSystem{} - // Get flag values directly using cmd.Flags().Get* methods + // Get flag values reportResultsPath, _ := cmd.Flags().GetString("results-path") 
reportOutputPath, _ := cmd.Flags().GetString("output-path") reportFormats, _ := cmd.Flags().GetString("format") @@ -104,18 +104,19 @@ var ReportCmd = &cobra.Command{ } fmt.Printf("All tests report saved to %s\n", allTestsReportPath) - // Generate and save the reports (all tests) in specified formats + // Generate and save the summary reports (all tests) in specified formats for _, format := range formats { + format = strings.ToLower(strings.TrimSpace(format)) s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = fmt.Sprintf(" Generating all tests report in format %s...", format) + s.Suffix = fmt.Sprintf(" Generating all tests summary in format %s...", format) s.Start() if err := generateReport(aggregatedReport, format, filepath.Join(outputDir, "all-tests")); err != nil { s.Stop() - return fmt.Errorf("error generating all tests report in format %s: %w", format, err) + return fmt.Errorf("error generating all tests summary in format %s: %w", format, err) } s.Stop() - fmt.Printf("All tests report in format %s generated successfully.\n", format) + fmt.Printf("All tests summary in format %s generated successfully.\n", format) } // Filter failed tests (PassRatio < maxPassRatio and not skipped) @@ -153,20 +154,6 @@ var ReportCmd = &cobra.Command{ } fmt.Printf("Failed tests report saved to %s\n", failedTestsReportPath) - // Generate and save the reports for failed tests in specified formats - for _, format := range formats { - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = fmt.Sprintf(" Generating failed tests report in format %s...", format) - s.Start() - - if err := generateReport(failedReport, format, filepath.Join(outputDir, "failed-tests")); err != nil { - s.Stop() - return fmt.Errorf("error generating failed tests report in format %s: %w", format, err) - } - s.Stop() - fmt.Printf("Failed tests report in format %s generated successfully.\n", format) - } - fmt.Printf("Reports generated at: %s\n", reportOutputPath) return nil @@ -176,7 +163,7 @@ var ReportCmd = &cobra.Command{ func init() { ReportCmd.Flags().StringP("results-path", "p", "", "Path to the folder containing JSON test result files (required)") ReportCmd.Flags().StringP("output-path", "o", "./report", "Path to output the generated report files") - ReportCmd.Flags().StringP("format", "f", "markdown,json", "Comma-separated list of report formats (markdown,json)") + ReportCmd.Flags().StringP("format", "f", "markdown,json", "Comma-separated list of summary report formats (markdown,json)") ReportCmd.Flags().Float64P("max-pass-ratio", "", 1.0, "The maximum pass ratio threshold for a test to be considered flaky") ReportCmd.Flags().StringP("codeowners-path", "", "", "Path to the CODEOWNERS file") ReportCmd.Flags().StringP("repo-path", "", ".", "The path to the root of the repository/project") @@ -186,9 +173,11 @@ func init() { func generateReport(report *reports.TestReport, format, outputPath string) error { fs := reports.OSFileSystem{} format = strings.ToLower(strings.TrimSpace(format)) + switch format { case "markdown": - mdFileName := outputPath + ".md" + // Adjust the markdown filename to include "-summary" + mdFileName := outputPath + "-summary.md" mdFile, err := fs.Create(mdFileName) if err != nil { return fmt.Errorf("error creating markdown file: %w", err) @@ -196,19 +185,14 @@ func generateReport(report *reports.TestReport, format, outputPath string) error defer mdFile.Close() reports.GenerateMarkdownSummary(mdFile, report, 1.0) case "json": - jsonFileName := outputPath + ".json" - if err 
:= reports.SaveReportNoLogs(fs, jsonFileName, *report); err != nil { - return fmt.Errorf("error saving JSON report: %w", err) + // Generate summary JSON + summaryData := reports.GenerateSummaryData(report.Results, 1.0) + summaryFileName := outputPath + "-summary.json" + if err := reports.SaveSummaryAsJSON(fs, summaryFileName, summaryData); err != nil { + return fmt.Errorf("error saving summary JSON: %w", err) } default: - return fmt.Errorf("unsupported report format: %s", format) - } - - // Generate summary JSON - summaryData := reports.GenerateSummaryData(report.Results, 1.0) - summaryFileName := outputPath + "-summary.json" - if err := reports.SaveSummaryAsJSON(fs, summaryFileName, summaryData); err != nil { - return fmt.Errorf("error saving summary JSON: %w", err) + return fmt.Errorf("unsupported summary report format: %s", format) } return nil From d9043b4d20ecea135a17989eb0f11dcc7085fe2c Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:26:06 +0100 Subject: [PATCH 05/21] Remove old cmd --- tools/flakeguard/cmd/aggregate_results.go | 90 ----------------------- tools/flakeguard/cmd/filter_results.go | 45 ------------ 2 files changed, 135 deletions(-) delete mode 100644 tools/flakeguard/cmd/aggregate_results.go delete mode 100644 tools/flakeguard/cmd/filter_results.go diff --git a/tools/flakeguard/cmd/aggregate_results.go b/tools/flakeguard/cmd/aggregate_results.go deleted file mode 100644 index 924520abd..000000000 --- a/tools/flakeguard/cmd/aggregate_results.go +++ /dev/null @@ -1,90 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" - "github.com/spf13/cobra" -) - -var AggregateResultsCmd = &cobra.Command{ - Use: "aggregate-results", - Short: "Aggregate test results into a single report, with optional filtering and code owners mapping", - RunE: func(cmd *cobra.Command, args []string) error { - // Get flag values - aggregateResultsPath, _ := cmd.Flags().GetString("results-path") - aggregateOutputPath, _ := cmd.Flags().GetString("output-path") - includeOutputs, _ := cmd.Flags().GetBool("include-outputs") - includePackageOutputs, _ := cmd.Flags().GetBool("include-package-outputs") - filterFailed, _ := cmd.Flags().GetBool("filter-failed") - maxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") - codeOwnersPath, _ := cmd.Flags().GetString("codeowners-path") - repoPath, _ := cmd.Flags().GetString("repo-path") - - // Load test reports from JSON files - testReports, err := reports.LoadReports(aggregateResultsPath) - if err != nil { - return fmt.Errorf("error loading test reports: %w", err) - } - - // Aggregate the reports - aggregatedReport, err := reports.Aggregate(testReports...) 
- if err != nil { - return fmt.Errorf("error aggregating test reports: %w", err) - } - - // Map test results to test paths - err = reports.MapTestResultsToPaths(aggregatedReport, repoPath) - if err != nil { - return fmt.Errorf("error mapping test results to paths: %w", err) - } - - // Map test results to code owners if codeOwnersPath is provided - if codeOwnersPath != "" { - err = reports.MapTestResultsToOwners(aggregatedReport, codeOwnersPath) - if err != nil { - return fmt.Errorf("error mapping test results to code owners: %w", err) - } - } - - // Filter results if needed - if filterFailed { - aggregatedReport.Results = reports.FilterTests(aggregatedReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < maxPassRatio - }) - } - - // Process the aggregated results based on the flags - if !includeOutputs || !includePackageOutputs { - for i := range aggregatedReport.Results { - if !includeOutputs { - aggregatedReport.Results[i].Outputs = nil - } - if !includePackageOutputs { - aggregatedReport.Results[i].PackageOutputs = nil - } - } - } - - // Save the aggregated report - if err := reports.SaveReport(reports.OSFileSystem{}, aggregateOutputPath, *aggregatedReport); err != nil { - return fmt.Errorf("error saving aggregated report: %w", err) - } - - fmt.Printf("Aggregated report saved to %s\n", aggregateOutputPath) - return nil - }, -} - -func init() { - AggregateResultsCmd.Flags().StringP("results-path", "p", "", "Path to the folder containing JSON test result files (required)") - AggregateResultsCmd.Flags().StringP("output-path", "o", "./aggregated-results.json", "Path to output the aggregated test results") - AggregateResultsCmd.Flags().Bool("include-outputs", false, "Include test outputs in the aggregated test results") - AggregateResultsCmd.Flags().Bool("include-package-outputs", false, "Include test package outputs in the aggregated test results") - AggregateResultsCmd.Flags().Bool("filter-failed", false, "If true, filter and output only failed tests based on the max-pass-ratio threshold") - AggregateResultsCmd.Flags().Float64("max-pass-ratio", 1.0, "The maximum pass ratio threshold for a test to be considered flaky. 
Any tests below this pass rate will be considered flaky.") - AggregateResultsCmd.Flags().String("codeowners-path", "", "Path to the CODEOWNERS file") - AggregateResultsCmd.Flags().String("repo-path", ".", "The path to the root of the repository/project") - AggregateResultsCmd.MarkFlagRequired("results-path") - AggregateResultsCmd.MarkFlagRequired("repo-path") -} diff --git a/tools/flakeguard/cmd/filter_results.go b/tools/flakeguard/cmd/filter_results.go deleted file mode 100644 index 286afcb0f..000000000 --- a/tools/flakeguard/cmd/filter_results.go +++ /dev/null @@ -1,45 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" - - "github.com/spf13/cobra" -) - -var ( - filterInputPath string - filterOutputPath string - filterMaxPassRatio float64 -) - -var filterCmd = &cobra.Command{ - Use: "filter", - Short: "Filter aggregated test results based on criteria", - RunE: func(cmd *cobra.Command, args []string) error { - // Load the aggregated report - aggregatedReport, err := reports.LoadReport(filterInputPath) - if err != nil { - return fmt.Errorf("error loading aggregated report: %w", err) - } - - // Filter the test results - filteredReport := reports.FilterResults(aggregatedReport, filterMaxPassRatio) - - // Save the filtered report - if err := reports.SaveReport(reports.OSFileSystem{}, filterOutputPath, *filteredReport); err != nil { - return fmt.Errorf("error saving filtered report: %w", err) - } - - fmt.Printf("Filtered report saved to %s\n", filterOutputPath) - return nil - }, -} - -func init() { - filterCmd.Flags().StringVarP(&filterInputPath, "input-path", "i", "", "Path to the aggregated test results file (required)") - filterCmd.Flags().StringVarP(&filterOutputPath, "output-path", "o", "./filtered-results.json", "Path to output the filtered test results") - filterCmd.Flags().Float64VarP(&filterMaxPassRatio, "max-pass-ratio", "m", 1.0, "Maximum pass ratio threshold for filtering tests") - filterCmd.MarkFlagRequired("input-path") -} From d3a8549a0e5d0ab7f2b3452fe31a7fdfb129ab00 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 13:10:52 +0100 Subject: [PATCH 06/21] Add PR comment to flakeguard report --- tools/flakeguard/cmd/report.go | 105 ++++++++++++------ tools/flakeguard/reports/presentation.go | 41 ++++++- tools/flakeguard/reports/presentation_test.go | 78 ++++++++++++- 3 files changed, 186 insertions(+), 38 deletions(-) diff --git a/tools/flakeguard/cmd/report.go b/tools/flakeguard/cmd/report.go index baaf0d4dd..a57cdddb6 100644 --- a/tools/flakeguard/cmd/report.go +++ b/tools/flakeguard/cmd/report.go @@ -20,13 +20,10 @@ var ReportCmd = &cobra.Command{ // Get flag values reportResultsPath, _ := cmd.Flags().GetString("results-path") reportOutputPath, _ := cmd.Flags().GetString("output-path") - reportFormats, _ := cmd.Flags().GetString("format") reportMaxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") reportCodeOwnersPath, _ := cmd.Flags().GetString("codeowners-path") reportRepoPath, _ := cmd.Flags().GetString("repo-path") - - // Split the formats into a slice - formats := strings.Split(reportFormats, ",") + generatePRComment, _ := cmd.Flags().GetBool("generate-pr-comment") // Start spinner for loading test reports s := spinner.New(spinner.CharSets[11], 100*time.Millisecond) @@ -104,19 +101,57 @@ var ReportCmd = &cobra.Command{ } fmt.Printf("All tests report saved to %s\n", allTestsReportPath) - // Generate and save the summary reports (all 
tests) in specified formats - for _, format := range formats { - format = strings.ToLower(strings.TrimSpace(format)) + // Generate GitHub summary markdown + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Generating GitHub summary markdown..." + s.Start() + + err = generateGitHubSummaryMarkdown(aggregatedReport, filepath.Join(outputDir, "all-tests")) + if err != nil { + s.Stop() + return fmt.Errorf("error generating GitHub summary markdown: %w", err) + } + s.Stop() + fmt.Println("GitHub summary markdown generated successfully.") + + if generatePRComment { + // Retrieve required flags + currentBranch, _ := cmd.Flags().GetString("current-branch") + currentCommitSHA, _ := cmd.Flags().GetString("current-commit-sha") + baseBranch, _ := cmd.Flags().GetString("base-branch") + repoURL, _ := cmd.Flags().GetString("repo-url") + actionRunID, _ := cmd.Flags().GetString("action-run-id") + + // Validate that required flags are provided + missingFlags := []string{} + if currentBranch == "" { + missingFlags = append(missingFlags, "--current-branch") + } + if currentCommitSHA == "" { + missingFlags = append(missingFlags, "--current-commit-sha") + } + if repoURL == "" { + missingFlags = append(missingFlags, "--repo-url") + } + if actionRunID == "" { + missingFlags = append(missingFlags, "--action-run-id") + } + if len(missingFlags) > 0 { + return fmt.Errorf("the following flags are required when --generate-pr-comment is set: %s", strings.Join(missingFlags, ", ")) + } + + // Generate PR comment markdown s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = fmt.Sprintf(" Generating all tests summary in format %s...", format) + s.Suffix = " Generating PR comment markdown..." s.Start() - if err := generateReport(aggregatedReport, format, filepath.Join(outputDir, "all-tests")); err != nil { + err = generatePRCommentMarkdown(aggregatedReport, filepath.Join(outputDir, "all-tests"), baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) + if err != nil { s.Stop() - return fmt.Errorf("error generating all tests summary in format %s: %w", format, err) + return fmt.Errorf("error generating PR comment markdown: %w", err) } s.Stop() - fmt.Printf("All tests summary in format %s generated successfully.\n", format) + fmt.Println("PR comment markdown generated successfully.") } // Filter failed tests (PassRatio < maxPassRatio and not skipped) @@ -163,38 +198,40 @@ var ReportCmd = &cobra.Command{ func init() { ReportCmd.Flags().StringP("results-path", "p", "", "Path to the folder containing JSON test result files (required)") ReportCmd.Flags().StringP("output-path", "o", "./report", "Path to output the generated report files") - ReportCmd.Flags().StringP("format", "f", "markdown,json", "Comma-separated list of summary report formats (markdown,json)") ReportCmd.Flags().Float64P("max-pass-ratio", "", 1.0, "The maximum pass ratio threshold for a test to be considered flaky") ReportCmd.Flags().StringP("codeowners-path", "", "", "Path to the CODEOWNERS file") ReportCmd.Flags().StringP("repo-path", "", ".", "The path to the root of the repository/project") + ReportCmd.Flags().Bool("generate-pr-comment", false, "Set to true to generate PR comment markdown") + ReportCmd.Flags().String("base-branch", "develop", "The base branch to compare against (used in PR comment)") + ReportCmd.Flags().String("current-branch", "", "The current branch name (required if generate-pr-comment is set)") + ReportCmd.Flags().String("current-commit-sha", "", "The current commit SHA (required if 
generate-pr-comment is set)") + ReportCmd.Flags().String("repo-url", "", "The repository URL (required if generate-pr-comment is set)") + ReportCmd.Flags().String("action-run-id", "", "The GitHub Actions run ID (required if generate-pr-comment is set)") + ReportCmd.MarkFlagRequired("results-path") } -func generateReport(report *reports.TestReport, format, outputPath string) error { +func generateGitHubSummaryMarkdown(report *reports.TestReport, outputPath string) error { fs := reports.OSFileSystem{} - format = strings.ToLower(strings.TrimSpace(format)) - - switch format { - case "markdown": - // Adjust the markdown filename to include "-summary" - mdFileName := outputPath + "-summary.md" - mdFile, err := fs.Create(mdFileName) - if err != nil { - return fmt.Errorf("error creating markdown file: %w", err) - } - defer mdFile.Close() - reports.GenerateMarkdownSummary(mdFile, report, 1.0) - case "json": - // Generate summary JSON - summaryData := reports.GenerateSummaryData(report.Results, 1.0) - summaryFileName := outputPath + "-summary.json" - if err := reports.SaveSummaryAsJSON(fs, summaryFileName, summaryData); err != nil { - return fmt.Errorf("error saving summary JSON: %w", err) - } - default: - return fmt.Errorf("unsupported summary report format: %s", format) + mdFileName := outputPath + "-summary.md" + mdFile, err := fs.Create(mdFileName) + if err != nil { + return fmt.Errorf("error creating GitHub summary markdown file: %w", err) } + defer mdFile.Close() + reports.GenerateGitHubSummaryMarkdown(mdFile, report, 1.0) + return nil +} +func generatePRCommentMarkdown(report *reports.TestReport, outputPath, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID string) error { + fs := reports.OSFileSystem{} + mdFileName := outputPath + "-pr-comment.md" + mdFile, err := fs.Create(mdFileName) + if err != nil { + return fmt.Errorf("error creating PR comment markdown file: %w", err) + } + defer mdFile.Close() + reports.GeneratePRCommentMarkdown(mdFile, report, 1.0, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) return nil } diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go index 26e8503a4..cb6e3fde0 100644 --- a/tools/flakeguard/reports/presentation.go +++ b/tools/flakeguard/reports/presentation.go @@ -83,7 +83,7 @@ func formatPassRatio(passRatio float64) string { return fmt.Sprintf("%.2f%%", passRatio*100) } -func GenerateMarkdownSummary(w io.Writer, testReport *TestReport, maxPassRatio float64) { +func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64) { settingsTable := buildSettingsTable(testReport, maxPassRatio) fmt.Fprint(w, "# Flakeguard Summary\n\n") printTable(w, settingsTable) @@ -104,6 +104,45 @@ func GenerateMarkdownSummary(w io.Writer, testReport *TestReport, maxPassRatio f RenderResults(w, testReport.Results, maxPassRatio, true) } +func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID string) { + fmt.Fprint(w, "# Flakeguard Summary\n\n") + + // Construct additionalInfo inside the function + additionalInfo := fmt.Sprintf( + "Ran new or updated tests between `%s` and %s (`%s`).", + baseBranch, + currentCommitSHA, + currentBranch, + ) + + // Construct the links + viewDetailsLink := fmt.Sprintf("[View Flaky Detector Details](%s/actions/runs/%s)", repoURL, actionRunID) + compareChangesLink := fmt.Sprintf("[Compare Changes](%s/compare/%s...%s#files_bucket)", repoURL, 
baseBranch, currentCommitSHA) + linksLine := fmt.Sprintf("%s | %s", viewDetailsLink, compareChangesLink) + + // Include additional information + fmt.Fprintln(w, additionalInfo) + fmt.Fprintln(w) // Add an extra newline for formatting + + // Include the links + fmt.Fprintln(w, linksLine) + fmt.Fprintln(w) // Add an extra newline for formatting + + if len(testReport.Results) == 0 { + fmt.Fprintln(w, "## No tests ran :warning:") + return + } + + summary := GenerateSummaryData(testReport.Results, maxPassRatio) + if summary.AveragePassRatio < maxPassRatio { + fmt.Fprintln(w, "## Found Flaky Tests :x:") + } else { + fmt.Fprintln(w, "## No Flakes Found :white_check_mark:") + } + + RenderResults(w, testReport.Results, maxPassRatio, true) +} + func buildSettingsTable(testReport *TestReport, maxPassRatio float64) [][]string { rows := [][]string{ {"**Setting**", "**Value**"}, diff --git a/tools/flakeguard/reports/presentation_test.go b/tools/flakeguard/reports/presentation_test.go index 52a7bb5b9..7dfdace2e 100644 --- a/tools/flakeguard/reports/presentation_test.go +++ b/tools/flakeguard/reports/presentation_test.go @@ -2,6 +2,7 @@ package reports import ( "bytes" + "fmt" "reflect" "strings" "testing" @@ -71,8 +72,8 @@ func TestGenerateFlakyTestsTable(t *testing.T) { } } -// TestGenerateMarkdownSummary tests the GenerateMarkdownSummary function. -func TestGenerateMarkdownSummary(t *testing.T) { +// TestGenerateGitHubSummaryMarkdown tests the GenerateGitHubSummaryMarkdown function. +func TestGenerateGitHubSummaryMarkdown(t *testing.T) { testReport := &TestReport{ GoProject: "ProjectX", TestRunCount: 3, @@ -104,7 +105,7 @@ func TestGenerateMarkdownSummary(t *testing.T) { var buffer bytes.Buffer maxPassRatio := 0.9 - GenerateMarkdownSummary(&buffer, testReport, maxPassRatio) + GenerateGitHubSummaryMarkdown(&buffer, testReport, maxPassRatio) output := buffer.String() @@ -126,6 +127,77 @@ func TestGenerateMarkdownSummary(t *testing.T) { } } +// TestGeneratePRCommentMarkdown tests the GeneratePRCommentMarkdown function. 
+func TestGeneratePRCommentMarkdown(t *testing.T) { + testReport := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 3, + RaceDetection: true, + Results: []TestResult{ + { + TestName: "TestA", + PassRatio: 0.8, + Runs: 5, + Successes: 4, + Failures: 1, + TestPackage: "pkg1", + CodeOwners: []string{"owner1"}, + Durations: []time.Duration{time.Second, time.Second, time.Second, time.Second, time.Second}, + }, + { + TestName: "TestB", + PassRatio: 1.0, + Runs: 3, + Successes: 3, + Failures: 0, + TestPackage: "pkg2", + CodeOwners: []string{"owner2"}, + Durations: []time.Duration{2 * time.Second, 2 * time.Second, 2 * time.Second}, + }, + }, + } + + var buffer bytes.Buffer + maxPassRatio := 0.9 + baseBranch := "develop" + currentBranch := "feature-branch" + currentCommitSHA := "abcdef1234567890" + repoURL := "https://github.com/example/repo" + actionRunID := "123456789" + + GeneratePRCommentMarkdown(&buffer, testReport, maxPassRatio, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) + + output := buffer.String() + + fmt.Println(output) + + // Check that the output includes the expected headings and links + if !strings.Contains(output, "# Flakeguard Summary") { + t.Error("Expected markdown summary to contain '# Flakeguard Summary'") + } + if !strings.Contains(output, fmt.Sprintf("Ran new or updated tests between `%s` and %s (`%s`).", baseBranch, currentCommitSHA, currentBranch)) { + t.Error("Expected markdown to contain the additional info line with branches and commit SHA") + } + if !strings.Contains(output, fmt.Sprintf("[View Flaky Detector Details](%s/actions/runs/%s)", repoURL, actionRunID)) { + t.Error("Expected markdown to contain the 'View Flaky Detector Details' link") + } + if !strings.Contains(output, fmt.Sprintf("[Compare Changes](%s/compare/%s...%s#files_bucket)", repoURL, baseBranch, currentCommitSHA)) { + t.Error("Expected markdown to contain the 'Compare Changes' link") + } + if !strings.Contains(output, "## Found Flaky Tests :x:") { + t.Error("Expected markdown summary to contain '## Found Flaky Tests :x:'") + } + if !strings.Contains(output, "| **Name**") { + t.Error("Expected markdown table headers for test results") + } + if !strings.Contains(output, "| TestA ") { + t.Error("Expected markdown table to include TestA") + } + if strings.Contains(output, "| TestB ") { + t.Error("Did not expect markdown table to include TestB since its pass ratio is above the threshold") + } +} + // TestPrintTable tests the printTable function. 
func TestPrintTable(t *testing.T) { table := [][]string{ From c7f571971959de23014e801ce7a1ca09f3984c1e Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 13:34:34 +0100 Subject: [PATCH 07/21] Update PR comment --- tools/flakeguard/reports/presentation.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go index cb6e3fde0..8376de614 100644 --- a/tools/flakeguard/reports/presentation.go +++ b/tools/flakeguard/reports/presentation.go @@ -107,7 +107,7 @@ func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassR func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID string) { fmt.Fprint(w, "# Flakeguard Summary\n\n") - // Construct additionalInfo inside the function + // Construct additional info additionalInfo := fmt.Sprintf( "Ran new or updated tests between `%s` and %s (`%s`).", baseBranch, @@ -133,14 +133,15 @@ func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio return } - summary := GenerateSummaryData(testReport.Results, maxPassRatio) - if summary.AveragePassRatio < maxPassRatio { + // Add the flaky tests section + if GenerateSummaryData(testReport.Results, maxPassRatio).AveragePassRatio < maxPassRatio { fmt.Fprintln(w, "## Found Flaky Tests :x:") } else { fmt.Fprintln(w, "## No Flakes Found :white_check_mark:") } - RenderResults(w, testReport.Results, maxPassRatio, true) + resultsTable := GenerateFlakyTestsTable(testReport.Results, maxPassRatio, true) + renderTestResultsTable(w, resultsTable, true) } func buildSettingsTable(testReport *TestReport, maxPassRatio float64) [][]string { From 1e6eecca8030d187b90e05175e310fcc8d37f9e4 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 13:40:26 +0100 Subject: [PATCH 08/21] Fix summary json --- tools/flakeguard/cmd/report.go | 37 ++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tools/flakeguard/cmd/report.go b/tools/flakeguard/cmd/report.go index a57cdddb6..acb925cd8 100644 --- a/tools/flakeguard/cmd/report.go +++ b/tools/flakeguard/cmd/report.go @@ -1,6 +1,7 @@ package cmd import ( + "encoding/json" "fmt" "path/filepath" "strings" @@ -114,6 +115,19 @@ var ReportCmd = &cobra.Command{ s.Stop() fmt.Println("GitHub summary markdown generated successfully.") + // Generate all-tests-summary.json + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Generating all-tests-summary.json..." 
+ s.Start() + + err = generateAllTestsSummaryJSON(aggregatedReport, filepath.Join(outputDir, "all-tests-summary.json"), reportMaxPassRatio) + if err != nil { + s.Stop() + return fmt.Errorf("error generating all-tests-summary.json: %w", err) + } + s.Stop() + fmt.Println("all-tests-summary.json generated successfully.") + if generatePRComment { // Retrieve required flags currentBranch, _ := cmd.Flags().GetString("current-branch") @@ -235,6 +249,29 @@ func generatePRCommentMarkdown(report *reports.TestReport, outputPath, baseBranc return nil } +// New function to generate all-tests-summary.json +func generateAllTestsSummaryJSON(report *reports.TestReport, outputPath string, maxPassRatio float64) error { + summary := reports.GenerateSummaryData(report.Results, maxPassRatio) + data, err := json.Marshal(summary) + if err != nil { + return fmt.Errorf("error marshaling summary data to JSON: %w", err) + } + + fs := reports.OSFileSystem{} + jsonFile, err := fs.Create(outputPath) + if err != nil { + return fmt.Errorf("error creating all-tests-summary.json file: %w", err) + } + defer jsonFile.Close() + + _, err = jsonFile.Write(data) + if err != nil { + return fmt.Errorf("error writing summary data to all-tests-summary.json: %w", err) + } + + return nil +} + // Helper functions to retrieve original outputs and package outputs func getOriginalOutputs(reports []*reports.TestReport, testName, testPackage string) []string { for _, report := range reports { From 6e82b1d547c5ae9c30cd972e88f512a7391404fc Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 14:08:07 +0100 Subject: [PATCH 09/21] Rename artifacts --- tools/flakeguard/cmd/report.go | 45 +++++++++++++++++++++++----------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/tools/flakeguard/cmd/report.go b/tools/flakeguard/cmd/report.go index acb925cd8..1273d9566 100644 --- a/tools/flakeguard/cmd/report.go +++ b/tools/flakeguard/cmd/report.go @@ -96,7 +96,7 @@ var ReportCmd = &cobra.Command{ } // Save the aggregated report (all tests) - allTestsReportPath := filepath.Join(outputDir, "all-tests-report.json") + allTestsReportPath := filepath.Join(outputDir, "all-test-results.json") if err := reports.SaveReport(fs, allTestsReportPath, *aggregatedReport); err != nil { return fmt.Errorf("error saving all tests report: %w", err) } @@ -107,7 +107,7 @@ var ReportCmd = &cobra.Command{ s.Suffix = " Generating GitHub summary markdown..." s.Start() - err = generateGitHubSummaryMarkdown(aggregatedReport, filepath.Join(outputDir, "all-tests")) + err = generateGitHubSummaryMarkdown(aggregatedReport, filepath.Join(outputDir, "all-test")) if err != nil { s.Stop() return fmt.Errorf("error generating GitHub summary markdown: %w", err) @@ -117,16 +117,16 @@ var ReportCmd = &cobra.Command{ // Generate all-tests-summary.json s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Generating all-tests-summary.json..." + s.Suffix = " Generating all-test-summary.json..." 
s.Start() - err = generateAllTestsSummaryJSON(aggregatedReport, filepath.Join(outputDir, "all-tests-summary.json"), reportMaxPassRatio) + err = generateAllTestsSummaryJSON(aggregatedReport, filepath.Join(outputDir, "all-test-summary.json"), reportMaxPassRatio) if err != nil { s.Stop() - return fmt.Errorf("error generating all-tests-summary.json: %w", err) + return fmt.Errorf("error generating all-test-summary.json: %w", err) } s.Stop() - fmt.Println("all-tests-summary.json generated successfully.") + fmt.Println("all-test-summary.json generated successfully.") if generatePRComment { // Retrieve required flags @@ -159,7 +159,7 @@ var ReportCmd = &cobra.Command{ s.Suffix = " Generating PR comment markdown..." s.Start() - err = generatePRCommentMarkdown(aggregatedReport, filepath.Join(outputDir, "all-tests"), baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) + err = generatePRCommentMarkdown(aggregatedReport, filepath.Join(outputDir, "all-test"), baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) if err != nil { s.Stop() return fmt.Errorf("error generating PR comment markdown: %w", err) @@ -179,7 +179,24 @@ var ReportCmd = &cobra.Command{ s.Stop() fmt.Println("Failed tests filtered successfully.") - // For failed tests, include outputs and package outputs + // Create a new report for failed tests + failedReportNoLogs := &reports.TestReport{ + GoProject: aggregatedReport.GoProject, + TestRunCount: aggregatedReport.TestRunCount, + RaceDetection: aggregatedReport.RaceDetection, + ExcludedTests: aggregatedReport.ExcludedTests, + SelectedTests: aggregatedReport.SelectedTests, + Results: failedTests, + } + + // Save the failed tests report with no logs + failedTestsReportNoLogsPath := filepath.Join(outputDir, "failed-test-results.json") + if err := reports.SaveReport(fs, failedTestsReportNoLogsPath, *failedReportNoLogs); err != nil { + return fmt.Errorf("error saving failed tests report: %w", err) + } + fmt.Printf("Failed tests report without logs saved to %s\n", failedTestsReportNoLogsPath) + + // Retrieve outputs and package outputs for failed tests for i := range failedTests { // Retrieve outputs and package outputs from original reports failedTests[i].Outputs = getOriginalOutputs(testReports, failedTests[i].TestName, failedTests[i].TestPackage) @@ -187,7 +204,7 @@ var ReportCmd = &cobra.Command{ } // Create a new report for failed tests - failedReport := &reports.TestReport{ + failedReportWithLogs := &reports.TestReport{ GoProject: aggregatedReport.GoProject, TestRunCount: aggregatedReport.TestRunCount, RaceDetection: aggregatedReport.RaceDetection, @@ -197,11 +214,11 @@ var ReportCmd = &cobra.Command{ } // Save the failed tests report - failedTestsReportPath := filepath.Join(outputDir, "failed-tests-report.json") - if err := reports.SaveReport(fs, failedTestsReportPath, *failedReport); err != nil { + failedTestsReportWithLogsPath := filepath.Join(outputDir, "failed-test-results-with-logs.json") + if err := reports.SaveReport(fs, failedTestsReportWithLogsPath, *failedReportWithLogs); err != nil { return fmt.Errorf("error saving failed tests report: %w", err) } - fmt.Printf("Failed tests report saved to %s\n", failedTestsReportPath) + fmt.Printf("Failed tests report with logs saved to %s\n", failedTestsReportWithLogsPath) fmt.Printf("Reports generated at: %s\n", reportOutputPath) @@ -260,13 +277,13 @@ func generateAllTestsSummaryJSON(report *reports.TestReport, outputPath string, fs := reports.OSFileSystem{} jsonFile, err := fs.Create(outputPath) if err != nil { 
- return fmt.Errorf("error creating all-tests-summary.json file: %w", err) + return fmt.Errorf("error creating file: %w", err) } defer jsonFile.Close() _, err = jsonFile.Write(data) if err != nil { - return fmt.Errorf("error writing summary data to all-tests-summary.json: %w", err) + return fmt.Errorf("error writing data to file: %w", err) } return nil From 805e9f0c083d8e3a4582d60983ede4494384d4be Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 14:39:33 +0100 Subject: [PATCH 10/21] Fix printing results in run cmd --- tools/flakeguard/cmd/run.go | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/tools/flakeguard/cmd/run.go b/tools/flakeguard/cmd/run.go index acfeac47e..c19cc406e 100644 --- a/tools/flakeguard/cmd/run.go +++ b/tools/flakeguard/cmd/run.go @@ -72,37 +72,22 @@ var RunTestsCmd = &cobra.Command{ os.Exit(1) } - // Print all failed tests including flaky tests - if printFailedTests { - fmt.Printf("PassRatio threshold for flaky tests: %.2f\n", maxPassRatio) - // Use RenderResults instead of PrintResults - reports.RenderResults(os.Stdout, testReport.Results, maxPassRatio, false) - } - // Save the test results in JSON format if outputPath != "" && len(testReport.Results) > 0 { jsonData, err := json.MarshalIndent(testReport, "", " ") if err != nil { log.Fatalf("Error marshaling test results to JSON: %v", err) } - if err := os.WriteFile(outputPath, jsonData, 0644); err != nil { + if err := os.WriteFile(outputPath, jsonData, 0600); err != nil { log.Fatalf("Error writing test results to file: %v", err) } fmt.Printf("All test results saved to %s\n", outputPath) } - // Filter flaky tests using FilterTests - flakyTests := reports.FilterTests(testReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < maxPassRatio - }) - - if len(flakyTests) > 0 { - fmt.Printf("Found %d flaky tests below the pass ratio threshold of %.2f:\n", len(flakyTests), maxPassRatio) - reports.RenderResults(os.Stdout, flakyTests, maxPassRatio, false) - // Exit with error code if there are flaky tests - os.Exit(1) - } else if len(testReport.Results) == 0 { - fmt.Printf("No tests were run for the specified packages.\n") + // Print all failed tests including flaky tests + if printFailedTests { + fmt.Printf("\nFlakeguard Summary\n") + reports.RenderResults(os.Stdout, testReport.Results, maxPassRatio, false) } }, } From 694e5998db29594936e62f68d027fc14772903f5 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 14:43:56 +0100 Subject: [PATCH 11/21] fix --- tools/flakeguard/cmd/run.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tools/flakeguard/cmd/run.go b/tools/flakeguard/cmd/run.go index c19cc406e..f7e1609ee 100644 --- a/tools/flakeguard/cmd/run.go +++ b/tools/flakeguard/cmd/run.go @@ -29,7 +29,6 @@ var RunTestsCmd = &cobra.Command{ maxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") skipTests, _ := cmd.Flags().GetStringSlice("skip-tests") selectTests, _ := cmd.Flags().GetStringSlice("select-tests") - printFailedTests, _ := cmd.Flags().GetBool("print-failed-tests") useShuffle, _ := cmd.Flags().GetBool("shuffle") shuffleSeed, _ := cmd.Flags().GetString("shuffle-seed") @@ -84,10 +83,17 @@ var RunTestsCmd = &cobra.Command{ fmt.Printf("All test results saved to %s\n", outputPath) } - // Print all failed tests including flaky tests - if printFailedTests { + // Filter flaky tests using FilterTests 
+ flakyTests := reports.FilterTests(testReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < maxPassRatio + }) + + if len(flakyTests) > 0 { + fmt.Printf("Found %d flaky tests below the pass ratio threshold of %.2f:\n", len(flakyTests), maxPassRatio) fmt.Printf("\nFlakeguard Summary\n") - reports.RenderResults(os.Stdout, testReport.Results, maxPassRatio, false) + reports.RenderResults(os.Stdout, flakyTests, maxPassRatio, false) + // Exit with error code if there are flaky tests + os.Exit(1) } }, } @@ -107,7 +113,6 @@ func init() { RunTestsCmd.Flags().String("output-json", "", "Path to output the test results in JSON format") RunTestsCmd.Flags().StringSlice("skip-tests", nil, "Comma-separated list of test names to skip from running") RunTestsCmd.Flags().StringSlice("select-tests", nil, "Comma-separated list of test names to specifically run") - RunTestsCmd.Flags().Bool("print-failed-tests", true, "Print failed test results to the console") RunTestsCmd.Flags().Float64("max-pass-ratio", 1.0, "The maximum pass ratio threshold for a test to be considered flaky. Any tests below this pass rate will be considered flaky.") } From fe9728048aeac0d095c52fc1a2f4c247fbcfd27e Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 14:47:25 +0100 Subject: [PATCH 12/21] fix --- tools/flakeguard/cmd/run.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/flakeguard/cmd/run.go b/tools/flakeguard/cmd/run.go index f7e1609ee..047e9ff95 100644 --- a/tools/flakeguard/cmd/run.go +++ b/tools/flakeguard/cmd/run.go @@ -94,6 +94,8 @@ var RunTestsCmd = &cobra.Command{ reports.RenderResults(os.Stdout, flakyTests, maxPassRatio, false) // Exit with error code if there are flaky tests os.Exit(1) + } else if len(testReport.Results) == 0 { + fmt.Printf("No tests were run for the specified packages.\n") } }, } From 2185a8f65252e998615d9028aba372fcdf86a090 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 14:47:38 +0100 Subject: [PATCH 13/21] fix run --- tools/flakeguard/cmd/run.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/flakeguard/cmd/run.go b/tools/flakeguard/cmd/run.go index 047e9ff95..9b020abb9 100644 --- a/tools/flakeguard/cmd/run.go +++ b/tools/flakeguard/cmd/run.go @@ -83,6 +83,11 @@ var RunTestsCmd = &cobra.Command{ fmt.Printf("All test results saved to %s\n", outputPath) } + if len(testReport.Results) == 0 { + fmt.Printf("No tests were run for the specified packages.\n") + return + } + // Filter flaky tests using FilterTests flakyTests := reports.FilterTests(testReport.Results, func(tr reports.TestResult) bool { return !tr.Skipped && tr.PassRatio < maxPassRatio @@ -94,8 +99,6 @@ var RunTestsCmd = &cobra.Command{ reports.RenderResults(os.Stdout, flakyTests, maxPassRatio, false) // Exit with error code if there are flaky tests os.Exit(1) - } else if len(testReport.Results) == 0 { - fmt.Printf("No tests were run for the specified packages.\n") } }, } From eb1cdf65f59ee42e45ec752fa62fd402c7c34857 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 16:57:24 +0100 Subject: [PATCH 14/21] Fix pass ratio calculations --- tools/flakeguard/reports/data.go | 66 ++++----- tools/flakeguard/reports/data_test.go | 137 +++++++++++------- tools/flakeguard/reports/presentation.go | 4 +- tools/flakeguard/reports/presentation_test.go | 28 ++-- 4 files changed, 134 insertions(+), 101 
deletions(-) diff --git a/tools/flakeguard/reports/data.go b/tools/flakeguard/reports/data.go index 6174b916d..b58caf9f7 100644 --- a/tools/flakeguard/reports/data.go +++ b/tools/flakeguard/reports/data.go @@ -40,30 +40,34 @@ type TestResult struct { } type SummaryData struct { - TotalTests int `json:"total_tests"` - PanickedTests int `json:"panicked_tests"` - RacedTests int `json:"raced_tests"` - FlakyTests int `json:"flaky_tests"` - FlakyTestRatio string `json:"flaky_test_ratio"` - TotalRuns int `json:"total_runs"` - PassedRuns int `json:"passed_runs"` - FailedRuns int `json:"failed_runs"` - SkippedRuns int `json:"skipped_runs"` - PassRatio string `json:"pass_ratio"` - MaxPassRatio float64 `json:"max_pass_ratio"` - AveragePassRatio float64 `json:"average_pass_ratio"` + TotalTests int `json:"total_tests"` + PanickedTests int `json:"panicked_tests"` + RacedTests int `json:"raced_tests"` + FlakyTests int `json:"flaky_tests"` + FlakyTestRatio string `json:"flaky_test_ratio"` + TotalRuns int `json:"total_runs"` + PassedRuns int `json:"passed_runs"` + FailedRuns int `json:"failed_runs"` + SkippedRuns int `json:"skipped_runs"` + PassRatio string `json:"pass_ratio"` + MaxPassRatio float64 `json:"max_pass_ratio"` } // Data Processing Functions func GenerateSummaryData(tests []TestResult, maxPassRatio float64) SummaryData { - var runs, passes, fails, skips, panickedTests, racedTests, flakyTests int + var runs, passes, fails, skips, panickedTests, racedTests, flakyTests, skippedTests int for _, result := range tests { runs += result.Runs passes += result.Successes fails += result.Failures skips += result.Skips + // Count tests that were entirely skipped + if result.Runs == 0 && result.Skipped { + skippedTests++ + } + if result.Panic { panickedTests++ flakyTests++ @@ -71,38 +75,34 @@ func GenerateSummaryData(tests []TestResult, maxPassRatio float64) SummaryData { racedTests++ flakyTests++ } else if !result.Skipped && result.Runs > 0 && result.PassRatio < maxPassRatio { - // Exclude skipped tests and tests with no runs flakyTests++ } } passPercentage := 100.0 flakePercentage := 0.0 - averagePassRatio := 1.0 if runs > 0 { - passPercentage = math.Round((float64(passes)/float64(runs)*100)*100) / 100 - averagePassRatio = float64(passes) / float64(runs) + passPercentage = math.Floor((float64(passes)/float64(runs)*100)*100) / 100 // Truncate to 2 decimal places } - totalTests := len(tests) // Include skipped tests in total tests - totalExecutedTests := totalTests - skips // Tests that were actually executed - if totalExecutedTests > 0 { - flakePercentage = math.Round((float64(flakyTests)/float64(totalExecutedTests)*100)*100) / 100 + + totalTests := len(tests) + if totalTests > 0 { + flakePercentage = math.Floor((float64(flakyTests)/float64(totalTests)*100)*100) / 100 // Truncate to 2 decimal places } return SummaryData{ - TotalTests: totalTests, - PanickedTests: panickedTests, - RacedTests: racedTests, - FlakyTests: flakyTests, - FlakyTestRatio: fmt.Sprintf("%.2f%%", flakePercentage), - TotalRuns: runs, - PassedRuns: passes, - FailedRuns: fails, - SkippedRuns: skips, - PassRatio: fmt.Sprintf("%.2f%%", passPercentage), - MaxPassRatio: maxPassRatio, - AveragePassRatio: averagePassRatio, + TotalTests: totalTests, + PanickedTests: panickedTests, + RacedTests: racedTests, + FlakyTests: flakyTests, + FlakyTestRatio: fmt.Sprintf("%.2f%%", flakePercentage), + TotalRuns: runs, + PassedRuns: passes, + FailedRuns: fails, + SkippedRuns: skips, + PassRatio: fmt.Sprintf("%.2f%%", passPercentage), + MaxPassRatio: 
maxPassRatio, } } diff --git a/tools/flakeguard/reports/data_test.go b/tools/flakeguard/reports/data_test.go index f556b73c6..5f75ddab3 100644 --- a/tools/flakeguard/reports/data_test.go +++ b/tools/flakeguard/reports/data_test.go @@ -24,18 +24,17 @@ func TestGenerateSummaryData(t *testing.T) { }, maxPassRatio: 1.0, expected: SummaryData{ - TotalTests: 2, - PanickedTests: 0, - RacedTests: 0, - FlakyTests: 0, - FlakyTestRatio: "0.00%", - TotalRuns: 15, - PassedRuns: 15, - FailedRuns: 0, - SkippedRuns: 0, - PassRatio: "100.00%", - MaxPassRatio: 1.0, - AveragePassRatio: 1.0, + TotalTests: 2, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 0, + FlakyTestRatio: "0.00%", + TotalRuns: 15, + PassedRuns: 15, + FailedRuns: 0, + SkippedRuns: 0, + PassRatio: "100.00%", + MaxPassRatio: 1.0, }, }, { @@ -47,18 +46,17 @@ func TestGenerateSummaryData(t *testing.T) { }, maxPassRatio: 0.9, expected: SummaryData{ - TotalTests: 3, - PanickedTests: 0, - RacedTests: 0, - FlakyTests: 2, - FlakyTestRatio: "66.67%", - TotalRuns: 19, - PassedRuns: 15, - FailedRuns: 4, - SkippedRuns: 0, - PassRatio: "78.95%", - MaxPassRatio: 0.9, - AveragePassRatio: 0.7894736842105263, + TotalTests: 3, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 2, + FlakyTestRatio: "66.67%", + TotalRuns: 19, + PassedRuns: 15, + FailedRuns: 4, + SkippedRuns: 0, + PassRatio: "78.95%", + MaxPassRatio: 0.9, }, }, { @@ -70,18 +68,17 @@ func TestGenerateSummaryData(t *testing.T) { }, maxPassRatio: 1.0, expected: SummaryData{ - TotalTests: 3, - PanickedTests: 1, - RacedTests: 1, - FlakyTests: 2, - FlakyTestRatio: "66.67%", - TotalRuns: 18, - PassedRuns: 17, - FailedRuns: 1, - SkippedRuns: 0, - PassRatio: "94.44%", - MaxPassRatio: 1.0, - AveragePassRatio: 0.9444444444444444, + TotalTests: 3, + PanickedTests: 1, + RacedTests: 1, + FlakyTests: 2, + FlakyTestRatio: "66.67%", + TotalRuns: 18, + PassedRuns: 17, + FailedRuns: 1, + SkippedRuns: 0, + PassRatio: "94.44%", + MaxPassRatio: 1.0, }, }, { @@ -89,18 +86,60 @@ func TestGenerateSummaryData(t *testing.T) { testResults: []TestResult{}, maxPassRatio: 1.0, expected: SummaryData{ - TotalTests: 0, - PanickedTests: 0, - RacedTests: 0, - FlakyTests: 0, - FlakyTestRatio: "0.00%", - TotalRuns: 0, - PassedRuns: 0, - FailedRuns: 0, - SkippedRuns: 0, - PassRatio: "100.00%", - MaxPassRatio: 1.0, - AveragePassRatio: 1.0, + TotalTests: 0, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 0, + FlakyTestRatio: "0.00%", + TotalRuns: 0, + PassedRuns: 0, + FailedRuns: 0, + SkippedRuns: 0, + PassRatio: "100.00%", + MaxPassRatio: 1.0, + }, + }, + { + name: "Skipped tests included in total but not executed", + testResults: []TestResult{ + {PassRatio: -1.0, Runs: 0, Successes: 0, Skips: 1, Skipped: true}, + {PassRatio: 0.7, Runs: 10, Successes: 7, Failures: 3}, + }, + maxPassRatio: 0.8, + expected: SummaryData{ + TotalTests: 2, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 1, + FlakyTestRatio: "50.00%", + TotalRuns: 10, + PassedRuns: 7, + FailedRuns: 3, + SkippedRuns: 1, + PassRatio: "70.00%", + MaxPassRatio: 0.8, + }, + }, + { + name: "Mixed skipped and executed tests", + testResults: []TestResult{ + {PassRatio: -1.0, Runs: 0, Successes: 0, Skips: 1, Skipped: true}, + {PassRatio: 0.9, Runs: 10, Successes: 9, Failures: 1}, + {PassRatio: 0.5, Runs: 4, Successes: 2, Failures: 2}, + }, + maxPassRatio: 0.85, + expected: SummaryData{ + TotalTests: 3, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 1, + FlakyTestRatio: "33.33%", + TotalRuns: 14, + PassedRuns: 11, + FailedRuns: 3, + SkippedRuns: 1, + PassRatio: 
"78.57%", + MaxPassRatio: 0.85, }, }, } @@ -109,7 +148,7 @@ func TestGenerateSummaryData(t *testing.T) { t.Run(tc.name, func(t *testing.T) { summary := GenerateSummaryData(tc.testResults, tc.maxPassRatio) if !reflect.DeepEqual(summary, tc.expected) { - t.Errorf("Expected %+v, got %+v", tc.expected, summary) + t.Errorf("Test %s failed. Expected %+v, got %+v", tc.name, tc.expected, summary) } }) } diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go index 8376de614..a10245cd9 100644 --- a/tools/flakeguard/reports/presentation.go +++ b/tools/flakeguard/reports/presentation.go @@ -95,7 +95,7 @@ func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassR } summary := GenerateSummaryData(testReport.Results, maxPassRatio) - if summary.AveragePassRatio < maxPassRatio { + if summary.FlakyTests > 0 { fmt.Fprintln(w, "## Found Flaky Tests :x:") } else { fmt.Fprintln(w, "## No Flakes Found :white_check_mark:") @@ -134,7 +134,7 @@ func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio } // Add the flaky tests section - if GenerateSummaryData(testReport.Results, maxPassRatio).AveragePassRatio < maxPassRatio { + if GenerateSummaryData(testReport.Results, maxPassRatio).FlakyTests > 0 { fmt.Fprintln(w, "## Found Flaky Tests :x:") } else { fmt.Fprintln(w, "## No Flakes Found :white_check_mark:") diff --git a/tools/flakeguard/reports/presentation_test.go b/tools/flakeguard/reports/presentation_test.go index 7dfdace2e..20bb967bd 100644 --- a/tools/flakeguard/reports/presentation_test.go +++ b/tools/flakeguard/reports/presentation_test.go @@ -169,8 +169,6 @@ func TestGeneratePRCommentMarkdown(t *testing.T) { output := buffer.String() - fmt.Println(output) - // Check that the output includes the expected headings and links if !strings.Contains(output, "# Flakeguard Summary") { t.Error("Expected markdown summary to contain '# Flakeguard Summary'") @@ -251,18 +249,17 @@ func TestRenderResults(t *testing.T) { }, maxPassRatio: 0.9, expectedSummary: SummaryData{ - TotalTests: 1, - PanickedTests: 0, - RacedTests: 0, - FlakyTests: 1, - FlakyTestRatio: "100.00%", - TotalRuns: 4, - PassedRuns: 3, - FailedRuns: 1, - SkippedRuns: 0, - PassRatio: "75.00%", - MaxPassRatio: 0.9, - AveragePassRatio: 0.75, + TotalTests: 1, + PanickedTests: 0, + RacedTests: 0, + FlakyTests: 1, + FlakyTestRatio: "100.00%", + TotalRuns: 4, + PassedRuns: 3, + FailedRuns: 1, + SkippedRuns: 0, + PassRatio: "75.00%", + MaxPassRatio: 0.9, }, expectedStringsContain: []string{"Test1", "package1", "75.00%", "false", "1.05s", "4", "0"}, }, @@ -299,9 +296,6 @@ func TestRenderResults(t *testing.T) { if summary.PassRatio != tc.expectedSummary.PassRatio { t.Errorf("Expected PassRatio %v, got %v", tc.expectedSummary.PassRatio, summary.PassRatio) } - if summary.AveragePassRatio != tc.expectedSummary.AveragePassRatio { - t.Errorf("Expected AveragePassRatio %v, got %v", tc.expectedSummary.AveragePassRatio, summary.AveragePassRatio) - } // Verify output content for _, expected := range tc.expectedStringsContain { From 04f007a3d78c45f46674dc01bee83f655c957013 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:28:06 +0100 Subject: [PATCH 15/21] Fix panic in flakeguard runner https://github.com/smartcontractkit/chainlink/actions/runs/12200639955/job/34037331531 --- tools/flakeguard/runner/runner.go | 118 +++++++++++++++++------------- 1 file changed, 69 insertions(+), 49 deletions(-) diff --git 
a/tools/flakeguard/runner/runner.go b/tools/flakeguard/runner/runner.go index 58f5f0431..16a3254d4 100644 --- a/tools/flakeguard/runner/runner.go +++ b/tools/flakeguard/runner/runner.go @@ -56,7 +56,7 @@ func (r *Runner) RunTests() (*reports.TestReport, error) { r.rawOutputs[p] = &bytes.Buffer{} } separator := strings.Repeat("-", 80) - r.rawOutputs[p].WriteString(fmt.Sprintf("Run %d%s\n", i+1, separator)) + r.rawOutputs[p].WriteString(fmt.Sprintf("Run %d\n%s\n", i+1, separator)) } jsonFilePath, passed, err := r.runTests(p) if err != nil { @@ -83,7 +83,7 @@ func (r *Runner) RunTests() (*reports.TestReport, error) { }, nil } -// RawOutput retrieves the raw output from the test runs, if CollectRawOutput enabled. +// RawOutputs retrieves the raw output from the test runs, if CollectRawOutput enabled. // packageName : raw output func (r *Runner) RawOutputs() map[string]*bytes.Buffer { return r.rawOutputs @@ -120,7 +120,6 @@ func (r *Runner) runTests(packageName string) (string, bool, error) { selectPattern := strings.Join(r.SelectTests, "$|^") args = append(args, fmt.Sprintf("-run=^%s$", selectPattern)) } - args = append(args, "2>/dev/null") if r.Verbose { log.Printf("Running command: go %s\n", strings.Join(args, " ")) @@ -249,7 +248,6 @@ func parseTestResults(expectedRuns int, filePaths []string) ([]reports.TestResul result = testDetails[key] } - // TODO: This is a bit of a logical mess, probably worth a refactor if entryLine.Output != "" { if panicDetectionMode || raceDetectionMode { // currently collecting panic or race output detectedEntries = append(detectedEntries, entryLine) @@ -281,22 +279,32 @@ func parseTestResults(expectedRuns int, filePaths []string) ([]reports.TestResul return nil, err } panicTestKey := fmt.Sprintf("%s/%s", entryLine.Package, panicTest) - testDetails[panicTestKey].Panic = true - testDetails[panicTestKey].Timeout = timeout - testDetails[panicTestKey].Failures++ - testDetails[panicTestKey].Runs++ - // TODO: durations and panics are weird in the same way as Runs: lots of double-counting - // duration, err := time.ParseDuration(strconv.FormatFloat(entryLine.Elapsed, 'f', -1, 64) + "s") - // if err != nil { - // return nil, fmt.Errorf("failed to parse duration: %w", err) - // } - // testDetails[panicTestKey].Durations = append(testDetails[panicTestKey].Durations, duration) - testDetails[panicTestKey].Outputs = append(testDetails[panicTestKey].Outputs, entryLine.Output) + + // Ensure the test exists in testDetails + result, exists := testDetails[panicTestKey] + if !exists { + // Create a new TestResult if it doesn't exist + result = &reports.TestResult{ + TestName: panicTest, + TestPackage: entryLine.Package, + PassRatio: 0, + Outputs: []string{}, + PackageOutputs: []string{}, + } + testDetails[panicTestKey] = result + } + + result.Panic = true + result.Timeout = timeout + result.Failures++ + result.Runs++ + + // Handle outputs for _, entry := range detectedEntries { if entry.Test == "" { - testDetails[panicTestKey].PackageOutputs = append(testDetails[panicTestKey].PackageOutputs, entry.Output) + result.PackageOutputs = append(result.PackageOutputs, entry.Output) } else { - testDetails[panicTestKey].Outputs = append(testDetails[panicTestKey].Outputs, entry.Output) + result.Outputs = append(result.Outputs, entry.Output) } } } else if raceDetectionMode { @@ -305,21 +313,31 @@ func parseTestResults(expectedRuns int, filePaths []string) ([]reports.TestResul return nil, err } raceTestKey := fmt.Sprintf("%s/%s", entryLine.Package, raceTest) - testDetails[raceTestKey].Race = 
true - testDetails[raceTestKey].Failures++ - testDetails[raceTestKey].Runs++ - // TODO: durations and races are weird in the same way as Runs: lots of double-counting - // duration, err := time.ParseDuration(strconv.FormatFloat(entryLine.Elapsed, 'f', -1, 64) + "s") - // if err != nil { - // return nil, fmt.Errorf("failed to parse duration: %w", err) - // } - // testDetails[raceTestKey].Durations = append(testDetails[raceTestKey].Durations, duration) - testDetails[raceTestKey].Outputs = append(testDetails[raceTestKey].Outputs, entryLine.Output) + + // Ensure the test exists in testDetails + result, exists := testDetails[raceTestKey] + if !exists { + // Create a new TestResult if it doesn't exist + result = &reports.TestResult{ + TestName: raceTest, + TestPackage: entryLine.Package, + PassRatio: 0, + Outputs: []string{}, + PackageOutputs: []string{}, + } + testDetails[raceTestKey] = result + } + + result.Race = true + result.Failures++ + result.Runs++ + + // Handle outputs for _, entry := range detectedEntries { if entry.Test == "" { - testDetails[raceTestKey].PackageOutputs = append(testDetails[raceTestKey].PackageOutputs, entry.Output) + result.PackageOutputs = append(result.PackageOutputs, entry.Output) } else { - testDetails[raceTestKey].Outputs = append(testDetails[raceTestKey].Outputs, entry.Output) + result.Outputs = append(result.Outputs, entry.Output) } } } @@ -383,28 +401,28 @@ func parseTestResults(expectedRuns int, filePaths []string) ([]reports.TestResul if parentTestResult, exists := testDetails[parentTestKey]; exists { if parentTestResult.Panic { for _, subTest := range subTests { - subTestKey := fmt.Sprintf("%s/%s", parentTestKey, subTest) - if _, exists := testDetails[subTestKey]; exists { - if testDetails[subTestKey].Failures > 0 { // If the parent test panicked, any of its subtests that failed could be the culprit - testDetails[subTestKey].Panic = true - testDetails[subTestKey].Outputs = append(testDetails[subTestKey].Outputs, "Panic in parent test") + subTestKey := fmt.Sprintf("%s/%s", parentTestResult.TestPackage, subTest) + if subTestResult, exists := testDetails[subTestKey]; exists { + if subTestResult.Failures > 0 { // If the parent test panicked, any of its subtests that failed could be the culprit + subTestResult.Panic = true + subTestResult.Outputs = append(subTestResult.Outputs, "Panic in parent test") } } else { - fmt.Printf("WARN: expected to fine subtest '%s' inside parent test '%s', but not found\n", parentTestKey, subTestKey) + log.Printf("WARN: expected to find subtest '%s' inside parent test '%s', but not found\n", subTestKey, parentTestKey) } } } } else { - fmt.Printf("WARN: expected to find parent test '%s' for sub tests, but not found\n", parentTestKey) + log.Printf("WARN: expected to find parent test '%s' for subtests, but not found\n", parentTestKey) } } for _, result := range testDetails { - if result.Runs > expectedRuns { // Panics can introduce double-counting test failures, this is a hacky correction for it + if result.Runs > expectedRuns { // Panics can introduce double-counting test failures, this is a correction for it if result.Panic { result.Failures = expectedRuns result.Runs = expectedRuns } else { - fmt.Printf("WARN: '%s' has %d test runs, exceeding expected amount of %d in an unexpected way, this may be due to oddly presented panics\n", result.TestName, result.Runs, expectedRuns) + log.Printf("WARN: '%s' has %d test runs, exceeding expected amount of %d; this may be due to unexpected panics\n", result.TestName, result.Runs, expectedRuns) 
} } // If a package panicked, all tests in that package will be marked as panicking @@ -420,8 +438,7 @@ func parseTestResults(expectedRuns int, filePaths []string) ([]reports.TestResul return results, nil } -// properly attributes panics to the test that caused them -// Go JSON output gets confused, especially when tests are run in parallel +// attributePanicToTest properly attributes panics to the test that caused them. func attributePanicToTest(panicPackage string, panicEntries []entry) (test string, timeout bool, err error) { regexSanitizePanicPackage := filepath.Base(panicPackage) panicAttributionRe := regexp.MustCompile(fmt.Sprintf(`%s\.(Test[^\.\(]+)`, regexSanitizePanicPackage)) @@ -430,17 +447,18 @@ func attributePanicToTest(panicPackage string, panicEntries []entry) (test strin for _, entry := range panicEntries { entriesOutputs = append(entriesOutputs, entry.Output) if matches := panicAttributionRe.FindStringSubmatch(entry.Output); len(matches) > 1 { - return matches[1], false, nil + testName := strings.TrimSpace(matches[1]) + return testName, false, nil } if matches := timeoutAttributionRe.FindStringSubmatch(entry.Output); len(matches) > 1 { - return matches[1], true, nil + testName := strings.TrimSpace(matches[1]) + return testName, true, nil } } return "", false, fmt.Errorf("failed to attribute panic to test, using regex %s on these strings:\n%s", panicAttributionRe.String(), strings.Join(entriesOutputs, "")) } -// properly attributes races to the test that caused them -// Go JSON output gets confused, especially when tests are run in parallel +// attributeRaceToTest properly attributes races to the test that caused them. func attributeRaceToTest(racePackage string, raceEntries []entry) (string, error) { regexSanitizeRacePackage := filepath.Base(racePackage) raceAttributionRe := regexp.MustCompile(fmt.Sprintf(`%s\.(Test[^\.\(]+)`, regexSanitizeRacePackage)) @@ -448,13 +466,14 @@ func attributeRaceToTest(racePackage string, raceEntries []entry) (string, error for _, entry := range raceEntries { entriesOutputs = append(entriesOutputs, entry.Output) if matches := raceAttributionRe.FindStringSubmatch(entry.Output); len(matches) > 1 { - return matches[1], nil + testName := strings.TrimSpace(matches[1]) + return testName, nil } } return "", fmt.Errorf("failed to attribute race to test, using regex: %s on these strings:\n%s", raceAttributionRe.String(), strings.Join(entriesOutputs, "")) } -// parseSubTest checks if a test name is a subtest and returns the parent and sub names +// parseSubTest checks if a test name is a subtest and returns the parent and sub names. func parseSubTest(testName string) (parentTestName, subTestName string) { parts := strings.SplitN(testName, "/", 2) if len(parts) == 1 { @@ -463,7 +482,7 @@ func parseSubTest(testName string) (parentTestName, subTestName string) { return parts[0], parts[1] } -// prettyProjectPath returns the project path formatted for pretty printing in results +// prettyProjectPath returns the project path formatted for pretty printing in results. 
func prettyProjectPath(projectPath string) (string, error) { // Walk up the directory structure to find go.mod absPath, err := filepath.Abs(projectPath) @@ -493,8 +512,9 @@ func prettyProjectPath(projectPath string) (string, error) { for _, line := range strings.Split(string(goModData), "\n") { if strings.HasPrefix(line, "module ") { goProject := strings.TrimSpace(strings.TrimPrefix(line, "module ")) - projectPath = strings.TrimPrefix(projectPath, goProject) - return filepath.Join(goProject, projectPath), nil + relativePath := strings.TrimPrefix(projectPath, dir) + relativePath = strings.TrimLeft(relativePath, string(os.PathSeparator)) + return filepath.Join(goProject, relativePath), nil } } From 50b12abdfc8c931381ce443826131f2ecf80a3ca Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:45:51 +0100 Subject: [PATCH 16/21] fix tests --- tools/flakeguard/reports/data_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/flakeguard/reports/data_test.go b/tools/flakeguard/reports/data_test.go index 5f75ddab3..c9960b8de 100644 --- a/tools/flakeguard/reports/data_test.go +++ b/tools/flakeguard/reports/data_test.go @@ -50,12 +50,12 @@ func TestGenerateSummaryData(t *testing.T) { PanickedTests: 0, RacedTests: 0, FlakyTests: 2, - FlakyTestRatio: "66.67%", + FlakyTestRatio: "66.66%", TotalRuns: 19, PassedRuns: 15, FailedRuns: 4, SkippedRuns: 0, - PassRatio: "78.95%", + PassRatio: "78.94%", MaxPassRatio: 0.9, }, }, @@ -72,7 +72,7 @@ func TestGenerateSummaryData(t *testing.T) { PanickedTests: 1, RacedTests: 1, FlakyTests: 2, - FlakyTestRatio: "66.67%", + FlakyTestRatio: "66.66%", TotalRuns: 18, PassedRuns: 17, FailedRuns: 1, From 084ceff0f5d939438f850c5f9bbb2d6d37400f1c Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 17:52:13 +0100 Subject: [PATCH 17/21] Fix parse --- tools/flakeguard/runner/runner.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/flakeguard/runner/runner.go b/tools/flakeguard/runner/runner.go index 16a3254d4..c8369e9e8 100644 --- a/tools/flakeguard/runner/runner.go +++ b/tools/flakeguard/runner/runner.go @@ -401,9 +401,10 @@ func parseTestResults(expectedRuns int, filePaths []string) ([]reports.TestResul if parentTestResult, exists := testDetails[parentTestKey]; exists { if parentTestResult.Panic { for _, subTest := range subTests { - subTestKey := fmt.Sprintf("%s/%s", parentTestResult.TestPackage, subTest) + // Include parent test name in subTestKey + subTestKey := fmt.Sprintf("%s/%s/%s", parentTestResult.TestPackage, parentTestResult.TestName, subTest) if subTestResult, exists := testDetails[subTestKey]; exists { - if subTestResult.Failures > 0 { // If the parent test panicked, any of its subtests that failed could be the culprit + if subTestResult.Failures > 0 { subTestResult.Panic = true subTestResult.Outputs = append(subTestResult.Outputs, "Panic in parent test") } From 1540a1b52ecbac49717a8d948863f9f9327d70c4 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Fri, 6 Dec 2024 19:53:22 +0100 Subject: [PATCH 18/21] Fix missing outputs --- tools/flakeguard/cmd/report.go | 110 +++++++++-------------- tools/flakeguard/reports/data_test.go | 122 ++++++++++++++++++++++++++ 2 files changed, 164 insertions(+), 68 deletions(-) diff --git a/tools/flakeguard/cmd/report.go b/tools/flakeguard/cmd/report.go index 1273d9566..aea04af62 100644 --- 
a/tools/flakeguard/cmd/report.go +++ b/tools/flakeguard/cmd/report.go @@ -83,19 +83,51 @@ var ReportCmd = &cobra.Command{ fmt.Println("Test results mapped to code owners successfully.") } - // Exclude outputs and package outputs from the aggregated report of all tests - for i := range aggregatedReport.Results { - aggregatedReport.Results[i].Outputs = nil - aggregatedReport.Results[i].PackageOutputs = nil - } - // Create output directory if it doesn't exist outputDir := reportOutputPath if err := fs.MkdirAll(outputDir, 0755); err != nil { return fmt.Errorf("error creating output directory: %w", err) } - // Save the aggregated report (all tests) + // Filter failed tests (PassRatio < maxPassRatio and not skipped) + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Filtering failed tests..." + s.Start() + + failedTests := reports.FilterTests(aggregatedReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < reportMaxPassRatio + }) + s.Stop() + fmt.Println("Failed tests filtered successfully.") + + // Create a new report for failed tests with logs + failedReportWithLogs := &reports.TestReport{ + GoProject: aggregatedReport.GoProject, + TestRunCount: aggregatedReport.TestRunCount, + RaceDetection: aggregatedReport.RaceDetection, + ExcludedTests: aggregatedReport.ExcludedTests, + SelectedTests: aggregatedReport.SelectedTests, + Results: failedTests, + } + + // Save the failed tests report with logs + failedTestsReportWithLogsPath := filepath.Join(outputDir, "failed-test-results-with-logs.json") + if err := reports.SaveReport(fs, failedTestsReportWithLogsPath, *failedReportWithLogs); err != nil { + return fmt.Errorf("error saving failed tests report with logs: %w", err) + } + fmt.Printf("Failed tests report with logs saved to %s\n", failedTestsReportWithLogsPath) + + // Set Outputs and PackageOutputs to nil for reports without logs + for i := range aggregatedReport.Results { + aggregatedReport.Results[i].Outputs = nil + aggregatedReport.Results[i].PackageOutputs = nil + } + for i := range failedTests { + failedTests[i].Outputs = nil + failedTests[i].PackageOutputs = nil + } + + // Save the aggregated report (all tests) without logs allTestsReportPath := filepath.Join(outputDir, "all-test-results.json") if err := reports.SaveReport(fs, allTestsReportPath, *aggregatedReport); err != nil { return fmt.Errorf("error saving all tests report: %w", err) @@ -168,18 +200,7 @@ var ReportCmd = &cobra.Command{ fmt.Println("PR comment markdown generated successfully.") } - // Filter failed tests (PassRatio < maxPassRatio and not skipped) - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Filtering failed tests..." 
- s.Start() - - failedTests := reports.FilterTests(aggregatedReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < reportMaxPassRatio - }) - s.Stop() - fmt.Println("Failed tests filtered successfully.") - - // Create a new report for failed tests + // Create a new report for failed tests without logs failedReportNoLogs := &reports.TestReport{ GoProject: aggregatedReport.GoProject, TestRunCount: aggregatedReport.TestRunCount, @@ -189,37 +210,13 @@ var ReportCmd = &cobra.Command{ Results: failedTests, } - // Save the failed tests report with no logs + // Save the failed tests report without logs failedTestsReportNoLogsPath := filepath.Join(outputDir, "failed-test-results.json") if err := reports.SaveReport(fs, failedTestsReportNoLogsPath, *failedReportNoLogs); err != nil { - return fmt.Errorf("error saving failed tests report: %w", err) + return fmt.Errorf("error saving failed tests report without logs: %w", err) } fmt.Printf("Failed tests report without logs saved to %s\n", failedTestsReportNoLogsPath) - // Retrieve outputs and package outputs for failed tests - for i := range failedTests { - // Retrieve outputs and package outputs from original reports - failedTests[i].Outputs = getOriginalOutputs(testReports, failedTests[i].TestName, failedTests[i].TestPackage) - failedTests[i].PackageOutputs = getOriginalPackageOutputs(testReports, failedTests[i].TestName, failedTests[i].TestPackage) - } - - // Create a new report for failed tests - failedReportWithLogs := &reports.TestReport{ - GoProject: aggregatedReport.GoProject, - TestRunCount: aggregatedReport.TestRunCount, - RaceDetection: aggregatedReport.RaceDetection, - ExcludedTests: aggregatedReport.ExcludedTests, - SelectedTests: aggregatedReport.SelectedTests, - Results: failedTests, - } - - // Save the failed tests report - failedTestsReportWithLogsPath := filepath.Join(outputDir, "failed-test-results-with-logs.json") - if err := reports.SaveReport(fs, failedTestsReportWithLogsPath, *failedReportWithLogs); err != nil { - return fmt.Errorf("error saving failed tests report: %w", err) - } - fmt.Printf("Failed tests report with logs saved to %s\n", failedTestsReportWithLogsPath) - fmt.Printf("Reports generated at: %s\n", reportOutputPath) return nil @@ -288,26 +285,3 @@ func generateAllTestsSummaryJSON(report *reports.TestReport, outputPath string, return nil } - -// Helper functions to retrieve original outputs and package outputs -func getOriginalOutputs(reports []*reports.TestReport, testName, testPackage string) []string { - for _, report := range reports { - for _, result := range report.Results { - if result.TestName == testName && result.TestPackage == testPackage { - return result.Outputs - } - } - } - return nil -} - -func getOriginalPackageOutputs(reports []*reports.TestReport, testName, testPackage string) []string { - for _, report := range reports { - for _, result := range report.Results { - if result.TestName == testName && result.TestPackage == testPackage { - return result.PackageOutputs - } - } - } - return nil -} diff --git a/tools/flakeguard/reports/data_test.go b/tools/flakeguard/reports/data_test.go index c9960b8de..8ae69621e 100644 --- a/tools/flakeguard/reports/data_test.go +++ b/tools/flakeguard/reports/data_test.go @@ -340,6 +340,128 @@ func TestAggregate(t *testing.T) { } } +func TestAggregateOutputs(t *testing.T) { + report1 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 1, + Results: []TestResult{ + { + TestName: "TestOutput", + TestPackage: "pkg1", + Runs: 1, + 
Successes: 1, + Outputs: []string{"Output from report1 test run"}, + PackageOutputs: []string{"Package output from report1"}, + }, + }, + } + + report2 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 1, + Results: []TestResult{ + { + TestName: "TestOutput", + TestPackage: "pkg1", + Runs: 1, + Successes: 1, + Outputs: []string{"Output from report2 test run"}, + PackageOutputs: []string{"Package output from report2"}, + }, + }, + } + + aggregatedReport, err := Aggregate(report1, report2) + if err != nil { + t.Fatalf("Error aggregating reports: %v", err) + } + + if len(aggregatedReport.Results) != 1 { + t.Fatalf("Expected 1 result, got %d", len(aggregatedReport.Results)) + } + + result := aggregatedReport.Results[0] + + // Expected outputs + expectedOutputs := []string{ + "Output from report1 test run", + "Output from report2 test run", + } + expectedPackageOutputs := []string{ + "Package output from report1", + "Package output from report2", + } + + if !reflect.DeepEqual(result.Outputs, expectedOutputs) { + t.Errorf("Expected Outputs %v, got %v", expectedOutputs, result.Outputs) + } + + if !reflect.DeepEqual(result.PackageOutputs, expectedPackageOutputs) { + t.Errorf("Expected PackageOutputs %v, got %v", expectedPackageOutputs, result.PackageOutputs) + } +} + +func TestAggregateIdenticalOutputs(t *testing.T) { + report1 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 1, + Results: []TestResult{ + { + TestName: "TestIdenticalOutput", + TestPackage: "pkg1", + Runs: 1, + Successes: 1, + Outputs: []string{"Identical output"}, + PackageOutputs: []string{"Identical package output"}, + }, + }, + } + + report2 := &TestReport{ + GoProject: "ProjectX", + TestRunCount: 1, + Results: []TestResult{ + { + TestName: "TestIdenticalOutput", + TestPackage: "pkg1", + Runs: 1, + Successes: 1, + Outputs: []string{"Identical output"}, + PackageOutputs: []string{"Identical package output"}, + }, + }, + } + + aggregatedReport, err := Aggregate(report1, report2) + if err != nil { + t.Fatalf("Error aggregating reports: %v", err) + } + + if len(aggregatedReport.Results) != 1 { + t.Fatalf("Expected 1 result, got %d", len(aggregatedReport.Results)) + } + + result := aggregatedReport.Results[0] + + // Expected outputs + expectedOutputs := []string{ + "Identical output", + "Identical output", + } + expectedPackageOutputs := []string{ + "Identical package output", + "Identical package output", + } + + if !reflect.DeepEqual(result.Outputs, expectedOutputs) { + t.Errorf("Expected Outputs %v, got %v", expectedOutputs, result.Outputs) + } + + if !reflect.DeepEqual(result.PackageOutputs, expectedPackageOutputs) { + t.Errorf("Expected PackageOutputs %v, got %v", expectedPackageOutputs, result.PackageOutputs) + } +} + // TestMergeTestResults tests the mergeTestResults function. 
func TestMergeTestResults(t *testing.T) { a := TestResult{ From 66533339e44b0fe816a880d97e259682819d4668 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:41:52 +0100 Subject: [PATCH 19/21] Split into 2 commands to get links to artifacts in markdown --- tools/flakeguard/cmd/aggregate_results.go | 198 ++++++++++++ tools/flakeguard/cmd/generate_report.go | 288 ++++++++++++++++++ tools/flakeguard/cmd/report.go | 287 ----------------- tools/flakeguard/go.mod | 5 +- tools/flakeguard/go.sum | 10 + tools/flakeguard/main.go | 3 +- tools/flakeguard/reports/presentation.go | 22 +- tools/flakeguard/reports/presentation_test.go | 4 +- 8 files changed, 524 insertions(+), 293 deletions(-) create mode 100644 tools/flakeguard/cmd/aggregate_results.go create mode 100644 tools/flakeguard/cmd/generate_report.go delete mode 100644 tools/flakeguard/cmd/report.go diff --git a/tools/flakeguard/cmd/aggregate_results.go b/tools/flakeguard/cmd/aggregate_results.go new file mode 100644 index 000000000..cdf3d695f --- /dev/null +++ b/tools/flakeguard/cmd/aggregate_results.go @@ -0,0 +1,198 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "path/filepath" + "time" + + "github.com/briandowns/spinner" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" + "github.com/spf13/cobra" +) + +var AggregateResultsCmd = &cobra.Command{ + Use: "aggregate-results", + Short: "Aggregate test results into a single JSON report", + RunE: func(cmd *cobra.Command, args []string) error { + fs := reports.OSFileSystem{} + + // Get flag values + resultsPath, _ := cmd.Flags().GetString("results-path") + outputDir, _ := cmd.Flags().GetString("output-path") + summaryFileName, _ := cmd.Flags().GetString("summary-file-name") + maxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") + codeOwnersPath, _ := cmd.Flags().GetString("codeowners-path") + repoPath, _ := cmd.Flags().GetString("repo-path") + + // Ensure the output directory exists + if err := fs.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("error creating output directory: %w", err) + } + + // Start spinner for loading test reports + s := spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Loading test reports..." + s.Start() + + // Load test reports from JSON files + testReports, err := reports.LoadReports(resultsPath) + if err != nil { + s.Stop() + return fmt.Errorf("error loading test reports: %w", err) + } + s.Stop() + fmt.Println("Test reports loaded successfully.") + + // Start spinner for aggregating reports + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Aggregating test reports..." + s.Start() + + // Aggregate the reports + aggregatedReport, err := reports.Aggregate(testReports...) + if err != nil { + s.Stop() + return fmt.Errorf("error aggregating test reports: %w", err) + } + s.Stop() + fmt.Println("Test reports aggregated successfully.") + + // Start spinner for mapping test results to paths + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Mapping test results to paths..." 
+ s.Start() + + // Map test results to test paths + err = reports.MapTestResultsToPaths(aggregatedReport, repoPath) + if err != nil { + s.Stop() + return fmt.Errorf("error mapping test results to paths: %w", err) + } + s.Stop() + fmt.Println("Test results mapped to paths successfully.") + + // Map test results to code owners if codeOwnersPath is provided + if codeOwnersPath != "" { + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Mapping test results to code owners..." + s.Start() + + err = reports.MapTestResultsToOwners(aggregatedReport, codeOwnersPath) + if err != nil { + s.Stop() + return fmt.Errorf("error mapping test results to code owners: %w", err) + } + s.Stop() + fmt.Println("Test results mapped to code owners successfully.") + } + + // Save the aggregated report to the output directory + aggregatedReportPath := filepath.Join(outputDir, "all-test-results.json") + if err := reports.SaveReport(fs, aggregatedReportPath, *aggregatedReport); err != nil { + return fmt.Errorf("error saving aggregated test report: %w", err) + } + fmt.Printf("Aggregated test report saved to %s\n", aggregatedReportPath) + + // Filter failed tests (PassRatio < maxPassRatio and not skipped) + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Filtering failed tests..." + s.Start() + + failedTests := reports.FilterTests(aggregatedReport.Results, func(tr reports.TestResult) bool { + return !tr.Skipped && tr.PassRatio < maxPassRatio + }) + s.Stop() + + // Check if there are any failed tests + if len(failedTests) > 0 { + fmt.Printf("Found %d failed test(s).\n", len(failedTests)) + + // Create a new report for failed tests with logs + failedReportWithLogs := &reports.TestReport{ + GoProject: aggregatedReport.GoProject, + TestRunCount: aggregatedReport.TestRunCount, + RaceDetection: aggregatedReport.RaceDetection, + ExcludedTests: aggregatedReport.ExcludedTests, + SelectedTests: aggregatedReport.SelectedTests, + Results: failedTests, + } + + // Save the failed tests report with logs + failedTestsReportWithLogsPath := filepath.Join(outputDir, "failed-test-results-with-logs.json") + if err := reports.SaveReport(fs, failedTestsReportWithLogsPath, *failedReportWithLogs); err != nil { + return fmt.Errorf("error saving failed tests report with logs: %w", err) + } + fmt.Printf("Failed tests report with logs saved to %s\n", failedTestsReportWithLogsPath) + + // Remove logs from test results for the report without logs + for i := range failedReportWithLogs.Results { + failedReportWithLogs.Results[i].Outputs = nil + failedReportWithLogs.Results[i].PackageOutputs = nil + } + + // Save the failed tests report without logs + failedTestsReportNoLogsPath := filepath.Join(outputDir, "failed-test-results.json") + if err := reports.SaveReport(fs, failedTestsReportNoLogsPath, *failedReportWithLogs); err != nil { + return fmt.Errorf("error saving failed tests report without logs: %w", err) + } + fmt.Printf("Failed tests report without logs saved to %s\n", failedTestsReportNoLogsPath) + } else { + fmt.Println("No failed tests found. Skipping generation of failed tests reports.") + } + + // Generate all-tests-summary.json + if summaryFileName != "" { + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Generating summary json..." 
+ s.Start() + + summaryFilePath := filepath.Join(outputDir, summaryFileName) + err = generateAllTestsSummaryJSON(aggregatedReport, summaryFilePath, maxPassRatio) + if err != nil { + s.Stop() + return fmt.Errorf("error generating summary json: %w", err) + } + s.Stop() + fmt.Printf("Summary generated at %s\n", summaryFilePath) + } + + fmt.Println("Aggregation complete.") + + return nil + }, +} + +func init() { + AggregateResultsCmd.Flags().StringP("results-path", "p", "", "Path to the folder containing JSON test result files (required)") + AggregateResultsCmd.Flags().StringP("output-path", "o", "./report", "Path to output the aggregated results (directory)") + AggregateResultsCmd.Flags().StringP("summary-file-name", "s", "all-test-summary.json", "Name of the summary JSON file") + AggregateResultsCmd.Flags().Float64P("max-pass-ratio", "", 1.0, "The maximum pass ratio threshold for a test to be considered flaky") + AggregateResultsCmd.Flags().StringP("codeowners-path", "", "", "Path to the CODEOWNERS file") + AggregateResultsCmd.Flags().StringP("repo-path", "", ".", "The path to the root of the repository/project") + + AggregateResultsCmd.MarkFlagRequired("results-path") +} + +// New function to generate all-tests-summary.json +func generateAllTestsSummaryJSON(report *reports.TestReport, outputPath string, maxPassRatio float64) error { + summary := reports.GenerateSummaryData(report.Results, maxPassRatio) + data, err := json.Marshal(summary) + if err != nil { + return fmt.Errorf("error marshaling summary data to JSON: %w", err) + } + + fs := reports.OSFileSystem{} + jsonFile, err := fs.Create(outputPath) + if err != nil { + return fmt.Errorf("error creating file: %w", err) + } + defer jsonFile.Close() + + _, err = jsonFile.Write(data) + if err != nil { + return fmt.Errorf("error writing data to file: %w", err) + } + + return nil +} diff --git a/tools/flakeguard/cmd/generate_report.go b/tools/flakeguard/cmd/generate_report.go new file mode 100644 index 000000000..cf94458e2 --- /dev/null +++ b/tools/flakeguard/cmd/generate_report.go @@ -0,0 +1,288 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/briandowns/spinner" + "github.com/google/go-github/v67/github" + "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" + "github.com/spf13/cobra" + "golang.org/x/oauth2" +) + +type SummaryData struct { + TotalTests int `json:"total_tests"` + PanickedTests int `json:"panicked_tests"` + RacedTests int `json:"raced_tests"` + FlakyTests int `json:"flaky_tests"` + FlakyTestRatio string `json:"flaky_test_ratio"` + TotalRuns int `json:"total_runs"` + PassedRuns int `json:"passed_runs"` + FailedRuns int `json:"failed_runs"` + SkippedRuns int `json:"skipped_runs"` + PassRatio string `json:"pass_ratio"` + MaxPassRatio float64 `json:"max_pass_ratio"` +} + +var GenerateReportCmd = &cobra.Command{ + Use: "generate-report", + Short: "Generate reports from an aggregated test results", + RunE: func(cmd *cobra.Command, args []string) error { + fs := reports.OSFileSystem{} + + // Get flag values + aggregatedResultsPath, _ := cmd.Flags().GetString("aggregated-results-path") + summaryPath, _ := cmd.Flags().GetString("summary-path") + outputDir, _ := cmd.Flags().GetString("output-path") + maxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") + generatePRComment, _ := cmd.Flags().GetBool("generate-pr-comment") + githubRepo, _ := cmd.Flags().GetString("github-repository") + githubRunID, _ := 
cmd.Flags().GetInt64("github-run-id") + artifactName, _ := cmd.Flags().GetString("failed-tests-artifact-name") + + // Get the GitHub token from environment variable + githubToken := os.Getenv("GITHUB_TOKEN") + if githubToken == "" { + return fmt.Errorf("GITHUB_TOKEN environment variable is not set") + } + + // Load the aggregated report + s := spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Loading aggregated test report..." + s.Start() + + aggregatedReport := &reports.TestReport{} + reportFile, err := os.Open(aggregatedResultsPath) + if err != nil { + s.Stop() + return fmt.Errorf("error opening aggregated test report: %w", err) + } + defer reportFile.Close() + + if err := json.NewDecoder(reportFile).Decode(aggregatedReport); err != nil { + s.Stop() + return fmt.Errorf("error decoding aggregated test report: %w", err) + } + s.Stop() + fmt.Println("Aggregated test report loaded successfully.") + + // Load the summary data to check for failed tests + var summaryData SummaryData + + if summaryPath == "" { + return fmt.Errorf("--summary-path is required") + } + + summaryFile, err := os.Open(summaryPath) + if err != nil { + return fmt.Errorf("error opening summary JSON file: %w", err) + } + defer summaryFile.Close() + + if err := json.NewDecoder(summaryFile).Decode(&summaryData); err != nil { + return fmt.Errorf("error decoding summary JSON file: %w", err) + } + + // Check if there are failed tests + hasFailedTests := summaryData.FailedRuns > 0 + + var artifactLink string + if hasFailedTests { + // Fetch artifact link from GitHub API + artifactLink, err = fetchArtifactLink(githubToken, githubRepo, githubRunID, artifactName) + if err != nil { + return fmt.Errorf("error fetching artifact link: %w", err) + } + } else { + // No failed tests, set artifactLink to empty string + artifactLink = "" + fmt.Println("No failed tests found. Skipping artifact link generation.") + } + + // Create output directory if it doesn't exist + if err := fs.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("error creating output directory: %w", err) + } + + // Generate GitHub summary markdown + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Generating GitHub summary markdown..." 
+ s.Start() + + err = generateGitHubSummaryMarkdown(aggregatedReport, filepath.Join(outputDir, "all-test"), artifactLink, artifactName) + if err != nil { + s.Stop() + return fmt.Errorf("error generating GitHub summary markdown: %w", err) + } + s.Stop() + fmt.Println("GitHub summary markdown generated successfully.") + + if generatePRComment { + // Retrieve required flags + currentBranch, _ := cmd.Flags().GetString("current-branch") + currentCommitSHA, _ := cmd.Flags().GetString("current-commit-sha") + baseBranch, _ := cmd.Flags().GetString("base-branch") + repoURL, _ := cmd.Flags().GetString("repo-url") + actionRunID, _ := cmd.Flags().GetString("action-run-id") + + // Validate that required flags are provided + missingFlags := []string{} + if currentBranch == "" { + missingFlags = append(missingFlags, "--current-branch") + } + if currentCommitSHA == "" { + missingFlags = append(missingFlags, "--current-commit-sha") + } + if repoURL == "" { + missingFlags = append(missingFlags, "--repo-url") + } + if actionRunID == "" { + missingFlags = append(missingFlags, "--action-run-id") + } + if len(missingFlags) > 0 { + return fmt.Errorf("the following flags are required when --generate-pr-comment is set: %s", strings.Join(missingFlags, ", ")) + } + + // Generate PR comment markdown + s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) + s.Suffix = " Generating PR comment markdown..." + s.Start() + + err = generatePRCommentMarkdown( + aggregatedReport, + filepath.Join(outputDir, "all-test"), + baseBranch, + currentBranch, + currentCommitSHA, + repoURL, + actionRunID, + artifactName, + artifactLink, + maxPassRatio, + ) + if err != nil { + s.Stop() + return fmt.Errorf("error generating PR comment markdown: %w", err) + } + s.Stop() + fmt.Println("PR comment markdown generated successfully.") + } + + fmt.Printf("Reports generated at: %s\n", outputDir) + + return nil + }, +} + +func init() { + GenerateReportCmd.Flags().StringP("aggregated-results-path", "i", "", "Path to the aggregated JSON report file (required)") + GenerateReportCmd.Flags().StringP("summary-path", "s", "", "Path to the summary JSON file (required)") + GenerateReportCmd.Flags().StringP("output-path", "o", "./report", "Path to output the generated report files") + GenerateReportCmd.Flags().Float64P("max-pass-ratio", "", 1.0, "The maximum pass ratio threshold for a test to be considered flaky") + GenerateReportCmd.Flags().Bool("generate-pr-comment", false, "Set to true to generate PR comment markdown") + GenerateReportCmd.Flags().String("base-branch", "develop", "The base branch to compare against (used in PR comment)") + GenerateReportCmd.Flags().String("current-branch", "", "The current branch name (required if generate-pr-comment is set)") + GenerateReportCmd.Flags().String("current-commit-sha", "", "The current commit SHA (required if generate-pr-comment is set)") + GenerateReportCmd.Flags().String("repo-url", "", "The repository URL (required if generate-pr-comment is set)") + GenerateReportCmd.Flags().String("action-run-id", "", "The GitHub Actions run ID (required if generate-pr-comment is set)") + GenerateReportCmd.Flags().String("github-repository", "", "The GitHub repository in the format owner/repo (required)") + GenerateReportCmd.Flags().Int64("github-run-id", 0, "The GitHub Actions run ID (required)") + GenerateReportCmd.Flags().String("failed-tests-artifact-name", "failed-test-results-with-logs.json", "The name of the failed tests artifact (default 'failed-test-results-with-logs.json')") + + 
GenerateReportCmd.MarkFlagRequired("aggregated-results-path") + GenerateReportCmd.MarkFlagRequired("summary-path") + GenerateReportCmd.MarkFlagRequired("github-repository") + GenerateReportCmd.MarkFlagRequired("github-run-id") +} + +func fetchArtifactLink(githubToken, githubRepo string, githubRunID int64, artifactName string) (string, error) { + ctx := context.Background() + ts := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: githubToken}, + ) + tc := oauth2.NewClient(ctx, ts) + client := github.NewClient(tc) + + // Split the repository into owner and repo + repoParts := strings.SplitN(githubRepo, "/", 2) + if len(repoParts) != 2 { + return "", fmt.Errorf("invalid format for --github-repository, expected owner/repo") + } + owner, repo := repoParts[0], repoParts[1] + + // List artifacts for the workflow run + opts := &github.ListOptions{PerPage: 100} + artifacts, _, err := client.Actions.ListWorkflowRunArtifacts(ctx, owner, repo, githubRunID, opts) + if err != nil { + return "", fmt.Errorf("error listing artifacts: %w", err) + } + + // Find the artifact + for _, artifact := range artifacts.Artifacts { + if artifact.GetName() == artifactName { + // Construct the artifact URL using the artifact ID + artifactID := artifact.GetID() + artifactURL := fmt.Sprintf("https://github.com/%s/%s/actions/runs/%d/artifacts/%d", owner, repo, githubRunID, artifactID) + return artifactURL, nil + } + } + + return "", fmt.Errorf("artifact '%s' not found in the workflow run", artifactName) +} + +func generateGitHubSummaryMarkdown(report *reports.TestReport, outputPath, artifactLink, artifactName string) error { + fs := reports.OSFileSystem{} + mdFileName := outputPath + "-summary.md" + mdFile, err := fs.Create(mdFileName) + if err != nil { + return fmt.Errorf("error creating GitHub summary markdown file: %w", err) + } + defer mdFile.Close() + + // Generate the summary markdown + reports.GenerateGitHubSummaryMarkdown(mdFile, report, 1.0, artifactName, artifactLink) + + return nil +} + +func generatePRCommentMarkdown( + report *reports.TestReport, + outputPath, + baseBranch, + currentBranch, + currentCommitSHA, + repoURL, + actionRunID, + artifactName, + artifactLink string, + maxPassRatio float64, +) error { + fs := reports.OSFileSystem{} + mdFileName := outputPath + "-pr-comment.md" + mdFile, err := fs.Create(mdFileName) + if err != nil { + return fmt.Errorf("error creating PR comment markdown file: %w", err) + } + defer mdFile.Close() + + reports.GeneratePRCommentMarkdown( + mdFile, + report, + maxPassRatio, + baseBranch, + currentBranch, + currentCommitSHA, + repoURL, + actionRunID, + artifactName, + artifactLink, + ) + + return nil +} diff --git a/tools/flakeguard/cmd/report.go b/tools/flakeguard/cmd/report.go deleted file mode 100644 index aea04af62..000000000 --- a/tools/flakeguard/cmd/report.go +++ /dev/null @@ -1,287 +0,0 @@ -package cmd - -import ( - "encoding/json" - "fmt" - "path/filepath" - "strings" - "time" - - "github.com/briandowns/spinner" - "github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports" - "github.com/spf13/cobra" -) - -var ReportCmd = &cobra.Command{ - Use: "report", - Short: "Aggregate test results and generate reports", - RunE: func(cmd *cobra.Command, args []string) error { - fs := reports.OSFileSystem{} - - // Get flag values - reportResultsPath, _ := cmd.Flags().GetString("results-path") - reportOutputPath, _ := cmd.Flags().GetString("output-path") - reportMaxPassRatio, _ := cmd.Flags().GetFloat64("max-pass-ratio") - reportCodeOwnersPath, _ := 
cmd.Flags().GetString("codeowners-path") - reportRepoPath, _ := cmd.Flags().GetString("repo-path") - generatePRComment, _ := cmd.Flags().GetBool("generate-pr-comment") - - // Start spinner for loading test reports - s := spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Loading test reports..." - s.Start() - - // Load test reports from JSON files - testReports, err := reports.LoadReports(reportResultsPath) - if err != nil { - s.Stop() - return fmt.Errorf("error loading test reports: %w", err) - } - s.Stop() - fmt.Println("Test reports loaded successfully.") - - // Start spinner for aggregating reports - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Aggregating test reports..." - s.Start() - - // Aggregate the reports - aggregatedReport, err := reports.Aggregate(testReports...) - if err != nil { - s.Stop() - return fmt.Errorf("error aggregating test reports: %w", err) - } - s.Stop() - fmt.Println("Test reports aggregated successfully.") - - // Start spinner for mapping test results to paths - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Mapping test results to paths..." - s.Start() - - // Map test results to test paths - err = reports.MapTestResultsToPaths(aggregatedReport, reportRepoPath) - if err != nil { - s.Stop() - return fmt.Errorf("error mapping test results to paths: %w", err) - } - s.Stop() - fmt.Println("Test results mapped to paths successfully.") - - // Map test results to code owners if codeOwnersPath is provided - if reportCodeOwnersPath != "" { - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Mapping test results to code owners..." - s.Start() - - err = reports.MapTestResultsToOwners(aggregatedReport, reportCodeOwnersPath) - if err != nil { - s.Stop() - return fmt.Errorf("error mapping test results to code owners: %w", err) - } - s.Stop() - fmt.Println("Test results mapped to code owners successfully.") - } - - // Create output directory if it doesn't exist - outputDir := reportOutputPath - if err := fs.MkdirAll(outputDir, 0755); err != nil { - return fmt.Errorf("error creating output directory: %w", err) - } - - // Filter failed tests (PassRatio < maxPassRatio and not skipped) - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Filtering failed tests..." 
- s.Start() - - failedTests := reports.FilterTests(aggregatedReport.Results, func(tr reports.TestResult) bool { - return !tr.Skipped && tr.PassRatio < reportMaxPassRatio - }) - s.Stop() - fmt.Println("Failed tests filtered successfully.") - - // Create a new report for failed tests with logs - failedReportWithLogs := &reports.TestReport{ - GoProject: aggregatedReport.GoProject, - TestRunCount: aggregatedReport.TestRunCount, - RaceDetection: aggregatedReport.RaceDetection, - ExcludedTests: aggregatedReport.ExcludedTests, - SelectedTests: aggregatedReport.SelectedTests, - Results: failedTests, - } - - // Save the failed tests report with logs - failedTestsReportWithLogsPath := filepath.Join(outputDir, "failed-test-results-with-logs.json") - if err := reports.SaveReport(fs, failedTestsReportWithLogsPath, *failedReportWithLogs); err != nil { - return fmt.Errorf("error saving failed tests report with logs: %w", err) - } - fmt.Printf("Failed tests report with logs saved to %s\n", failedTestsReportWithLogsPath) - - // Set Outputs and PackageOutputs to nil for reports without logs - for i := range aggregatedReport.Results { - aggregatedReport.Results[i].Outputs = nil - aggregatedReport.Results[i].PackageOutputs = nil - } - for i := range failedTests { - failedTests[i].Outputs = nil - failedTests[i].PackageOutputs = nil - } - - // Save the aggregated report (all tests) without logs - allTestsReportPath := filepath.Join(outputDir, "all-test-results.json") - if err := reports.SaveReport(fs, allTestsReportPath, *aggregatedReport); err != nil { - return fmt.Errorf("error saving all tests report: %w", err) - } - fmt.Printf("All tests report saved to %s\n", allTestsReportPath) - - // Generate GitHub summary markdown - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Generating GitHub summary markdown..." - s.Start() - - err = generateGitHubSummaryMarkdown(aggregatedReport, filepath.Join(outputDir, "all-test")) - if err != nil { - s.Stop() - return fmt.Errorf("error generating GitHub summary markdown: %w", err) - } - s.Stop() - fmt.Println("GitHub summary markdown generated successfully.") - - // Generate all-tests-summary.json - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Generating all-test-summary.json..." 
- s.Start() - - err = generateAllTestsSummaryJSON(aggregatedReport, filepath.Join(outputDir, "all-test-summary.json"), reportMaxPassRatio) - if err != nil { - s.Stop() - return fmt.Errorf("error generating all-test-summary.json: %w", err) - } - s.Stop() - fmt.Println("all-test-summary.json generated successfully.") - - if generatePRComment { - // Retrieve required flags - currentBranch, _ := cmd.Flags().GetString("current-branch") - currentCommitSHA, _ := cmd.Flags().GetString("current-commit-sha") - baseBranch, _ := cmd.Flags().GetString("base-branch") - repoURL, _ := cmd.Flags().GetString("repo-url") - actionRunID, _ := cmd.Flags().GetString("action-run-id") - - // Validate that required flags are provided - missingFlags := []string{} - if currentBranch == "" { - missingFlags = append(missingFlags, "--current-branch") - } - if currentCommitSHA == "" { - missingFlags = append(missingFlags, "--current-commit-sha") - } - if repoURL == "" { - missingFlags = append(missingFlags, "--repo-url") - } - if actionRunID == "" { - missingFlags = append(missingFlags, "--action-run-id") - } - if len(missingFlags) > 0 { - return fmt.Errorf("the following flags are required when --generate-pr-comment is set: %s", strings.Join(missingFlags, ", ")) - } - - // Generate PR comment markdown - s = spinner.New(spinner.CharSets[11], 100*time.Millisecond) - s.Suffix = " Generating PR comment markdown..." - s.Start() - - err = generatePRCommentMarkdown(aggregatedReport, filepath.Join(outputDir, "all-test"), baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) - if err != nil { - s.Stop() - return fmt.Errorf("error generating PR comment markdown: %w", err) - } - s.Stop() - fmt.Println("PR comment markdown generated successfully.") - } - - // Create a new report for failed tests without logs - failedReportNoLogs := &reports.TestReport{ - GoProject: aggregatedReport.GoProject, - TestRunCount: aggregatedReport.TestRunCount, - RaceDetection: aggregatedReport.RaceDetection, - ExcludedTests: aggregatedReport.ExcludedTests, - SelectedTests: aggregatedReport.SelectedTests, - Results: failedTests, - } - - // Save the failed tests report without logs - failedTestsReportNoLogsPath := filepath.Join(outputDir, "failed-test-results.json") - if err := reports.SaveReport(fs, failedTestsReportNoLogsPath, *failedReportNoLogs); err != nil { - return fmt.Errorf("error saving failed tests report without logs: %w", err) - } - fmt.Printf("Failed tests report without logs saved to %s\n", failedTestsReportNoLogsPath) - - fmt.Printf("Reports generated at: %s\n", reportOutputPath) - - return nil - }, -} - -func init() { - ReportCmd.Flags().StringP("results-path", "p", "", "Path to the folder containing JSON test result files (required)") - ReportCmd.Flags().StringP("output-path", "o", "./report", "Path to output the generated report files") - ReportCmd.Flags().Float64P("max-pass-ratio", "", 1.0, "The maximum pass ratio threshold for a test to be considered flaky") - ReportCmd.Flags().StringP("codeowners-path", "", "", "Path to the CODEOWNERS file") - ReportCmd.Flags().StringP("repo-path", "", ".", "The path to the root of the repository/project") - ReportCmd.Flags().Bool("generate-pr-comment", false, "Set to true to generate PR comment markdown") - ReportCmd.Flags().String("base-branch", "develop", "The base branch to compare against (used in PR comment)") - ReportCmd.Flags().String("current-branch", "", "The current branch name (required if generate-pr-comment is set)") - ReportCmd.Flags().String("current-commit-sha", "", "The 
current commit SHA (required if generate-pr-comment is set)") - ReportCmd.Flags().String("repo-url", "", "The repository URL (required if generate-pr-comment is set)") - ReportCmd.Flags().String("action-run-id", "", "The GitHub Actions run ID (required if generate-pr-comment is set)") - - ReportCmd.MarkFlagRequired("results-path") -} - -func generateGitHubSummaryMarkdown(report *reports.TestReport, outputPath string) error { - fs := reports.OSFileSystem{} - mdFileName := outputPath + "-summary.md" - mdFile, err := fs.Create(mdFileName) - if err != nil { - return fmt.Errorf("error creating GitHub summary markdown file: %w", err) - } - defer mdFile.Close() - reports.GenerateGitHubSummaryMarkdown(mdFile, report, 1.0) - return nil -} - -func generatePRCommentMarkdown(report *reports.TestReport, outputPath, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID string) error { - fs := reports.OSFileSystem{} - mdFileName := outputPath + "-pr-comment.md" - mdFile, err := fs.Create(mdFileName) - if err != nil { - return fmt.Errorf("error creating PR comment markdown file: %w", err) - } - defer mdFile.Close() - reports.GeneratePRCommentMarkdown(mdFile, report, 1.0, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) - return nil -} - -// New function to generate all-tests-summary.json -func generateAllTestsSummaryJSON(report *reports.TestReport, outputPath string, maxPassRatio float64) error { - summary := reports.GenerateSummaryData(report.Results, maxPassRatio) - data, err := json.Marshal(summary) - if err != nil { - return fmt.Errorf("error marshaling summary data to JSON: %w", err) - } - - fs := reports.OSFileSystem{} - jsonFile, err := fs.Create(outputPath) - if err != nil { - return fmt.Errorf("error creating file: %w", err) - } - defer jsonFile.Close() - - _, err = jsonFile.Write(data) - if err != nil { - return fmt.Errorf("error writing data to file: %w", err) - } - - return nil -} diff --git a/tools/flakeguard/go.mod b/tools/flakeguard/go.mod index b32f49f09..3fe7ca682 100644 --- a/tools/flakeguard/go.mod +++ b/tools/flakeguard/go.mod @@ -3,14 +3,17 @@ module github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard go 1.21.9 require ( + github.com/briandowns/spinner v1.23.1 + github.com/google/go-github/v67 v67.0.0 github.com/spf13/cobra v1.8.1 + golang.org/x/oauth2 v0.24.0 golang.org/x/text v0.20.0 ) require ( - github.com/briandowns/spinner v1.23.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.7.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/mattn/go-colorable v0.1.2 // indirect github.com/mattn/go-isatty v0.0.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/tools/flakeguard/go.sum b/tools/flakeguard/go.sum index 958472b10..8aec97935 100644 --- a/tools/flakeguard/go.sum +++ b/tools/flakeguard/go.sum @@ -5,6 +5,13 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v67 v67.0.0 
h1:g11NDAmfaBaCO8qYdI9fsmbaRipHNWRIU/2YGvlh4rg= +github.com/google/go-github/v67 v67.0.0/go.mod h1:zH3K7BxjFndr9QSeFibx4lTKkYS3K9nDanoI1NjaOtY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= @@ -20,6 +27,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -27,6 +36,7 @@ golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/tools/flakeguard/main.go b/tools/flakeguard/main.go index a22b34ff4..1a9ea061f 100644 --- a/tools/flakeguard/main.go +++ b/tools/flakeguard/main.go @@ -28,8 +28,9 @@ func init() { rootCmd.AddCommand(cmd.FindTestsCmd) rootCmd.AddCommand(cmd.RunTestsCmd) - rootCmd.AddCommand(cmd.ReportCmd) rootCmd.AddCommand(cmd.CheckTestOwnersCmd) + rootCmd.AddCommand(cmd.AggregateResultsCmd) + rootCmd.AddCommand(cmd.GenerateReportCmd) } func main() { diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go index a10245cd9..424f6a4f9 100644 --- a/tools/flakeguard/reports/presentation.go +++ b/tools/flakeguard/reports/presentation.go @@ -83,7 +83,7 @@ func formatPassRatio(passRatio float64) string { return fmt.Sprintf("%.2f%%", passRatio*100) } -func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64) { +func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64, artifactName, artifactLink string) { settingsTable := buildSettingsTable(testReport, maxPassRatio) fmt.Fprint(w, "# Flakeguard Summary\n\n") printTable(w, settingsTable) @@ -102,9 +102,13 @@ func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassR } RenderResults(w, testReport.Results, maxPassRatio, true) + + if artifactLink != "" { + renderArtifactSection(w, artifactName, artifactLink) + } } 
-func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID string) { +func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID, artifactName, artifactLink string) { fmt.Fprint(w, "# Flakeguard Summary\n\n") // Construct additional info @@ -142,6 +146,10 @@ func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio resultsTable := GenerateFlakyTestsTable(testReport.Results, maxPassRatio, true) renderTestResultsTable(w, resultsTable, true) + + if artifactLink != "" { + renderArtifactSection(w, artifactName, artifactLink) + } } func buildSettingsTable(testReport *TestReport, maxPassRatio float64) [][]string { @@ -208,6 +216,16 @@ func renderTestResultsTable(w io.Writer, table [][]string, markdown bool) { printTable(w, table) } +func renderArtifactSection(w io.Writer, artifactName, artifactLink string) { + if artifactLink != "" { + fmt.Fprintln(w) + fmt.Fprintln(w, "## Artifacts") + fmt.Fprintln(w) + fmt.Fprintf(w, "For detailed logs of the failed tests, please refer to the artifact [%s](%s).\n", artifactName, artifactLink) + fmt.Fprintln(w, "This artifact contains all outputs from failed tests.") + } +} + func printTable(w io.Writer, table [][]string) { colWidths := calculateColumnWidths(table) separator := buildSeparator(colWidths) diff --git a/tools/flakeguard/reports/presentation_test.go b/tools/flakeguard/reports/presentation_test.go index 20bb967bd..2d28a0517 100644 --- a/tools/flakeguard/reports/presentation_test.go +++ b/tools/flakeguard/reports/presentation_test.go @@ -105,7 +105,7 @@ func TestGenerateGitHubSummaryMarkdown(t *testing.T) { var buffer bytes.Buffer maxPassRatio := 0.9 - GenerateGitHubSummaryMarkdown(&buffer, testReport, maxPassRatio) + GenerateGitHubSummaryMarkdown(&buffer, testReport, maxPassRatio, "", "") output := buffer.String() @@ -165,7 +165,7 @@ func TestGeneratePRCommentMarkdown(t *testing.T) { repoURL := "https://github.com/example/repo" actionRunID := "123456789" - GeneratePRCommentMarkdown(&buffer, testReport, maxPassRatio, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID) + GeneratePRCommentMarkdown(&buffer, testReport, maxPassRatio, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID, "", "") output := buffer.String() From 34fa8f2dab51f7cfd24fadec57c78978876c9a10 Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Mon, 9 Dec 2024 14:33:56 +0100 Subject: [PATCH 20/21] Show no tests executed in reports when no test results --- tools/flakeguard/reports/presentation.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go index 424f6a4f9..39b22aa5b 100644 --- a/tools/flakeguard/reports/presentation.go +++ b/tools/flakeguard/reports/presentation.go @@ -84,16 +84,17 @@ func formatPassRatio(passRatio float64) string { } func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64, artifactName, artifactLink string) { - settingsTable := buildSettingsTable(testReport, maxPassRatio) fmt.Fprint(w, "# Flakeguard Summary\n\n") - printTable(w, settingsTable) - fmt.Fprintln(w) if len(testReport.Results) == 0 { - fmt.Fprintln(w, "## No tests ran :warning:") + fmt.Fprintln(w, "No tests were executed.") return } + settingsTable := 
buildSettingsTable(testReport, maxPassRatio) + printTable(w, settingsTable) + fmt.Fprintln(w) + summary := GenerateSummaryData(testReport.Results, maxPassRatio) if summary.FlakyTests > 0 { fmt.Fprintln(w, "## Found Flaky Tests :x:") @@ -111,6 +112,11 @@ func GenerateGitHubSummaryMarkdown(w io.Writer, testReport *TestReport, maxPassR func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio float64, baseBranch, currentBranch, currentCommitSHA, repoURL, actionRunID, artifactName, artifactLink string) { fmt.Fprint(w, "# Flakeguard Summary\n\n") + if len(testReport.Results) == 0 { + fmt.Fprintln(w, "No tests were executed.") + return + } + // Construct additional info additionalInfo := fmt.Sprintf( "Ran new or updated tests between `%s` and %s (`%s`).", @@ -132,11 +138,6 @@ func GeneratePRCommentMarkdown(w io.Writer, testReport *TestReport, maxPassRatio fmt.Fprintln(w, linksLine) fmt.Fprintln(w) // Add an extra newline for formatting - if len(testReport.Results) == 0 { - fmt.Fprintln(w, "## No tests ran :warning:") - return - } - // Add the flaky tests section if GenerateSummaryData(testReport.Results, maxPassRatio).FlakyTests > 0 { fmt.Fprintln(w, "## Found Flaky Tests :x:") From 75f07e45d34693a7f4a5ba7c5b5d427f90bde8ae Mon Sep 17 00:00:00 2001 From: lukaszcl <120112546+lukaszcl@users.noreply.github.com> Date: Mon, 9 Dec 2024 14:37:59 +0100 Subject: [PATCH 21/21] update artifact message --- tools/flakeguard/reports/presentation.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/flakeguard/reports/presentation.go b/tools/flakeguard/reports/presentation.go index 39b22aa5b..5c2efd6b7 100644 --- a/tools/flakeguard/reports/presentation.go +++ b/tools/flakeguard/reports/presentation.go @@ -223,7 +223,6 @@ func renderArtifactSection(w io.Writer, artifactName, artifactLink string) { fmt.Fprintln(w, "## Artifacts") fmt.Fprintln(w) fmt.Fprintf(w, "For detailed logs of the failed tests, please refer to the artifact [%s](%s).\n", artifactName, artifactLink) - fmt.Fprintln(w, "This artifact contains all outputs from failed tests.") } }
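
Note: a minimal, self-contained sketch of the updated presentation API from the patches above, assuming only the exported names that appear in this series (reports.TestReport, reports.TestResult, and GenerateGitHubSummaryMarkdown with its new artifactName/artifactLink parameters). The report values, artifact name, and artifact URL below are illustrative placeholders rather than anything produced by the tool; passing empty strings for the two artifact arguments skips the Artifacts section, as the updated tests do.

package main

import (
	"bytes"
	"fmt"

	"github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard/reports"
)

func main() {
	// A small, hand-built report; in the CLI these come from LoadReports/Aggregate.
	report := &reports.TestReport{
		GoProject:    "ProjectX",
		TestRunCount: 5,
		Results: []reports.TestResult{
			{
				TestName:    "TestFlaky",
				TestPackage: "pkg1",
				Runs:        5,
				Successes:   3,
				PassRatio:   0.6,
			},
		},
	}

	var buf bytes.Buffer
	// A non-empty artifact name and link render the "Artifacts" section added in
	// this patch series; empty strings omit it.
	reports.GenerateGitHubSummaryMarkdown(
		&buf,
		report,
		1.0, // max pass ratio threshold
		"failed-test-results-with-logs.json",
		"https://github.com/owner/repo/actions/runs/1/artifacts/1", // placeholder link
	)
	fmt.Print(buf.String())
}

The same pair of trailing arguments was added to GeneratePRCommentMarkdown, so a link resolved by fetchArtifactLink would surface in both the job summary and the PR comment.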