diff --git a/go.mod b/go.mod
index 585b5f38..754fadad 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
 	github.com/RedHatInsights/insights-content-service v0.0.0-20201009081018-083923779f00
 	github.com/RedHatInsights/insights-operator-utils v1.22.0
 	github.com/RedHatInsights/insights-results-aggregator-data v1.3.3
-	github.com/RedHatInsights/insights-results-types v1.2.0
+	github.com/RedHatInsights/insights-results-types v1.3.1
 	github.com/Shopify/sarama v1.27.1
 	github.com/deckarep/golang-set v1.7.1
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
diff --git a/go.sum b/go.sum
index 0d96e64a..e127dcbc 100644
--- a/go.sum
+++ b/go.sum
@@ -55,8 +55,9 @@ github.com/RedHatInsights/insights-results-aggregator-data v1.3.1/go.mod h1:Ylo2
 github.com/RedHatInsights/insights-results-aggregator-data v1.3.2/go.mod h1:E1UaB+IjJ/muxvMstVoqJrB82zVKNykjTtCiM3tMHoM=
 github.com/RedHatInsights/insights-results-aggregator-data v1.3.3 h1:K6j+Sr+S0iUqFEfevl+MI0dEXn5AxFeJn9FTU5razWA=
 github.com/RedHatInsights/insights-results-aggregator-data v1.3.3/go.mod h1:udHNC7lBxYnu9AqMahABqvuclCzWUWSkbacQbUaehfI=
-github.com/RedHatInsights/insights-results-types v1.2.0 h1:tFaGQE2h/4OIhf8sI/lRwJ/Fh0soO7a47de9x4irIJs=
 github.com/RedHatInsights/insights-results-types v1.2.0/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8=
+github.com/RedHatInsights/insights-results-types v1.3.1 h1:dctLHdEHsZuK1R0TTFomkYJHIk6je2okbrmMBnHWvAQ=
+github.com/RedHatInsights/insights-results-types v1.3.1/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8=
 github.com/RedHatInsights/kafka-zerolog v0.0.0-20210304172207-928f026dc7ec h1:/msFfckx6EIj0rZncrMUfNixFvsLbOiRIe4J0AurhDo=
 github.com/RedHatInsights/kafka-zerolog v0.0.0-20210304172207-928f026dc7ec/go.mod h1:HJul5oCsCRNiRlh/ayJDGdW3PzGlid/5aaQwJBn7was=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
diff --git a/openapi.json b/openapi.json
index 01e8fbe0..d4792399 100644
--- a/openapi.json
+++ b/openapi.json
@@ -1287,6 +1287,102 @@
         }
       }
     },
+    "/clusters/organizations/{org_id}/users/{user_id}/recommendations": {
+      "post": {
+        "summary": "getClustersRecommendationsList retrieves all hitting recommendations for all clusters given in the POST body",
+        "operationId": "getClustersRecommendationsPost",
+        "description": "Recommendations will be retrieved based on the list of cluster IDs that is part of the request body.",
+        "parameters": [
+          {
+            "name": "org_id",
+            "in": "path",
+            "description": "Organization ID represented as positive integer",
+            "required": true,
+            "schema": {
+              "type": "integer",
+              "format": "int64",
+              "minimum": 0
+            }
+          },
+          {
+            "name": "user_id",
+            "in": "path",
+            "required": true,
+            "description": "Numeric ID of the user. An example: `42`",
+            "schema": {
+              "type": "string"
+            }
+          }
+        ],
+        "requestBody": {
+          "description": "List of cluster IDs. An example: `34c3ecc5-624a-49a5-bab8-4fdc5e51a266`",
+          "required": true,
+          "content": {
+            "application/json": {
+              "schema": {
+              }
+            }
+          }
+        },
+        "responses": {
+          "200": {
+            "description": "Map of clusters and a list of hitting recommendations for corresponding clusters",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "type": "object",
+                  "properties": {
+                    "clusters": {
+                      "type": "object",
+                      "additionalProperties": {
+                        "type": "object",
+                        "properties": {
+                          "created_at": {
+                            "type": "string",
+                            "description": "The time of last analysis for this cluster."
+                          },
+                          "recommendations": {
+                            "type": "array",
+                            "items": {
+                              "type": "string",
+                              "description": "The array of recommendation IDs",
+                              "example": [
+                                "rule.module|ERROR_KEY",
+                                "rule.another.module|SPECIFIC_KEY"
+                              ]
+                            }
+                          }
+                        }
+                      },
+                      "example": {
+                        "1234ecc5-624a-49a5-bab8-4fdc5e51a266": {
+                          "created_at": "2021-09-07T15:50:00Z",
+                          "recommendations": [
+                            "rule.module1|ERROR_KEY1",
+                            "rule.module2|ERROR_KEY2"
+                          ]
+                        },
+                        "5678ecc5-624a-49a5-bab8-4fdc5e51a266": {
+                          "created_at": "2021-09-07T15:50:00Z",
+                          "recommendations": [
+                            "rule.module1|ERROR_KEY1",
+                            "rule.module2|ERROR_KEY2"
+                          ]
+                        }
+                      }
+                    },
+                    "status": {
+                      "type": "string",
+                      "example": "ok"
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    },
     "/rules/users/{userId}/disabled": {
       "get": {
         "summary": "Returns a list of rules disabled from current account",
diff --git a/server/endpoints.go b/server/endpoints.go
index 81164950..f06979f8 100644
--- a/server/endpoints.go
+++ b/server/endpoints.go
@@ -81,6 +81,8 @@ const (
 
 	// RecommendationsListEndpoint receives a list of clusters in POST body and returns a list of all recommendations hitting for them
 	RecommendationsListEndpoint = "recommendations/organizations/{org_id}/users/{user_id}/list"
+	// ClustersRecommendationsListEndpoint receives a list of clusters in POST body and returns a list of clusters with lists of hitting recommendations
+	ClustersRecommendationsListEndpoint = "clusters/organizations/{org_id}/users/{user_id}/recommendations"
 
 	// Rating accepts a list of ratings in the request body and store them in the database for the given user
 	Rating = "rules/organizations/{org_id}/users/{user_id}/rating"
@@ -166,4 +168,5 @@ func (server *HTTPServer) addRuleEnableDisableEndpointsToRouter(router *mux.Rout
 // are related to the Insights Advisor application
 func (server *HTTPServer) addInsightsAdvisorEndpointsToRouter(router *mux.Router, apiPrefix string) {
 	router.HandleFunc(apiPrefix+RecommendationsListEndpoint, server.getRecommendations).Methods(http.MethodPost, http.MethodOptions)
+	router.HandleFunc(apiPrefix+ClustersRecommendationsListEndpoint, server.getClustersRecommendationsList).Methods(http.MethodPost, http.MethodOptions)
 }
diff --git a/server/reports.go b/server/reports.go
index fd5292ae..1e0b33d4 100644
--- a/server/reports.go
+++ b/server/reports.go
@@ -304,3 +304,45 @@ func (server *HTTPServer) getRecommendations(writer http.ResponseWriter, request
 		log.Error().Err(err).Msg(responseDataError)
 	}
 }
+
+// getClustersRecommendationsList retrieves all recommendations hitting for all clusters specified in the request body
+func (server *HTTPServer) getClustersRecommendationsList(writer http.ResponseWriter, request *http.Request) {
+	tStart := time.Now()
+
+	userID, ok := readUserID(writer, request)
+	if !ok {
+		// everything has been handled
+		return
+	}
+	log.Info().Str(userIDstr, string(userID)).Msg("getClustersRecommendationsList")
+
+	orgID, ok := readOrgID(writer, request)
+	if !ok {
+		// everything has been handled
+		return
+	}
+	log.Info().Int(orgIDStr, int(orgID)).Msg("getClustersRecommendationsList")
+
+	var listOfClusters []string
+	err := json.NewDecoder(request.Body).Decode(&listOfClusters)
+	if err != nil {
+		handleServerError(writer, err)
+		return
+	}
+	log.Info().Msgf("getClustersRecommendationsList number of clusters: %d", len(listOfClusters))
+
+	clustersRecommendations, err := server.Storage.ReadClusterListRecommendations(listOfClusters, orgID)
+	if err != nil {
+		log.Error().Err(err).Msg("Error retrieving recommendations")
+		handleServerError(writer, err)
+		return
+	}
+
+	log.Info().Uint32(orgIDStr, uint32(orgID)).Msgf(
+		"getClustersRecommendationsList took %s", time.Since(tStart),
+	)
+	err = responses.SendOK(writer, responses.BuildOkResponseWithData("clusters", clustersRecommendations))
+	if err != nil {
+		log.Error().Err(err).Msg(responseDataError)
+	}
+}
diff --git a/server/server_test.go b/server/server_test.go
index 122d326c..385a0764 100644
--- a/server/server_test.go
+++ b/server/server_test.go
@@ -1443,3 +1443,83 @@ func TestServeInfoMap(t *testing.T) {
 		StatusCode: http.StatusOK,
 	})
 }
+
+func TestHTTPServer_ClustersRecommendationsListEndpoint_NoRecommendations(t *testing.T) {
+	mockStorage, closer := helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	err := mockStorage.WriteRecommendationsForCluster(
+		testdata.OrgID, testdata.ClusterName, testdata.Report0Rules,
+	)
+	helpers.FailOnError(t, err)
+
+	clusterList := []types.ClusterName{testdata.GetRandomClusterID()}
+	reqBody, _ := json.Marshal(clusterList)
+
+	helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{
+		Method:       http.MethodPost,
+		Endpoint:     server.ClustersRecommendationsListEndpoint,
+		EndpointArgs: []interface{}{testdata.OrgID, testdata.UserID},
+		Body:         reqBody,
+	}, &helpers.APIResponse{
+		StatusCode: http.StatusOK,
+		Body:       `{"clusters":{},"status":"ok"}`,
+	})
+}
+
+func TestHTTPServer_ClustersRecommendationsListEndpoint_2Recs1Cluster(t *testing.T) {
+	mockStorage, closer := helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	err := mockStorage.WriteRecommendationsForCluster(
+		testdata.OrgID, testdata.ClusterName, testdata.Report2Rules,
+	)
+	helpers.FailOnError(t, err)
+
+	clusterList := []types.ClusterName{testdata.ClusterName}
+	reqBody, _ := json.Marshal(clusterList)
+
+	// can't check body directly because of variable created_at timestamp, contents are tested in storage tests
+	helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{
+		Method:       http.MethodPost,
+		Endpoint:     server.ClustersRecommendationsListEndpoint,
+		EndpointArgs: []interface{}{testdata.OrgID, testdata.UserID},
+		Body:         reqBody,
+	}, &helpers.APIResponse{
+		StatusCode: http.StatusOK,
+	})
+}
+
+func TestHTTPServer_ClustersRecommendationsListEndpoint_BadOrgIDBadRequest(t *testing.T) {
+	mockStorage, closer := helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	clusterList := []types.ClusterName{testdata.GetRandomClusterID()}
+	reqBody, _ := json.Marshal(clusterList)
+
+	helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{
+		Method:       http.MethodPost,
+		Endpoint:     server.ClustersRecommendationsListEndpoint,
+		EndpointArgs: []interface{}{"string", testdata.UserID},
+		Body:         reqBody,
+	}, &helpers.APIResponse{
+		StatusCode: http.StatusBadRequest,
+	})
+}
+
+func TestHTTPServer_ClustersRecommendationsListEndpoint_MissingClusterListBadRequest(t *testing.T) {
+	mockStorage, closer := helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	var clusterListBadType string
+	reqBody, _ := json.Marshal(clusterListBadType)
+
+	helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{
+		Method:       http.MethodPost,
+		Endpoint:     server.ClustersRecommendationsListEndpoint,
+		EndpointArgs: []interface{}{testdata.OrgID, testdata.UserID},
+		Body:         reqBody,
+	}, &helpers.APIResponse{
+		StatusCode: http.StatusBadRequest,
+	})
+}
diff --git a/storage/consts.go b/storage/consts.go
index fc1c7bfe..23e5d24f 100644
--- a/storage/consts.go
+++ b/storage/consts.go
@@ -25,10 +25,10 @@ const (
 	selectorsKey = "selectors"
 
 	// key for recommendations' creation time
 	createdAtKey = "created_at"
-	// closeStatementError error string
 	closeStatementError = "Unable to close statement"
-	// inClauseError when constructing IN clause fails
 	inClauseError = "error constructing WHERE IN clause"
 
+	// recommendationTimestampFormat represents the datetime format of the created_at column in the recommendation table
+	recommendationTimestampFormat = "2006-01-02 15:04:05.000000000+00:00"
 )
diff --git a/storage/export_test.go b/storage/export_test.go
index d9caf666..d93284e5 100644
--- a/storage/export_test.go
+++ b/storage/export_test.go
@@ -35,8 +35,9 @@ import (
 type SQLHooks = sqlHooks
 
 const (
-	LogFormatterString        = logFormatterString
-	SQLHooksKeyQueryBeginTime = sqlHooksKeyQueryBeginTime
+	LogFormatterString            = logFormatterString
+	SQLHooksKeyQueryBeginTime     = sqlHooksKeyQueryBeginTime
+	RecommendationTimestampFormat = recommendationTimestampFormat
 )
 
 var (
diff --git a/storage/noop_storage.go b/storage/noop_storage.go
index 55d53444..a46ba8c5 100644
--- a/storage/noop_storage.go
+++ b/storage/noop_storage.go
@@ -329,3 +329,10 @@ func (*NoopStorage) ListOfClustersForOrgSpecificRule(
 ) ([]ctypes.HittingClustersData, error) {
 	return nil, nil
 }
+
+// ReadClusterListRecommendations retrieves cluster IDs and a list of hitting rules for each one
+func (*NoopStorage) ReadClusterListRecommendations(
+	clusterList []string, orgID types.OrgID,
+) (ctypes.ClusterRecommendationMap, error) {
+	return nil, nil
+}
diff --git a/storage/noop_storage_test.go b/storage/noop_storage_test.go
index 541a05c9..1833789a 100644
--- a/storage/noop_storage_test.go
+++ b/storage/noop_storage_test.go
@@ -90,4 +90,6 @@ func TestNoopStorage_Methods_Cont2(t *testing.T) {
 	_, _ = noopStorage.ListOfSystemWideDisabledRules(orgID, userID)
 	_, _ = noopStorage.ListOfClustersForOrgSpecificRule(0, "", nil)
 	_, _ = noopStorage.ListOfClustersForOrgSpecificRule(0, "", []string{"a"})
+	_, _ = noopStorage.ReadRecommendationsForClusters([]string{}, types.OrgID(1))
+	_, _ = noopStorage.ReadClusterListRecommendations([]string{}, types.OrgID(1))
 }
diff --git a/storage/storage.go b/storage/storage.go
index ac7bbe5e..72177bf3 100644
--- a/storage/storage.go
+++ b/storage/storage.go
@@ -185,6 +185,9 @@ type Storage interface {
 	ListOfSystemWideDisabledRules(
 		orgID types.OrgID, userID types.UserID) ([]ctypes.SystemWideRuleDisable, error)
 	ReadRecommendationsForClusters([]string, types.OrgID) (ctypes.RecommendationImpactedClusters, error)
+	ReadClusterListRecommendations(clusterList []string, orgID types.OrgID) (
+		ctypes.ClusterRecommendationMap, error,
+	)
 }
 
 // DBStorage is an implementation of Storage interface that use selected SQL like database
@@ -1045,20 +1048,6 @@ func (storage DBStorage) ReadRecommendationsForClusters(
 		return recommendationsMap, err
 	}
 
-	if storage.dbDriverType != types.DBDriverSQLite3 {
-		// EXPLAIN query to check performance and index usage
-		explRows, err := storage.connection.Query("EXPLAIN ANALYZE "+query, orgID)
-		if err != nil {
-			log.Error().Err(err).Msg("error explaining query")
-			return recommendationsMap, err
-		}
-		for explRows.Next() {
-			var step string
-			_ = explRows.Scan(&step)
-			log.Info().Msgf("EXPLAIN ReadRecommendationsForClusters: %v", step)
-		}
-	}
-
 	for rows.Next() {
 		var (
 			ruleID types.RuleID
@@ -1080,6 +1069,74 @@
 	return recommendationsMap, nil
 }
 
+// ReadClusterListRecommendations retrieves cluster IDs and a list of hitting rules for each one
+func (storage DBStorage) ReadClusterListRecommendations(
+	clusterList []string,
+	orgID types.OrgID,
+) (ctypes.ClusterRecommendationMap, error) {
+
+	clusterMap := make(ctypes.ClusterRecommendationMap, 0)
+
+	if len(clusterList) < 1 {
+		return clusterMap, nil
+	}
+
+	// disable "G202 (CWE-89): SQL string concatenation"
+	// #nosec G202
+	query := `
+	SELECT
+		cluster_id, rule_id, created_at
+	FROM
+		recommendation
+	WHERE org_id = $1 AND cluster_id IN (%v)`
+	// #nosec G201
+	query = fmt.Sprintf(query, inClauseFromSlice(clusterList))
+
+	rows, err := storage.connection.Query(query, orgID)
+	if err != nil {
+		log.Error().Err(err).Msg("query to get recommendations")
+		return clusterMap, err
+	}
+
+	for rows.Next() {
+		var (
+			clusterID ctypes.ClusterName
+			ruleID    ctypes.RuleID
+			timestamp string
+		)
+
+		err := rows.Scan(
+			&clusterID,
+			&ruleID,
+			&timestamp,
+		)
+		if err != nil {
+			log.Error().Err(err).Msg("read one recommendation")
+			return clusterMap, err
+		}
+
+		parsedT, err := time.Parse(recommendationTimestampFormat, timestamp)
+		if err != nil {
+			log.Error().Err(err).Msgf("unparsable timestamp %v", timestamp)
+			return clusterMap, err
+		}
+
+		if cluster, exists := clusterMap[clusterID]; exists {
+			cluster.Recommendations = append(cluster.Recommendations, ruleID)
+			clusterMap[clusterID] = cluster
+		} else {
+			// create entry in map for new cluster ID
+			clusterMap[clusterID] = ctypes.ClusterRecommendationList{
+				// created_at is the same for all rows of a given cluster
+				CreatedAt:       parsedT,
+				Recommendations: []ctypes.RuleID{ruleID},
+			}
+		}
+
+	}
+	return clusterMap, nil
+}
+
 // ReportsCount reads number of all records stored in database
 func (storage DBStorage) ReportsCount() (int, error) {
 	count := -1
diff --git a/storage/storage_test.go b/storage/storage_test.go
index 9eba6efd..7812f3d0 100644
--- a/storage/storage_test.go
+++ b/storage/storage_test.go
@@ -1317,3 +1317,118 @@ func TestDBStorageReadRecommendationsForNonexistingClusters(t *testing.T) {
 
 	assert.Equal(t, ctypes.RecommendationImpactedClusters{}, res)
 }
+
+// TestDBStorageReadClusterListRecommendationsNoRecommendations checks that when no recommendations
+// are stored, it is an OK state
+func TestDBStorageReadClusterListRecommendationsNoRecommendations(t *testing.T) {
+	mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	err := mockStorage.WriteRecommendationsForCluster(
+		testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty,
+	)
+	helpers.FailOnError(t, err)
+
+	expect := make(ctypes.ClusterRecommendationMap)
+
+	res, err := mockStorage.ReadClusterListRecommendations([]string{string(testdata.ClusterName)}, testdata.OrgID)
+	helpers.FailOnError(t, err)
+
+	assert.Equal(t, expect, res)
+}
+
+// TestDBStorageReadClusterListRecommendationsDifferentCluster checks that recommendations stored
+// for a different cluster are not returned for the requested clusters
+func TestDBStorageReadClusterListRecommendationsDifferentCluster(t *testing.T) {
+	mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	err := mockStorage.WriteRecommendationsForCluster(
+		testdata.OrgID, testdata.ClusterName, testdata.Report3Rules,
+	)
+	helpers.FailOnError(t, err)
+
+	clusterList := make([]string, 3)
+	for i := range clusterList {
+		clusterList[i] = string(testdata.GetRandomClusterID())
+	}
+	expect := make(ctypes.ClusterRecommendationMap)
+
+	res, err := mockStorage.ReadClusterListRecommendations(clusterList, testdata.OrgID)
+	helpers.FailOnError(t, err)
+
+	assert.Equal(t, expect, res)
+}
+
+// TestDBStorageReadClusterListRecommendationsGet1Cluster loads several recommendations for the same org
+// but "simulates" a situation where we only get one of them from the AMS API
+func TestDBStorageReadClusterListRecommendationsGet1Cluster(t *testing.T) {
+	mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	clusterList := make([]string, 3)
+	for i := range clusterList {
+		randomClusterID := testdata.GetRandomClusterID()
+
+		clusterList[i] = string(randomClusterID)
+
+		err := mockStorage.WriteRecommendationsForCluster(
+			testdata.OrgID, randomClusterID, testdata.Report3Rules,
+		)
+		helpers.FailOnError(t, err)
+	}
+
+	// we only retrieve one cluster
+	res, err := mockStorage.ReadClusterListRecommendations([]string{string(clusterList[0])}, testdata.OrgID)
+	helpers.FailOnError(t, err)
+
+	expectList := []ctypes.RuleID{
+		testdata.Rule1CompositeID,
+		testdata.Rule2CompositeID,
+		testdata.Rule3CompositeID,
+	}
+
+	expectedClusterID := types.ClusterName(clusterList[0])
+	assert.Contains(t, res, expectedClusterID)
+	assert.ElementsMatch(t, expectList, res[expectedClusterID].Recommendations)
+	// trivial timestamp check
+	assert.True(t, res[expectedClusterID].CreatedAt.After(time.Now().Add(-time.Hour)))
+}
+
+// TestDBStorageReadClusterListRecommendationsGetMoreClusters loads several recommendations for the same org
+// but "simulates" a situation where we only get two of them from the AMS API
+func TestDBStorageReadClusterListRecommendationsGetMoreClusters(t *testing.T) {
+	mockStorage, closer := ira_helpers.MustGetMockStorage(t, true)
+	defer closer()
+
+	clusterList := make([]string, 3)
+	for i := range clusterList {
+		randomClusterID := testdata.GetRandomClusterID()
+
+		clusterList[i] = string(randomClusterID)
+
+		err := mockStorage.WriteRecommendationsForCluster(
+			testdata.OrgID, randomClusterID, testdata.Report3Rules,
+		)
+		helpers.FailOnError(t, err)
+	}
+
+	// we only retrieve two of the three clusters
+	res, err := mockStorage.ReadClusterListRecommendations([]string{string(clusterList[0]), string(clusterList[1])}, testdata.OrgID)
+	helpers.FailOnError(t, err)
+
+	expectRuleList := []ctypes.RuleID{
+		testdata.Rule1CompositeID,
+		testdata.Rule2CompositeID,
+		testdata.Rule3CompositeID,
+	}
+
+	expectedCluster1ID := types.ClusterName(clusterList[0])
+	expectedCluster2ID := types.ClusterName(clusterList[1])
+	assert.Contains(t, res, expectedCluster1ID)
+	assert.Contains(t, res, expectedCluster2ID)
+	assert.ElementsMatch(t, expectRuleList, res[expectedCluster1ID].Recommendations)
+	assert.ElementsMatch(t, expectRuleList, res[expectedCluster2ID].Recommendations)
+	assert.True(t, res[expectedCluster1ID].CreatedAt.After(time.Now().Add(-time.Hour)))
+	assert.True(t, res[expectedCluster2ID].CreatedAt.After(time.Now().Add(-time.Hour)))
+}
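
Usage note (illustrative, not part of the patch): a minimal Go client call against the new endpoint added above. The host, port, and the /api/v1/ prefix are assumptions about a typical local deployment of the aggregator, and the org_id/user_id values are placeholders; adjust all of them to match your configuration.

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
		"net/http"
	)

	func main() {
		// the endpoint expects a JSON array of cluster IDs in the POST body
		clusters := []string{"34c3ecc5-624a-49a5-bab8-4fdc5e51a266"}
		body, _ := json.Marshal(clusters)

		// org_id and user_id are path parameters; the values here are illustrative only
		url := "http://localhost:8080/api/v1/clusters/organizations/42/users/1/recommendations"
		resp, err := http.Post(url, "application/json", bytes.NewReader(body))
		if err != nil {
			fmt.Println("request failed:", err)
			return
		}
		defer func() { _ = resp.Body.Close() }()

		// the per-cluster recommendation lists are wrapped under the "clusters" key of the response
		var decoded map[string]interface{}
		_ = json.NewDecoder(resp.Body).Decode(&decoded)
		fmt.Println(resp.Status, decoded["clusters"])
	}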