diff --git a/.eslintignore b/.eslintignore deleted file mode 100644 index 728adc67dc8727..00000000000000 --- a/.eslintignore +++ /dev/null @@ -1,9 +0,0 @@ -**/dist/**/* -**/vendor/**/* -**/tests/**/fixtures/**/* -!.github -*.d.ts -config/chartcuterie/config.js -fixtures/profiles/embedded.js -fixtures/artifact_bundle_debug_ids/**/* -fixtures/artifact_bundle_duplicated_debug_ids/**/* diff --git a/.eslintrc.js b/.eslintrc.js deleted file mode 100644 index dfb3482ca6aa79..00000000000000 --- a/.eslintrc.js +++ /dev/null @@ -1,976 +0,0 @@ -/* eslint-env node */ - -const detectDeprecations = !!process.env.SENTRY_DETECT_DEPRECATIONS; - -const baseRules = { - /** - * Strict mode - */ - // https://eslint.org/docs/rules/strict - strict: ['error', 'global'], - - /** - * Variables - */ - // https://eslint.org/docs/rules/no-shadow-restricted-names - 'no-shadow-restricted-names': ['error'], - - /** - * Possible errors - */ - // https://eslint.org/docs/rules/no-cond-assign - 'no-cond-assign': ['error', 'always'], - - // https://eslint.org/docs/rules/no-alert - 'no-alert': ['error'], - - // https://eslint.org/docs/rules/no-constant-condition - 'no-constant-condition': ['warn'], - - // https://eslint.org/docs/rules/no-empty - 'no-empty': ['error'], - - // https://eslint.org/docs/rules/no-ex-assign - 'no-ex-assign': ['error'], - - // https://eslint.org/docs/rules/no-extra-boolean-cast - 'no-extra-boolean-cast': ['error'], - - // https://eslint.org/docs/rules/no-func-assign - 'no-func-assign': ['error'], - - // https://eslint.org/docs/rules/no-inner-declarations - 'no-inner-declarations': ['error'], - - // https://eslint.org/docs/rules/no-invalid-regexp - 'no-invalid-regexp': ['error'], - - // https://eslint.org/docs/rules/no-irregular-whitespace - 'no-irregular-whitespace': ['error'], - - // https://eslint.org/docs/rules/no-obj-calls - 'no-obj-calls': ['error'], - - // https://eslint.org/docs/rules/no-sparse-arrays - 'no-sparse-arrays': ['error'], - - // https://eslint.org/docs/rules/block-scoped-var - 'block-scoped-var': ['error'], - - /** - * Best practices - */ - // https://eslint.org/docs/rules/consistent-return - 'consistent-return': ['error'], - - // https://eslint.org/docs/rules/default-case - 'default-case': ['error'], - - // https://eslint.org/docs/rules/dot-notation - 'dot-notation': [ - 'error', - { - allowKeywords: true, - }, - ], - - // https://eslint.org/docs/rules/guard-for-in [REVISIT ME] - 'guard-for-in': ['off'], - - // https://eslint.org/docs/rules/no-caller - 'no-caller': ['error'], - - // https://eslint.org/docs/rules/no-eval - 'no-eval': ['error'], - - // https://eslint.org/docs/rules/no-extend-native - 'no-extend-native': ['error'], - - // https://eslint.org/docs/rules/no-extra-bind - 'no-extra-bind': ['error'], - - // https://eslint.org/docs/rules/no-fallthrough - 'no-fallthrough': ['error'], - - // https://eslint.org/docs/rules/no-floating-decimal - 'no-floating-decimal': ['error'], - - // https://eslint.org/docs/rules/no-implied-eval - 'no-implied-eval': ['error'], - - // https://eslint.org/docs/rules/no-lone-blocks - 'no-lone-blocks': ['error'], - - // https://eslint.org/docs/rules/no-loop-func - 'no-loop-func': ['error'], - - // https://eslint.org/docs/rules/no-multi-str - 'no-multi-str': ['error'], - - // https://eslint.org/docs/rules/no-native-reassign - 'no-native-reassign': ['error'], - - // https://eslint.org/docs/rules/no-new - 'no-new': ['error'], - - // https://eslint.org/docs/rules/no-new-func - 'no-new-func': ['error'], - - // 
https://eslint.org/docs/rules/no-new-wrappers - 'no-new-wrappers': ['error'], - - // https://eslint.org/docs/rules/no-octal - 'no-octal': ['error'], - - // https://eslint.org/docs/rules/no-octal-escape - 'no-octal-escape': ['error'], - - // https://eslint.org/docs/rules/no-param-reassign [REVISIT ME] - 'no-param-reassign': ['off'], - - // https://eslint.org/docs/rules/no-proto - 'no-proto': ['error'], - - // https://eslint.org/docs/rules/no-return-assign - 'no-return-assign': ['error'], - - // https://eslint.org/docs/rules/no-script-url - 'no-script-url': ['error'], - - // https://eslint.org/docs/rules/no-self-compare - 'no-self-compare': ['error'], - - // https://eslint.org/docs/rules/no-sequences - 'no-sequences': ['error'], - - // https://eslint.org/docs/rules/no-throw-literal - 'no-throw-literal': ['error'], - - // https://eslint.org/docs/rules/no-with - 'no-with': ['error'], - - // https://eslint.org/docs/rules/radix - radix: ['error'], - - // https://eslint.org/docs/rules/space-in-brackets.html - 'computed-property-spacing': ['error', 'never'], - - // https://eslint.org/docs/rules/space-in-brackets.html - 'array-bracket-spacing': ['error', 'never'], - - // https://eslint.org/docs/rules/space-in-brackets.html - 'object-curly-spacing': ['error', 'never'], - - // https://eslint.org/docs/rules/object-shorthand - 'object-shorthand': ['error', 'properties'], - - // https://eslint.org/docs/rules/space-infix-ops.html - 'space-infix-ops': ['error'], - - // https://eslint.org/docs/rules/vars-on-top - 'vars-on-top': ['off'], - - // https://eslint.org/docs/rules/wrap-iife - 'wrap-iife': ['error', 'any'], - - // https://eslint.org/docs/rules/array-callback-return - 'array-callback-return': ['error'], - - // https://eslint.org/docs/rules/yoda - yoda: ['error'], - - // https://eslint.org/docs/rules/no-else-return - 'no-else-return': ['error', {allowElseIf: false}], - - // https://eslint.org/docs/rules/require-await - 'require-await': ['error'], - - // https://eslint.org/docs/rules/multiline-comment-style - 'multiline-comment-style': ['error', 'separate-lines'], - - // https://eslint.org/docs/rules/spaced-comment - 'spaced-comment': [ - 'error', - 'always', - { - line: {markers: ['/'], exceptions: ['-', '+']}, - block: {exceptions: ['*'], balanced: true}, - }, - ], -}; - -const reactReactRules = { - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/display-name.md - 'react/display-name': ['off'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-multi-comp.md - 'react/no-multi-comp': [ - 'off', - { - ignoreStateless: true, - }, - ], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-fragments.md - 'react/jsx-fragments': ['error', 'element'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-handler-names.md - // Ensures that any component or prop methods used to handle events are correctly prefixed. - 'react/jsx-handler-names': [ - 'off', - { - eventHandlerPrefix: 'handle', - eventHandlerPropPrefix: 'on', - }, - ], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-key.md - 'react/jsx-key': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-no-undef.md - 'react/jsx-no-undef': ['error'], - - // Disabled as we use the newer JSX transform babel plugin. 
- // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-uses-react.md - 'react/jsx-uses-react': ['off'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-uses-vars.md - 'react/jsx-uses-vars': ['error'], - - /** - * Deprecation related rules - */ - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-deprecated.md - 'react/no-deprecated': ['error'], - - // Prevent usage of the return value of React.render - // deprecation: https://facebook.github.io/react/docs/react-dom.html#render - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-render-return-value.md - 'react/no-render-return-value': ['error'], - - // Children should always be actual children, not passed in as a prop. - // When using JSX, the children should be nested between the opening and closing tags. When not using JSX, the children should be passed as additional arguments to React.createElement. - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-children-prop.md - 'react/no-children-prop': ['error'], - - // This rule helps prevent problems caused by using children and the dangerouslySetInnerHTML prop at the same time. - // React will throw a warning if this rule is ignored. - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-danger-with-children.md - 'react/no-danger-with-children': ['error'], - - // Prevent direct mutation of this.state - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-direct-mutation-state.md - 'react/no-direct-mutation-state': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-did-mount-set-state.md - 'react/no-did-mount-set-state': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-did-update-set-state.md" - 'react/no-did-update-set-state': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-redundant-should-component-update.md - 'react/no-redundant-should-component-update': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-typos.md - 'react/no-typos': ['error'], - - // Prevent invalid characters from appearing in markup - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-unescaped-entities.md - 'react/no-unescaped-entities': ['off'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-unknown-property.md - 'react/no-unknown-property': ['error', {ignore: ['css']}], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-unused-prop-types.md - // Disabled since this currently fails to correctly detect a lot of - // typescript prop type usage. - 'react/no-unused-prop-types': ['off'], - - // We do not need proptypes since we're using typescript - 'react/prop-types': ['off'], - - // When writing the render method in a component it is easy to forget to return the JSX content. - // This rule will warn if the return statement is missing. - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/require-render-return.md - 'react/require-render-return': ['error'], - - // Disabled as we are using the newer JSX transform babel plugin. 
- // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/react-in-jsx-scope.md - 'react/react-in-jsx-scope': ['off'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/self-closing-comp.md - 'react/self-closing-comp': ['error'], - - // This also causes issues with typescript - // See: https://github.com/yannickcr/eslint-plugin-react/issues/2066 - // - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/sort-comp.md - 'react/sort-comp': ['warn'], - - // Disabled because of prettier - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/wrap-multilines.md - 'react/jsx-wrap-multilines': ['off'], - - // Consistent (never add ={true}) - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-boolean-value.md - 'react/jsx-boolean-value': ['error', 'never'], - - // Consistent function component declaration styles - // https://github.com/jsx-eslint/eslint-plugin-react/blob/master/docs/rules/function-component-definition.md - 'react/function-component-definition': [ - 'error', - {namedComponents: 'function-declaration'}, - ], -}; - -const reactImportRules = { - // Not recommended to be enabled with typescript-eslint - // https://typescript-eslint.io/linting/troubleshooting/performance-troubleshooting/#eslint-plugin-import - 'import/no-unresolved': ['off'], - 'import/named': ['off'], - 'import/default': ['off'], - 'import/export': ['off'], - 'import/no-named-as-default-member': ['off'], - - // Redflags - // do not allow a default import name to match a named export (airbnb: error) - // Issue with `DefaultIssuePlugin` and `app/plugins/index` - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-named-as-default.md - 'import/no-named-as-default': ['off'], - - // disallow use of jsdoc-marked-deprecated imports - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-deprecated.md - 'import/no-deprecated': ['off'], - - // Forbid mutable exports (airbnb: error) - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-mutable-exports.md - // TODO: enable? 
- 'import/no-mutable-exports': ['off'], - - // disallow require() - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-commonjs.md - 'import/no-commonjs': ['off'], - - // disallow AMD require/define - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-amd.md - 'import/no-amd': ['error'], - - // disallow duplicate imports - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-duplicates.md - 'import/no-duplicates': ['error'], - - // disallow namespace imports - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-namespace.md - 'import/no-namespace': ['off'], - - // Ensure consistent use of file extension within the import path - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/extensions.md - // TODO this fucks up getsentry - 'import/extensions': [ - 'off', - 'always', - { - js: 'never', - jsx: 'never', - }, - ], - - // Require a newline after the last import/require in a group - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/newline-after-import.md - 'import/newline-after-import': ['error'], - - // Require modules with a single export to use a default export (airbnb: error) - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/prefer-default-export.md - 'import/prefer-default-export': ['off'], - - // Restrict which files can be imported in a given folder - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-restricted-paths.md - 'import/no-restricted-paths': ['off'], - - // Forbid modules to have too many dependencies - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/max-dependencies.md - 'import/max-dependencies': ['off', {max: 10}], - - // Forbid import of modules using absolute paths - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-absolute-path.md - 'import/no-absolute-path': ['error'], - - // Forbid require() calls with expressions (airbnb: error) - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-dynamic-require.md - 'import/no-dynamic-require': ['off'], - - // Use webpack default chunk names - 'import/dynamic-import-chunkname': ['off'], - - // prevent importing the submodules of other modules - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-internal-modules.md - 'import/no-internal-modules': [ - 'off', - { - allow: [], - }, - ], - - // Warn if a module could be mistakenly parsed as a script by a consumer - // leveraging Unambiguous JavaScript Grammar - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/unambiguous.md - // this should not be enabled until this proposal has at least been *presented* to TC39. - // At the moment, it"s not a thing. - 'import/unambiguous': ['off'], - - // Forbid Webpack loader syntax in imports - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-webpack-loader-syntax.md - 'import/no-webpack-loader-syntax': ['error'], - - // Prevent unassigned imports - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-unassigned-import.md - // importing for side effects is perfectly acceptable, if you need side effects. 
- 'import/no-unassigned-import': ['off'], - - // Prevent importing the default as if it were named - // https://github.com/benmosher/eslint-plugin-import/blob/master/docs/rules/no-named-default.md - 'import/no-named-default': ['error'], - - // Reports if a module"s default export is unnamed - // https://github.com/benmosher/eslint-plugin-import/blob/d9b712ac7fd1fddc391f7b234827925c160d956f/docs/rules/no-anonymous-default-export.md - 'import/no-anonymous-default-export': [ - 'error', - { - allowArray: false, - allowArrowFunction: false, - allowAnonymousClass: false, - allowAnonymousFunction: false, - allowCallExpression: true, - allowLiteral: false, - allowObject: false, - }, - ], -}; - -const reactJestRules = { - 'jest/no-disabled-tests': 'error', -}; - -const reactRules = { - ...reactReactRules, - ...reactImportRules, - ...reactJestRules, - /** - * React hooks - */ - 'react-hooks/exhaustive-deps': 'error', - // Biome not yet enforcing all parts of this rule https://github.com/biomejs/biome/issues/1984 - 'react-hooks/rules-of-hooks': 'error', - - /** - * Custom - */ - // highlights literals in JSX components w/o translation tags - 'getsentry/jsx-needs-il8n': ['off'], - 'testing-library/render-result-naming-convention': 'off', - 'testing-library/no-unnecessary-act': 'off', - - // Disabled as we have many tests which render as simple validations - 'jest/expect-expect': 'off', - - // Disabled as we have some comment out tests that cannot be - // uncommented due to typescript errors. - 'jest/no-commented-out-tests': 'off', - - // Disabled as we do sometimes have conditional expects - 'jest/no-conditional-expect': 'off', - - // Useful for exporting some test utilities - 'jest/no-export': 'off', - - 'typescript-sort-keys/interface': [ - 'error', - 'asc', - {caseSensitive: true, natural: false, requiredFirst: true}, - ], -}; - -const appRules = { - /** - * emotion rules for v10 - * - * This probably aren't as necessary anymore, but let's remove when we move to v11 - */ - '@emotion/jsx-import': 'off', - '@emotion/no-vanilla': 'error', - '@emotion/import-from-emotion': 'error', - '@emotion/styled-import': 'error', - - // no-undef is redundant with typescript as tsc will complain - // A downside is that we won't get eslint errors about it, but your editors should - // support tsc errors so.... - // https://eslint.org/docs/rules/no-undef - 'no-undef': 'off', - - // Let formatter handle this - 'arrow-body-style': 'off', - - /** - * Need to use typescript version of these rules - * https://eslint.org/docs/rules/no-shadow - */ - 'no-shadow': 'off', - '@typescript-eslint/no-shadow': 'error', - - // This only override the `args` rule (which is "none"). There are too many errors and it's difficult to manually - // fix them all, so we'll have to incrementally update. - // https://eslint.org/docs/rules/no-unused-vars - 'no-unused-vars': 'off', - '@typescript-eslint/no-unused-vars': [ - 'error', - { - vars: 'all', - args: 'all', - // TODO(scttcper): We could enable this to enforce catch (error) - // https://eslint.org/docs/latest/rules/no-unused-vars#caughterrors - caughtErrors: 'none', - - // Ignore vars that start with an underscore - // e.g. 
if you want to omit a property using object spread: - // - // const {name: _name, ...props} = this.props; - // - varsIgnorePattern: '^_', - argsIgnorePattern: '^_', - destructuredArrayIgnorePattern: '^_', - }, - ], - - // https://eslint.org/docs/rules/no-use-before-define - 'no-use-before-define': 'off', - // This seems to have been turned on while previously it had been off - '@typescript-eslint/no-use-before-define': ['off'], - - /** - * Restricted imports, e.g. deprecated libraries, etc - * - * See: https://eslint.org/docs/rules/no-restricted-imports - */ - 'no-restricted-imports': [ - 'error', - { - paths: [ - { - name: 'enzyme', - message: - 'Please import from `sentry-test/enzyme` instead. See: https://github.com/getsentry/frontend-handbook#undefined-theme-properties-in-tests for more information', - }, - { - name: '@testing-library/react', - message: - 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', - }, - { - name: '@testing-library/react-hooks', - message: - 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', - }, - { - name: '@testing-library/user-event', - message: - 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', - }, - { - name: '@sentry/browser', - message: - 'Please import from `@sentry/react` to ensure consistency throughout the codebase.', - }, - { - name: 'marked', - message: - "Please import marked from 'app/utils/marked' so that we can ensure sanitation of marked output", - }, - - { - name: 'lodash', - message: - "Please import lodash utilities individually. e.g. `import isEqual from 'lodash/isEqual';`. See https://github.com/getsentry/frontend-handbook#lodash from for information", - }, - { - name: 'lodash/get', - message: - 'Optional chaining `?.` and nullish coalescing operators `??` are available and preferred over using `lodash/get`. See https://github.com/getsentry/frontend-handbook#new-syntax for more information', - }, - { - name: 'react-bootstrap', - message: - 'Avoid usage of any react-bootstrap components as it will soon be removed', - }, - { - name: 'sentry/utils/theme', - importNames: ['lightColors', 'darkColors'], - message: - "'lightColors' and 'darkColors' exports intended for use in Storybook only. Instead, use theme prop from emotion or the useTheme hook.", - }, - { - name: 'react-router', - importNames: ['withRouter'], - message: - "Use 'useLocation', 'useParams', 'useNavigate', 'useRoutes' from sentry/utils instead.", - }, - { - name: 'sentry/utils/withSentryRouter', - importNames: ['withSentryRouter'], - message: - "Use 'useLocation', 'useParams', 'useNavigate', 'useRoutes' from sentry/utils instead.", - }, - ], - }, - ], - - /** - * Better import sorting - */ - 'sort-imports': 'off', - 'import/order': 'off', - 'simple-import-sort/imports': [ - 'error', - { - groups: [ - // Side effect imports. - ['^\\u0000'], - - // Node.js builtins. - // biome-ignore lint/correctness/noNodejsModules: Need to get the list of things! - [`^(${require('node:module').builtinModules.join('|')})(/|$)`], - - // Packages. `react` related packages come first. - ['^react', '^@?\\w'], - - // Test should be separate from the app - ['^(sentry-test|getsentry-test)(/.*|$)'], - - // Internal packages. - ['^(sentry-locale|sentry-images)(/.*|$)'], - - ['^(getsentry-images)(/.*|$)'], - - ['^(app|sentry)(/.*|$)'], - - // Getsentry packages. 
- ['^(admin|getsentry)(/.*|$)'], - - // Style imports. - ['^.+\\.less$'], - - // Parent imports. Put `..` last. - ['^\\.\\.(?!/?$)', '^\\.\\./?$'], - - // Other relative imports. Put same-folder imports and `.` last. - ['^\\./(?=.*/)(?!/?$)', '^\\.(?!/?$)', '^\\./?$'], - ], - }, - ], - - 'sentry/no-digits-in-tn': ['error'], - - 'sentry/no-dynamic-translations': ['error'], - - // https://github.com/xojs/eslint-config-xo-typescript/blob/9791a067d6a119a21a4db72c02f1da95e25ffbb6/index.js#L95 - '@typescript-eslint/no-restricted-types': [ - 'error', - { - types: { - // TODO(scttcper): Turn object on to make our types more strict - // object: { - // message: 'The `object` type is hard to use. Use `Record` instead. See: https://github.com/typescript-eslint/typescript-eslint/pull/848', - // fixWith: 'Record' - // }, - Buffer: { - message: - 'Use Uint8Array instead. See: https://sindresorhus.com/blog/goodbye-nodejs-buffer', - suggest: ['Uint8Array'], - }, - '[]': "Don't use the empty array type `[]`. It only allows empty arrays. Use `SomeType[]` instead.", - '[[]]': - "Don't use `[[]]`. It only allows an array with a single element which is an empty array. Use `SomeType[][]` instead.", - '[[[]]]': "Don't use `[[[]]]`. Use `SomeType[][][]` instead.", - }, - }, - ], - // TODO(scttcper): Turn no-empty-object-type on to make our types more strict - // '@typescript-eslint/no-empty-object-type': 'error', - // TODO(scttcper): Turn no-function on to make our types more strict - // '@typescript-eslint/no-unsafe-function-type': 'error', - '@typescript-eslint/no-wrapper-object-types': 'error', - - // Naming convention enforcements - '@typescript-eslint/naming-convention': [ - 'error', - { - selector: 'typeLike', - format: ['PascalCase'], - leadingUnderscore: 'allow', - }, - { - selector: 'enumMember', - format: ['UPPER_CASE'], - }, - ], - - // Don't allow lookbehind expressions in regexp as they crash safari - // We've accidentally used lookbehinds a few times and caused problems. - 'no-lookahead-lookbehind-regexp/no-lookahead-lookbehind-regexp': [ - 'error', - 'no-lookbehind', - 'no-negative-lookbehind', - ], -}; - -const strictRules = { - // https://eslint.org/docs/rules/no-console - 'no-console': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-is-mounted.md - 'react/no-is-mounted': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-find-dom-node.md - // Recommended to use callback refs instead - 'react/no-find-dom-node': ['error'], - - // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-string-refs.md - // This is now considered legacy, callback refs preferred - 'react/no-string-refs': ['error'], - - 'jest/no-large-snapshots': ['error', {maxSize: 2000}], - - 'sentry/no-styled-shortcut': ['error'], -}; - -const extendsList = [ - 'plugin:jest/recommended', - 'plugin:jest-dom/recommended', - 'plugin:import/typescript', -]; -if (detectDeprecations) { - extendsList.push('plugin:deprecation/recommended'); -} - -module.exports = { - root: true, - extends: extendsList, - - plugins: [ - 'jest-dom', - 'testing-library', - 'typescript-sort-keys', - 'react-hooks', - '@typescript-eslint', - '@emotion', - 'import', - 'react', - 'sentry', - 'simple-import-sort', - 'no-lookahead-lookbehind-regexp', - ], - - parser: '@typescript-eslint/parser', - - parserOptions: detectDeprecations - ? 
{ - warnOnUnsupportedTypeScriptVersion: false, - ecmaVersion: 6, - sourceType: 'module', - ecmaFeatures: { - jsx: true, - modules: true, - legacyDecorators: true, - }, - project: './tsconfig.json', - } - : { - warnOnUnsupportedTypeScriptVersion: false, - ecmaVersion: 6, - sourceType: 'module', - ecmaFeatures: { - jsx: true, - modules: true, - legacyDecorators: true, - }, - }, - - env: { - browser: true, - es6: true, - jest: true, - jquery: true, // hard-loaded into vendor.js - }, - - globals: { - require: false, - expect: false, - MockApiClient: true, - tick: true, - jest: true, - }, - - settings: { - react: { - version: '17.0.2', // React version, can not `detect` because of getsentry - }, - 'import/parsers': { - '@typescript-eslint/parser': ['.ts', '.tsx'], - }, - 'import/resolver': { - typescript: {}, - }, - 'import/extensions': ['.js', '.jsx'], - }, - - rules: { - ...baseRules, - ...reactRules, - ...appRules, - ...strictRules, - 'react-hooks/rules-of-hooks': 'error', - 'react-hooks/exhaustive-deps': [ - 'error', - {additionalHooks: '(useEffectAfterFirstRender|useMemoWithPrevious)'}, - ], - 'no-restricted-imports': [ - 'error', - { - patterns: [ - { - group: ['sentry/components/devtoolbar/*'], - message: 'Do not depend on toolbar internals', - }, - ], - paths: [ - { - name: '@testing-library/react', - message: - 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', - }, - { - name: '@testing-library/react-hooks', - message: - 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', - }, - { - name: '@testing-library/user-event', - message: - 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', - }, - { - name: '@sentry/browser', - message: - 'Please import from `@sentry/react` to ensure consistency throughout the codebase.', - }, - { - name: 'marked', - message: - "Please import marked from 'app/utils/marked' so that we can ensure sanitation of marked output", - }, - { - name: 'lodash', - message: - "Please import lodash utilities individually. e.g. `import isEqual from 'lodash/isEqual';`. See https://github.com/getsentry/frontend-handbook#lodash from for information", - }, - { - name: 'lodash/get', - message: - 'Optional chaining `?.` and nullish coalescing operators `??` are available and preferred over using `lodash/get`. See https://github.com/getsentry/frontend-handbook#new-syntax for more information', - }, - { - name: 'sentry/utils/theme', - importNames: ['lightColors', 'darkColors'], - message: - "'lightColors' and 'darkColors' exports intended for use in Storybook only. 
Instead, use theme prop from emotion or the useTheme hook.", - }, - { - name: 'react-router', - importNames: ['withRouter'], - message: - "Use 'useLocation', 'useParams', 'useNavigate', 'useRoutes' from sentry/utils instead.", - }, - { - name: 'sentry/utils/withSentryRouter', - importNames: ['withSentryRouter'], - message: - "Use 'useLocation', 'useParams', 'useNavigate', 'useRoutes' from sentry/utils instead.", - }, - { - name: 'qs', - message: 'Please use query-string instead of qs', - }, - { - name: 'moment', - message: 'Please import moment-timezone instead of moment', - }, - ], - }, - ], - - // TODO(@anonrig): Remove this from eslint-sentry-config - 'space-infix-ops': 'off', - 'object-shorthand': 'off', - 'object-curly-spacing': 'off', - 'import/no-amd': 'off', - 'no-danger-with-children': 'off', - 'no-fallthrough': 'off', - 'no-obj-calls': 'off', - 'array-bracket-spacing': 'off', - 'computed-property-spacing': 'off', - 'react/no-danger-with-children': 'off', - 'jest/no-disabled-tests': 'off', - }, - // JSON file formatting is handled by Biome. ESLint should not be linting - // and formatting these files. - ignorePatterns: ['*.json'], - overrides: [ - { - files: ['static/app/components/devtoolbar/**/*.{ts,tsx}'], - rules: { - 'no-restricted-imports': [ - 'error', - { - paths: [ - { - name: 'sentry/utils/queryClient', - message: - 'Import from `@tanstack/react-query` and `./hooks/useFetchApiData` or `./hooks/useFetchInfiniteApiData` instead.', - }, - ], - }, - ], - }, - }, - { - files: ['static/**/*.spec.{ts,js}', 'tests/js/**/*.{ts,js}'], - extends: ['plugin:testing-library/react', ...extendsList], - rules: { - ...baseRules, - ...reactRules, - ...appRules, - ...strictRules, - // TODO(@anonrig): Remove this from eslint-sentry-config - 'space-infix-ops': 'off', - 'object-shorthand': 'off', - 'object-curly-spacing': 'off', - 'import/no-amd': 'off', - 'no-danger-with-children': 'off', - 'no-fallthrough': 'off', - 'no-obj-calls': 'off', - 'array-bracket-spacing': 'off', - 'computed-property-spacing': 'off', - 'react/no-danger-with-children': 'off', - 'jest/no-disabled-tests': 'off', - }, - }, - { - // We specify rules explicitly for the sdk-loader here so we do not have - // eslint ignore comments included in the source file, which is consumed - // by users. 
- files: ['**/js-sdk-loader.ts'], - rules: { - 'no-console': 'off', - }, - }, - ], -}; diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9695c88749836f..098974ecd3aa4a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -122,6 +122,7 @@ pyproject.toml @getsentry/owners-pytho babel.config.* @getsentry/owners-js-build biome.json @getsentry/owners-js-build build-utils/ @getsentry/owners-js-build +eslint.config.mjs @getsentry/owners-js-build jest.config.ts @getsentry/owners-js-build tsconfig.* @getsentry/owners-js-build webpack.config.* @getsentry/owners-js-build @@ -348,12 +349,23 @@ tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py @ge ## DevToolbar -/src/sentry/templates/sentry/toolbar/ @getsentry/replay -/src/sentry/toolbar/ @getsentry/replay -/tests/sentry/toolbar/ @getsentry/replay +/src/sentry/templates/sentry/toolbar/ @getsentry/replay +/src/sentry/toolbar/ @getsentry/replay +/tests/sentry/toolbar/ @getsentry/replay +/static/app/components/devtoolbar/ @getsentry/replay-frontend +/src/sentry/middleware/devtoolbar.py @getsentry/replay-backend +/tests/sentry/middleware/test_devtoolbar.py @getsentry/replay-backend ## End of DevToolbar +## Frontend +/static/app/components/analyticsArea.spec.tsx @getsentry/app-frontend +/static/app/components/analyticsArea.tsx @getsentry/app-frontend +/static/app/components/events/interfaces/ @getsentry/app-frontend +/static/app/components/forms/ @getsentry/app-frontend +## End of Frontend + + ## Integrations /src/sentry/sentry_apps/ @getsentry/product-owners-settings-integrations @getsentry/ecosystem /tests/sentry/sentry_apps @getsentry/product-owners-settings-integrations @getsentry/ecosystem @@ -459,13 +471,10 @@ tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py @ge ## Telemetry Experience -/src/sentry/api/endpoints/organization_ddm.py @getsentry/telemetry-experience -/tests/sentry/api/endpoints/test_organization_ddm_meta.py @getsentry/telemetry-experience /src/sentry/api/endpoints/organization_metric* @getsentry/telemetry-experience /tests/sentry/api/endpoints/test_organization_metric* @getsentry/telemetry-experience /src/sentry/api/endpoints/organization_sessions.py @getsentry/telemetry-experience /tests/snuba/api/endpoints/test_organization_sessions.py @getsentry/telemetry-experience -/src/sentry/api/endpoints/projects_metrics.py @getsentry/telemetry-experience /tests/sentry/api/endpoints/test_projects_metrics_visibility.py @getsentry/telemetry-experience /src/sentry/api/endpoints/organization_onboarding* @getsentry/telemetry-experience /tests/sentry/api/endpoints/test_organization_onboarding* @getsentry/telemetry-experience @@ -481,11 +490,8 @@ tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py @ge /src/sentry/sentry_metrics/visibility/ @getsentry/telemetry-experience /tests/sentry/sentry_metrics/visibility/ @getsentry/telemetry-experience /src/sentry/sentry_metrics/extraction_rules.py @getsentry/telemetry-experience -/tests/sentry/sentry_metrics/test_extraction_rules.py @getsentry/telemetry-experience /src/sentry/snuba/metrics/ @getsentry/telemetry-experience /tests/sentry/snuba/metrics/ @getsentry/telemetry-experience -/src/sentry/relay/config/metric_extraction.py @getsentry/telemetry-experience -/tests/sentry/relay/config/test_metric_extraction.py @getsentry/telemetry-experience /static/app/actionCreators/metrics.spec.tsx @getsentry/telemetry-experience /static/app/actionCreators/metrics.tsx @getsentry/telemetry-experience @@ -497,8 +503,6 @@ 
tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py @ge /static/app/types/project.tsx @getsentry/telemetry-experience /static/app/utils/metrics/ @getsentry/telemetry-experience /static/app/views/metrics/ @getsentry/telemetry-experience -/static/app/views/performance/landing/dynamicSamplingMetricsAccuracy.spec.tsx @getsentry/telemetry-experience -/static/app/views/performance/landing/dynamicSamplingMetricsAccuracyAlert.tsx @getsentry/telemetry-experience /static/app/views/settings/project/dynamicSampling/ @getsentry/telemetry-experience /static/app/views/settings/dynamicSampling/ @getsentry/telemetry-experience /static/app/views/settings/projectMetrics/* @getsentry/telemetry-experience @@ -631,3 +635,7 @@ tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py @ge # Taskworkers /src/sentry/taskworker/ @getsentry/taskworker /tests/sentry/taskworker/ @getsentry/taskworker + +# Tempest +/src/sentry/tempest/ @getsentry/gdx +/tests/sentry/tempest/ @getsentry/gdx diff --git a/.github/workflows/frontend-lint-burndown.yml b/.github/workflows/frontend-lint-burndown.yml index 21279bc7ed19cb..e9f99589e8db9b 100644 --- a/.github/workflows/frontend-lint-burndown.yml +++ b/.github/workflows/frontend-lint-burndown.yml @@ -17,9 +17,9 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - name: Install dependencies & inject eslint-plugin-deprecation + - name: Install dependencies id: dependencies - run: yarn add --dev eslint-plugin-deprecation + run: yarn install # Setup custom tsc matcher, see https://github.com/actions/setup-node/issues/97 - name: setup matchers diff --git a/.github/workflows/shuffle-tests.yml b/.github/workflows/shuffle-tests.yml index 43be8dbf5ff293..4b5013a7310045 100644 --- a/.github/workflows/shuffle-tests.yml +++ b/.github/workflows/shuffle-tests.yml @@ -28,18 +28,22 @@ jobs: name: run backend tests runs-on: ubuntu-24.04 timeout-minutes: 90 + permissions: + contents: read + id-token: write strategy: # This helps not having to run multiple jobs because one fails, thus, reducing resource usage # and reducing the risk that one of many runs would turn red again (read: intermittent tests) fail-fast: false matrix: # XXX: When updating this, make sure you also update MATRIX_INSTANCE_TOTAL. - instance: [0, 1, 2, 3, 4, 5, 6] + instance: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] pg-version: ['14'] env: - # XXX: MATRIX_INSTANCE_TOTAL must be hardcoded to the length of strategy.matrix.instance. - MATRIX_INSTANCE_TOTAL: 7 + # XXX: `MATRIX_INSTANCE_TOTAL` must be hardcoded to the length of `strategy.matrix.instance`. + # If this increases, make sure to also increase `flags.backend.after_n_builds` in `codecov.yml`. + MATRIX_INSTANCE_TOTAL: 11 steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 @@ -48,7 +52,10 @@ jobs: uses: ./.github/actions/setup-sentry id: setup with: + redis_cluster: true + kafka: true snuba: true + symbolicator: true # Right now, we run so few bigtable related tests that the # overhead of running bigtable in all backend tests # is way smaller than the time it would take to run in its own job. 
diff --git a/.github/workflows/test_docker_compose_acceptance.yml b/.github/workflows/test_devservices_acceptance.yml similarity index 87% rename from .github/workflows/test_docker_compose_acceptance.yml rename to .github/workflows/test_devservices_acceptance.yml index aa81794eb085ba..279c41114d277c 100644 --- a/.github/workflows/test_docker_compose_acceptance.yml +++ b/.github/workflows/test_devservices_acceptance.yml @@ -1,10 +1,10 @@ # Also note that this name *MUST* match the filename because GHA # only provides the workflow name (https://docs.github.com/en/free-pro-team@latest/actions/reference/environment-variables#default-environment-variables) # and GH APIs only support querying by workflow *FILENAME* (https://developer.github.com/v3/actions/workflows/#get-a-workflow) -name: test-docker-compose-acceptance +name: test-devservices-acceptance on: schedule: - - cron: '0 0 * * *' + - cron: '0 * * * *' # Cancel in progress workflows on pull_requests. # https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value @@ -16,10 +16,13 @@ concurrency: env: SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3 NODE_OPTIONS: '--max-old-space-size=4096' + USE_NEW_DEVSERVICES: 1 + IS_DEV: 1 + CHARTCUTERIE_CONFIG_PATH: ${{ github.workspace }}/config/chartcuterie jobs: - docker-compose-acceptance: - name: docker-compose-acceptance + devservices-acceptance: + name: devservices-acceptance runs-on: ubuntu-24.04 timeout-minutes: 30 permissions: @@ -89,15 +92,9 @@ jobs: uses: ./.github/actions/test-setup-sentry-devservices id: setup - - name: copy chartcuterie config to devservices chartcuterie directory - run: | - ls config/chartcuterie - cp -r config/chartcuterie devservices - - name: Bring up devservices run: | - docker network create sentry - docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse chartcuterie + devservices up --mode acceptance-ci - name: Run acceptance tests (#${{ steps.setup.outputs.matrix-instance-number }} of ${{ steps.setup.outputs.matrix-instance-total }}) run: make run-acceptance @@ -127,14 +124,13 @@ jobs: - name: Inspect failure if: failure() run: | - docker compose -f devservices/docker-compose-testing.yml ps - docker compose -f devservices/docker-compose-testing.yml logs --tail 1000 + devservices logs - docker-compose-acceptance-required-checks: + devservices-acceptance-required-checks: # this is a required check so we need this job to always run and report a status. if: always() - name: Docker Compose Acceptance - needs: [docker-compose-acceptance] + name: Devservices Acceptance + needs: [devservices-acceptance] runs-on: ubuntu-24.04 timeout-minutes: 3 steps: diff --git a/.github/workflows/test_docker_compose_backend.yml b/.github/workflows/test_devservices_backend.yml similarity index 75% rename from .github/workflows/test_docker_compose_backend.yml rename to .github/workflows/test_devservices_backend.yml index 179b1efee17362..ec3f0ae5645eab 100644 --- a/.github/workflows/test_docker_compose_backend.yml +++ b/.github/workflows/test_devservices_backend.yml @@ -1,9 +1,8 @@ -name: test-docker-compose-backend +name: test-devservices-backend on: schedule: - - cron: '0 0 * * *' - + - cron: '0 * * * *' # Cancel in progress workflows on pull_requests. 
# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value concurrency: @@ -13,9 +12,11 @@ concurrency: # hack for https://github.com/actions/cache/issues/810#issuecomment-1222550359 env: SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3 + USE_NEW_DEVSERVICES: 1 + IS_DEV: 1 jobs: - docker-compose-api-docs: + devservices-api-docs: name: api docs test runs-on: ubuntu-24.04 steps: @@ -31,9 +32,7 @@ jobs: id: setup - name: Bring up devservices - run: | - docker network create sentry - docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse + run: devservices up - name: Run API docs tests # install ts-node for ts build scripts to execute properly without potentially installing @@ -44,11 +43,9 @@ jobs: - name: Inspect failure if: failure() - run: | - docker compose -f devservices/docker-compose-testing.yml ps - docker compose -f devservices/docker-compose-testing.yml logs --tail 1000 + run: devservices logs - docker-compose-backend-test: + devservices-backend-test: name: backend test runs-on: ubuntu-24.04 timeout-minutes: 60 @@ -73,13 +70,11 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Setup sentry env + id: setup uses: ./.github/actions/test-setup-sentry-devservices - name: Bring up devservices - run: | - docker network create sentry - echo "BIGTABLE_EMULATOR_HOST=127.0.0.1:8086" >> $GITHUB_ENV - docker compose -f devservices/docker-compose-testing.yml up -d + run: devservices up --mode backend-ci - name: Run backend test (${{ steps.setup.outputs.matrix-instance-number }} of ${{ steps.setup.outputs.matrix-instance-total }}) run: | @@ -107,11 +102,9 @@ jobs: - name: Inspect failure if: failure() - run: | - docker compose -f devservices/docker-compose-testing.yml ps - docker compose -f devservices/docker-compose-testing.yml logs --tail 1000 + run: devservices logs - docker-compose-backend-migration-tests: + devservices-backend-migration-tests: name: backend migration tests runs-on: ubuntu-24.04 timeout-minutes: 30 @@ -127,9 +120,7 @@ jobs: id: setup - name: Bring up devservices - run: | - docker network create sentry - docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse + run: devservices up - name: run tests run: | @@ -146,11 +137,9 @@ jobs: - name: Inspect failure if: failure() - run: | - docker compose -f devservices/docker-compose-testing.yml ps - docker compose -f devservices/docker-compose-testing.yml logs --tail 1000 + run: devservices logs - docker-compose-cli: + devservices-cli: name: cli test runs-on: ubuntu-24.04 timeout-minutes: 10 @@ -165,9 +154,7 @@ jobs: id: setup - name: Bring up devservices - run: | - docker network create sentry - docker compose -f devservices/docker-compose-testing.yml up -d redis postgres + run: devservices up --mode migrations - name: Run test run: | @@ -184,11 +171,9 @@ jobs: - name: Inspect failure if: failure() - run: | - docker compose -f devservices/docker-compose-testing.yml ps - docker compose -f devservices/docker-compose-testing.yml logs --tail 1000 + run: devservices logs - docker-compose-migration: + devservices-migration: name: check migration runs-on: ubuntu-24.04 strategy: @@ -204,9 +189,7 @@ jobs: id: setup - name: Bring up devservices - run: | - docker network create sentry - docker compose -f devservices/docker-compose-testing.yml up -d redis postgres + run: devservices up --mode migrations - name: Migration & lockfile checks env: @@ -217,11 +200,9 @@ jobs: - name: Inspect failure if: failure() 
- run: | - docker compose -f devservices/docker-compose-testing.yml ps - docker compose -f devservices/docker-compose-testing.yml logs --tail 1000 + run: devservices logs - docker-compose-monolith-dbs: + devservices-monolith-dbs: name: monolith-dbs test runs-on: ubuntu-24.04 timeout-minutes: 20 @@ -236,9 +217,7 @@ jobs: id: setup - name: Bring up devservices - run: | - docker network create sentry - docker compose -f devservices/docker-compose-testing.yml up -d redis postgres + run: devservices up --mode migrations - name: Run test run: | @@ -265,24 +244,22 @@ jobs: - name: Inspect failure if: failure() - run: | - docker compose -f devservices/docker-compose-testing.yml ps - docker compose -f devservices/docker-compose-testing.yml logs --tail 1000 + run: devservices logs # This check runs once all dependent jobs have passed # It symbolizes that all required Backend checks have succesfully passed (Or skipped) # This step is the only required backend check - docker-compose-backend-required-check: + devservices-backend-required-check: needs: [ - docker-compose-api-docs, - docker-compose-backend-test, - docker-compose-backend-migration-tests, - docker-compose-cli, - docker-compose-migration, - docker-compose-monolith-dbs, + devservices-api-docs, + devservices-backend-test, + devservices-backend-migration-tests, + devservices-cli, + devservices-migration, + devservices-monolith-dbs, ] - name: Docker Compose Backend + name: Devservices Backend # This is necessary since a failed/skipped dependent job would cause this job to be skipped if: always() runs-on: ubuntu-24.04 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bd07e25b9a6872..4a6418b9c0f0f1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -117,7 +117,7 @@ repos: - id: eslint name: eslint language: system - files: \.[jt]sx?$ + files: \.(ts|js|tsx|jsx|mjs)$ entry: ./node_modules/.bin/eslint --quiet --fix - id: stylelint diff --git a/CHANGES b/CHANGES index 84afe672222eeb..0655a554ced755 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,99 @@ +24.12.1 +------- + +### Various fixes & improvements + +- fix: fixes KeyError when running with stale topic dlq (#82512) by @lynnagara +- chore(issue-views): Add analytics back to tab actions (#82504) by @MichaelSun48 +- chore(sentry apps): Introduce new error types for sentry apps (#82507) by @Christinarlong +- fix timezone normalization (#82496) by @kneeyo1 +- ref(tsc): convert teamAccessRequestModal to FC (#82470) by @michellewzhang +- ref(tsc): convert dashboardWidgetQuerySelectorModal to FC (#82466) by @michellewzhang +- ref(issue-views): Overhaul issue views state and logic to a new context (#82429) by @MichaelSun48 +- ref: strptime -> fromisoformat in tests (#82488) by @asottile-sentry +- chore(various): Fix linter warnings (#82494) by @lobsterkatie +- ref(insights): Split out `getAxisMaxForPercentageSeries` (#82493) by @gggritso +- fix(ecosystem): Track metrics for issue detail ticket creation (#82436) by @GabeVillalobos +- ref(aci): pass WorkflowJob into process_workflows (#82489) by @cathteng +- fix(group-events): Fix typo and error text (#82490) by @leeandher +- fix(web): Add react_config context on auth pages take 2 (#82480) by @BYK +- feat(alerts): ACI dual write alert rule helpers (#82400) by @ceorourke +- feat(dashboards): Pass `LineChart` series meta alongside the data (#82047) by @gggritso +- fix(eap): Numeric attribute filtering in snql eap (#82472) by @Zylphrex +- chore(issues): Opt in a few more endpoint tests to stronger types (#82382) by 
@mrduncan +- ref: remove calls to iso_format in testutils (#82461) by @asottile-sentry +- feat(dashboards): enable sorting by column in table view (#82239) by @harshithadurai +- ref(workflow_engine): remove remaining references to condition in validators (#82438) by @mifu67 +- fix(flags): separate permission class (#82463) by @oioki +- feat(new-trace): Fixing scroll on trace drawer (#82475) by @Abdkhan14 +- support routing stale messages to lowpri topic (#82322) by @lynnagara + +_Plus 240 more_ + +24.12.0 +------- + +### Various fixes & improvements + +- chore(utils): allow duplicate values in registry by making reverse lookup optional (#82114) by @cathteng +- feat(workflow_engine): Add `process_data_packets` method (#82002) by @saponifi3d +- ref(workflow_engine): Remove DetectorType (#82111) by @saponifi3d +- chore(aci milestone 3): move aggregation value helpers to incidents directory (#82103) by @mifu67 +- ref(feedback): remove spam detection logs - replaced by redash (#82071) by @aliu39 +- fix(activity): Update activity message for linked issues (#82064) by @snigdhas +- ref(replay): improve error messages for invalid searches that raise CouldNotParseValue (#82048) by @aliu39 +- fix(toolbar): Include credentials with fetch requests (#82108) by @ryan953 +- feat(autofix): Add support for streamed output (#82024) by @roaga +- :sparkles: feat(discord): add button to redirect for user linking (#82104) by @iamrajjoshi +- feat(workflow_engine): Adding support for `process_workflows` in the IssuePlatform (#81975) by @saponifi3d +- ref: remove skip_for_relay_store (#82106) by @asottile-sentry +- feat(metric-issues): Configure workflow notifications by group type (#81609) by @snigdhas +- fix(iphone-codes): update frontend definitions (#82100) by @armcknight +- ref: improve grouphash_metadata test (#82101) by @asottile-sentry +- fix(iphone-codes): update BE mapping; remove unused method (#82094) by @armcknight +- ref: remove xfail_if_not_postgres (#82097) by @asottile-sentry +- ref: fix typing for endpoints.project_rule_preview (#82089) by @asottile-sentry +- ref: fix types for test_event_attachment_details (#82091) by @asottile-sentry +- ref: remove requires_not_arm64 (#82093) by @asottile-sentry +- chore(aci): enforce config schema without subclassing (#81979) by @cathteng +- ref: fix types for eventstore.test_base (#82092) by @asottile-sentry +- chore(stacktrace): Make source map tooltip aligned (#82016) by @MichaelSun48 +- ref: delete unused GroupEnvironmentWithStatsSerializer (#82090) by @asottile-sentry + +_Plus 93 more_ + +24.11.2 +------- + +### Various fixes & improvements + +- fix(dashboards): Abbreviate `LineChartWidget` Y axis integers (#81937) by @gggritso +- Revert "chore(profiling): remove profiling.stack_trace_rules.allowed_project_ids option (#81903)" (d0bea1aa) by @getsentry-bot +- feat(widget-builder): Add limit field to widget builder hook (#81944) by @nikkikapadia +- fix(alerts): Fix EAP alert filter bar to behave more like explore (#81946) by @edwardgou-sentry +- feat(alerts): Renames eap metrics in ui to spans (#81917) by @edwardgou-sentry +- feat(alerts): Limits eap alert time windows and periods (#81916) by @edwardgou-sentry +- chore(insights): Remove bundle analysis UI flag (#81932) by @gggritso +- feat(widget-builder): Batch URL param changes (#81923) by @narsaynorath +- fix(merged): Always show a link for latests event of a merged group (#81947) by @leeandher +- fix(dashboards): Add missing propagated props in `LineChartWidget` (#81935) by @gggritso +- chore(profiling): 
remove profiling.stack_trace_rules.allowed_project_ids option (#81903) by @viglia +- feat(new-trace): remove prefix related (#81918) by @doralchan +- :mag: nit(integration slo): cleanup tests (#81943) by @iamrajjoshi +- ref(widget-builder): Split out tests (#81949) by @narsaynorath +- fix(trace-view): Web Vitals scores (#81945) by @0Calories +- ref(insights): Simplify `SpanTimeCharts` (#81931) by @gggritso +- fix(oauth): only remove the related tokens (#81677) by @sentaur-athena +- :wrench: chore(integration slo): cleaning up tests and use util method (#81936) by @iamrajjoshi +- feat(issue summary): Change 3-dot menu to dropdown (#81928) by @roaga +- feat(sdk): Upgrade @sentry SDKs to v8.43.0 (#81925) by @aliu39 +- Better logging for backpressure (#81648) by @kneeyo1 +- feat(dashboards): add success message when favoriting dashboards (#81887) by @harshithadurai +- feat(ui): Add dark app loading theme (#81611) by @scttcper +- ref(dashboards): Export Widget component props (#81924) by @gggritso + +_Plus 442 more_ + 24.11.1 ------- diff --git a/api-docs/openapi.json b/api-docs/openapi.json index f7cba5f6d5345f..a40ec189185cb3 100644 --- a/api-docs/openapi.json +++ b/api-docs/openapi.json @@ -15,10 +15,14 @@ }, "servers": [ { - "url": "https://us.sentry.io/" - }, - { - "url": "https://de.sentry.io/" + "url": "https://{region}.sentry.io", + "variables": { + "region": { + "default": "us", + "description": "The data-storage-location for an organization", + "enum": ["us", "de"] + } + } } ], "tags": [ diff --git a/api-docs/paths/events/issue-details.json b/api-docs/paths/events/issue-details.json index ac00d75f3c5fcb..15cdfb55ec7a70 100644 --- a/api-docs/paths/events/issue-details.json +++ b/api-docs/paths/events/issue-details.json @@ -216,7 +216,7 @@ }, "statusDetails": { "type": "object", - "description": "Additional details about the status of the issue.", + "description": "Additional details about the resolution. Supported values are `\"inRelease\"`, `\"inNextRelease\"`, `\"inCommit\"`, `\"ignoreDuration\"`, `\"ignoreCount\"`, `\"ignoreWindow\"`, `\"ignoreUserCount\"`, and `\"ignoreUserWindow\"`.", "properties": { "inNextRelease": { "type": "boolean", diff --git a/api-docs/paths/projects/dsyms.json b/api-docs/paths/projects/dsyms.json index 8221656d080a20..911b76e55585a4 100644 --- a/api-docs/paths/projects/dsyms.json +++ b/api-docs/paths/projects/dsyms.json @@ -45,7 +45,7 @@ }, "post": { "tags": ["Projects"], - "description": "Upload a new debug information file for the given release.\n\nUnlike other API requests, files must be uploaded using the\ntraditional multipart/form-data content-type.\n\nThe file uploaded is a zip archive of an Apple .dSYM folder which\ncontains the individual debug images. Uploading through this endpoint\nwill create different files for the contained images.", + "description": "Upload a new debug information file for the given release.\n\nUnlike other API requests, files must be uploaded using the\ntraditional multipart/form-data content-type.\n\nRequests to this endpoint should use the region-specific domain eg. `us.sentry.io` or `de.sentry.io`.\n\nThe file uploaded is a zip archive of an Apple .dSYM folder which\ncontains the individual debug images. 
Uploading through this endpoint\nwill create different files for the contained images.", "operationId": "Upload a New File", "parameters": [ { @@ -109,6 +109,9 @@ { "auth_token": ["project:write"] } + ], + "servers": [ + {"url": "https://{region}.sentry.io"} ] }, "delete": { diff --git a/api-docs/paths/releases/project-release-files.json b/api-docs/paths/releases/project-release-files.json index cfacc62efd3777..695e414037c0bc 100644 --- a/api-docs/paths/releases/project-release-files.json +++ b/api-docs/paths/releases/project-release-files.json @@ -74,7 +74,7 @@ }, "post": { "tags": ["Releases"], - "description": "Upload a new project release file.", + "description": "Upload a new file for the given release.\n\nUnlike other API requests, files must be uploaded using the traditional multipart/form-data content-type.\n\nRequests to this endpoint should use the region-specific domain eg. `us.sentry.io` or `de.sentry.io`\n\nThe optional 'name' attribute should reflect the absolute path that this file will be referenced as. For example, in the case of JavaScript you might specify the full web URI.", "operationId": "Upload a New Project Release File", "parameters": [ { @@ -171,6 +171,9 @@ { "auth_token": ["project:releases"] } + ], + "servers": [ + {"url": "https://{region}.sentry.io"} ] } } diff --git a/api-docs/paths/releases/release-files.json b/api-docs/paths/releases/release-files.json index ab1adf49ce89e6..918e26c50e2365 100644 --- a/api-docs/paths/releases/release-files.json +++ b/api-docs/paths/releases/release-files.json @@ -65,7 +65,7 @@ }, "post": { "tags": ["Releases"], - "description": "Upload a new organization release file.", + "description": "Upload a new file for the given release.\n\nUnlike other API requests, files must be uploaded using the traditional multipart/form-data content-type.\n\nRequests to this endpoint should use the region-specific domain eg. `us.sentry.io` or `de.sentry.io`.\n\nThe optional 'name' attribute should reflect the absolute path that this file will be referenced as. For example, in the case of JavaScript you might specify the full web URI.", "operationId": "Upload a New Organization Release File", "parameters": [ { @@ -135,6 +135,9 @@ { "auth_token": ["project:releases"] } + ], + "servers": [ + {"url": "https://{region}.sentry.io"} ] } } diff --git a/bin/benchmark_codeowners/benchmark b/bin/benchmark_codeowners/benchmark index 4c71d12e583d0a..78d6b611f9498a 100755 --- a/bin/benchmark_codeowners/benchmark +++ b/bin/benchmark_codeowners/benchmark @@ -1,5 +1,7 @@ #!/usr/bin/env python # isort: skip_file +# flake8: noqa: S002 + """ This script benchmarks the performance of issue owner assignment in Sentry. 
@@ -15,7 +17,13 @@ import time from sentry.models.organization import Organization from sentry.models.projectownership import ProjectOwnership from sentry.models.project import Project +from sentry.models.team import Team from sentry.utils import json +import sentry_sdk +from sentry.models.projectteam import ProjectTeam + +# disable sentry as it creates lots of noise in the output +sentry_sdk.init(None) def main(code_mapping_file, event_data_file): @@ -42,6 +50,30 @@ def main(code_mapping_file, event_data_file): name=project_name, slug=project_slug, id=project_id, organization_id=org.id ) + # create teams for all actors + teams_to_create = [] + seen_teams = set() + for rule in code_mapping["rules"]: + for owner in rule["owners"]: + team_name = owner["identifier"] + if team_name not in seen_teams: + teams_to_create.append( + Team( + name=team_name, + slug=team_name, + organization_id=org.id, + id=owner["id"], + ) + ) + seen_teams.add(team_name) + + # delete teams from previous runs + Team.objects.filter(id__in=[team.id for team in teams_to_create]).delete() + + Team.objects.bulk_create(teams_to_create) + for team in Team.objects.filter(organization_id=org.id): + ProjectTeam.objects.create(project_id=project.id, team_id=team.id) + # create a projectownership ProjectOwnership.objects.get_or_create( project_id=project.id, @@ -51,9 +83,18 @@ def main(code_mapping_file, event_data_file): event_data = get_event_data() start = time.time() - ProjectOwnership.get_issue_owners(project.id, event_data) + issue_owners = ProjectOwnership.get_issue_owners(project.id, event_data) elapsed_time = time.time() - start print(f"Time taken: {elapsed_time:.6f} seconds") # noqa + print("Ownership rules:") + for rule, teams, rule_type in issue_owners: + print(f"\nRule:") + print(f" Type: {rule_type}") + print(f" Pattern: {rule.matcher.pattern}") + print(" Teams:") + for team in teams: # type: ignore[assignment] + if isinstance(team, Team): # Only handle Team objects + print(f" - {team.name} (id: {team.id})") if __name__ == "__main__": diff --git a/biome.json b/biome.json index e18ffc69671ea7..24fd4555ce47d2 100644 --- a/biome.json +++ b/biome.json @@ -65,7 +65,6 @@ "noMisrefactoredShorthandAssign": "error", "useAwait": "error", "useNamespaceKeyword": "error", - "noSkippedTests": "error", "noFocusedTests": "error", "noDuplicateTestHooks": "error" }, diff --git a/build-utils/sentry-instrumentation.ts b/build-utils/sentry-instrumentation.ts index c6797c1e22f490..ae502acd76f044 100644 --- a/build-utils/sentry-instrumentation.ts +++ b/build-utils/sentry-instrumentation.ts @@ -1,6 +1,6 @@ /* eslint-env node */ +import type {Span} from '@sentry/core'; import type * as Sentry from '@sentry/node'; -import type {Span} from '@sentry/types'; import crypto from 'node:crypto'; import https from 'node:https'; import os from 'node:os'; @@ -71,7 +71,7 @@ class SentryInstrumentation { sentry.setTag('arch', os.arch()); sentry.setTag( 'cpu', - cpus?.length ? `${cpus[0].model} (cores: ${cpus.length})}` : 'N/A' + cpus?.length ? 
`${cpus[0]!.model} (cores: ${cpus.length})}` : 'N/A' ); this.Sentry = sentry; @@ -96,7 +96,7 @@ class SentryInstrumentation { .filter(assetName => !assetName.endsWith('.map')) .forEach(assetName => { const asset = compilation.assets[assetName]; - const size = asset.size(); + const size = asset!.size(); const file = assetName; const body = JSON.stringify({ file, diff --git a/codecov.yml b/codecov.yml index 1a981e84c7c040..4b8b260fe743f9 100644 --- a/codecov.yml +++ b/codecov.yml @@ -25,7 +25,6 @@ coverage: - static/app/routes.tsx - static/app/**/*.stories.tsx - static/app/**/__stories__/ - - tests/ component_management: individual_components: @@ -54,7 +53,7 @@ flags: after_n_builds: 4 backend: paths: - - 'src/sentry/**/*.py' + - '**/*.py' carryforward: true # Do not send any status checks until n coverage reports are uploaded. # NOTE: If you change this, make sure to change `comment.after_n_builds` below as well. diff --git a/config/tsconfig.base.json b/config/tsconfig.base.json index 326841c8574a4a..e837746b45bb24 100644 --- a/config/tsconfig.base.json +++ b/config/tsconfig.base.json @@ -40,6 +40,7 @@ "noImplicitThis": true, "noUnusedLocals": true, "noUnusedParameters": true, + "noUncheckedIndexedAccess": true, "strict": true, "strictBindCallApply": false, "useUnknownInCatchVariables": false, diff --git a/devservices/clickhouse/config.xml b/devservices/clickhouse/config.xml deleted file mode 100644 index 327d60661b29da..00000000000000 --- a/devservices/clickhouse/config.xml +++ /dev/null @@ -1,6 +0,0 @@ - - 0.3 - - 1 - - diff --git a/devservices/config.yml b/devservices/config.yml index e8e6a5b0d1b2be..f4cf7a263b0160 100644 --- a/devservices/config.yml +++ b/devservices/config.yml @@ -25,9 +25,36 @@ x-sentry-service-config: repo_name: sentry-shared-redis branch: main repo_link: https://github.com/getsentry/sentry-shared-redis.git + symbolicator: + description: A symbolication service for native stacktraces and minidumps with symbol server support + remote: + repo_name: symbolicator + branch: master + repo_link: https://github.com/getsentry/symbolicator.git + mode: default + bigtable: + description: Bigtable emulator + redis-cluster: + description: Redis cluster used for testing + chartcuterie: + description: Chartcuterie is a service that generates charts + remote: + repo_name: chartcuterie + branch: master + repo_link: https://github.com/getsentry/chartcuterie.git + taskbroker: + description: Service used to process asynchronous tasks + remote: + repo_name: taskbroker + branch: main + repo_link: https://github.com/getsentry/taskbroker.git + mode: containerized modes: default: [snuba, postgres, relay] migrations: [postgres, redis] + acceptance-ci: [postgres, snuba, chartcuterie] + taskbroker: [snuba, postgres, relay, taskbroker] + backend-ci: [snuba, postgres, redis, bigtable, redis-cluster, symbolicator] services: postgres: @@ -61,6 +88,29 @@ services: labels: - orchestrator=devservices restart: unless-stopped + bigtable: + image: 'ghcr.io/getsentry/cbtemulator:d28ad6b63e461e8c05084b8c83f1c06627068c04' + ports: + - '127.0.0.1:8086:8086' + networks: + - devservices + extra_hosts: + - host.docker.internal:host-gateway + redis-cluster: + image: ghcr.io/getsentry/docker-redis-cluster:7.0.10 + ports: + - '127.0.0.1:7000:7000' + - '127.0.0.1:7001:7001' + - '127.0.0.1:7002:7002' + - '127.0.0.1:7003:7003' + - '127.0.0.1:7004:7004' + - '127.0.0.1:7005:7005' + networks: + - devservices + extra_hosts: + - host.docker.internal:host-gateway + environment: + - IP=0.0.0.0 networks: devservices: diff 
--git a/devservices/docker-compose-testing.yml b/devservices/docker-compose-testing.yml deleted file mode 100644 index 86e6da6a05053e..00000000000000 --- a/devservices/docker-compose-testing.yml +++ /dev/null @@ -1,282 +0,0 @@ -x-restart-policy: &restart_policy - restart: unless-stopped -x-depends_on-healthy: &depends_on-healthy - condition: service_healthy -x-depends_on-default: &depends_on-default - condition: service_started -x-healthcheck-defaults: &healthcheck_defaults - interval: 30s - timeout: 1m30s - retries: 10 - start_period: 10s -services: - redis: - <<: *restart_policy - container_name: sentry_redis - image: ghcr.io/getsentry/image-mirror-library-redis:5.0-alpine - healthcheck: - <<: *healthcheck_defaults - test: redis-cli ping - command: - [ - 'redis-server', - '--appendonly', - 'yes', - '--save', - '60', - '20', - '--auto-aof-rewrite-percentage', - '100', - '--auto-aof-rewrite-min-size', - '64mb', - ] - volumes: - - 'sentry-redis:/data' - ports: - - '6379:6379' - networks: - - sentry - extra_hosts: - host.docker.internal: host-gateway - postgres: - <<: *restart_policy - container_name: sentry_postgres - # Using the same postgres version as Sentry dev for consistency purposes - image: 'ghcr.io/getsentry/image-mirror-library-postgres:14-alpine' - healthcheck: - <<: *healthcheck_defaults - # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided - test: ['CMD-SHELL', 'pg_isready -U ${POSTGRES_USER:-postgres}'] - 'command': - [ - 'postgres', - '-c', - 'wal_level=logical', - '-c', - 'max_replication_slots=1', - '-c', - 'max_wal_senders=1', - ] - environment: - POSTGRES_HOST_AUTH_METHOD: 'trust' - POSTGRES_DB: 'sentry' - volumes: - - 'sentry-postgres:/var/lib/postgresql/data' - ports: - - '5432:5432' - networks: - - sentry - extra_hosts: - host.docker.internal: host-gateway - kafka: - <<: *restart_policy - image: 'ghcr.io/getsentry/image-mirror-confluentinc-cp-kafka:7.5.0' - container_name: sentry_kafka - environment: - # https://docs.confluent.io/platform/current/installation/docker/config-reference.html#cp-kakfa-example - KAFKA_PROCESS_ROLES: 'broker,controller' - KAFKA_CONTROLLER_QUORUM_VOTERS: '1@127.0.0.1:29093' - KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' - KAFKA_NODE_ID: '1' - CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk' - KAFKA_LISTENERS: 'PLAINTEXT://0.0.0.0:29092,INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29093' - KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://127.0.0.1:29092,INTERNAL://kafka:9093,EXTERNAL://127.0.0.1:9092' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT' - KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1' - KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: '1' - KAFKA_LOG_RETENTION_HOURS: '24' - KAFKA_MESSAGE_MAX_BYTES: '50000000' #50MB or bust - KAFKA_MAX_REQUEST_SIZE: '50000000' #50MB on requests apparently too - volumes: - - 'sentry-kafka:/var/lib/kafka/data' - - 'sentry-kafka-log:/var/lib/kafka/log' - healthcheck: - <<: *healthcheck_defaults - test: ['CMD-SHELL', 'nc -z localhost 9092'] - interval: 10s - timeout: 10s - retries: 30 - ports: - - '9092:9092' - - '9093:9093' - networks: - - sentry - extra_hosts: - host.docker.internal: host-gateway - clickhouse: - <<: *restart_policy - container_name: sentry_clickhouse - image: 'ghcr.io/getsentry/image-mirror-altinity-clickhouse-server:23.8.11.29.altinitystable' - ulimits: - nofile: - soft: 262144 - hard: 262144 - volumes: - - 
'sentry-clickhouse:/var/lib/clickhouse' - - 'sentry-clickhouse-log:/var/log/clickhouse-server' - - type: bind - read_only: true - source: ./clickhouse/config.xml - target: /etc/clickhouse-server/config.d/sentry.xml - healthcheck: - test: [ - 'CMD-SHELL', - # Manually override any http_proxy envvar that might be set, because - # this wget does not support no_proxy. See: - # https://github.com/getsentry/self-hosted/issues/1537 - "http_proxy='' wget -nv -t1 --spider 'http://localhost:8123/' || exit 1", - ] - interval: 10s - timeout: 10s - retries: 30 - ports: - - '8123:8123' - - '9000:9000' - - '9009:9009' - networks: - - sentry - extra_hosts: - host.docker.internal: host-gateway - symbolicator: - <<: *restart_policy - container_name: sentry_symbolicator - image: 'us-central1-docker.pkg.dev/sentryio/symbolicator/image:nightly' - volumes: - - 'sentry-symbolicator:/data' - - type: bind - read_only: true - source: ./symbolicator - target: /etc/symbolicator - command: run -c /etc/symbolicator/config.yml - ports: - - '3021:3021' - networks: - - sentry - extra_hosts: - host.docker.internal: host-gateway - vroom: - <<: *restart_policy - container_name: sentry_vroom - image: 'us-central1-docker.pkg.dev/sentryio/vroom/vroom:latest' - environment: - SENTRY_KAFKA_BROKERS_PROFILING: 'sentry_kafka:9092' - SENTRY_KAFKA_BROKERS_OCCURRENCES: 'sentry_kafka:9092' - SENTRY_BUCKET_PROFILES: file://localhost//var/lib/sentry-profiles - SENTRY_SNUBA_HOST: 'http://snuba-api:1218' - volumes: - - sentry-vroom:/var/lib/sentry-profiles - depends_on: - kafka: - <<: *depends_on-healthy - ports: - - '8085:8085' - networks: - - sentry - extra_hosts: - host.docker.internal: host-gateway - snuba: - <<: *restart_policy - container_name: sentry_snuba - image: ghcr.io/getsentry/snuba:latest - ports: - - '1218:1218' - - '1219:1219' - networks: - - sentry - command: ['devserver'] - environment: - PYTHONUNBUFFERED: '1' - SNUBA_SETTINGS: docker - DEBUG: '1' - CLICKHOUSE_HOST: 'clickhouse' - CLICKHOUSE_PORT: '9000' - CLICKHOUSE_HTTP_PORT: '8123' - DEFAULT_BROKERS: 'kafka:9093' - REDIS_HOST: 'redis' - REDIS_PORT: '6379' - REDIS_DB: '1' - ENABLE_SENTRY_METRICS_DEV: '${ENABLE_SENTRY_METRICS_DEV:-}' - ENABLE_PROFILES_CONSUMER: '${ENABLE_PROFILES_CONSUMER:-}' - ENABLE_SPANS_CONSUMER: '${ENABLE_SPANS_CONSUMER:-}' - ENABLE_ISSUE_OCCURRENCE_CONSUMER: '${ENABLE_ISSUE_OCCURRENCE_CONSUMER:-}' - ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES: '1' - ENABLE_GROUP_ATTRIBUTES_CONSUMER: '${ENABLE_GROUP_ATTRIBUTES_CONSUMER:-}' - platform: linux/amd64 - depends_on: - - kafka - - redis - - clickhouse - extra_hosts: - host.docker.internal: host-gateway - bigtable: - <<: *restart_policy - container_name: sentry_bigtable - image: 'ghcr.io/getsentry/cbtemulator:d28ad6b63e461e8c05084b8c83f1c06627068c04' - ports: - - '8086:8086' - networks: - - sentry - extra_hosts: - host.docker.internal: host-gateway - redis-cluster: - <<: *restart_policy - container_name: sentry_redis-cluster - image: ghcr.io/getsentry/docker-redis-cluster:7.0.10 - ports: - - '7000:7000' - - '7001:7001' - - '7002:7002' - - '7003:7003' - - '7004:7004' - - '7005:7005' - networks: - - sentry - volumes: - - sentry-redis-cluster:/redis-data - environment: - - IP=0.0.0.0 - chartcuterie: - <<: *restart_policy - container_name: sentry_chartcuterie - image: 'us-central1-docker.pkg.dev/sentryio/chartcuterie/image:latest' - environment: - CHARTCUTERIE_CONFIG: /etc/chartcuterie/config.js - CHARTCUTERIE_CONFIG_POLLING: true - volumes: - - ./chartcuterie:/etc/chartcuterie - ports: - - '7901:9090' - networks: - 
- sentry - extra_hosts: - host.docker.internal: host-gateway - healthcheck: - <<: *healthcheck_defaults - # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided - test: - [ - 'CMD-SHELL', - 'docker exec sentry_chartcuterie python3 -c "import urllib.request; urllib.request.urlopen(\"http://127.0.0.1:9090/api/chartcuterie/healthcheck/live\", timeout=5)"', - ] - -volumes: - # These store application data that should persist across restarts. - sentry-data: - sentry-postgres: - sentry-redis: - sentry-redis-cluster: - sentry-kafka: - sentry-clickhouse: - sentry-symbolicator: - # This volume stores profiles and should be persisted. - # Not being external will still persist data across restarts. - # It won't persist if someone does a docker compose down -v. - sentry-vroom: - sentry-kafka-log: - sentry-clickhouse-log: - -networks: - sentry: - name: sentry - external: true diff --git a/devservices/symbolicator/config.yml b/devservices/symbolicator/config.yml deleted file mode 100644 index 290d752a6dd04c..00000000000000 --- a/devservices/symbolicator/config.yml +++ /dev/null @@ -1,11 +0,0 @@ -bind: '0.0.0.0:3021' -logging: - level: 'debug' - format: 'pretty' - enable_backtraces: true - -# explicitly disable caches as it's not something we want in tests. in -# development it may be less ideal. perhaps we should do the same thing as we -# do with relay one day (one container per test/session), although that will be -# slow -cache_dir: null diff --git a/eslint.config.mjs b/eslint.config.mjs new file mode 100644 index 00000000000000..10f0a1b91fbfae --- /dev/null +++ b/eslint.config.mjs @@ -0,0 +1,838 @@ +// @ts-check +/** + * Understanding & making changes to this file: + * + * This is your friend: + * `npx eslint --inspect-config` + */ +import * as emotion from '@emotion/eslint-plugin'; +import prettier from 'eslint-config-prettier'; +import importPlugin from 'eslint-plugin-import'; +import jest from 'eslint-plugin-jest'; +import jestDom from 'eslint-plugin-jest-dom'; +import react from 'eslint-plugin-react'; +import reactHooks from 'eslint-plugin-react-hooks'; +import sentry from 'eslint-plugin-sentry'; +import simpleImportSort from 'eslint-plugin-simple-import-sort'; +import testingLibrary from 'eslint-plugin-testing-library'; +import typescriptSortKeys from 'eslint-plugin-typescript-sort-keys'; +import globals from 'globals'; +import invariant from 'invariant'; +// biome-ignore lint/correctness/noNodejsModules: Need to get the list of things! 
+import {builtinModules} from 'node:module'; +import typescript from 'typescript-eslint'; + +invariant(react.configs.flat, 'For typescript'); + +const restrictedImportPatterns = [ + { + group: ['sentry/components/devtoolbar/*'], + message: 'Do not depend on toolbar internals', + }, +]; + +const restrictedImportPaths = [ + { + name: '@testing-library/react', + message: + 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', + }, + { + name: '@testing-library/react-hooks', + message: + 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', + }, + { + name: '@testing-library/user-event', + message: + 'Please import from `sentry-test/reactTestingLibrary` instead so that we can ensure consistency throughout the codebase', + }, + { + name: '@sentry/browser', + message: + 'Please import from `@sentry/react` to ensure consistency throughout the codebase.', + }, + { + name: 'marked', + message: + "Please import marked from 'app/utils/marked' so that we can ensure sanitation of marked output", + }, + { + name: 'lodash', + message: + "Please import lodash utilities individually. e.g. `import isEqual from 'lodash/isEqual';`. See https://github.com/getsentry/frontend-handbook#lodash from for information", + }, + { + name: 'lodash/get', + message: + 'Optional chaining `?.` and nullish coalescing operators `??` are available and preferred over using `lodash/get`. See https://github.com/getsentry/frontend-handbook#new-syntax for more information', + }, + { + name: 'sentry/utils/theme', + importNames: ['lightColors', 'darkColors'], + message: + "'lightColors' and 'darkColors' exports intended for use in Storybook only. Instead, use theme prop from emotion or the useTheme hook.", + }, + { + name: 'react-router', + importNames: ['withRouter'], + message: + "Use 'useLocation', 'useParams', 'useNavigate', 'useRoutes' from sentry/utils instead.", + }, + { + name: 'sentry/utils/withSentryRouter', + message: + "Use 'useLocation', 'useParams', 'useNavigate', 'useRoutes' from sentry/utils instead.", + }, + { + name: 'qs', + message: 'Please use query-string instead of qs', + }, + { + name: 'moment', + message: 'Please import moment-timezone instead of moment', + }, +]; + +// Used by both: `languageOptions` & `parserOptions` +const ecmaVersion = 6; // TODO(ryan953): change to 'latest' + +/** + * To get started with this ESLint Configuration list be sure to read at least + * these sections of the docs: + * - https://eslint.org/docs/latest/use/configure/configuration-files#specifying-files-and-ignores + * - https://eslint.org/docs/latest/use/configure/configuration-files#cascading-configuration-objects + */ + +export default typescript.config([ + { + // Main parser & linter options + // Rules are defined below and inherit these properties + // https://eslint.org/docs/latest/use/configure/configuration-files#configuration-objects + name: 'eslint/global/languageOptions', + languageOptions: { + ecmaVersion, + sourceType: 'module', + globals: { + // TODO(ryan953): globals.browser seems to have a bug with trailing whitespace + ...Object.fromEntries( + Object.keys(globals.browser).map(key => [key.trim(), false]) + ), + ...globals.jest, + MockApiClient: true, + tick: true, + }, + parser: typescript.parser, + parserOptions: { + ecmaFeatures: { + globalReturn: false, + }, + ecmaVersion, + + // https://typescript-eslint.io/packages/parser/#emitdecoratormetadata + emitDecoratorMetadata: 
undefined, + + // https://typescript-eslint.io/packages/parser/#experimentaldecorators + experimentalDecorators: undefined, + + // https://typescript-eslint.io/packages/parser/#jsdocparsingmode + jsDocParsingMode: process.env.SENTRY_DETECT_DEPRECATIONS ? 'all' : 'none', + + // https://typescript-eslint.io/packages/parser/#project + project: process.env.SENTRY_DETECT_DEPRECATIONS ? './tsconfig.json' : false, + + // https://typescript-eslint.io/packages/parser/#projectservice + // `projectService` is recommended, but slower, with our current tsconfig files. + // projectService: true, + // tsconfigRootDir: import.meta.dirname, + }, + }, + linterOptions: { + noInlineConfig: false, + reportUnusedDisableDirectives: 'error', + }, + settings: { + react: { + version: '18.2.0', + defaultVersion: '18.2', + }, + 'import/parsers': {'@typescript-eslint/parser': ['.ts', '.tsx']}, + 'import/resolver': {typescript: {}}, + 'import/extensions': ['.js', '.jsx'], + }, + }, + { + name: 'eslint/global/files', + // Default file selection + // https://eslint.org/docs/latest/use/configure/configuration-files#specifying-files-and-ignores + files: ['**/*.js', '**/*.mjs', '**/*.ts', '**/*.jsx', '**/*.tsx'], + }, + { + name: 'eslint/global/ignores', + // Global ignores + // https://eslint.org/docs/latest/use/configure/configuration-files#globally-ignoring-files-with-ignores + ignores: [ + '.devenv/**/*', + '.github/**/*', + '.mypy_cache/**/*', + '.pytest_cache/**/*', + '.venv/**/*', + '**/*.benchmark.ts', + '**/*.d.ts', + '**/dist/**/*', + '**/tests/**/fixtures/**/*', + '**/vendor/**/*', + 'build-utils/**/*', + 'config/chartcuterie/config.js', // TODO: see if this file exists + 'fixtures/artifact_bundle/**/*', + 'fixtures/artifact_bundle_debug_ids/**/*', + 'fixtures/artifact_bundle_duplicated_debug_ids/**/*', + 'fixtures/profiles/embedded.js', + 'jest.config.ts', + 'api-docs/**/*', + 'src/sentry/static/sentry/js/**/*', + 'src/sentry/templates/sentry/**/*', + 'stylelint.config.js', + ], + }, + /** + * Global Rules + * Any ruleset that does not include `files` or `ignores` fields + * + * Plugins are configured within each configuration object. + * https://eslint.org/docs/latest/use/configure/configuration-files#configuration-objects + * + * Rules are grouped by plugin. If you want to override a specific rule inside + * the recommended set, then it's recommended to spread the new rule on top + * of the predefined ones. + * + * For example: if you want to enable a new plugin in the codebase and their + * recommended rules (or a new rule that's part of an existing plugin) + * First you'd setup a configuration object for that plugin: + * { + * name: 'my-plugin/recommended', + * ...myPlugin.configs.recommended, + * }, + * Second you'd override the rule you want to deal with, maybe making it a + * warning to start: + * { + * name: 'my-plugin/recommended', + * ...myPlugin.configs.recommended, + * rules: { + * ...myPlugin.configs.recommended.rules, + * ['the-rule']: 'warn', + * } + * }, + * Finally, once all warnings are fixed, update from 'warning' to 'error', or + * remove the override and rely on the recommended rules again. 
+ */ + { + name: 'eslint/rules', + rules: { + // https://eslint.org/docs/rules/strict + strict: ['error', 'global'], + + /** + * Variables + */ + // https://eslint.org/docs/rules/no-shadow-restricted-names + 'no-shadow-restricted-names': 'error', + + /** + * Possible errors + */ + // https://eslint.org/docs/rules/no-cond-assign + 'no-cond-assign': ['error', 'always'], + + // https://eslint.org/docs/rules/no-alert + 'no-alert': 'error', + + // https://eslint.org/docs/rules/no-constant-condition + 'no-constant-condition': 'warn', + + // https://eslint.org/docs/rules/no-empty + 'no-empty': 'error', + + // https://eslint.org/docs/rules/no-ex-assign + 'no-ex-assign': 'error', + + // https://eslint.org/docs/rules/no-extra-boolean-cast + 'no-extra-boolean-cast': 'error', + + // https://eslint.org/docs/rules/no-func-assign + 'no-func-assign': 'error', + + // https://eslint.org/docs/rules/no-inner-declarations + 'no-inner-declarations': 'error', + + // https://eslint.org/docs/rules/no-invalid-regexp + 'no-invalid-regexp': 'error', + + // https://eslint.org/docs/rules/no-irregular-whitespace + 'no-irregular-whitespace': 'error', + + // https://eslint.org/docs/rules/no-obj-calls + 'no-obj-calls': 'error', + + // https://eslint.org/docs/rules/no-sparse-arrays + 'no-sparse-arrays': 'error', + + // https://eslint.org/docs/rules/block-scoped-var + 'block-scoped-var': 'error', + + /** + * Best practices + */ + // https://eslint.org/docs/rules/consistent-return + 'consistent-return': 'error', + + // https://eslint.org/docs/rules/default-case + 'default-case': 'error', + + // https://eslint.org/docs/rules/dot-notation + 'dot-notation': ['error', {allowKeywords: true}], + + // https://eslint.org/docs/rules/guard-for-in [REVISIT ME] + 'guard-for-in': 'off', + + // https://eslint.org/docs/rules/no-caller + 'no-caller': 'error', + + // https://eslint.org/docs/rules/no-eval + 'no-eval': 'error', + + // https://eslint.org/docs/rules/no-extend-native + 'no-extend-native': 'error', + + // https://eslint.org/docs/rules/no-extra-bind + 'no-extra-bind': 'error', + + // https://eslint.org/docs/rules/no-fallthrough + 'no-fallthrough': 'error', + + // https://eslint.org/docs/rules/no-floating-decimal + 'no-floating-decimal': 'error', + + // https://eslint.org/docs/rules/no-implied-eval + 'no-implied-eval': 'error', + + // https://eslint.org/docs/rules/no-lone-blocks + 'no-lone-blocks': 'error', + + // https://eslint.org/docs/rules/no-loop-func + 'no-loop-func': 'error', + + // https://eslint.org/docs/rules/no-multi-str + 'no-multi-str': 'error', + + // https://eslint.org/docs/rules/no-native-reassign + 'no-native-reassign': 'error', + + // https://eslint.org/docs/rules/no-new + 'no-new': 'error', + + // https://eslint.org/docs/rules/no-new-func + 'no-new-func': 'error', + + // https://eslint.org/docs/rules/no-new-wrappers + 'no-new-wrappers': 'error', + + // https://eslint.org/docs/rules/no-octal + 'no-octal': 'error', + + // https://eslint.org/docs/rules/no-octal-escape + 'no-octal-escape': 'error', + + // https://eslint.org/docs/rules/no-param-reassign [REVISIT ME] + 'no-param-reassign': 'off', + + // https://eslint.org/docs/rules/no-proto + 'no-proto': 'error', + + // https://eslint.org/docs/rules/no-return-assign + 'no-return-assign': 'error', + + // https://eslint.org/docs/rules/no-script-url + 'no-script-url': 'error', + + // https://eslint.org/docs/rules/no-self-compare + 'no-self-compare': 'error', + + // https://eslint.org/docs/rules/no-sequences + 'no-sequences': 'error', + + // 
https://eslint.org/docs/rules/no-throw-literal + 'no-throw-literal': 'error', + + // https://eslint.org/docs/rules/no-with + 'no-with': 'error', + + // https://eslint.org/docs/rules/radix + radix: 'error', + + // https://eslint.org/docs/rules/object-shorthand + 'object-shorthand': ['error', 'properties'], + + // https://eslint.org/docs/rules/vars-on-top + 'vars-on-top': 'off', + + // https://eslint.org/docs/rules/wrap-iife + 'wrap-iife': ['error', 'any'], + + // https://eslint.org/docs/rules/array-callback-return + 'array-callback-return': 'error', + + // https://eslint.org/docs/rules/yoda + yoda: 'error', + + // https://eslint.org/docs/rules/no-else-return + 'no-else-return': ['error', {allowElseIf: false}], + + // https://eslint.org/docs/rules/require-await + 'require-await': 'error', + + // https://eslint.org/docs/rules/multiline-comment-style + 'multiline-comment-style': ['error', 'separate-lines'], + + // https://eslint.org/docs/rules/spaced-comment + 'spaced-comment': [ + 'error', + 'always', + { + line: {markers: ['/'], exceptions: ['-', '+']}, + block: {exceptions: ['*'], balanced: true}, + }, + ], + + // Let formatter handle this + 'arrow-body-style': 'off', + + /** + * Restricted imports, e.g. deprecated libraries, etc + * + * See: https://eslint.org/docs/rules/no-restricted-imports + */ + 'no-restricted-imports': [ + 'error', + { + patterns: restrictedImportPatterns, + paths: restrictedImportPaths, + }, + ], + + // https://eslint.org/docs/rules/no-console + 'no-console': 'error', + }, + }, + { + ...importPlugin.flatConfigs.recommended, + name: 'plugin/import', + rules: { + // We override all the rules that are in the recommended, react, and typescript rulesets + + // From the recommended ruleset: + // https://github.com/import-js/eslint-plugin-import/blob/main/docs/rules/export.md + 'import/export': 'error', + + // 5 rules not recommended to be enabled with typescript-eslint + // https://typescript-eslint.io/troubleshooting/typed-linting/performance/#slow-eslint-rules + 'import/named': 'off', + 'import/namespace': 'off', + 'import/default': 'off', + 'import/no-named-as-default-member': 'off', + 'import/no-unresolved': 'off', + + // Require a newline after the last import/require in a group + // Why doesn't prettier handle this? 
https://prettier.io/docs/en/rationale.html#empty-lines + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/newline-after-import.md + 'import/newline-after-import': 'error', + + // do not allow a default import name to match a named export (airbnb: error) + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/no-named-as-default.md + 'import/no-named-as-default': 'off', + + // Prevent importing the default as if it were named + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/no-named-default.md + 'import/no-named-default': 'error', + + // disallow AMD require/define + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/no-amd.md + 'import/no-amd': 'error', + + // disallow duplicate imports + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/no-duplicates.md + 'import/no-duplicates': 'error', + + // Forbid import of modules using absolute paths + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/no-absolute-path.md + 'import/no-absolute-path': 'error', + + // Forbid Webpack loader syntax in imports + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/no-webpack-loader-syntax.md + 'import/no-webpack-loader-syntax': 'error', + + // Reports if a module"s default export is unnamed + // https://github.com/benmosher/eslint-plugin-import/blob/main/docs/rules/no-anonymous-default-export.md + 'import/no-anonymous-default-export': 'error', + }, + }, + { + name: 'plugin/react', + plugins: { + ...react.configs.flat.recommended.plugins, + // @ts-ignore noUncheckedIndexedAccess + ...react.configs.flat['jsx-runtime'].plugins, + }, + rules: { + ...react.configs.flat.recommended.rules, + // @ts-ignore noUncheckedIndexedAccess + ...react.configs.flat['jsx-runtime'].rules, + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/display-name.md + 'react/display-name': 'off', + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-multi-comp.md + 'react/no-multi-comp': ['off', {ignoreStateless: true}], + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-fragments.md + 'react/jsx-fragments': ['error', 'element'], + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-handler-names.md + // Ensures that any component or prop methods used to handle events are correctly prefixed. + 'react/jsx-handler-names': [ + 'off', + {eventHandlerPrefix: 'handle', eventHandlerPropPrefix: 'on'}, + ], + + // Disabled as we use the newer JSX transform babel plugin. 
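// A hedged illustration of what the automatic JSX transform means in practice:
// a component can be written without importing React at the top of the file,
// e.g. `function Badge() { return <span>ok</span>; }` (where `Badge` is a
// hypothetical name used only for this sketch), because the compiler injects
// the runtime import itself. That is why the jsx-uses-react rule below can stay off.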
+ // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-uses-react.md + 'react/jsx-uses-react': 'off', + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-did-mount-set-state.md + 'react/no-did-mount-set-state': 'error', + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-did-update-set-state.md" + 'react/no-did-update-set-state': 'error', + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-redundant-should-component-update.md + 'react/no-redundant-should-component-update': 'error', + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-typos.md + 'react/no-typos': 'error', + + // Prevent invalid characters from appearing in markup + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-unescaped-entities.md + 'react/no-unescaped-entities': 'off', + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/no-unknown-property.md + 'react/no-unknown-property': ['error', {ignore: ['css']}], + + // We do not need proptypes since we're using typescript + 'react/prop-types': 'off', + + // Disabled as we are using the newer JSX transform babel plugin. + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/react-in-jsx-scope.md + 'react/react-in-jsx-scope': 'off', + + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/self-closing-comp.md + 'react/self-closing-comp': 'error', + + // This also causes issues with typescript + // See: https://github.com/yannickcr/eslint-plugin-react/issues/2066 + // + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/sort-comp.md + 'react/sort-comp': 'warn', + + // Consistent (never add ={true}) + // https://github.com/yannickcr/eslint-plugin-react/blob/master/docs/rules/jsx-boolean-value.md + 'react/jsx-boolean-value': ['error', 'never'], + + // Consistent function component declaration styles + // https://github.com/jsx-eslint/eslint-plugin-react/blob/master/docs/rules/function-component-definition.md + 'react/function-component-definition': [ + 'error', + {namedComponents: 'function-declaration'}, + ], + }, + }, + { + name: 'plugin/react-hooks', + plugins: {'react-hooks': reactHooks}, + rules: { + 'react-hooks/rules-of-hooks': 'error', + 'react-hooks/exhaustive-deps': [ + 'error', + {additionalHooks: '(useEffectAfterFirstRender|useMemoWithPrevious)'}, + ], + }, + }, + { + name: 'plugin/@typescript-eslint', + plugins: {'@typescript-eslint': typescript.plugin}, + rules: { + // no-undef is redundant with typescript as tsc will complain + // A downside is that we won't get eslint errors about it, but your editors should + // support tsc errors so.... + // https://eslint.org/docs/rules/no-undef + 'no-undef': 'off', + + /** + * Need to use typescript version of these rules + * https://eslint.org/docs/rules/no-shadow + */ + 'no-shadow': 'off', + '@typescript-eslint/no-shadow': 'error', + + // This only override the `args` rule (which is "none"). There are too many errors and it's difficult to manually + // fix them all, so we'll have to incrementally update. 
+ // https://eslint.org/docs/rules/no-unused-vars + 'no-unused-vars': 'off', + '@typescript-eslint/no-unused-vars': [ + 'error', + { + vars: 'all', + args: 'all', + // TODO(scttcper): We could enable this to enforce catch (error) + // https://eslint.org/docs/latest/rules/no-unused-vars#caughterrors + caughtErrors: 'none', + + // Ignore vars that start with an underscore + // e.g. if you want to omit a property using object spread: + // + // const {name: _name, ...props} = this.props; + // + varsIgnorePattern: '^_', + argsIgnorePattern: '^_', + destructuredArrayIgnorePattern: '^_', + }, + ], + + // https://eslint.org/docs/rules/no-use-before-define + 'no-use-before-define': 'off', + // This seems to have been turned on while previously it had been off + '@typescript-eslint/no-use-before-define': 'off', + + // https://github.com/xojs/eslint-config-xo-typescript/blob/9791a067d6a119a21a4db72c02f1da95e25ffbb6/index.js#L95 + '@typescript-eslint/no-restricted-types': [ + 'error', + { + types: { + // TODO(scttcper): Turn object on to make our types more strict + // object: { + // message: 'The `object` type is hard to use. Use `Record` instead. See: https://github.com/typescript-eslint/typescript-eslint/pull/848', + // fixWith: 'Record' + // }, + Buffer: { + message: + 'Use Uint8Array instead. See: https://sindresorhus.com/blog/goodbye-nodejs-buffer', + suggest: ['Uint8Array'], + }, + '[]': "Don't use the empty array type `[]`. It only allows empty arrays. Use `SomeType[]` instead.", + '[[]]': + "Don't use `[[]]`. It only allows an array with a single element which is an empty array. Use `SomeType[][]` instead.", + '[[[]]]': "Don't use `[[[]]]`. Use `SomeType[][][]` instead.", + }, + }, + ], + // TODO(scttcper): Turn no-empty-object-type on to make our types more strict + // '@typescript-eslint/no-empty-object-type': 'error', + // TODO(scttcper): Turn no-function on to make our types more strict + // '@typescript-eslint/no-unsafe-function-type': 'error', + '@typescript-eslint/no-wrapper-object-types': 'error', + + // Naming convention enforcements + '@typescript-eslint/naming-convention': [ + 'error', + { + selector: 'typeLike', + format: ['PascalCase'], + leadingUnderscore: 'allow', + }, + { + selector: 'enumMember', + format: ['UPPER_CASE'], + }, + ], + }, + }, + { + name: 'plugin/@typescript-eslint && process.env.SENTRY_DETECT_DEPRECATIONS', + rules: { + '@typescript-eslint/no-deprecated': process.env.SENTRY_DETECT_DEPRECATIONS + ? 'error' + : 'off', + }, + }, + { + name: 'plugin/typescript-sort-keys', + plugins: {'typescript-sort-keys': typescriptSortKeys}, + rules: { + 'typescript-sort-keys/interface': [ + 'error', + 'asc', + {caseSensitive: true, natural: false, requiredFirst: true}, + ], + }, + }, + { + name: 'plugin/simple-import-sort', + plugins: {'simple-import-sort': simpleImportSort}, + rules: { + /** + * Better import sorting + */ + 'sort-imports': 'off', + 'simple-import-sort/imports': [ + 'error', + { + groups: [ + // Side effect imports. + ['^\\u0000'], + + // Node.js builtins. + [`^(${builtinModules.join('|')})(/|$)`], + + // Packages. `react` related packages come first. + ['^react', '^@?\\w'], + + // Test should be separate from the app + ['^(sentry-test|getsentry-test)(/.*|$)'], + + // Internal packages. + ['^(sentry-locale|sentry-images)(/.*|$)'], + + ['^(getsentry-images)(/.*|$)'], + + ['^(app|sentry)(/.*|$)'], + + // Getsentry packages. + ['^(admin|getsentry)(/.*|$)'], + + // Style imports. + ['^.+\\.less$'], + + // Parent imports. Put `..` last. 
+ ['^\\.\\.(?!/?$)', '^\\.\\./?$'], + + // Other relative imports. Put same-folder imports and `.` last. + ['^\\./(?=.*/)(?!/?$)', '^\\.(?!/?$)', '^\\./?$'], + ], + }, + ], + }, + }, + { + name: 'plugin/sentry', + plugins: {sentry}, + rules: { + 'sentry/no-digits-in-tn': 'error', + 'sentry/no-dynamic-translations': 'error', + 'sentry/no-styled-shortcut': 'error', + }, + }, + { + name: 'plugin/@emotion', + plugins: {'@emotion': emotion}, + rules: { + '@emotion/import-from-emotion': 'off', // Not needed, in v11 we import from @emotion/react + '@emotion/jsx-import': 'off', // Not needed, handled by babel + '@emotion/no-vanilla': 'error', + '@emotion/pkg-renaming': 'off', // Not needed, we have migrated to v11 and the old package names cannot be used anymore + '@emotion/styled-import': 'error', + '@emotion/syntax-preference': ['off', 'string'], // TODO(ryan953): Enable this so `css={css``}` is required + }, + }, + { + name: 'plugin/jest', + files: ['**/*.spec.{ts,js,tsx,jsx}', 'tests/js/**/*.{ts,js,tsx,jsx}'], + plugins: jest.configs['flat/recommended'].plugins, + rules: { + ...jest.configs['flat/recommended'].rules, + ...jest.configs['flat/style'].rules, + + // `recommended` set this to warn, we've upgraded to error + 'jest/no-disabled-tests': 'error', + + // `recommended` set this to warn, we've downgraded to off + // Disabled as we have many tests which render as simple validations + 'jest/expect-expect': 'off', + + // Disabled as we have some comment out tests that cannot be + // uncommented due to typescript errors. + 'jest/no-commented-out-tests': 'off', // TODO(ryan953): Fix violations then delete this line + + // Disabled as we do sometimes have conditional expects + 'jest/no-conditional-expect': 'off', // TODO(ryan953): Fix violations then delete this line + + // We don't recommend snapshots, but if there are any keep it small + 'jest/no-large-snapshots': ['error', {maxSize: 2000}], + }, + }, + { + name: 'plugin/jest-dom', + files: ['**/*.spec.{ts,js,tsx,jsx}', 'tests/js/**/*.{ts,js,tsx,jsx}'], + ...jestDom.configs['flat/recommended'], + }, + { + name: 'plugin/testing-library', + files: ['**/*.spec.{ts,js,tsx,jsx}', 'tests/js/**/*.{ts,js,tsx,jsx}'], + ...testingLibrary.configs['flat/react'], + rules: { + ...testingLibrary.configs['flat/react'].rules, + 'testing-library/render-result-naming-convention': 'off', + 'testing-library/no-unnecessary-act': 'off', + }, + }, + { + name: 'plugin/prettier', + ...prettier, + }, + { + name: 'files/devtoolbar', + files: ['static/app/components/devtoolbar/**/*.{ts,tsx}'], + rules: { + 'no-restricted-imports': [ + 'error', + { + paths: [ + ...restrictedImportPaths, + { + name: 'sentry/utils/queryClient', + message: + 'Import from `@tanstack/react-query` and `./hooks/useFetchApiData` or `./hooks/useFetchInfiniteApiData` instead.', + }, + ], + }, + ], + }, + }, + { + name: 'files/sentry-test', + files: ['**/*.spec.{ts,js,tsx,jsx}', 'tests/js/**/*.{ts,js,tsx,jsx}'], + rules: { + 'no-restricted-imports': [ + 'error', + { + patterns: restrictedImportPatterns, + paths: [ + ...restrictedImportPaths, + { + name: 'sentry/locale', + message: 'Translations are not needed in tests.', + }, + ], + }, + ], + }, + }, + { + // We specify rules explicitly for the sdk-loader here so we do not have + // eslint ignore comments included in the source file, which is consumed + // by users. 
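// The shape used here is the generic flat-config override pattern: an entry
// whose `files` globs limit where its `rules` apply. A minimal sketch of that
// pattern, assuming a hypothetical `example-loader.ts`:
//   {files: ['**/example-loader.ts'], rules: {'no-console': 'off'}}
// The entry below applies the same shape to js-sdk-loader.ts.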
+ name: 'files/js-sdk-loader.ts', + files: ['**/js-sdk-loader.ts'], + rules: { + 'no-console': 'off', + }, + }, +]); diff --git a/fixtures/backup/model_dependencies/detailed.json b/fixtures/backup/model_dependencies/detailed.json index 36a9e3dbd3f9f0..fa37af84c3316f 100644 --- a/fixtures/backup/model_dependencies/detailed.json +++ b/fixtures/backup/model_dependencies/detailed.json @@ -3223,22 +3223,6 @@ ] ] }, - "sentry.metricskeyindexer": { - "dangling": false, - "foreign_keys": {}, - "model": "sentry.metricskeyindexer", - "relocation_dependencies": [], - "relocation_scope": "Excluded", - "silos": [ - "Region" - ], - "table_name": "sentry_metricskeyindexer", - "uniques": [ - [ - "string" - ] - ] - }, "sentry.monitor": { "dangling": false, "foreign_keys": { @@ -6242,6 +6226,34 @@ ] ] }, + "tempest.tempestcredentials": { + "dangling": false, + "foreign_keys": { + "created_by_id": { + "kind": "HybridCloudForeignKey", + "model": "sentry.user", + "nullable": true + }, + "project": { + "kind": "FlexibleForeignKey", + "model": "sentry.project", + "nullable": false + } + }, + "model": "tempest.tempestcredentials", + "relocation_dependencies": [], + "relocation_scope": "Excluded", + "silos": [ + "Region" + ], + "table_name": "tempest_tempestcredentials", + "uniques": [ + [ + "client_id", + "project" + ] + ] + }, "uptime.projectuptimesubscription": { "dangling": false, "foreign_keys": { @@ -6302,6 +6314,26 @@ ] ] }, + "uptime.uptimesubscriptionregion": { + "dangling": false, + "foreign_keys": { + "uptime_subscription": { + "kind": "FlexibleForeignKey", + "model": "uptime.uptimesubscription", + "nullable": false + } + }, + "model": "uptime.uptimesubscriptionregion", + "relocation_dependencies": [], + "relocation_scope": "Excluded", + "silos": [ + "Region" + ], + "table_name": "uptime_uptimesubscriptionregion", + "uniques": [ + [] + ] + }, "workflow_engine.action": { "dangling": false, "foreign_keys": { @@ -6691,4 +6723,4 @@ ] ] } -} \ No newline at end of file +} diff --git a/fixtures/backup/model_dependencies/flat.json b/fixtures/backup/model_dependencies/flat.json index a0d8ea7b429b94..dbab6290805ea0 100644 --- a/fixtures/backup/model_dependencies/flat.json +++ b/fixtures/backup/model_dependencies/flat.json @@ -447,7 +447,6 @@ "sentry.lostpasswordhash": [ "sentry.user" ], - "sentry.metricskeyindexer": [], "sentry.monitor": [ "sentry.organization", "sentry.project", @@ -857,6 +856,10 @@ "social_auth.usersocialauth": [ "sentry.user" ], + "tempest.tempestcredentials": [ + "sentry.project", + "sentry.user" + ], "uptime.projectuptimesubscription": [ "sentry.environment", "sentry.project", @@ -865,6 +868,9 @@ "uptime.uptimesubscription" ], "uptime.uptimesubscription": [], + "uptime.uptimesubscriptionregion": [ + "uptime.uptimesubscription" + ], "workflow_engine.action": [ "sentry.integration" ], @@ -924,4 +930,4 @@ "workflow_engine.dataconditiongroup", "workflow_engine.workflow" ] -} \ No newline at end of file +} diff --git a/fixtures/backup/model_dependencies/sorted.json b/fixtures/backup/model_dependencies/sorted.json index 35b2892abcf3a4..dd666a1a710ca4 100644 --- a/fixtures/backup/model_dependencies/sorted.json +++ b/fixtures/backup/model_dependencies/sorted.json @@ -18,7 +18,6 @@ "sentry.identityprovider", "sentry.integration", "sentry.integrationfeature", - "sentry.metricskeyindexer", "sentry.monitorlocation", "sentry.option", "sentry.organization", @@ -49,6 +48,7 @@ "sentry.userroleuser", "social_auth.usersocialauth", "uptime.uptimesubscription", + "uptime.uptimesubscriptionregion", 
"workflow_engine.action", "workflow_engine.dataconditiongroup", "workflow_engine.dataconditiongroupaction", @@ -117,6 +117,7 @@ "workflow_engine.detector", "workflow_engine.datasourcedetector", "uptime.projectuptimesubscription", + "tempest.tempestcredentials", "sentry.userreport", "sentry.useroption", "sentry.useremail", @@ -246,4 +247,4 @@ "sentry.incidentsnapshot", "sentry.incidentproject", "sentry.incidentactivity" -] \ No newline at end of file +] diff --git a/fixtures/backup/model_dependencies/truncate.json b/fixtures/backup/model_dependencies/truncate.json index 39ed0286c7e715..415a6f5e10ec8b 100644 --- a/fixtures/backup/model_dependencies/truncate.json +++ b/fixtures/backup/model_dependencies/truncate.json @@ -18,7 +18,6 @@ "sentry_identityprovider", "sentry_integration", "sentry_integrationfeature", - "sentry_metricskeyindexer", "sentry_monitorlocation", "sentry_option", "sentry_organization", @@ -49,6 +48,7 @@ "sentry_userrole_users", "social_auth_usersocialauth", "uptime_uptimesubscription", + "uptime_uptimesubscriptionregion", "workflow_engine_action", "workflow_engine_dataconditiongroup", "workflow_engine_dataconditiongroupaction", @@ -117,6 +117,7 @@ "workflow_engine_detector", "workflow_engine_datasourcedetector", "uptime_projectuptimesubscription", + "tempest_tempestcredentials", "sentry_userreport", "sentry_useroption", "sentry_useremail", @@ -246,4 +247,4 @@ "sentry_incidentsnapshot", "sentry_incidentproject", "sentry_incidentactivity" -] \ No newline at end of file +] diff --git a/fixtures/sdk_crash_detection/crash_event_android.py b/fixtures/sdk_crash_detection/crash_event_android.py index bf6634215561a9..d7990c3ecb4cfb 100644 --- a/fixtures/sdk_crash_detection/crash_event_android.py +++ b/fixtures/sdk_crash_detection/crash_event_android.py @@ -96,6 +96,22 @@ def get_apex_crash_event( ) +def get_exception( + frames: Sequence[Mapping[str, str]], + mechanism=None, +) -> dict[str, object]: + if mechanism is None: + # linter complains about mutable arguments otherwise + mechanism = {"type": "onerror", "handled": False} + return { + "type": "IllegalArgumentException", + "value": "SDK Crash", + "module": "java.lang", + "stacktrace": {"frames": frames}, + "mechanism": mechanism, + } + + def get_crash_event_with_frames(frames: Sequence[Mapping[str, str]], **kwargs) -> dict[str, object]: result = { "event_id": "0a52a8331d3b45089ebd74f8118d4fa1", @@ -103,17 +119,7 @@ def get_crash_event_with_frames(frames: Sequence[Mapping[str, str]], **kwargs) - "dist": "2", "platform": "java", "environment": "debug", - "exception": { - "values": [ - { - "type": "IllegalArgumentException", - "value": "SDK Crash", - "module": "java.lang", - "stacktrace": {"frames": frames}, - "mechanism": {"type": "onerror", "handled": False}, - } - ] - }, + "exception": {"values": [get_exception(frames)]}, "key_id": "1336851", "level": "fatal", "contexts": { diff --git a/fixtures/sdk_crash_detection/crash_event_react_native.py b/fixtures/sdk_crash_detection/crash_event_react_native.py index a700e30f46c1c8..8884cb115a600c 100644 --- a/fixtures/sdk_crash_detection/crash_event_react_native.py +++ b/fixtures/sdk_crash_detection/crash_event_react_native.py @@ -46,6 +46,18 @@ def get_frames(filename: str) -> Sequence[MutableMapping[str, str]]: return frames +def get_exception( + frames: Sequence[Mapping[str, str]], + mechanism_type: str = "onerror", +) -> dict[str, object]: + return { + "type": "Error", + "value": "Uncaught Thrown Error", + "stacktrace": {"frames": frames}, + "mechanism": {"type": mechanism_type, 
"handled": False}, + } + + def get_crash_event( filename="/Users/sentry.user/git-repos/sentry-react-native/dist/js/client.js", **kwargs ) -> dict[str, object]: @@ -60,16 +72,7 @@ def get_crash_event_with_frames(frames: Sequence[Mapping[str, str]], **kwargs) - "platform": "javascript", "message": "", "environment": "dev", - "exception": { - "values": [ - { - "type": "Error", - "value": "Uncaught Thrown Error", - "stacktrace": {"frames": frames}, - "mechanism": {"type": "onerror", "handled": False}, - } - ] - }, + "exception": {"values": [get_exception(frames)]}, "key_id": "3554525", "level": "fatal", "contexts": { diff --git a/fixtures/search-syntax/explicit_number_tag.json b/fixtures/search-syntax/explicit_number_tag.json new file mode 100644 index 00000000000000..57337d8d6f7fda --- /dev/null +++ b/fixtures/search-syntax/explicit_number_tag.json @@ -0,0 +1,43 @@ +[ + { + "query": "tags[foo,number]:456 release:1.2.1 tags[project_id,number]:123", + "result": [ + {"type": "spaces", "value": ""}, + { + "type": "filter", + "filter": "text", + "negated": false, + "key": { + "type": "keyExplicitNumberTag", + "prefix": "tags", + "key": {"type": "keySimple", "value": "foo", "quoted": false} + }, + "operator": "", + "value": {"type": "valueText", "value": "456", "quoted": false} + }, + {"type": "spaces", "value": " "}, + { + "type": "filter", + "filter": "text", + "negated": false, + "key": {"type": "keySimple", "value": "release", "quoted": false}, + "operator": "", + "value": {"type": "valueText", "value": "1.2.1", "quoted": false} + }, + {"type": "spaces", "value": " "}, + { + "type": "filter", + "filter": "text", + "negated": false, + "key": { + "type": "keyExplicitNumberTag", + "prefix": "tags", + "key": {"type": "keySimple", "value": "project_id", "quoted": false} + }, + "operator": "", + "value": {"type": "valueText", "value": "123", "quoted": false} + }, + {"type": "spaces", "value": ""} + ] + } +] diff --git a/fixtures/search-syntax/explicit_number_tags_in_filter.json b/fixtures/search-syntax/explicit_number_tags_in_filter.json new file mode 100644 index 00000000000000..2e7cd62cc6321d --- /dev/null +++ b/fixtures/search-syntax/explicit_number_tags_in_filter.json @@ -0,0 +1,34 @@ +[ + { + "query": "tags[foo,number]:[123, 456]", + "result": [ + {"type": "spaces", "value": ""}, + { + "type": "filter", + "filter": "textIn", + "negated": false, + "key": { + "type": "keyExplicitNumberTag", + "prefix": "tags", + "key": {"type": "keySimple", "value": "foo", "quoted": false} + }, + "operator": "", + "value": { + "type": "valueTextList", + "items": [ + { + "separator": "", + "value": {"type": "valueText", "value": "123", "quoted": false} + }, + { + "separator": ", ", + "value": {"type": "valueText", "value": "456", "quoted": false} + } + ] + } + }, + {"type": "spaces", "value": ""} + ] + } +] + diff --git a/fixtures/search-syntax/explicit_string_tag.json b/fixtures/search-syntax/explicit_string_tag.json new file mode 100644 index 00000000000000..8dd326ff2ed9fe --- /dev/null +++ b/fixtures/search-syntax/explicit_string_tag.json @@ -0,0 +1,43 @@ +[ + { + "query": "tags[fruit,string]:apple release:1.2.1 tags[project_id,string]:123", + "result": [ + {"type": "spaces", "value": ""}, + { + "type": "filter", + "filter": "text", + "negated": false, + "key": { + "type": "keyExplicitStringTag", + "prefix": "tags", + "key": {"type": "keySimple", "value": "fruit", "quoted": false} + }, + "operator": "", + "value": {"type": "valueText", "value": "apple", "quoted": false} + }, + {"type": "spaces", "value": " "}, + 
{ + "type": "filter", + "filter": "text", + "negated": false, + "key": {"type": "keySimple", "value": "release", "quoted": false}, + "operator": "", + "value": {"type": "valueText", "value": "1.2.1", "quoted": false} + }, + {"type": "spaces", "value": " "}, + { + "type": "filter", + "filter": "text", + "negated": false, + "key": { + "type": "keyExplicitStringTag", + "prefix": "tags", + "key": {"type": "keySimple", "value": "project_id", "quoted": false} + }, + "operator": "", + "value": {"type": "valueText", "value": "123", "quoted": false} + }, + {"type": "spaces", "value": ""} + ] + } +] diff --git a/fixtures/search-syntax/explicit_string_tags_in_filter.json b/fixtures/search-syntax/explicit_string_tags_in_filter.json new file mode 100644 index 00000000000000..ebf6b7038d9da8 --- /dev/null +++ b/fixtures/search-syntax/explicit_string_tags_in_filter.json @@ -0,0 +1,65 @@ +[ + { + "query": "tags[fruit,string]:[apple, pear]", + "result": [ + {"type": "spaces", "value": ""}, + { + "type": "filter", + "filter": "textIn", + "negated": false, + "key": { + "type": "keyExplicitStringTag", + "prefix": "tags", + "key": {"type": "keySimple", "value": "fruit", "quoted": false} + }, + "operator": "", + "value": { + "type": "valueTextList", + "items": [ + { + "separator": "", + "value": {"type": "valueText", "value": "apple", "quoted": false} + }, + { + "separator": ", ", + "value": {"type": "valueText", "value": "pear", "quoted": false} + } + ] + } + }, + {"type": "spaces", "value": ""} + ] + }, + { + "query": "tags[fruit,string]:[\"apple wow\", \"pear\"]", + "result": [ + {"type": "spaces", "value": ""}, + { + "type": "filter", + "filter": "textIn", + "negated": false, + "key": { + "type": "keyExplicitStringTag", + "prefix": "tags", + "key": {"type": "keySimple", "value": "fruit", "quoted": false} + }, + "operator": "", + "value": { + "type": "valueTextList", + "items": [ + { + "separator": "", + "value": {"type": "valueText", "value": "apple wow", "quoted": true} + }, + { + "separator": ", ", + "value": {"type": "valueText", "value": "pear", "quoted": true} + } + ] + } + }, + {"type": "spaces", "value": ""} + ] + } +] + diff --git a/migrations_lockfile.txt b/migrations_lockfile.txt index ef62e79b102a35..8110b79486af6a 100644 --- a/migrations_lockfile.txt +++ b/migrations_lockfile.txt @@ -15,10 +15,12 @@ remote_subscriptions: 0003_drop_remote_subscription replays: 0004_index_together -sentry: 0802_remove_grouping_auto_update_option +sentry: 0804_delete_metrics_key_indexer_pt2 social_auth: 0002_default_auto_field -uptime: 0018_add_trace_sampling_field_to_uptime +tempest: 0001_create_tempest_credentials_model -workflow_engine: 0015_create_rule_lookup_tables +uptime: 0021_drop_region_table_col + +workflow_engine: 0019_drop_dataconditions_condition diff --git a/package.json b/package.json index 0a27d0d93efa85..d630a938fb33f8 100644 --- a/package.json +++ b/package.json @@ -56,13 +56,13 @@ "@sentry-internal/rrweb": "2.26.0", "@sentry-internal/rrweb-player": "2.26.0", "@sentry-internal/rrweb-snapshot": "2.26.0", - "@sentry/core": "8.39.0-beta.0", - "@sentry/node": "8.39.0-beta.0", - "@sentry/react": "8.39.0-beta.0", + "@sentry/core": "8.43.0", + "@sentry/node": "8.43.0", + "@sentry/react": "8.43.0", "@sentry/release-parser": "^1.3.1", "@sentry/status-page-list": "^0.3.0", - "@sentry/types": "8.39.0-beta.0", - "@sentry/utils": "8.39.0-beta.0", + "@sentry/types": "8.43.0", + "@sentry/utils": "8.43.0", "@sentry/webpack-plugin": "^2.22.4", "@spotlightjs/spotlight": "^2.0.0-alpha.1", "@tanstack/react-query": 
"^5.56.2", @@ -177,32 +177,34 @@ "@biomejs/biome": "^1.9.1", "@codecov/webpack-plugin": "^1.2.0", "@emotion/eslint-plugin": "^11.12.0", + "@eslint/compat": "^1.2.4", + "@eslint/eslintrc": "^3.2.0", + "@eslint/js": "^9.17.0", "@pmmmwh/react-refresh-webpack-plugin": "0.5.15", "@sentry/jest-environment": "6.0.0", - "@sentry/profiling-node": "8.39.0-beta.0", + "@sentry/profiling-node": "8.43.0", "@styled/typescript-styled-plugin": "^1.0.1", "@testing-library/dom": "10.1.0", "@testing-library/jest-dom": "6.4.5", "@testing-library/react": "16.0.0", "@testing-library/user-event": "14.5.2", "@types/node": "^22.9.1", - "@typescript-eslint/eslint-plugin": "^8.8.1", - "@typescript-eslint/parser": "^8.8.1", "babel-gettext-extractor": "^4.1.3", "babel-jest": "29.7.0", "benchmark": "^2.1.4", - "eslint": "8.57.1", - "eslint-import-resolver-typescript": "^3.6.3", + "eslint": "^9.17.0", + "eslint-config-prettier": "^9.1.0", + "eslint-import-resolver-typescript": "^3.7.0", "eslint-plugin-import": "^2.31.0", - "eslint-plugin-jest": "^28.8.3", - "eslint-plugin-jest-dom": "^5.4.0", - "eslint-plugin-no-lookahead-lookbehind-regexp": "0.1.0", - "eslint-plugin-react": "^7.37.1", - "eslint-plugin-react-hooks": "^4.6.2", + "eslint-plugin-jest": "^28.10.0", + "eslint-plugin-jest-dom": "^5.5.0", + "eslint-plugin-react": "^7.37.3", + "eslint-plugin-react-hooks": "5.0.0", "eslint-plugin-sentry": "^2.10.0", "eslint-plugin-simple-import-sort": "^12.1.1", - "eslint-plugin-testing-library": "^6.3.0", + "eslint-plugin-testing-library": "^7.1.1", "eslint-plugin-typescript-sort-keys": "^3.3.0", + "globals": "^15.14.0", "html-webpack-plugin": "^5.6.0", "jest": "29.7.0", "jest-canvas-mock": "^2.5.2", @@ -215,6 +217,7 @@ "stylelint-config-recommended": "^14.0.1", "terser": "5.31.6", "tsconfig-paths": "^4.2.0", + "typescript-eslint": "^8.18.2", "webpack-dev-server": "5.1.0" }, "resolutions": { @@ -232,12 +235,12 @@ "test-precommit": "node scripts/test.js --bail --findRelatedTests -u", "test-staged": "node scripts/test.js --findRelatedTests $(git diff --name-only --cached)", "lint": "yarn lint:biome && yarn lint:prettier && yarn lint:js && yarn lint:css", - "lint:js": "eslint . --ext .js,.ts,.tsx", + "lint:js": "eslint", "lint:css": "stylelint '**/*.[jt]sx'", "lint:biome": "biome check .", "lint:prettier": "prettier \"**/*.md\" \"**/*.yaml\" \"**/*.[jt]s(x)?\" --check --log-level=error", "fix": "yarn fix:biome && yarn fix:prettier && yarn fix:eslint", - "fix:eslint": "eslint . --ext .js,.ts,.tsx --fix", + "fix:eslint": "eslint --fix", "fix:biome": "biome check . 
--write", "fix:prettier": "prettier \"**/*.md\" \"**/*.yaml\" \"**/*.[jt]s(x)?\" --write --log-level=error", "dev": "(yarn check --verify-tree || yarn install --check-files) && sentry devserver", diff --git a/pyproject.toml b/pyproject.toml index 83c9759ec9c211..0a21b614af9296 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -120,7 +120,6 @@ ignore_missing_imports = true module = [ "sentry.api.base", "sentry.api.bases.organization_events", - "sentry.api.bases.organizationmember", "sentry.api.bases.team", "sentry.api.endpoints.accept_organization_invite", "sentry.api.endpoints.auth_config", @@ -132,7 +131,6 @@ module = [ "sentry.api.endpoints.index", "sentry.api.endpoints.internal.mail", "sentry.api.endpoints.organization_details", - "sentry.api.endpoints.organization_events", "sentry.api.endpoints.organization_events_facets_performance", "sentry.api.endpoints.organization_events_meta", "sentry.api.endpoints.organization_events_spans_performance", @@ -146,18 +144,14 @@ module = [ "sentry.api.endpoints.organization_releases", "sentry.api.endpoints.organization_request_project_creation", "sentry.api.endpoints.organization_search_details", - "sentry.api.endpoints.organization_sessions", "sentry.api.endpoints.organization_stats", "sentry.api.endpoints.project_index", "sentry.api.endpoints.project_ownership", "sentry.api.endpoints.project_release_files", "sentry.api.endpoints.project_repo_path_parsing", - "sentry.api.endpoints.project_rule_preview", "sentry.api.endpoints.project_rules_configuration", - "sentry.api.endpoints.project_servicehook_stats", "sentry.api.endpoints.project_transaction_names", "sentry.api.endpoints.team_details", - "sentry.api.endpoints.team_release_count", "sentry.api.endpoints.user_subscriptions", "sentry.api.event_search", "sentry.api.helpers.group_index.index", @@ -167,7 +161,6 @@ module = [ "sentry.api.permissions", "sentry.api.serializers.models.auth_provider", "sentry.api.serializers.models.dashboard", - "sentry.api.serializers.models.environment", "sentry.api.serializers.models.event", "sentry.api.serializers.models.group", "sentry.api.serializers.models.group_stream", @@ -176,7 +169,6 @@ module = [ "sentry.api.serializers.models.project", "sentry.api.serializers.models.role", "sentry.api.serializers.models.rule", - "sentry.api.serializers.models.team", "sentry.api.serializers.rest_framework.mentions", "sentry.auth.helper", "sentry.auth.provider", @@ -214,7 +206,6 @@ module = [ "sentry.integrations.gitlab.client", "sentry.integrations.gitlab.integration", "sentry.integrations.gitlab.issues", - "sentry.integrations.jira.actions.form", "sentry.integrations.jira.client", "sentry.integrations.jira.integration", "sentry.integrations.jira.views.base", @@ -285,10 +276,7 @@ module = [ "sentry.scim.endpoints.utils", "sentry.search.events.builder.errors", "sentry.search.events.builder.metrics", - "sentry.search.events.datasets.discover", "sentry.search.events.datasets.filter_aliases", - "sentry.search.events.datasets.function_aliases", - "sentry.search.events.datasets.metrics", "sentry.search.events.datasets.metrics_layer", "sentry.search.events.fields", "sentry.search.events.filter", @@ -302,8 +290,6 @@ module = [ "sentry.snuba.metrics.datasource", "sentry.snuba.metrics.fields.base", "sentry.snuba.metrics.query_builder", - "sentry.snuba.sessions_v2", - "sentry.snuba.spans_indexed", "sentry.snuba.spans_metrics", "sentry.tagstore.snuba.backend", "sentry.tagstore.types", @@ -324,17 +310,11 @@ module = [ "sentry_plugins.bitbucket.mixins", "sentry_plugins.github.plugin", 
"sentry_plugins.jira.plugin", - "tests.sentry.api.bases.test_organization", - "tests.sentry.api.bases.test_project", - "tests.sentry.api.bases.test_team", "tests.sentry.api.endpoints.notifications.test_notification_actions_details", "tests.sentry.api.endpoints.notifications.test_notification_actions_index", - "tests.sentry.api.endpoints.test_event_attachment_details", "tests.sentry.api.helpers.test_group_index", - "tests.sentry.api.test_authentication", "tests.sentry.api.test_base", "tests.sentry.api.test_event_search", - "tests.sentry.eventstore.test_base", "tests.sentry.issues.test_utils", ] disable_error_code = [ @@ -360,13 +340,22 @@ disable_error_code = [ # begin: stronger typing [[tool.mypy.overrides]] module = [ + "fixtures.safe_migrations_apps.*", + "sentry.analytics.*", + "sentry.api.endpoints.integrations.sentry_apps.installation.external_issue.*", "sentry.api.endpoints.project_backfill_similar_issues_embeddings_records", + "sentry.api.endpoints.release_thresholds.health_checks.*", + "sentry.api.endpoints.relocations.artifacts.*", "sentry.api.helpers.deprecation", "sentry.api.helpers.source_map_helper", + "sentry.api.serializers.models.organization_member.*", + "sentry.audit_log.services.*", "sentry.auth.services.*", "sentry.auth.view", "sentry.buffer.*", "sentry.build.*", + "sentry.data_secrecy.models.*", + "sentry.data_secrecy.service.*", "sentry.db.models.fields.citext", "sentry.db.models.fields.foreignkey", "sentry.db.models.fields.hybrid_cloud_foreign_key", @@ -375,31 +364,82 @@ module = [ "sentry.db.models.paranoia", "sentry.db.models.utils", "sentry.deletions.*", + "sentry.digests.*", "sentry.digests.notifications", + "sentry.dynamic_sampling.models.*", + "sentry.dynamic_sampling.rules.biases.*", + "sentry.dynamic_sampling.rules.combinators.*", + "sentry.dynamic_sampling.rules.helpers.*", + "sentry.dynamic_sampling.tasks.helpers.*", + "sentry.eventstore.reprocessing.*", "sentry.eventstore.reprocessing.redis", + "sentry.eventstream.*", "sentry.eventtypes.error", + "sentry.feedback.migrations.*", + "sentry.flags.migrations.*", + "sentry.grouping.api", "sentry.grouping.component", "sentry.grouping.fingerprinting", + "sentry.grouping.fingerprinting.*", + "sentry.grouping.grouping_info", "sentry.grouping.ingest.*", "sentry.grouping.parameterization", + "sentry.grouping.utils", + "sentry.grouping.variants", "sentry.hybridcloud.*", + "sentry.identity.discord.*", "sentry.identity.github_enterprise.*", + "sentry.identity.services.*", + "sentry.identity.vsts_extension.*", + "sentry.incidents.utils.*", "sentry.ingest.slicing", + "sentry.integrations.discord.actions.*", + "sentry.integrations.discord.message_builder.base.component.*", + "sentry.integrations.discord.message_builder.base.embed.*", + "sentry.integrations.discord.utils.*", + "sentry.integrations.discord.views.*", + "sentry.integrations.discord.webhooks.*", + "sentry.integrations.github.actions.*", + "sentry.integrations.github_enterprise.actions.*", + "sentry.integrations.jira.actions.*", + "sentry.integrations.jira.endpoints.*", + "sentry.integrations.jira.models.*", + "sentry.integrations.jira_server.actions.*", + "sentry.integrations.jira_server.utils.*", "sentry.integrations.models.integration_feature", + "sentry.integrations.project_management.*", + "sentry.integrations.repository.*", + "sentry.integrations.services.*", + "sentry.integrations.slack.threads.*", + "sentry.integrations.slack.views.*", + "sentry.integrations.vsts.actions.*", + "sentry.integrations.vsts.tasks.*", + "sentry.integrations.web.debug.*", 
"sentry.issues", "sentry.issues.analytics", "sentry.issues.apps", "sentry.issues.constants", "sentry.issues.endpoints", + "sentry.issues.endpoints.actionable_items", "sentry.issues.endpoints.group_activities", "sentry.issues.endpoints.group_event_details", "sentry.issues.endpoints.group_events", - "sentry.issues.endpoints.group_participants", + "sentry.issues.endpoints.group_notes", + "sentry.issues.endpoints.group_notes_details", + "sentry.issues.endpoints.group_similar_issues_embeddings", + "sentry.issues.endpoints.group_tombstone", + "sentry.issues.endpoints.group_tombstone_details", + "sentry.issues.endpoints.organization_eventid", + "sentry.issues.endpoints.organization_group_index", "sentry.issues.endpoints.organization_group_index_stats", "sentry.issues.endpoints.organization_group_search_views", "sentry.issues.endpoints.organization_release_previous_commits", "sentry.issues.endpoints.organization_searches", + "sentry.issues.endpoints.organization_shortid", + "sentry.issues.endpoints.project_event_details", "sentry.issues.endpoints.project_events", + "sentry.issues.endpoints.project_group_index", + "sentry.issues.endpoints.project_group_stats", "sentry.issues.endpoints.project_stacktrace_link", "sentry.issues.endpoints.related_issues", "sentry.issues.endpoints.shared_group_details", @@ -420,6 +460,7 @@ module = [ "sentry.issues.receivers", "sentry.issues.related.*", "sentry.issues.run", + "sentry.issues.services.*", "sentry.issues.status_change", "sentry.issues.status_change_consumer", "sentry.issues.status_change_message", @@ -427,39 +468,78 @@ module = [ "sentry.lang.java.processing", "sentry.llm.*", "sentry.migrations.*", + "sentry.models.activity", "sentry.models.event", "sentry.models.eventattachment", + "sentry.models.groupassignee", + "sentry.models.grouphistory", "sentry.models.groupsubscription", - "sentry.monkey", + "sentry.models.options.*", + "sentry.monkey.*", + "sentry.nodestore.*", "sentry.nodestore.base", "sentry.nodestore.bigtable.backend", "sentry.nodestore.django.backend", "sentry.nodestore.django.models", "sentry.nodestore.filesystem.backend", "sentry.nodestore.models", + "sentry.notifications.services.*", "sentry.organizations.*", "sentry.ownership.*", "sentry.plugins.base.response", "sentry.plugins.base.view", + "sentry.plugins.validators.*", + "sentry.post_process_forwarder.*", "sentry.profiles.*", - "sentry.projects.services.*", + "sentry.projects.*", + "sentry.queue.*", "sentry.ratelimits.leaky_bucket", "sentry.relay.config.metric_extraction", + "sentry.relay.types.*", + "sentry.release_health.release_monitor.*", + "sentry.relocation.services.relocation_export.*", + "sentry.remote_subscriptions.migrations.*", + "sentry.replays.consumers.*", + "sentry.replays.lib.new_query.*", + "sentry.replays.migrations.*", "sentry.reprocessing2", + "sentry.roles.*", + "sentry.rules.actions.sentry_apps.*", + "sentry.rules.conditions.*", + "sentry.rules.history.endpoints.*", "sentry.runner.*", "sentry.search.snuba.backend", - "sentry.sentry_metrics.consumers.indexer.slicing_router", + "sentry.security.*", + "sentry.seer.similarity.*", + "sentry.sentry_apps.external_issues.*", + "sentry.sentry_apps.services.*", + "sentry.sentry_apps.utils.*", + "sentry.sentry_apps.web.*", + "sentry.sentry_metrics.consumers.indexer.*", + "sentry.sentry_metrics.indexer.limiters.*", + "sentry.shared_integrations.exceptions.*", + "sentry.slug.*", "sentry.snuba.metrics.extraction", + "sentry.snuba.metrics.naming_layer.*", + "sentry.snuba.query_subscriptions.*", + "sentry.spans.grouping.*", 
"sentry.stacktraces.platform", "sentry.tasks.beacon", "sentry.tasks.commit_context", + "sentry.tasks.embeddings_grouping.backfill_seer_grouping_records_for_project", "sentry.tasks.on_demand_metrics", "sentry.tasks.reprocessing2", "sentry.tasks.store", "sentry.taskworker.*", + "sentry.tempest.endpoints.*", + "sentry.tempest.migrations.*", "sentry.testutils.helpers.task_runner", - "sentry.types.actor", - "sentry.types.region", + "sentry.testutils.skips", + "sentry.toolbar.utils.*", + "sentry.trash.*", + "sentry.types.*", + "sentry.uptime.migrations.*", + "sentry.usage_accountant.*", "sentry.users.*", "sentry.utils.arroyo", "sentry.utils.assets", @@ -476,6 +556,7 @@ module = [ "sentry.utils.imports", "sentry.utils.iterators", "sentry.utils.javascript", + "sentry.utils.kvstore.*", "sentry.utils.lazy_service_wrapper", "sentry.utils.locking.*", "sentry.utils.migrations", @@ -486,6 +567,7 @@ module = [ "sentry.utils.pubsub", "sentry.utils.redis", "sentry.utils.redis_metrics", + "sentry.utils.sdk_crashes.*", "sentry.utils.sentry_apps.*", "sentry.utils.services", "sentry.utils.sms", @@ -496,20 +578,46 @@ module = [ "sentry.web.frontend.auth_provider_login", "sentry.web.frontend.cli", "sentry.web.frontend.csv", + "sentry.web.frontend.mixins.*", + "sentry.workflow_engine.handlers.action.*", + "sentry.workflow_engine.handlers.condition.*", + "sentry.workflow_engine.migrations.*", "sentry_plugins.base", + "social_auth.migrations.*", + "sudo.*", + "tests.sentry.audit_log.services.*", "tests.sentry.deletions.test_group", "tests.sentry.event_manager.test_event_manager", "tests.sentry.grouping.ingest.test_seer", "tests.sentry.grouping.test_fingerprinting", "tests.sentry.hybridcloud.*", + "tests.sentry.incidents.serializers.*", + "tests.sentry.integrations.msteams.webhook.*", + "tests.sentry.integrations.repository.base.*", + "tests.sentry.integrations.repository.issue_alert.*", + "tests.sentry.integrations.slack.threads.*", "tests.sentry.issues", "tests.sentry.issues.endpoints", "tests.sentry.issues.endpoints.test_actionable_items", + "tests.sentry.issues.endpoints.test_group_activities", + "tests.sentry.issues.endpoints.test_group_details", + "tests.sentry.issues.endpoints.test_group_event_details", + "tests.sentry.issues.endpoints.test_group_events", + "tests.sentry.issues.endpoints.test_group_hashes", + "tests.sentry.issues.endpoints.test_group_notes", + "tests.sentry.issues.endpoints.test_group_notes_details", + "tests.sentry.issues.endpoints.test_group_similar_issues_embeddings", + "tests.sentry.issues.endpoints.test_group_tombstone", + "tests.sentry.issues.endpoints.test_group_tombstone_details", "tests.sentry.issues.endpoints.test_organization_group_search_views", "tests.sentry.issues.endpoints.test_organization_searches", + "tests.sentry.issues.endpoints.test_organization_shortid", + "tests.sentry.issues.endpoints.test_project_group_stats", "tests.sentry.issues.endpoints.test_project_stacktrace_link", "tests.sentry.issues.endpoints.test_related_issues", + "tests.sentry.issues.endpoints.test_shared_group_details", "tests.sentry.issues.endpoints.test_source_map_debug", + "tests.sentry.issues.endpoints.test_team_groups_old", "tests.sentry.issues.test_attributes", "tests.sentry.issues.test_escalating", "tests.sentry.issues.test_escalating_issues_alg", @@ -530,12 +638,25 @@ module = [ "tests.sentry.issues.test_status_change", "tests.sentry.issues.test_status_change_consumer", "tests.sentry.issues.test_update_inbox", + "tests.sentry.organizations.*", "tests.sentry.ownership.*", + 
"tests.sentry.post_process_forwarder.*", + "tests.sentry.profiling.*", + "tests.sentry.queue.*", "tests.sentry.ratelimits.test_leaky_bucket", "tests.sentry.relay.config.test_metric_extraction", + "tests.sentry.replays.unit.lib.*", + "tests.sentry.rules.actions.base.*", + "tests.sentry.security.*", + "tests.sentry.snuba.metrics.test_metrics_query_layer.*", + "tests.sentry.tasks.integrations.*", "tests.sentry.tasks.test_on_demand_metrics", + "tests.sentry.types.*", "tests.sentry.types.test_actor", "tests.sentry.types.test_region", + "tests.sentry.usage_accountant.*", + "tests.sentry.users.services.*", + "tests.sentry.utils.mockdata.*", "tests.sentry.web.frontend.test_cli", "tools.*", ] diff --git a/requirements-base.txt b/requirements-base.txt index 4c9b1123811dd5..7b4659d0bbf825 100644 --- a/requirements-base.txt +++ b/requirements-base.txt @@ -41,7 +41,7 @@ packaging>=24.1 parsimonious>=0.10.0 petname>=2.6 phonenumberslite>=8.12.32 -Pillow>=10.4.0 +Pillow>=11.0.0 progressbar2>=3.41.0 protobuf>=5.27.3 proto-plus>=1.25.0 @@ -65,12 +65,12 @@ requests>=2.32.3 rfc3339-validator>=0.1.2 rfc3986-validator>=0.1.1 # [end] jsonschema format validators -sentry-arroyo>=2.18.2 -sentry-kafka-schemas>=0.1.122 +sentry-arroyo>=2.19.9 +sentry-kafka-schemas>=0.1.125 sentry-ophio==1.0.0 sentry-protos>=0.1.37 sentry-redis-tools>=0.1.7 -sentry-relay>=0.9.3 +sentry-relay>=0.9.4 sentry-sdk[http2]>=2.19.2 slack-sdk>=3.27.2 snuba-sdk>=3.0.43 diff --git a/requirements-dev-frozen.txt b/requirements-dev-frozen.txt index e56d30cc4a488d..b562c3cee84851 100644 --- a/requirements-dev-frozen.txt +++ b/requirements-dev-frozen.txt @@ -23,6 +23,7 @@ celery==5.3.5 certifi==2024.7.4 cffi==1.17.1 cfgv==3.3.1 +chardet==5.2.0 charset-normalizer==3.4.0 click==8.1.7 click-didyoumean==0.3.0 @@ -36,10 +37,10 @@ cryptography==43.0.1 cssselect==1.0.3 cssutils==2.9.0 datadog==0.49.1 -devservices==1.0.5 +devservices==1.0.8 distlib==0.3.8 distro==1.8.0 -django==5.1.1 +django==5.1.4 django-crispy-forms==1.14.0 django-csp==3.8 django-pg-zero-downtime-migrations==0.16 @@ -105,7 +106,7 @@ mmh3==4.0.0 more-itertools==8.13.0 msgpack==1.1.0 msgpack-types==0.2.0 -mypy==1.13.0 +mypy==1.14.0 mypy-extensions==1.0.0 nodeenv==1.9.1 oauthlib==3.1.0 @@ -125,7 +126,7 @@ pep517==0.12.0 petname==2.6 phabricator==0.7.0 phonenumberslite==8.12.55 -pillow==10.4.0 +pillow==11.0.0 pip-tools==7.1.0 platformdirs==4.2.0 pluggy==1.5.0 @@ -169,7 +170,7 @@ redis==3.4.1 redis-py-cluster==2.1.0 referencing==0.30.2 regex==2022.9.13 -reportlab==4.0.7 +reportlab==4.2.5 requests==2.32.3 requests-file==2.1.0 requests-oauthlib==1.2.0 @@ -180,17 +181,17 @@ rpds-py==0.20.0 rsa==4.8 s3transfer==0.10.0 selenium==4.16.0 -sentry-arroyo==2.18.2 +sentry-arroyo==2.19.9 sentry-cli==2.16.0 sentry-covdefaults-disable-branch-coverage==1.0.2 sentry-devenv==1.14.2 sentry-forked-django-stubs==5.1.1.post1 -sentry-forked-djangorestframework-stubs==3.15.1.post2 -sentry-kafka-schemas==0.1.122 +sentry-forked-djangorestframework-stubs==3.15.2.post1 +sentry-kafka-schemas==0.1.125 sentry-ophio==1.0.0 -sentry-protos==0.1.37 +sentry-protos==0.1.39 sentry-redis-tools==0.1.7 -sentry-relay==0.9.3 +sentry-relay==0.9.4 sentry-sdk==2.19.2 sentry-usage-accountant==0.0.10 simplejson==3.17.6 @@ -241,7 +242,7 @@ virtualenv==20.25.0 wcwidth==0.2.10 werkzeug==3.0.6 wheel==0.38.4 -wrapt==1.17.0rc1 +wrapt==1.17.0 wsproto==1.1.0 xmlsec==1.3.14 zstandard==0.18.0 diff --git a/requirements-dev.txt b/requirements-dev.txt index 9dd1a81506ba10..0748993df7b1a1 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ 
-1,7 +1,7 @@ --index-url https://pypi.devinfra.sentry.io/simple sentry-devenv>=1.14.2 -devservices>=1.0.5 +devservices>=1.0.8 covdefaults>=2.3.0 sentry-covdefaults-disable-branch-coverage>=1.0.2 @@ -38,10 +38,10 @@ packaging>=21.3 # for type checking sentry-forked-django-stubs>=5.1.1.post1 -sentry-forked-djangorestframework-stubs>=3.15.1.post2 +sentry-forked-djangorestframework-stubs>=3.15.2.post1 lxml-stubs msgpack-types>=0.2.0 -mypy>=1.13 +mypy>=1.14 types-beautifulsoup4 types-cachetools types-jsonschema diff --git a/requirements-frozen.txt b/requirements-frozen.txt index 40b270e613db9c..79ed8385b6c540 100644 --- a/requirements-frozen.txt +++ b/requirements-frozen.txt @@ -20,6 +20,7 @@ cachetools==5.3.0 celery==5.3.5 certifi==2024.7.4 cffi==1.17.1 +chardet==5.2.0 charset-normalizer==3.4.0 click==8.1.7 click-didyoumean==0.3.0 @@ -32,7 +33,7 @@ cssselect==1.0.3 cssutils==2.9.0 datadog==0.49.1 distro==1.8.0 -django==5.1.1 +django==5.1.4 django-crispy-forms==1.14.0 django-csp==3.8 django-pg-zero-downtime-migrations==0.16 @@ -88,7 +89,7 @@ parsimonious==0.10.0 petname==2.6 phabricator==0.7.0 phonenumberslite==8.12.55 -pillow==10.4.0 +pillow==11.0.0 progressbar2==3.41.0 prompt-toolkit==3.0.41 proto-plus==1.25.0 @@ -115,7 +116,7 @@ redis==3.4.1 redis-py-cluster==2.1.0 referencing==0.30.2 regex==2022.9.13 -reportlab==4.0.7 +reportlab==4.2.5 requests==2.32.3 requests-file==2.1.0 requests-oauthlib==1.2.0 @@ -124,12 +125,12 @@ rfc3986-validator==0.1.1 rpds-py==0.20.0 rsa==4.8 s3transfer==0.10.0 -sentry-arroyo==2.18.2 -sentry-kafka-schemas==0.1.122 +sentry-arroyo==2.19.9 +sentry-kafka-schemas==0.1.125 sentry-ophio==1.0.0 -sentry-protos==0.1.37 +sentry-protos==0.1.39 sentry-redis-tools==0.1.7 -sentry-relay==0.9.3 +sentry-relay==0.9.4 sentry-sdk==2.19.2 sentry-usage-accountant==0.0.10 simplejson==3.17.6 diff --git a/requirements-getsentry.txt b/requirements-getsentry.txt index 2dbf4c23d860b8..21cff131171f2d 100644 --- a/requirements-getsentry.txt +++ b/requirements-getsentry.txt @@ -10,5 +10,5 @@ Avalara==20.9.0 iso3166 pycountry==17.5.14 pyvat==1.3.15 -reportlab==4.0.7 +reportlab==4.2.5 stripe==3.1.0 diff --git a/setup.cfg b/setup.cfg index aae014f9aa7768..c7b83b9787a533 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = sentry -version = 24.12.0.dev0 +version = 25.1.0.dev0 description = A realtime logging and aggregation server. 
long_description = file: README.md long_description_content_type = text/markdown diff --git a/src/sentry/analytics/events/groupowner_assignment.py b/src/sentry/analytics/events/groupowner_assignment.py index 30d2913bed831e..81926fecc2bd98 100644 --- a/src/sentry/analytics/events/groupowner_assignment.py +++ b/src/sentry/analytics/events/groupowner_assignment.py @@ -9,6 +9,9 @@ class GroupOwnerAssignment(analytics.Event): analytics.Attribute("project_id"), analytics.Attribute("group_id"), analytics.Attribute("new_assignment", type=bool), + analytics.Attribute("user_id", required=False), + analytics.Attribute("group_owner_type"), + analytics.Attribute("method", required=False), ) diff --git a/src/sentry/api/api_owners.py b/src/sentry/api/api_owners.py index 60e5d1884f26a3..efb352fe7185ce 100644 --- a/src/sentry/api/api_owners.py +++ b/src/sentry/api/api_owners.py @@ -28,3 +28,4 @@ class ApiOwner(Enum): TELEMETRY_EXPERIENCE = "telemetry-experience" UNOWNED = "unowned" WEB_FRONTEND_SDKS = "team-web-sdk-frontend" + GDX = "gdx" diff --git a/src/sentry/api/authentication.py b/src/sentry/api/authentication.py index eb0fd888294f29..01a76662f5393a 100644 --- a/src/sentry/api/authentication.py +++ b/src/sentry/api/authentication.py @@ -293,11 +293,11 @@ class ClientIdSecretAuthentication(QuietBasicAuthentication): """ def authenticate(self, request: Request): - if not request.json_body: + if not request.data: raise AuthenticationFailed("Invalid request") - client_id = request.json_body.get("client_id") - client_secret = request.json_body.get("client_secret") + client_id = request.data.get("client_id") + client_secret = request.data.get("client_secret") invalid_pair_error = AuthenticationFailed("Invalid Client ID / Secret pair") diff --git a/src/sentry/api/base.py b/src/sentry/api/base.py index fddcd7dd555142..680da60495fa82 100644 --- a/src/sentry/api/base.py +++ b/src/sentry/api/base.py @@ -9,7 +9,6 @@ from typing import Any from urllib.parse import quote as urlquote -import orjson import sentry_sdk from django.conf import settings from django.http import HttpResponse @@ -342,29 +341,6 @@ def handle_exception_with_details( def create_audit_entry(self, request: Request, transaction_id=None, **kwargs): return create_audit_entry(request, transaction_id, audit_logger, **kwargs) - def load_json_body(self, request: Request): - """ - Attempts to load the request body when it's JSON. - - The end result is ``request.json_body`` having a value. When it can't - load the body as JSON, for any reason, ``request.json_body`` is None. - - The request flow is unaffected and no exceptions are ever raised. 
- """ - - request.json_body = None - - if not request.META.get("CONTENT_TYPE", "").startswith("application/json"): - return - - if not len(request.body): - return - - try: - request.json_body = orjson.loads(request.body) - except orjson.JSONDecodeError: - return - def initialize_request(self, request: HttpRequest, *args: Any, **kwargs: Any) -> Request: # XXX: Since DRF 3.x, when the request is passed into # `initialize_request` it's set as an internal variable on the returned @@ -398,7 +374,10 @@ def dispatch(self, request: Request, *args, **kwargs) -> Response: self.args = args self.kwargs = kwargs request = self.initialize_request(request, *args, **kwargs) - self.load_json_body(request) + # XXX: without this seemingly useless access to `.body` we are + # unable to access `request.body` later on due to `rest_framework` + # loading the request body via `request.read()` + request.body self.request = request self.headers = self.default_response_headers # deprecate? diff --git a/src/sentry/api/bases/organization.py b/src/sentry/api/bases/organization.py index 1866b60c6316f1..4e5d2e632d0abc 100644 --- a/src/sentry/api/bases/organization.py +++ b/src/sentry/api/bases/organization.py @@ -230,6 +230,14 @@ class OrganizationMetricsPermission(OrganizationPermission): } +class OrganizationFlagWebHookSigningSecretPermission(OrganizationPermission): + scope_map = { + "GET": ["org:read", "org:write", "org:admin"], + "POST": ["org:read", "org:write", "org:admin"], + "DELETE": ["org:write", "org:admin"], + } + + class ControlSiloOrganizationEndpoint(Endpoint): """ A base class for endpoints that use an organization scoping but lives in the control silo diff --git a/src/sentry/api/bases/organizationmember.py b/src/sentry/api/bases/organizationmember.py index 4f322056d3a701..c140bb06f73572 100644 --- a/src/sentry/api/bases/organizationmember.py +++ b/src/sentry/api/bases/organizationmember.py @@ -1,11 +1,11 @@ from __future__ import annotations -from typing import Any +from typing import Any, NotRequired, TypedDict from rest_framework import serializers +from rest_framework.fields import empty from rest_framework.request import Request -from sentry import features from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.permissions import StaffPermissionMixin from sentry.db.models.fields.bounded import BoundedAutoField @@ -43,10 +43,7 @@ def has_object_permission( is_role_above_member = "member:admin" in scopes or "member:write" in scopes if isinstance(organization, RpcUserOrganizationContext): organization = organization.organization - return is_role_above_member or ( - features.has("organizations:members-invite-teammates", organization) - and not organization.flags.disable_member_invite - ) + return is_role_above_member or not organization.flags.disable_member_invite class MemberAndStaffPermission(StaffPermissionMixin, MemberPermission): @@ -65,7 +62,7 @@ def to_internal_value(self, data): return data return super().to_internal_value(data) - def run_validation(self, data): + def run_validation(self, data=empty): if data == "me": return data return super().run_validation(data) @@ -75,6 +72,15 @@ class MemberSerializer(serializers.Serializer): id = MemberIdField(min_value=0, max_value=BoundedAutoField.MAX_VALUE, required=True) +class _FilterKwargs(TypedDict): + organization: Organization + user_id: NotRequired[int] + user_is_active: NotRequired[bool] + id: NotRequired[int | str] + organization_id: NotRequired[int] + invite_status: NotRequired[int] + + class 
OrganizationMemberEndpoint(OrganizationEndpoint): def convert_args( self, @@ -105,16 +111,16 @@ def _get_member( member_id: int | str, invite_status: InviteStatus | None = None, ) -> OrganizationMember: - args = [] - kwargs = dict(organization=organization) + kwargs: _FilterKwargs = {"organization": organization} if member_id == "me": - kwargs.update(user_id=request.user.id, user_is_active=True) + kwargs["user_id"] = request.user.id + kwargs["user_is_active"] = True else: - kwargs.update(id=member_id, organization_id=organization.id) + kwargs["id"] = member_id + kwargs["organization_id"] = organization.id if invite_status: - kwargs.update(invite_status=invite_status.value) + kwargs["invite_status"] = invite_status.value - om = OrganizationMember.objects.filter(*args, **kwargs).get() - return om + return OrganizationMember.objects.filter(**kwargs).get() diff --git a/src/sentry/api/decorators.py b/src/sentry/api/decorators.py index ef28203d678411..c6e265588890d0 100644 --- a/src/sentry/api/decorators.py +++ b/src/sentry/api/decorators.py @@ -4,7 +4,11 @@ from rest_framework.request import Request from rest_framework.response import Response -from sentry.api.exceptions import EmailVerificationRequired, SudoRequired +from sentry.api.exceptions import ( + EmailVerificationRequired, + PrimaryEmailVerificationRequired, + SudoRequired, +) from sentry.models.apikey import is_api_key_auth from sentry.models.apitoken import is_api_token_auth from sentry.models.orgauthtoken import is_org_auth_token_auth @@ -45,3 +49,13 @@ def wrapped(self, request: Request, *args, **kwargs) -> Response: return func(self, request, *args, **kwargs) return wrapped + + +def primary_email_verification_required(func): + @wraps(func) + def wrapped(self, request: Request, *args, **kwargs) -> Response: + if isinstance(request.user, AnonymousUser) or not request.user.has_verified_primary_email(): + raise PrimaryEmailVerificationRequired(request.user) + return func(self, request, *args, **kwargs) + + return wrapped diff --git a/src/sentry/api/endpoints/admin_project_configs.py b/src/sentry/api/endpoints/admin_project_configs.py index 3b46ff73c17ac0..b8b1b5a8730b87 100644 --- a/src/sentry/api/endpoints/admin_project_configs.py +++ b/src/sentry/api/endpoints/admin_project_configs.py @@ -1,3 +1,6 @@ +from collections.abc import MutableMapping +from typing import Any + from django.http import Http404 from rest_framework.request import Request from rest_framework.response import Response @@ -7,11 +10,12 @@ from sentry.api.base import Endpoint, region_silo_endpoint from sentry.api.permissions import SuperuserOrStaffFeatureFlaggedPermission from sentry.models.project import Project +from sentry.models.projectkey import ProjectKey from sentry.relay import projectconfig_cache +from sentry.relay.config import ProjectConfig, get_project_config from sentry.tasks.relay import schedule_invalidate_project_config -# NOTE: This endpoint should be in getsentry @region_silo_endpoint class AdminRelayProjectConfigsEndpoint(Endpoint): owner = ApiOwner.OWNERS_INGEST @@ -22,45 +26,109 @@ class AdminRelayProjectConfigsEndpoint(Endpoint): permission_classes = (SuperuserOrStaffFeatureFlaggedPermission,) def get(self, request: Request) -> Response: + """The GET endpoint retrieves the project configs for a specific project_id + or a set of project keys. + If a projectId is provided, the configs for all project keys are returned. + If a projectKey is provided, the config for that specific project key is returned. 
+ Both a projectId and a projectKey may be provided in the same request. + + If the project config is currently in cache, will return the cache entry. + If the project config is not in cache, the project config for that key will be null. + """ project_id = request.GET.get("projectId") + project_key_param = request.GET.get("projectKey") - project_keys = [] - if project_id is not None: - try: + if not project_id and not project_key_param: + return Response( + {"error": "Please supply either the projectId or projectKey parameter."}, status=400 + ) + + try: + if project_id: project = Project.objects.get_from_cache(id=project_id) - for project_key in project.key_set.all(): - project_keys.append(project_key.public_key) + else: + project = None + if project_key_param: + supplied_project_key = ProjectKey.objects.get(public_key=project_key_param) + else: + supplied_project_key = None + except Exception: + raise Http404 - except Exception: - raise Http404 + project_keys = self._get_project_keys(project, supplied_project_key) - project_key_param = request.GET.get("projectKey") - if project_key_param is not None: - project_keys.append(project_key_param) - - configs = {} - for key in project_keys: - cached_config = projectconfig_cache.backend.get(key) - if cached_config is not None: - configs[key] = cached_config + configs: MutableMapping[str, MutableMapping[str, Any] | ProjectConfig | None] = {} + uncached_keys = [] + for project_key in project_keys: + if isinstance(project_key, ProjectKey) and project_key.public_key is not None: + cached_config = projectconfig_cache.backend.get(project_key.public_key) + if cached_config is not None: + configs[project_key.public_key] = cached_config + else: + configs[project_key.public_key] = None + uncached_keys.append(project_key) + + if uncached_keys: + if supplied_project_key is not None: + generated_configs = self._get_project_config_sync( + supplied_project_key.project, uncached_keys + ) + elif project is not None: + generated_configs = self._get_project_config_sync(project, uncached_keys) else: - configs[key] = None + generated_configs = {} + + for key, config in generated_configs.items(): + configs[key] = config - # TODO: if we don't think we'll add anything to the endpoint - # we may as well return just the configs return Response({"configs": configs}, status=200) def post(self, request: Request) -> Response: - """Regenerate the project config""" - project_id = request.GET.get("projectId") + """The POST endpoint recalculates the project configs for a specific projectId. + The project config for all projectKeys of the provided projectId is recalculated + in a sync manner and stored in the cache subsequently. 
+ """ + project_id = request.data.get("projectId") - if project_id is not None: - try: - schedule_invalidate_project_config( - project_id=project_id, trigger="_admin_trigger_invalidate_project_config" - ) + if not project_id: + return Response({"error": "Missing projectId parameter"}, status=400) + + try: + project = Project.objects.get_from_cache(id=project_id) + project_keys = self._get_project_keys(project) + schedule_invalidate_project_config( + project_id=project_id, trigger="_admin_trigger_invalidate_project_config" + ) + except Exception: + raise Http404 + + configs = self._get_project_config_sync(project, project_keys) + projectconfig_cache.backend.set_many(configs) + return Response(status=201) + + def _get_project_keys( + self, project: Project | None = None, project_key: ProjectKey | None = None + ) -> list[ProjectKey]: + project_keys = [] + + if project_key is not None: + project_keys.append(project_key) + + if project is not None: + for project_key2 in project.key_set.all(): + project_keys.append(project_key2) + + return project_keys + + def _get_project_config_sync( + self, project: Project, project_keys: list[ProjectKey] + ) -> MutableMapping[str, MutableMapping[str, Any]]: + configs: MutableMapping[str, MutableMapping[str, Any]] = {} - except Exception: - raise Http404 + for project_key in project_keys: + if project_key.public_key is not None: + configs[project_key.public_key] = get_project_config( + project, project_keys=[project_key] + ).to_dict() - return Response(status=204) + return configs diff --git a/src/sentry/api/endpoints/api_application_details.py b/src/sentry/api/endpoints/api_application_details.py index 73f6dadeeba959..09492f1e433d73 100644 --- a/src/sentry/api/endpoints/api_application_details.py +++ b/src/sentry/api/endpoints/api_application_details.py @@ -33,8 +33,26 @@ class ApiApplicationSerializer(serializers.Serializer): ) +class ApiApplicationEndpoint(Endpoint): + def convert_args( + self, + request: Request, + app_id: str, + *args, + **kwargs, + ): + try: + application = ApiApplication.objects.get( + owner_id=request.user.id, client_id=app_id, status=ApiApplicationStatus.active + ) + except ApiApplication.DoesNotExist: + raise ResourceDoesNotExist + kwargs["application"] = application + return (args, kwargs) + + @control_silo_endpoint -class ApiApplicationDetailsEndpoint(Endpoint): +class ApiApplicationDetailsEndpoint(ApiApplicationEndpoint): publish_status = { "DELETE": ApiPublishStatus.PRIVATE, "GET": ApiPublishStatus.PRIVATE, @@ -43,24 +61,10 @@ class ApiApplicationDetailsEndpoint(Endpoint): authentication_classes = (SessionAuthentication,) permission_classes = (IsAuthenticated,) - def get(self, request: Request, app_id) -> Response: - try: - instance = ApiApplication.objects.get( - owner_id=request.user.id, client_id=app_id, status=ApiApplicationStatus.active - ) - except ApiApplication.DoesNotExist: - raise ResourceDoesNotExist - - return Response(serialize(instance, request.user)) - - def put(self, request: Request, app_id) -> Response: - try: - instance = ApiApplication.objects.get( - owner_id=request.user.id, client_id=app_id, status=ApiApplicationStatus.active - ) - except ApiApplication.DoesNotExist: - raise ResourceDoesNotExist + def get(self, request: Request, application: ApiApplication) -> Response: + return Response(serialize(application, request.user)) + def put(self, request: Request, application: ApiApplication) -> Response: serializer = ApiApplicationSerializer(data=request.data, partial=True) if serializer.is_valid(): @@ -79,22 
+83,15 @@ def put(self, request: Request, app_id) -> Response: if "termsUrl" in result: kwargs["terms_url"] = result["termsUrl"] if kwargs: - instance.update(**kwargs) - return Response(serialize(instance, request.user), status=200) + application.update(**kwargs) + return Response(serialize(application, request.user), status=200) return Response(serializer.errors, status=400) - def delete(self, request: Request, app_id) -> Response: - try: - instance = ApiApplication.objects.get( - owner_id=request.user.id, client_id=app_id, status=ApiApplicationStatus.active - ) - except ApiApplication.DoesNotExist: - raise ResourceDoesNotExist - + def delete(self, request: Request, application: ApiApplication) -> Response: with transaction.atomic(using=router.db_for_write(ApiApplication)): - updated = ApiApplication.objects.filter(id=instance.id).update( + updated = ApiApplication.objects.filter(id=application.id).update( status=ApiApplicationStatus.pending_deletion ) if updated: - ScheduledDeletion.schedule(instance, days=0, actor=request.user) + ScheduledDeletion.schedule(application, days=0, actor=request.user) return Response(status=204) diff --git a/src/sentry/api/endpoints/api_application_rotate_secret.py b/src/sentry/api/endpoints/api_application_rotate_secret.py index 31e02a9729e8d7..a86a9fe663fc5a 100644 --- a/src/sentry/api/endpoints/api_application_rotate_secret.py +++ b/src/sentry/api/endpoints/api_application_rotate_secret.py @@ -5,14 +5,14 @@ from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus -from sentry.api.base import Endpoint, control_silo_endpoint -from sentry.api.exceptions import ResourceDoesNotExist +from sentry.api.base import control_silo_endpoint +from sentry.api.endpoints.api_application_details import ApiApplicationEndpoint from sentry.api.serializers import serialize -from sentry.models.apiapplication import ApiApplication, ApiApplicationStatus, generate_token +from sentry.models.apiapplication import ApiApplication, generate_token @control_silo_endpoint -class ApiApplicationRotateSecretEndpoint(Endpoint): +class ApiApplicationRotateSecretEndpoint(ApiApplicationEndpoint): publish_status = { "POST": ApiPublishStatus.PRIVATE, } @@ -20,13 +20,7 @@ class ApiApplicationRotateSecretEndpoint(Endpoint): authentication_classes = (SessionAuthentication,) permission_classes = (IsAuthenticated,) - def post(self, request: Request, app_id) -> Response: - try: - api_application = ApiApplication.objects.get( - owner_id=request.user.id, client_id=app_id, status=ApiApplicationStatus.active - ) - except ApiApplication.DoesNotExist: - raise ResourceDoesNotExist + def post(self, request: Request, application: ApiApplication) -> Response: new_token = generate_token() - api_application.update(client_secret=new_token) + application.update(client_secret=new_token) return Response(serialize({"clientSecret": new_token})) diff --git a/src/sentry/api/endpoints/api_authorizations.py b/src/sentry/api/endpoints/api_authorizations.py index c8daaad0b88755..3013e0dca904b1 100644 --- a/src/sentry/api/endpoints/api_authorizations.py +++ b/src/sentry/api/endpoints/api_authorizations.py @@ -50,7 +50,9 @@ def delete(self, request: Request) -> Response: with outbox_context(transaction.atomic(using=router.db_for_write(ApiToken)), flush=False): for token in ApiToken.objects.filter( - user_id=request.user.id, application=auth.application_id + user_id=request.user.id, + application=auth.application_id, + scoping_organization_id=auth.organization_id, ): token.delete() 
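A minimal usage sketch for the primary_email_verification_required decorator added in src/sentry/api/decorators.py above. ExampleEndpoint and its post handler are hypothetical and shown only to illustrate how the guard is applied; they are not part of this patch.

from rest_framework.request import Request
from rest_framework.response import Response

from sentry.api.base import Endpoint
from sentry.api.decorators import primary_email_verification_required


class ExampleEndpoint(Endpoint):  # hypothetical endpoint, for illustration only
    @primary_email_verification_required
    def post(self, request: Request) -> Response:
        # Reached only when request.user has a verified primary email;
        # otherwise the decorator raises PrimaryEmailVerificationRequired.
        return Response(status=201)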
diff --git a/src/sentry/api/endpoints/chunk.py b/src/sentry/api/endpoints/chunk.py index 9095cef5a7ab6e..f8e1c275f05037 100644 --- a/src/sentry/api/endpoints/chunk.py +++ b/src/sentry/api/endpoints/chunk.py @@ -36,6 +36,7 @@ "portablepdbs", # Portable PDB debug file "artifact_bundles", # Artifact Bundles for JavaScript Source Maps "artifact_bundles_v2", # The `assemble` endpoint will check for missing chunks + "proguard", # Chunk-uploaded proguard mappings ) @@ -122,6 +123,10 @@ def post(self, request: Request, organization) -> Response: """ Upload chunks and store them as FileBlobs ````````````````````````````````````````` + + Requests to this endpoint should use the region-specific domain + eg. `us.sentry.io` or `de.sentry.io` + :pparam file file: The filename should be sha1 hash of the content. Also not you can add up to MAX_CHUNKS_PER_REQUEST files in this request. diff --git a/src/sentry/api/endpoints/debug_files.py b/src/sentry/api/endpoints/debug_files.py index f623e71a8e865c..a59efb4bb8c780 100644 --- a/src/sentry/api/endpoints/debug_files.py +++ b/src/sentry/api/endpoints/debug_files.py @@ -189,9 +189,9 @@ def get(self, request: Request, project: Project) -> Response: class DebugFilesEndpoint(ProjectEndpoint): owner = ApiOwner.OWNERS_INGEST publish_status = { - "DELETE": ApiPublishStatus.UNKNOWN, - "GET": ApiPublishStatus.UNKNOWN, - "POST": ApiPublishStatus.UNKNOWN, + "DELETE": ApiPublishStatus.PRIVATE, + "GET": ApiPublishStatus.PRIVATE, + "POST": ApiPublishStatus.PRIVATE, } permission_classes = (ProjectReleasePermission,) @@ -351,6 +351,9 @@ def post(self, request: Request, project: Project) -> Response: Unlike other API requests, files must be uploaded using the traditional multipart/form-data content-type. + Requests to this endpoint should use the region-specific domain + eg. `us.sentry.io` or `de.sentry.io` + The file uploaded is a zip archive of a Apple .dSYM folder which contains the individual debug images. Uploading through this endpoint will create different files for the contained images. 
diff --git a/src/sentry/api/endpoints/event_attachment_details.py b/src/sentry/api/endpoints/event_attachment_details.py index a31d68a8778729..a2b71da478f977 100644 --- a/src/sentry/api/endpoints/event_attachment_details.py +++ b/src/sentry/api/endpoints/event_attachment_details.py @@ -51,8 +51,8 @@ def has_object_permission(self, request: Request, view, project): class EventAttachmentDetailsEndpoint(ProjectEndpoint): owner = ApiOwner.OWNERS_INGEST publish_status = { - "DELETE": ApiPublishStatus.UNKNOWN, - "GET": ApiPublishStatus.UNKNOWN, + "DELETE": ApiPublishStatus.PRIVATE, + "GET": ApiPublishStatus.PRIVATE, } permission_classes = (EventAttachmentDetailsPermission,) diff --git a/src/sentry/api/endpoints/event_attachments.py b/src/sentry/api/endpoints/event_attachments.py index aca3603758ca0b..5acbbbd832de8e 100644 --- a/src/sentry/api/endpoints/event_attachments.py +++ b/src/sentry/api/endpoints/event_attachments.py @@ -16,7 +16,7 @@ class EventAttachmentsEndpoint(ProjectEndpoint): owner = ApiOwner.OWNERS_INGEST publish_status = { - "GET": ApiPublishStatus.UNKNOWN, + "GET": ApiPublishStatus.PRIVATE, } def get(self, request: Request, project, event_id) -> Response: diff --git a/src/sentry/api/endpoints/group_integration_details.py b/src/sentry/api/endpoints/group_integration_details.py index 7663f366a458c0..fdc22c9e9e9601 100644 --- a/src/sentry/api/endpoints/group_integration_details.py +++ b/src/sentry/api/endpoints/group_integration_details.py @@ -266,12 +266,26 @@ def post(self, request: Request, group, integration_id) -> Response: ) installation = integration.get_installation(organization_id=organization_id) - try: - data = installation.create_issue(request.data) - except IntegrationFormError as exc: - return Response(exc.field_errors, status=400) - except IntegrationError as e: - return Response({"non_field_errors": [str(e)]}, status=400) + + with ProjectManagementEvent( + action_type=ProjectManagementActionType.CREATE_EXTERNAL_ISSUE_VIA_ISSUE_DETAIL, + integration=integration, + ).capture() as lifecycle: + lifecycle.add_extras( + { + "provider": integration.provider, + "integration_id": integration.id, + } + ) + + try: + data = installation.create_issue(request.data) + except IntegrationFormError as exc: + lifecycle.record_halt(exc) + return Response(exc.field_errors, status=400) + except IntegrationError as e: + lifecycle.record_failure(e) + return Response({"non_field_errors": [str(e)]}, status=400) external_issue_key = installation.make_external_key(data) external_issue, created = ExternalIssue.objects.get_or_create( diff --git a/src/sentry/api/endpoints/group_tagkey_details.py b/src/sentry/api/endpoints/group_tagkey_details.py index 1a43dce0f2290d..5bc94f32388487 100644 --- a/src/sentry/api/endpoints/group_tagkey_details.py +++ b/src/sentry/api/endpoints/group_tagkey_details.py @@ -1,5 +1,4 @@ -from drf_spectacular.types import OpenApiTypes -from drf_spectacular.utils import OpenApiParameter, extend_schema +from drf_spectacular.utils import extend_schema from rest_framework.request import Request from rest_framework.response import Response @@ -38,13 +37,7 @@ class GroupTagKeyDetailsEndpoint(GroupEndpoint, EnvironmentMixin): IssueParams.ISSUE_ID, IssueParams.ISSUES_OR_GROUPS, GlobalParams.ORG_ID_OR_SLUG, - OpenApiParameter( - name="key", - location=OpenApiParameter.PATH, - type=OpenApiTypes.STR, - description="The tag key to look the values up for.", - required=True, - ), + IssueParams.KEY, GlobalParams.ENVIRONMENT, ], responses={ diff --git 
a/src/sentry/api/endpoints/group_tagkey_values.py b/src/sentry/api/endpoints/group_tagkey_values.py index 42525aed6df77d..5b7f8d5e9c9260 100644 --- a/src/sentry/api/endpoints/group_tagkey_values.py +++ b/src/sentry/api/endpoints/group_tagkey_values.py @@ -1,7 +1,9 @@ +from drf_spectacular.utils import extend_schema from rest_framework.request import Request from rest_framework.response import Response from sentry import analytics, tagstore +from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import EnvironmentMixin, region_silo_endpoint from sentry.api.bases.group import GroupEndpoint @@ -9,25 +11,51 @@ from sentry.api.helpers.environments import get_environments from sentry.api.serializers import serialize from sentry.api.serializers.models.tagvalue import UserTagValueSerializer +from sentry.apidocs.constants import ( + RESPONSE_BAD_REQUEST, + RESPONSE_FORBIDDEN, + RESPONSE_NOT_FOUND, + RESPONSE_UNAUTHORIZED, +) +from sentry.apidocs.examples.tags_examples import TagsExamples +from sentry.apidocs.parameters import GlobalParams, IssueParams +from sentry.apidocs.utils import inline_sentry_response_serializer +from sentry.tagstore.types import TagValueSerializerResponse +@extend_schema(tags=["Events"]) @region_silo_endpoint class GroupTagKeyValuesEndpoint(GroupEndpoint, EnvironmentMixin): publish_status = { - "GET": ApiPublishStatus.UNKNOWN, + "GET": ApiPublishStatus.PUBLIC, } + owner = ApiOwner.ISSUES + @extend_schema( + operation_id="List a Tag's Values for an Issue", + description="Returns a list of values associated with this key for an issue.\nReturns at most 1000 values when paginated.", + parameters=[ + IssueParams.ISSUE_ID, + IssueParams.ISSUES_OR_GROUPS, + GlobalParams.ORG_ID_OR_SLUG, + IssueParams.KEY, + IssueParams.SORT, + GlobalParams.ENVIRONMENT, + ], + responses={ + 200: inline_sentry_response_serializer( + "TagKeyValuesDict", list[TagValueSerializerResponse] + ), + 400: RESPONSE_BAD_REQUEST, + 401: RESPONSE_UNAUTHORIZED, + 403: RESPONSE_FORBIDDEN, + 404: RESPONSE_NOT_FOUND, + }, + examples=[TagsExamples.GROUP_TAGKEY_VALUES], + ) def get(self, request: Request, group, key) -> Response: """ List a Tag's Values - ``````````````````` - - Return a list of values associated with this key for an issue. - When paginated can return at most 1000 values. - - :pparam string issue_id: the ID of the issue to retrieve. - :pparam string key: the tag key to look the values up for. 
- :auth: required """ analytics.record( "eventuser_endpoint.request", diff --git a/src/sentry/api/endpoints/organization_dashboards.py b/src/sentry/api/endpoints/organization_dashboards.py index 7599bd16839449..fd6b9096f8e8f0 100644 --- a/src/sentry/api/endpoints/organization_dashboards.py +++ b/src/sentry/api/endpoints/organization_dashboards.py @@ -1,7 +1,7 @@ from __future__ import annotations from django.db import IntegrityError, router, transaction -from django.db.models import Case, IntegerField, When +from django.db.models import Case, Exists, IntegerField, OuterRef, Value, When from drf_spectacular.utils import extend_schema from rest_framework.request import Request from rest_framework.response import Response @@ -28,8 +28,10 @@ from sentry.apidocs.examples.dashboard_examples import DashboardExamples from sentry.apidocs.parameters import CursorQueryParam, GlobalParams, VisibilityParams from sentry.apidocs.utils import inline_sentry_response_serializer -from sentry.models.dashboard import Dashboard +from sentry.db.models.fields.text import CharField +from sentry.models.dashboard import Dashboard, DashboardFavoriteUser from sentry.models.organization import Organization +from sentry.users.services.user.service import user_service MAX_RETRIES = 2 DUPLICATE_TITLE_PATTERN = r"(.*) copy(?:$|\s(\d+))" @@ -163,14 +165,43 @@ def get(self, request: Request, organization) -> Response: order_by = ["last_visited" if desc else "-last_visited"] elif sort_by == "mydashboards": - order_by = [ - Case( - When(created_by_id=request.user.id, then=-1), - default="created_by_id", - output_field=IntegerField(), - ), - "-date_added", - ] + if features.has( + "organizations:dashboards-table-view", organization, actor=request.user + ): + user_name_dict = { + user.id: user.name + for user in user_service.get_many_by_id( + ids=list(dashboards.values_list("created_by_id", flat=True)) + ) + } + dashboards = dashboards.annotate( + user_name=Case( + *[ + When(created_by_id=user_id, then=Value(user_name)) + for user_id, user_name in user_name_dict.items() + ], + default=Value(""), + output_field=CharField(), + ) + ) + order_by = [ + Case( + When(created_by_id=request.user.id, then=-1), + default=1, + output_field=IntegerField(), + ), + "-user_name" if desc else "user_name", + "-date_added", + ] + else: + order_by = [ + Case( + When(created_by_id=request.user.id, then=-1), + default="created_by_id", + output_field=IntegerField(), + ), + "-date_added", + ] elif sort_by == "myDashboardsAndRecentlyViewed": order_by = [ @@ -184,9 +215,13 @@ def get(self, request: Request, organization) -> Response: if features.has("organizations:dashboards-favourite", organization, actor=request.user): pin_by = request.query_params.get("pin") if pin_by == "favorites": + favorited_by_subquery = DashboardFavoriteUser.objects.filter( + dashboard=OuterRef("pk"), user_id=request.user.id + ) + order_by_favorites = [ Case( - When(dashboardfavoriteuser__user_id=request.user.id, then=-1), + When(Exists(favorited_by_subquery), then=-1), default=1, output_field=IntegerField(), ) diff --git a/src/sentry/api/endpoints/organization_events.py b/src/sentry/api/endpoints/organization_events.py index 938f8609eb01e1..80577c317c5ed3 100644 --- a/src/sentry/api/endpoints/organization_events.py +++ b/src/sentry/api/endpoints/organization_events.py @@ -35,6 +35,7 @@ ) from sentry.snuba.metrics.extraction import MetricSpecType from sentry.snuba.referrer import Referrer +from sentry.snuba.types import DatasetQuery from sentry.snuba.utils import 
dataset_split_decision_inferred_from_query, get_dataset from sentry.types.ratelimit import RateLimit, RateLimitCategory from sentry.utils.snuba import SnubaError @@ -55,13 +56,14 @@ class DiscoverDatasetSplitException(Exception): pass -ALLOWED_EVENTS_REFERRERS = { +ALLOWED_EVENTS_REFERRERS: set[str] = { Referrer.API_ORGANIZATION_EVENTS.value, Referrer.API_ORGANIZATION_EVENTS_V2.value, Referrer.API_DASHBOARDS_TABLEWIDGET.value, Referrer.API_DASHBOARDS_BIGNUMBERWIDGET.value, Referrer.API_DISCOVER_TRANSACTIONS_LIST.value, Referrer.API_DISCOVER_QUERY_TABLE.value, + Referrer.API_INSIGHTS_USER_GEO_SUBREGION_SELECTOR.value, Referrer.API_PERFORMANCE_BROWSER_RESOURCE_MAIN_TABLE.value, Referrer.API_PERFORMANCE_BROWSER_RESOURCES_PAGE_SELECTOR.value, Referrer.API_PERFORMANCE_BROWSER_WEB_VITALS_PROJECT.value, @@ -167,10 +169,11 @@ class DiscoverDatasetSplitException(Exception): Referrer.API_PERFORMANCE_MOBILE_UI_METRICS_RIBBON.value, Referrer.API_PERFORMANCE_SPAN_SUMMARY_HEADER_DATA.value, Referrer.API_PERFORMANCE_SPAN_SUMMARY_TABLE.value, - Referrer.API_EXPLORE_SPANS_SAMPLES_TABLE, + Referrer.API_EXPLORE_SPANS_SAMPLES_TABLE.value, + Referrer.ISSUE_DETAILS_STREAMLINE_GRAPH.value, + Referrer.ISSUE_DETAILS_STREAMLINE_LIST.value, } -API_TOKEN_REFERRER = Referrer.API_AUTH_TOKEN_EVENTS.value LEGACY_RATE_LIMIT = dict(limit=30, window=1, concurrent_limit=15) # reduced limit will be the future default for all organizations not explicitly on increased limit @@ -273,8 +276,7 @@ class OrganizationEventsEndpoint(OrganizationEventsV2EndpointBase): enforce_rate_limit = True - def rate_limits(*args, **kwargs) -> dict[str, dict[RateLimitCategory, RateLimit]]: - return rate_limit_events(*args, **kwargs) + rate_limits = rate_limit_events def get_features(self, organization: Organization, request: Request) -> Mapping[str, bool]: feature_names = [ @@ -295,11 +297,13 @@ def get_features(self, organization: Organization, request: Request) -> Mapping[ actor=request.user, ) - all_features = ( - batch_features.get(f"organization:{organization.id}", {}) - if batch_features is not None - else {} - ) + all_features: dict[str, bool] = {} + + if batch_features is not None: + for feature_name, result in batch_features.get( + f"organization:{organization.id}", {} + ).items(): + all_features[feature_name] = bool(result) for feature_name in feature_names: if feature_name not in all_features: @@ -379,7 +383,7 @@ def get(self, request: Request, organization) -> Response: } ) except InvalidParams as err: - raise ParseError(err) + raise ParseError(detail=str(err)) batch_features = self.get_features(organization, request) @@ -418,7 +422,9 @@ def get(self, request: Request, organization) -> Response: # Force the referrer to "api.auth-token.events" for events requests authorized through a bearer token if request.auth: - referrer = API_TOKEN_REFERRER + referrer = Referrer.API_AUTH_TOKEN_EVENTS.value + elif referrer is None: + referrer = Referrer.API_ORGANIZATION_EVENTS.value elif referrer not in ALLOWED_EVENTS_REFERRERS: if referrer: with sentry_sdk.isolation_scope() as scope: @@ -431,12 +437,18 @@ def get(self, request: Request, organization) -> Response: use_aggregate_conditions = request.GET.get("allowAggregateConditions", "1") == "1" # Only works when dataset == spans use_rpc = request.GET.get("useRpc", "0") == "1" - - def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]: + sentry_sdk.set_tag("performance.use_rpc", use_rpc) + + def _data_fn( + dataset_query: DatasetQuery, + offset: int, + limit: int, + query: str | None, + 
): if use_rpc and dataset == spans_eap: return spans_rpc.run_table_query( params=snuba_params, - query_string=query, + query_string=query or "", selected_columns=self.get_field_list(organization, request), orderby=self.get_orderby(request), offset=offset, @@ -448,9 +460,9 @@ def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]: ), ) query_source = self.get_request_source(request) - return scoped_dataset.query( + return dataset_query( selected_columns=self.get_field_list(organization, request), - query=query, + query=query or "", snuba_params=snuba_params, equations=self.get_equation_list(organization, request), orderby=self.get_orderby(request), @@ -459,24 +471,30 @@ def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]: referrer=referrer, auto_fields=True, auto_aggregations=True, - use_aggregate_conditions=use_aggregate_conditions, allow_metric_aggregates=allow_metric_aggregates, + use_aggregate_conditions=use_aggregate_conditions, transform_alias_to_input_format=True, # Whether the flag is enabled or not, regardless of the referrer has_metrics=use_metrics, use_metrics_layer=batch_features.get("organizations:use-metrics-layer", False), on_demand_metrics_enabled=on_demand_metrics_enabled, on_demand_metrics_type=on_demand_metrics_type, - query_source=query_source, fallback_to_transactions=features.has( "organizations:performance-discover-dataset-selector", organization, actor=request.user, ), + query_source=query_source, ) @sentry_sdk.tracing.trace - def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_widget_id): + def _dashboards_data_fn( + scoped_dataset_query: DatasetQuery, + offset: int, + limit: int, + scoped_query: str | None, + dashboard_widget_id: str, + ): try: widget = DashboardWidget.objects.get(id=dashboard_widget_id) does_widget_have_split = widget.discover_widget_split is not None @@ -487,27 +505,29 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w ) if does_widget_have_split and not has_override_feature: + dataset_query: DatasetQuery + # This is essentially cached behaviour and we skip the check if widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS: - split_dataset = errors + dataset_query = errors.query elif widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE: # We can't add event.type:transaction for now because of on-demand. - split_dataset = scoped_dataset + dataset_query = scoped_dataset_query else: - split_dataset = discover + dataset_query = discover.query - return _data_fn(split_dataset, offset, limit, scoped_query) + return _data_fn(dataset_query, offset, limit, scoped_query) with handle_query_errors(): try: - error_results = _data_fn(errors, offset, limit, scoped_query) + error_results = _data_fn(errors.query, offset, limit, scoped_query) # Widget has not split the discover dataset yet, so we need to check if there are errors etc. 
has_errors = len(error_results["data"]) > 0 except SnubaError: has_errors = False error_results = None - original_results = _data_fn(scoped_dataset, offset, limit, scoped_query) + original_results = _data_fn(scoped_dataset_query, offset, limit, scoped_query) if original_results.get("data") is not None: dataset_meta = original_results.get("meta", {}) else: @@ -524,7 +544,9 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w if has_errors and has_other_data and not using_metrics: # In the case that the original request was not using the metrics dataset, we cannot be certain that other data is solely transactions. sentry_sdk.set_tag("third_split_query", True) - transaction_results = _data_fn(transactions, offset, limit, scoped_query) + transaction_results = _data_fn( + transactions.query, offset, limit, scoped_query + ) has_transactions = len(transaction_results["data"]) > 0 decision = self.save_split_decision( @@ -532,7 +554,7 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w ) if decision == DashboardWidgetTypes.DISCOVER: - return _data_fn(discover, offset, limit, scoped_query) + return _data_fn(discover.query, offset, limit, scoped_query) elif decision == DashboardWidgetTypes.TRANSACTION_LIKE: original_results["meta"]["discoverSplitDecision"] = ( DashboardWidgetTypes.get_type_name( @@ -550,13 +572,19 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w except Exception as e: # Swallow the exception if it was due to the discover split, and try again one more time. if isinstance(e, ParseError): - return _data_fn(scoped_dataset, offset, limit, scoped_query) + return _data_fn(scoped_dataset_query, offset, limit, scoped_query) sentry_sdk.capture_exception(e) - return _data_fn(scoped_dataset, offset, limit, scoped_query) + return _data_fn(scoped_dataset_query, offset, limit, scoped_query) @sentry_sdk.tracing.trace - def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_saved_query_id): + def _discover_data_fn( + scoped_dataset_query: DatasetQuery, + offset: int, + limit: int, + scoped_query: str | None, + discover_saved_query_id: str, + ): try: discover_query = DiscoverSavedQuery.objects.get( id=discover_saved_query_id, organization=organization @@ -565,7 +593,7 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save discover_query.dataset is not DiscoverSavedQueryTypes.DISCOVER ) if does_widget_have_split: - return _data_fn(scoped_dataset, offset, limit, scoped_query) + return _data_fn(scoped_dataset_query, offset, limit, scoped_query) dataset_inferred_from_query = dataset_split_decision_inferred_from_query( self.get_field_list(organization, request), @@ -576,9 +604,11 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save # See if we can infer which dataset based on selected columns and query string. 
with handle_query_errors(): - if dataset_inferred_from_query is not None: + if ( + dataset := SAVED_QUERY_DATASET_MAP.get(dataset_inferred_from_query) + ) is not None: result = _data_fn( - SAVED_QUERY_DATASET_MAP[dataset_inferred_from_query], + dataset.query, offset, limit, scoped_query, @@ -602,11 +632,11 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save with ThreadPoolExecutor(max_workers=3) as exe: futures = { exe.submit( - _data_fn, get_dataset(dataset_), offset, limit, scoped_query - ): dataset_ - for dataset_ in [ - "errors", - "transactions", + _data_fn, dataset_query, offset, limit, scoped_query + ): dataset_name + for dataset_name, dataset_query in [ + ("errors", errors.query), + ("transactions", transactions.query), ] } @@ -660,10 +690,10 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save except Exception as e: # Swallow the exception if it was due to the discover split, and try again one more time. if isinstance(e, ParseError): - return _data_fn(scoped_dataset, offset, limit, scoped_query) + return _data_fn(scoped_dataset_query, offset, limit, scoped_query) sentry_sdk.capture_exception(e) - return _data_fn(scoped_dataset, offset, limit, scoped_query) + return _data_fn(scoped_dataset_query, offset, limit, scoped_query) def data_fn_factory(scoped_dataset): """ @@ -677,17 +707,17 @@ def data_fn_factory(scoped_dataset): dashboard_widget_id = request.GET.get("dashboardWidgetId", None) discover_saved_query_id = request.GET.get("discoverSavedQueryId", None) - def fn(offset, limit) -> dict[str, Any]: + def fn(offset, limit): if save_discover_dataset_decision and discover_saved_query_id: return _discover_data_fn( - scoped_dataset, offset, limit, scoped_query, discover_saved_query_id + scoped_dataset.query, offset, limit, scoped_query, discover_saved_query_id ) if not (metrics_enhanced and dashboard_widget_id): - return _data_fn(scoped_dataset, offset, limit, scoped_query) + return _data_fn(scoped_dataset.query, offset, limit, scoped_query) return _dashboards_data_fn( - scoped_dataset, offset, limit, scoped_query, dashboard_widget_id + scoped_dataset.query, offset, limit, scoped_query, dashboard_widget_id ) return fn diff --git a/src/sentry/api/endpoints/organization_member/details.py b/src/sentry/api/endpoints/organization_member/details.py index 1564634770ea69..28a78b458a3545 100644 --- a/src/sentry/api/endpoints/organization_member/details.py +++ b/src/sentry/api/endpoints/organization_member/details.py @@ -226,10 +226,7 @@ def put( is_member = not ( request.access.has_scope("member:invite") and request.access.has_scope("member:admin") ) - enable_member_invite = ( - features.has("organizations:members-invite-teammates", organization) - and not organization.flags.disable_member_invite - ) + enable_member_invite = not organization.flags.disable_member_invite # Members can only resend invites reinvite_request_only = set(result.keys()).issubset({"reinvite", "regenerate"}) # Members can only resend invites that they sent @@ -470,8 +467,7 @@ def delete( if acting_member != member: if not request.access.has_scope("member:admin"): if ( - features.has("organizations:members-invite-teammates", organization) - and not organization.flags.disable_member_invite + not organization.flags.disable_member_invite and request.access.has_scope("member:invite") ): return self._handle_deletion_by_member( diff --git a/src/sentry/api/endpoints/organization_metrics_meta.py b/src/sentry/api/endpoints/organization_metrics_meta.py index 
d31a96d9dc2a03..42f7ac530d692e 100644 --- a/src/sentry/api/endpoints/organization_metrics_meta.py +++ b/src/sentry/api/endpoints/organization_metrics_meta.py @@ -11,7 +11,6 @@ from sentry.snuba import metrics_performance COUNT_UNPARAM = "count_unparameterized_transactions()" -COUNT_HAS_TXN = "count_has_transaction_name()" COUNT_NULL = "count_null_transactions()" diff --git a/src/sentry/api/endpoints/organization_release_files.py b/src/sentry/api/endpoints/organization_release_files.py index 4e057726f2c47f..6acd32c6126b34 100644 --- a/src/sentry/api/endpoints/organization_release_files.py +++ b/src/sentry/api/endpoints/organization_release_files.py @@ -52,6 +52,9 @@ def post(self, request: Request, organization, version) -> Response: Unlike other API requests, files must be uploaded using the traditional multipart/form-data content-type. + Requests to this endpoint should use the region-specific domain + eg. `us.sentry.io` or `de.sentry.io` + The optional 'name' attribute should reflect the absolute path that this file will be referenced as. For example, in the case of JavaScript you might specify the full web URI. diff --git a/src/sentry/api/endpoints/organization_releases.py b/src/sentry/api/endpoints/organization_releases.py index ac88f8c0442372..dc4810abe3e290 100644 --- a/src/sentry/api/endpoints/organization_releases.py +++ b/src/sentry/api/endpoints/organization_releases.py @@ -437,8 +437,11 @@ def post(self, request: Request, organization) -> Response: :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. - :param string version: a version identifier for this release. Can - be a version number, a commit hash etc. + :param string version: a version identifier for this release. Can + be a version number, a commit hash etc. It cannot contain certain + whitespace characters (`\\r`, `\\n`, `\\f`, `\\x0c`, `\\t`) or any + slashes (`\\`, `/`). The version names `.`, `..` and `latest` are also + reserved, and cannot be used. :param string ref: an optional commit reference. This is useful if a tagged version has been provided. :param url url: a URL that points to the release. 
This can be the diff --git a/src/sentry/api/endpoints/organization_sessions.py b/src/sentry/api/endpoints/organization_sessions.py index 372c1e58b40128..5d418045c2239d 100644 --- a/src/sentry/api/endpoints/organization_sessions.py +++ b/src/sentry/api/endpoints/organization_sessions.py @@ -1,7 +1,6 @@ from contextlib import contextmanager import sentry_sdk -from django.utils.datastructures import MultiValueDict from drf_spectacular.utils import extend_schema from rest_framework.exceptions import ParseError from rest_framework.request import Request @@ -117,15 +116,10 @@ def build_sessions_query( except NoProjects: raise NoProjects("No projects available") # give it a description - # HACK to prevent front-end crash when release health is sessions-based: - query_params = MultiValueDict(request.GET) - if not release_health.backend.is_metrics_based() and request.GET.get("interval") == "10s": - query_params["interval"] = "1m" - query_config = release_health.backend.sessions_query_config(organization) return QueryDefinition( - query=query_params, + query=request.GET, params=params, offset=offset, limit=limit, diff --git a/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py b/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py index a3b4598fbab85c..c2b5bea900ef93 100644 --- a/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py +++ b/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py @@ -2,7 +2,6 @@ from rest_framework.request import Request from rest_framework.response import Response -from sentry import features from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint @@ -22,10 +21,6 @@ class ProjectBackfillSimilarIssuesEmbeddingsRecords(ProjectEndpoint): } def post(self, request: Request, project: Project) -> Response: - # needs to have the flag to run - if not features.has("projects:similarity-embeddings-backfill", project): - return Response(status=404) - # needs to either be a superuser or be in single org mode if not (is_active_superuser(request) or settings.SENTRY_SINGLE_ORGANIZATION): return Response(status=404) diff --git a/src/sentry/api/endpoints/project_release_files.py b/src/sentry/api/endpoints/project_release_files.py index 5674787662a85d..cedb8e32f5ae6b 100644 --- a/src/sentry/api/endpoints/project_release_files.py +++ b/src/sentry/api/endpoints/project_release_files.py @@ -272,6 +272,9 @@ def post(self, request: Request, project, version) -> Response: Unlike other API requests, files must be uploaded using the traditional multipart/form-data content-type. + Requests to this endpoint should use the region-specific domain + eg. `us.sentry.io` or `de.sentry.io` + The optional 'name' attribute should reflect the absolute path that this file will be referenced as. For example, in the case of JavaScript you might specify the full web URI. 
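# A hedged illustration of the upload flow described in the docstring above: a
# multipart/form-data POST against the region-specific domain. The URL path,
# org/project slugs, file name and token are placeholders for this sketch, not
# values taken from this diff.
import requests

with open("app.min.js", "rb") as fh:
    resp = requests.post(
        "https://us.sentry.io/api/0/projects/my-org/my-project/releases/1.0.0/files/",
        headers={"Authorization": "Bearer <auth-token>"},
        # 'name' is the optional absolute path the file will be referenced as
        data={"name": "https://example.com/static/app.min.js"},
        files={"file": fh},
    )
resp.raise_for_status()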
diff --git a/src/sentry/api/endpoints/project_rule_preview.py b/src/sentry/api/endpoints/project_rule_preview.py index 713fb931d31912..0ee40b273db646 100644 --- a/src/sentry/api/endpoints/project_rule_preview.py +++ b/src/sentry/api/endpoints/project_rule_preview.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from collections.abc import Mapping from typing import Any @@ -10,10 +12,11 @@ from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint -from sentry.api.serializers import GroupSerializer, serialize +from sentry.api.serializers import serialize +from sentry.api.serializers.models.group import BaseGroupSerializerResponse, GroupSerializer from sentry.api.serializers.rest_framework.rule import RulePreviewSerializer from sentry.models.group import Group -from sentry.models.groupinbox import get_inbox_details +from sentry.models.groupinbox import InboxDetails, get_inbox_details from sentry.rules.history.preview import preview @@ -86,12 +89,19 @@ def post(self, request: Request, project) -> Response: return response +class _PreviewResponse(BaseGroupSerializerResponse): + inbox: InboxDetails + lastTriggered: int + + class PreviewSerializer(GroupSerializer): def serialize( - self, obj: dict[str, Any], attrs: Mapping[Any, Any], user: Any, **kwargs: Any - ) -> dict[str, Any]: + self, obj: Group, attrs: Mapping[Any, Any], user: Any, **kwargs: Any + ) -> _PreviewResponse: result = super().serialize(obj, attrs, user, **kwargs) group_id = int(result["id"]) - result["inbox"] = kwargs["inbox_details"].get(group_id) - result["lastTriggered"] = kwargs["group_fires"][group_id] - return result + return { + **result, + "inbox": kwargs["inbox_details"].get(group_id), + "lastTriggered": kwargs["group_fires"][group_id], + } diff --git a/src/sentry/api/endpoints/project_servicehook_stats.py b/src/sentry/api/endpoints/project_servicehook_stats.py index dffeb8c606ae61..c6d6c1e35a758e 100644 --- a/src/sentry/api/endpoints/project_servicehook_stats.py +++ b/src/sentry/api/endpoints/project_servicehook_stats.py @@ -26,9 +26,9 @@ def get(self, request: Request, project, hook_id) -> Response: stat_args = self._parse_args(request) - stats = {} + stats: dict[int, dict[str, int]] = {} for model, name in ((TSDBModel.servicehook_fired, "total"),): - result = tsdb.get_range( + result = tsdb.backend.get_range( model=model, keys=[hook.id], **stat_args, diff --git a/src/sentry/api/endpoints/project_team_details.py b/src/sentry/api/endpoints/project_team_details.py index d28572ab6d09b1..de546e775ecf06 100644 --- a/src/sentry/api/endpoints/project_team_details.py +++ b/src/sentry/api/endpoints/project_team_details.py @@ -90,7 +90,12 @@ def post(self, request: Request, project, team: Team) -> Response: event=audit_log.get_event_id("PROJECT_TEAM_ADD"), data={"team_slug": team.slug, "project_slug": project.slug}, ) - return Response(serialize(project, request.user, ProjectWithTeamSerializer()), status=201) + return Response( + serialize( + project, request.user, ProjectWithTeamSerializer(collapse=["unusedFeatures"]) + ), + status=201, + ) @extend_schema( operation_id="Delete a Team from a Project", diff --git a/src/sentry/api/endpoints/team_release_count.py b/src/sentry/api/endpoints/team_release_count.py index 7d7806b836b4f3..a0f7b34ca329fd 100644 --- a/src/sentry/api/endpoints/team_release_count.py +++ b/src/sentry/api/endpoints/team_release_count.py @@ -47,8 +47,8 @@ def get(self, 
request: Request, team) -> Response: ) agg_project_counts = {} - project_avgs = defaultdict(int) - this_week_totals = defaultdict(int) + project_avgs: dict[int, float] = defaultdict(int) + this_week_totals: dict[int, int] = defaultdict(int) this_week_start = now() - timedelta(days=7) for row in per_project_daily_release_counts: project_avgs[row["projects"]] += row["count"] @@ -56,8 +56,8 @@ def get(self, request: Request, team) -> Response: if row["bucket"] >= this_week_start: this_week_totals[row["projects"]] += row["count"] - for row in project_avgs: - project_avgs[row] = (project_avgs[row] / (end - start).days) * 7 + for project_id in project_avgs: + project_avgs[project_id] = (project_avgs[project_id] / (end - start).days) * 7 current_day = start.date() end_date = end.date() diff --git a/src/sentry/api/endpoints/user_organizationintegrations.py b/src/sentry/api/endpoints/user_organizationintegrations.py index fb772593004615..ced6ea411bd85d 100644 --- a/src/sentry/api/endpoints/user_organizationintegrations.py +++ b/src/sentry/api/endpoints/user_organizationintegrations.py @@ -8,6 +8,7 @@ from sentry.api.serializers import serialize from sentry.constants import ObjectStatus from sentry.integrations.models.organization_integration import OrganizationIntegration +from sentry.organizations.services.organization import organization_service from sentry.users.api.bases.user import UserEndpoint from sentry.users.services.user.service import user_service @@ -33,8 +34,15 @@ def get(self, request: Request, user) -> Response: if request.user.id is not None else () ) + organization_ids = [] + for o in organizations: + org_context = organization_service.get_organization_by_id( + id=o.id, user_id=request.user.id + ) + if org_context and org_context.member and "org:read" in org_context.member.scopes: + organization_ids.append(o.id) queryset = OrganizationIntegration.objects.filter( - organization_id__in=[o.id for o in organizations], + organization_id__in=organization_ids, status=ObjectStatus.ACTIVE, integration__status=ObjectStatus.ACTIVE, ) diff --git a/src/sentry/api/event_search.py b/src/sentry/api/event_search.py index dfaec71ec874c0..cc76cff4940dca 100644 --- a/src/sentry/api/event_search.py +++ b/src/sentry/api/event_search.py @@ -145,7 +145,6 @@ raw_aggregate_param = ~r"[^()\t\n, \"]+" quoted_aggregate_param = '"' ('\\"' / ~r'[^\t\n\"]')* '"' search_key = explicit_number_tag_key / key / quoted_key -search_type = "number" / "string" text_key = explicit_tag_key / explicit_string_tag_key / search_key value = ~r"[^()\t\n ]*" quoted_value = '"' ('\\"' / ~r'[^"]')* '"' @@ -1071,9 +1070,6 @@ def visit_explicit_string_tag_key(self, node, children): def visit_explicit_number_tag_key(self, node, children): return SearchKey(f"tags[{children[2].name},number]") - def visit_search_type(self, node, children): - return node.text - def visit_aggregate_key(self, node, children): children = remove_optional_nodes(children) children = remove_space(children) diff --git a/src/sentry/api/exceptions.py b/src/sentry/api/exceptions.py index 2e5689a148aacd..babd399f8d9655 100644 --- a/src/sentry/api/exceptions.py +++ b/src/sentry/api/exceptions.py @@ -118,6 +118,15 @@ def __init__(self, user): super().__init__(username=user.username) +class PrimaryEmailVerificationRequired(SentryAPIException): + status_code = status.HTTP_401_UNAUTHORIZED + code = "primary-email-verification-required" + message = "Primary email verification required." 
+ + def __init__(self, user): + super().__init__(username=user.username) + + class TwoFactorRequired(SentryAPIException): status_code = status.HTTP_401_UNAUTHORIZED code = "2fa-required" diff --git a/src/sentry/api/helpers/group_index/delete.py b/src/sentry/api/helpers/group_index/delete.py index 0aa10caf082221..bdfd3ae683d361 100644 --- a/src/sentry/api/helpers/group_index/delete.py +++ b/src/sentry/api/helpers/group_index/delete.py @@ -124,7 +124,7 @@ def delete_groups( request: Request, projects: Sequence[Project], organization_id: int, - search_fn: SearchFunction, + search_fn: SearchFunction | None = None, ) -> Response: """ `search_fn` refers to the `search.query` method with the appropriate @@ -139,7 +139,7 @@ def delete_groups( id__in=set(group_ids), ).exclude(status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS]) ) - else: + elif search_fn: try: cursor_result, _ = search_fn( { diff --git a/src/sentry/api/helpers/group_index/update.py b/src/sentry/api/helpers/group_index/update.py index a95ebdfdbf8149..180c8ff71ebfbc 100644 --- a/src/sentry/api/helpers/group_index/update.py +++ b/src/sentry/api/helpers/group_index/update.py @@ -165,10 +165,7 @@ def get_current_release_version_of_group(group: Group, follows_semver: bool = Fa def update_groups( request: Request, - group_ids: Sequence[int | str] | None, - projects: Sequence[Project], - organization_id: int, - search_fn: SearchFunction | None = None, + groups: Sequence[Group], user: RpcUser | User | AnonymousUser | None = None, data: Mapping[str, Any] | None = None, ) -> Response: @@ -178,17 +175,15 @@ def update_groups( acting_user = user if user and user.is_authenticated else None data = data or request.data - try: - group_ids, group_list = get_group_ids_and_group_list( - organization_id, projects, group_ids, search_fn - ) - except ValidationError: - logger.exception("Error getting group ids and group list") # Track the error in Sentry - return Response( - {"detail": "Invalid query. 
Error getting group ids and group list"}, status=400 - ) + # so we won't have to requery for each group + project_lookup = {g.project_id: g.project for g in groups} + projects = list(project_lookup.values()) + + # Assert all projects belong to the same organization + if len({p.organization_id for p in projects}) > 1: + return Response({"detail": "All groups must belong to same organization."}, status=400) - if not group_ids or not group_list: + if not groups: return Response({"detail": "No groups found"}, status=204) serializer = validate_request(request, projects, data) @@ -201,17 +196,9 @@ def update_groups( acting_user = user if user.is_authenticated else None - # so we won't have to requery for each group - project_lookup = {g.project_id: g.project for g in group_list} - group_project_ids = {g.project_id for g in group_list} - # filter projects down to only those that have groups in the search results - projects = [p for p in projects if p.id in group_project_ids] - - queryset = Group.objects.filter(id__in=group_ids) - discard = result.get("discard") if discard: - return handle_discard(request, list(queryset), projects, acting_user) + return handle_discard(request, groups, projects, acting_user) status_details = result.pop("statusDetails", result) status = result.get("status") @@ -219,7 +206,7 @@ def update_groups( if "priority" in result: handle_priority( priority=result["priority"], - group_list=group_list, + group_list=groups, acting_user=acting_user, project_lookup=project_lookup, ) @@ -228,7 +215,7 @@ def update_groups( result, res_type = handle_resolve_in_release( status, status_details, - group_list, + groups, projects, project_lookup, acting_user, @@ -240,7 +227,7 @@ def update_groups( elif status: result = handle_other_status_updates( result, - group_list, + groups, projects, project_lookup, status_details, @@ -250,7 +237,7 @@ def update_groups( return prepare_response( result, - group_list, + groups, project_lookup, projects, acting_user, @@ -260,6 +247,40 @@ def update_groups( ) +def update_groups_with_search_fn( + request: Request, + group_ids: Sequence[int | str] | None, + projects: Sequence[Project], + organization_id: int, + search_fn: SearchFunction, +) -> Response: + group_list = [] + if group_ids: + group_list = get_group_list(organization_id, projects, group_ids) + + if not group_list: + try: + # It can raise ValidationError + cursor_result, _ = search_fn( + { + "limit": BULK_MUTATION_LIMIT, + "paginator_options": {"max_limit": BULK_MUTATION_LIMIT}, + } + ) + except ValidationError: + logger.exception("Error getting group ids and group list") # Track the error in Sentry + return Response( + {"detail": "Invalid query. Error getting group ids and group list"}, status=400 + ) + + group_list = list(cursor_result) + + if not group_list: + return Response({"detail": "No groups found"}, status=204) + + return update_groups(request, group_list) + + def validate_request( request: Request, projects: Sequence[Project], @@ -284,56 +305,36 @@ def validate_request( return serializer -def get_group_ids_and_group_list( +def get_group_list( organization_id: int, projects: Sequence[Project], - group_ids: Sequence[int | str] | None, - search_fn: SearchFunction | None, -) -> tuple[list[int | str], list[Group]]: + group_ids: Sequence[int | str], +) -> list[Group]: """ - Gets group IDs and group list based on provided filters. + Gets group list based on provided filters. 
Args: organization_id: ID of the organization projects: Sequence of projects to filter groups by - group_ids: Optional sequence of specific group IDs to fetch - search_fn: Optional search function to find groups if no IDs provided + group_ids: Sequence of specific group IDs to fetch - Returns: - Tuple of: - - List of group IDs that were found - - List of Group objects that were found - - Notes: - - If group_ids provided, filters to only valid groups in the org/projects - - If no group_ids but search_fn provided, uses search to find groups - - Limited to BULK_MUTATION_LIMIT results when using search + Returns: List of Group objects filtered to only valid groups in the org/projects """ - _group_ids: list[int | str] = [] - _group_list: list[Group] = [] - - if group_ids: - _group_list = list( + groups = [] + # Convert all group IDs to integers and filter out any non-integer values + group_ids_int = [int(gid) for gid in group_ids if str(gid).isdigit()] + if group_ids_int: + return list( Group.objects.filter( - project__organization_id=organization_id, project__in=projects, id__in=group_ids + project__organization_id=organization_id, project__in=projects, id__in=group_ids_int ) ) - # filter down group ids to only valid matches - _group_ids = [g.id for g in _group_list] - - if search_fn and not _group_ids: - # It can raise ValidationError - cursor_result, _ = search_fn( - { - "limit": BULK_MUTATION_LIMIT, - "paginator_options": {"max_limit": BULK_MUTATION_LIMIT}, - } - ) - - _group_list = list(cursor_result) - _group_ids = [g.id for g in _group_list] + else: + for group_id in group_ids: + if isinstance(group_id, str): + groups.append(Group.objects.by_qualified_short_id(organization_id, group_id)) - return _group_ids, _group_list + return groups def handle_resolve_in_release( @@ -459,6 +460,10 @@ def handle_resolve_in_release( except IndexError: release = None for group in group_list: + # If the group is already resolved, we don't need to do anything + if group.status == GroupStatus.RESOLVED: + continue + with transaction.atomic(router.db_for_write(Group)): process_group_resolution( group, @@ -706,9 +711,6 @@ def handle_other_status_updates( new_substatus = infer_substatus(new_status, new_substatus, status_details, group_list) with transaction.atomic(router.db_for_write(Group)): - # TODO(gilbert): update() doesn't call pre_save and bypasses any substatus defaulting we have there - # we should centralize the logic for validating and defaulting substatus values - # and refactor pre_save and the above new_substatus assignment to account for this status_updated = queryset.exclude(status=new_status).update( status=new_status, substatus=new_substatus ) diff --git a/src/sentry/api/helpers/ios_models.py b/src/sentry/api/helpers/ios_models.py new file mode 100644 index 00000000000000..ab296dbbc0bd1b --- /dev/null +++ b/src/sentry/api/helpers/ios_models.py @@ -0,0 +1,290 @@ +IPHONE4 = "iPhone 4" +IPHONE5 = "iPhone 5" +IPHONE5C = "iPhone 5c" +IPHONE5S = "iPhone 5s" +IPHONE7 = "iPhone 7" +IPHONE7PLUS = "iPhone 7 Plus" +IPHONE8 = "iPhone 8" +IPHONE8PLUS = "iPhone 8 Plus" +IPHONEX = "iPhone X" +IPHONEXSMAX = "iPhone XS Max" + +IPAD2 = "iPad 2" +IPADGEN3 = "iPad (3rd gen)" +IPADGEN4 = "iPad (4th gen)" +IPADGEN5 = "iPad (5th gen)" +IPADGEN6 = "iPad (6th gen)" +IPADGEN7 = "iPad (7th gen)" +IPADGEN8 = "iPad (8th gen)" +IPADGEN9 = "iPad (9th gen)" +IPADGEN10 = "iPad (10th gen)" + +IPADAIRGEN1 = "iPad Air (1st gen)" +IPADAIR2 = "iPad Air 2" +IPADAIRGEN3 = "iPad Air (3rd gen)" +IPADAIRGEN4 = "iPad Air (4th gen)" 
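# Usage sketch for this new helpers module: the IOS_MODELS dict defined further
# below maps raw Apple hardware identifiers to the readable names above, and a
# later hunk in this diff points get_readable_device_name at this import.
# Assumes the sentry package is importable.
from sentry.api.helpers.ios_models import IOS_MODELS

IOS_MODELS.get("iPhone14,5")    # -> "iPhone 13"
IOS_MODELS.get("SomethingElse") # -> None; unknown identifiers fall through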
+IPADAIRGEN5 = "iPad Air (5th gen)" +IPADAIRGEN6 = "iPad Air (6th gen)" +IPADAIRGEN7 = "iPad Air (7th gen)" + +IPADPRO9GEN1 = "iPad Pro (9.7-inch)" +IPADPRO10 = "iPad Pro (10.5-inch)" +IPADPRO11GEN1 = "iPad Pro (11-inch, 1st gen)" +IPADPRO11GEN2 = "iPad Pro (11-inch, 2nd gen)" +IPADPRO11GEN3 = "iPad Pro (11-inch, 3rd gen)" +IPADPRO11GEN4 = "iPad Pro (11-inch, 4th gen)" +IPADPRO11GEN5 = "iPad Pro (11 inch, 5th gen)" +IPADPRO12GEN1 = "iPad Pro (12.9-inch, 1st gen)" +IPADPRO12GEN2 = "iPad Pro (12.9-inch, 2nd gen)" +IPADPRO12GEN3 = "iPad Pro (12.9-inch, 3rd gen)" +IPADPRO12GEN4 = "iPad Pro (12.9-inch, 4th gen)" +IPADPRO12GEN5 = "iPad Pro (12.9-inch, 5th gen)" +IPADPRO12GEN6 = "iPad Pro (12.9-inch, 6th gen)" +IPADPRO12GEN7 = "iPad Pro (12.9-inch, 7th gen)" + +IPADMINIGEN1 = "iPad mini (1st gen)" +IPADMINI2 = "iPad mini 2" +IPADMINI3 = "iPad mini 3" +IPADMINI4 = "iPad mini 4" +IPADMINIGEN5 = "iPad mini (5th gen)" +IPADMINIGEN6 = "iPad mini (6th gen)" + +APPLEWATCHGEN1 = "Apple Watch (1st gen)" +APPLEWATCHSERIES1 = "Apple Watch Series 1" +APPLEWATCHSERIES2 = "Apple Watch Series 2" +APPLEWATCHSERIES3 = "Apple Watch Series 3" +APPLEWATCHSERIES4 = "Apple Watch Series 4" +APPLEWATCHSERIES5 = "Apple Watch Series 5" +APPLEWATCHSERIES6 = "Apple Watch Series 6" +APPLEWATCHSERIES7 = "Apple Watch Series 7" +APPLEWATCHSERIES8 = "Apple Watch Series 8" +APPLEWATCHSERIES9 = "Apple Watch Series 9" +APPLEWATCHSERIES10 = "Apple Watch Series 10" +APPLEWATCHSE1 = "Apple Watch SE (1st gen)" +APPLEWATCHSE2 = "Apple Watch SE (2nd gen)" + +APPLETVGEN1 = "Apple TV (1st gen)" +APPLETVGEN2 = "Apple TV (2nd gen)" +APPLETVGEN3 = "Apple TV (3rd gen)" + +# see https://theapplewiki.com/wiki/models +IOS_MODELS: dict[str, str] = { + # iPhone + "iPhone1,1": "iPhone (1st gen)", + "iPhone1,2": "iPhone 3G", + "iPhone2,1": "iPhone 3GS", + "iPhone3,1": IPHONE4, + "iPhone3,2": IPHONE4, + "iPhone3,3": IPHONE4, + "iPhone4,1": "iPhone 4S", + "iPhone5,1": IPHONE5, + "iPhone5,2": IPHONE5, + "iPhone5,3": IPHONE5C, + "iPhone5,4": IPHONE5C, + "iPhone6,1": IPHONE5S, + "iPhone6,2": IPHONE5S, + "iPhone7,2": "iPhone 6", + "iPhone7,1": "iPhone 6 Plus", + "iPhone8,1": "iPhone 6s", + "iPhone8,2": "iPhone 6s Plus", + "iPhone8,4": "iPhone SE (1st gen)", + "iPhone9,1": IPHONE7, + "iPhone9,3": IPHONE7, + "iPhone9,2": IPHONE7PLUS, + "iPhone9,4": IPHONE7PLUS, + "iPhone10,1": IPHONE8, + "iPhone10,4": IPHONE8, + "iPhone10,2": IPHONE8PLUS, + "iPhone10,5": IPHONE8PLUS, + "iPhone10,3": IPHONEX, + "iPhone10,6": IPHONEX, + "iPhone11,8": "iPhone XR", + "iPhone11,2": "iPhone XS", + "iPhone11,4": IPHONEXSMAX, + "iPhone11,6": IPHONEXSMAX, + "iPhone12,1": "iPhone 11", + "iPhone12,3": "iPhone 11 Pro", + "iPhone12,5": "iPhone 11 Pro Max", + "iPhone12,8": "iPhone SE (2nd gen)", + "iPhone13,1": "iPhone 12 mini", + "iPhone13,2": "iPhone 12", + "iPhone13,3": "iPhone 12 Pro", + "iPhone13,4": "iPhone 12 Pro Max", + "iPhone14,4": "iPhone 13 mini", + "iPhone14,5": "iPhone 13", + "iPhone14,2": "iPhone 13 Pro", + "iPhone14,3": "iPhone 13 Pro Max", + "iPhone14,6": "iPhone SE (3rd gen)", + "iPhone14,7": "iPhone 14", + "iPhone14,8": "iPhone 14 Plus", + "iPhone15,2": "iPhone 14 Pro", + "iPhone15,3": "iPhone 14 Pro Max", + "iPhone15,4": "iPhone 15", + "iPhone15,5": "iPhone 15 Plus", + "iPhone16,1": "iPhone 15 Pro", + "iPhone16,2": "iPhone 15 Pro Max", + "iPhone17,1": "iPhone 16 Pro", + "iPhone17,2": "iPhone 16 Pro Max", + "iPhone17,3": "iPhone 16", + "iPhone17,4": "iPhone 16 Plus", + # iPod Touch + "iPod1,1": "iPod touch (1st gen)", + "iPod2,1": "iPod touch (2nd gen)", + "iPod3,1": 
"iPod touch (3rd gen)", + "iPod4,1": "iPod touch (4th gen)", + "iPod5,1": "iPod touch (5th gen)", + "iPod7,1": "iPod touch (6th gen)", + "iPod9,1": "iPod touch (7th gen)", + # iPad + "iPad1,1": "iPad (1st gen)", + "iPad2,1": IPAD2, + "iPad2,2": IPAD2, + "iPad2,3": IPAD2, + "iPad2,4": IPAD2, + "iPad3,1": IPADGEN3, + "iPad3,2": IPADGEN3, + "iPad3,3": IPADGEN3, + "iPad3,4": IPADGEN4, + "iPad3,5": IPADGEN4, + "iPad3,6": IPADGEN4, + "iPad6,11": IPADGEN5, + "iPad6,12": IPADGEN5, + "iPad7,5": IPADGEN6, + "iPad7,6": IPADGEN6, + "iPad7,11": IPADGEN7, + "iPad7,12": IPADGEN7, + "iPad11,6": IPADGEN8, + "iPad11,7": IPADGEN8, + "iPad12,1": IPADGEN9, + "iPad12,2": IPADGEN9, + # iPad Air + "iPad4,1": IPADAIRGEN1, + "iPad4,2": IPADAIRGEN1, + "iPad4,3": IPADAIRGEN1, + "iPad5,3": IPADAIR2, + "iPad5,4": IPADAIR2, + "iPad11,3": IPADAIRGEN3, + "iPad11,4": IPADAIRGEN3, + "iPad13,1": IPADAIRGEN4, + "iPad13,2": IPADAIRGEN4, + "iPad13,16": IPADAIRGEN5, + "iPad13,17": IPADAIRGEN5, + "iPad14,8": IPADAIRGEN6, + "iPad14,9": IPADAIRGEN6, + "iPad14,10": IPADAIRGEN7, + "iPad14,11": IPADAIRGEN7, + # iPad Pro + "iPad6,7": IPADPRO12GEN1, + "iPad6,3": IPADPRO9GEN1, + "iPad6,4": IPADPRO9GEN1, + "iPad6,8": IPADPRO12GEN1, + "iPad7,1": IPADPRO12GEN2, + "iPad7,2": IPADPRO12GEN2, + "iPad7,3": IPADPRO10, + "iPad7,4": IPADPRO10, + "iPad8,1": IPADPRO11GEN1, + "iPad8,2": IPADPRO11GEN1, + "iPad8,3": IPADPRO11GEN1, + "iPad8,4": IPADPRO11GEN1, + "iPad8,5": IPADPRO12GEN3, + "iPad8,6": IPADPRO12GEN3, + "iPad8,7": IPADPRO12GEN3, + "iPad8,8": IPADPRO12GEN3, + "iPad8,9": IPADPRO11GEN2, + "iPad8,10": IPADPRO11GEN2, + "iPad8,11": IPADPRO12GEN4, + "iPad8,12": IPADPRO12GEN4, + "iPad13,4": IPADPRO11GEN3, + "iPad13,5": IPADPRO11GEN3, + "iPad13,6": IPADPRO11GEN3, + "iPad13,7": IPADPRO11GEN3, + "iPad13,8": IPADPRO12GEN5, + "iPad13,9": IPADPRO12GEN5, + "iPad13,10": IPADPRO12GEN5, + "iPad13,11": IPADPRO12GEN5, + "iPad14,3": IPADPRO11GEN4, + "iPad14,4": IPADPRO11GEN4, + "iPad14,5": IPADPRO12GEN6, + "iPad14,6": IPADPRO12GEN6, + "iPad16,3": IPADPRO11GEN5, + "iPad16,4": IPADPRO11GEN5, + "iPad16,5": IPADPRO12GEN7, + "iPad16,6": IPADPRO12GEN7, + # iPad Mini + "iPad2,5": IPADMINIGEN1, + "iPad2,6": IPADMINIGEN1, + "iPad2,7": IPADMINIGEN1, + "iPad4,4": IPADMINI2, + "iPad4,5": IPADMINI2, + "iPad4,6": IPADMINI2, + "iPad4,7": IPADMINI3, + "iPad4,8": IPADMINI3, + "iPad4,9": IPADMINI3, + "iPad5,1": IPADMINI4, + "iPad5,2": IPADMINI4, + "iPad11,1": IPADMINIGEN5, + "iPad11,2": IPADMINIGEN5, + "iPad13,18": IPADGEN10, + "iPad13,19": IPADGEN10, + "iPad14,1": IPADMINIGEN6, + "iPad14,2": IPADMINIGEN6, + # Apple Watch + "Watch1,1": APPLEWATCHGEN1, + "Watch1,2": APPLEWATCHGEN1, + "Watch2,6": APPLEWATCHSERIES1, + "Watch2,7": APPLEWATCHSERIES1, + "Watch2,3": APPLEWATCHSERIES2, + "Watch2,4": APPLEWATCHSERIES2, + "Watch3,1": APPLEWATCHSERIES3, + "Watch3,2": APPLEWATCHSERIES3, + "Watch3,3": APPLEWATCHSERIES3, + "Watch3,4": APPLEWATCHSERIES3, + "Watch4,1": APPLEWATCHSERIES4, + "Watch4,2": APPLEWATCHSERIES4, + "Watch4,3": APPLEWATCHSERIES4, + "Watch4,4": APPLEWATCHSERIES4, + "Watch5,1": APPLEWATCHSERIES5, + "Watch5,2": APPLEWATCHSERIES5, + "Watch5,3": APPLEWATCHSERIES5, + "Watch5,4": APPLEWATCHSERIES5, + "Watch6,3": APPLEWATCHSERIES6, + "Watch6,4": APPLEWATCHSERIES6, + "Watch6,6": APPLEWATCHSERIES7, + "Watch6,7": APPLEWATCHSERIES7, + "Watch6,8": APPLEWATCHSERIES7, + "Watch6,9": APPLEWATCHSERIES7, + "Watch6,14": APPLEWATCHSERIES8, + "Watch6,15": APPLEWATCHSERIES8, + "Watch6,16": APPLEWATCHSERIES8, + "Watch6,17": APPLEWATCHSERIES8, + "Watch7,1": APPLEWATCHSERIES9, + "Watch7,2": 
APPLEWATCHSERIES9, + "Watch7,3": APPLEWATCHSERIES9, + "Watch7,4": APPLEWATCHSERIES9, + "Watch7,8": APPLEWATCHSERIES10, + "Watch7,9": APPLEWATCHSERIES10, + "Watch7,10": APPLEWATCHSERIES10, + "Watch7,11": APPLEWATCHSERIES10, + # Apple Watch SE + "Watch5,9": APPLEWATCHSE1, + "Watch5,10": APPLEWATCHSE1, + "Watch5,11": APPLEWATCHSE1, + "Watch5,12": APPLEWATCHSE1, + "Watch6,10": APPLEWATCHSE2, + "Watch6,11": APPLEWATCHSE2, + "Watch6,12": APPLEWATCHSE2, + "Watch6,13": APPLEWATCHSE2, + # Apple Watch Ultra + "Watch6,18": "Apple Watch Ultra (1st gen)", + "Watch7,5": "Apple Watch Ultra (2nd gen)", + # Apple TV + "AppleTV1,1": "Apple TV (1st gen)", + "AppleTV2,1": "Apple TV (2nd gen)", + "AppleTV3,1": APPLETVGEN3, + "AppleTV3,2": APPLETVGEN3, + "AppleTV5,3": "Apple TV (4th gen)", + "AppleTV6,2": "Apple TV 4K", + "AppleTV11,1": "Apple TV 4K (2nd gen)", + "i386": "iOS Simulator (i386)", + "x86_64": "iOS Simulator (x86_64)", +} diff --git a/src/sentry/api/helpers/mobile.py b/src/sentry/api/helpers/mobile.py index b00a5a5f3b3c52..c991c3d2526670 100644 --- a/src/sentry/api/helpers/mobile.py +++ b/src/sentry/api/helpers/mobile.py @@ -1,7 +1,7 @@ from __future__ import annotations from sentry.api.helpers.android_models import ANDROID_MODELS -from sentry.profiles.device import IOS_MODELS +from sentry.api.helpers.ios_models import IOS_MODELS def get_readable_device_name(device: str) -> str | None: diff --git a/src/sentry/api/serializers/models/environment.py b/src/sentry/api/serializers/models/environment.py index 5f4413f45b9013..4f69d7468d4d08 100644 --- a/src/sentry/api/serializers/models/environment.py +++ b/src/sentry/api/serializers/models/environment.py @@ -1,15 +1,7 @@ -from collections import namedtuple -from datetime import timedelta from typing import TypedDict -from django.utils import timezone - -from sentry import tsdb from sentry.api.serializers import Serializer, register from sentry.models.environment import Environment, EnvironmentProject -from sentry.tsdb.base import TSDBModel - -StatsPeriod = namedtuple("StatsPeriod", ("segments", "interval")) class EnvironmentSerializerResponse(TypedDict): @@ -39,49 +31,3 @@ def serialize( "name": obj.environment.name, "isHidden": obj.is_hidden is True, } - - -class GroupEnvironmentWithStatsSerializer(EnvironmentSerializer): - STATS_PERIODS = { - "24h": StatsPeriod(24, timedelta(hours=1)), - "30d": StatsPeriod(30, timedelta(hours=24)), - } - - def __init__(self, group, since=None, until=None): - self.group = group - self.since = since - self.until = until - - def get_attrs(self, item_list, user, **kwargs): - attrs = {item: {"stats": {}} for item in item_list} - items = {self.group.id: []} - for item in item_list: - items[self.group.id].append(item.id) - - for key, (segments, interval) in self.STATS_PERIODS.items(): - until = self.until or timezone.now() - since = self.since or until - (segments * interval) - - try: - stats = tsdb.get_frequency_series( - model=TSDBModel.frequent_environments_by_group, - items=items, - start=since, - end=until, - rollup=int(interval.total_seconds()), - ) - except NotImplementedError: - # TODO(dcramer): probably should log this, but not worth - # erring out - stats = {} - - for item in item_list: - attrs[item]["stats"][key] = [ - (k, v[item.id]) for k, v in stats.get(self.group.id, {}) - ] - return attrs - - def serialize(self, obj, attrs, user, **kwargs): - result = super().serialize(obj, attrs, user) - result["stats"] = attrs["stats"] - return result diff --git a/src/sentry/api/serializers/models/event.py 
b/src/sentry/api/serializers/models/event.py index 1d2255fa2e3941..2cbc423f51dc8e 100644 --- a/src/sentry/api/serializers/models/event.py +++ b/src/sentry/api/serializers/models/event.py @@ -172,7 +172,6 @@ class TransactionEventFields(TypedDict, total=False): endTimestamp: datetime measurements: Any breakdowns: Any - _metrics_summary: Any class EventSerializerResponse( @@ -377,10 +376,6 @@ def __serialize_transaction_attrs(self, attrs, obj) -> TransactionEventFields: "breakdowns": obj.data.get("breakdowns"), } - # The _ reflects the temporary nature of this field. - if (transaction_metrics_summary := obj.data.get("_metrics_summary")) is not None: - transaction_attrs["_metrics_summary"] = transaction_metrics_summary - return transaction_attrs def __serialize_error_attrs(self, attrs, obj) -> ErrorEventFields: diff --git a/src/sentry/api/serializers/models/group.py b/src/sentry/api/serializers/models/group.py index b229c161ccc933..4a78c94cb8f270 100644 --- a/src/sentry/api/serializers/models/group.py +++ b/src/sentry/api/serializers/models/group.py @@ -317,7 +317,7 @@ def get_attrs( return result def serialize( - self, obj: Group, attrs: MutableMapping[str, Any], user: Any, **kwargs: Any + self, obj: Group, attrs: Mapping[str, Any], user: Any, **kwargs: Any ) -> BaseGroupSerializerResponse: status_details, status_label = self._get_status(attrs, obj) permalink = self._get_permalink(attrs, obj) @@ -390,7 +390,7 @@ def _collapse(self, key) -> bool: return False return key in self.collapse - def _get_status(self, attrs: MutableMapping[str, Any], obj: Group): + def _get_status(self, attrs: Mapping[str, Any], obj: Group): status = obj.status status_details = {} if attrs["ignore_until"]: @@ -850,7 +850,7 @@ def __seen_stats_impl( class SharedGroupSerializer(GroupSerializer): def serialize( - self, obj: Group, attrs: MutableMapping[str, Any], user: Any, **kwargs: Any + self, obj: Group, attrs: Mapping[str, Any], user: Any, **kwargs: Any ) -> BaseGroupSerializerResponse: result = super().serialize(obj, attrs, user) diff --git a/src/sentry/api/serializers/models/organization_access_request.py b/src/sentry/api/serializers/models/organization_access_request.py index 0e5587d295465c..f96ad98652a2a5 100644 --- a/src/sentry/api/serializers/models/organization_access_request.py +++ b/src/sentry/api/serializers/models/organization_access_request.py @@ -5,17 +5,41 @@ @register(OrganizationAccessRequest) class OrganizationAccessRequestSerializer(Serializer): - def serialize(self, obj, attrs, user, **kwargs): - serialized_user = None - if obj.requester_id: - serialized_users = user_service.serialize_many(filter=dict(user_ids=[obj.requester_id])) - if serialized_users: - serialized_user = serialized_users[0] + def get_attrs(self, item_list, user, **kwargs): + + serialized_requesters = user_service.serialize_many( + filter=dict(user_ids=[item.requester_id for item in item_list if item.requester_id]) + ) + + serialized_requesters_by_id = { + int(requester["id"]): requester for requester in serialized_requesters + } + + serialized_members = serialize( + [item.member for item in item_list], + user, + ) + + serialized_members_by_id = {int(member["id"]): member for member in serialized_members} - d = { + serialized_teams = serialize([item.team for item in item_list], user) + + serialized_teams_by_id = {int(team["id"]): team for team in serialized_teams} + + return { + item: { + "requester": serialized_requesters_by_id.get(item.requester_id), + "member": serialized_members_by_id.get(item.member_id), + "team": 
serialized_teams_by_id.get(item.team_id), + } + for item in item_list + } + + def serialize(self, obj, attrs, user, **kwargs): + serialized_access_request = { "id": str(obj.id), - "member": serialize(obj.member), - "team": serialize(obj.team), - "requester": serialized_user, + "member": attrs["member"], + "team": attrs["team"], + "requester": attrs["requester"], } - return d + return serialized_access_request diff --git a/src/sentry/api/serializers/models/project.py b/src/sentry/api/serializers/models/project.py index 840d4af67c0dce..3b98730448b3af 100644 --- a/src/sentry/api/serializers/models/project.py +++ b/src/sentry/api/serializers/models/project.py @@ -80,7 +80,6 @@ "servicehooks", "similarity-embeddings", "similarity-embeddings-delete-by-hash", - "similarity-embeddings-backfill", } diff --git a/src/sentry/api/serializers/models/team.py b/src/sentry/api/serializers/models/team.py index f64f993e5c72cd..0aa1845775702e 100644 --- a/src/sentry/api/serializers/models/team.py +++ b/src/sentry/api/serializers/models/team.py @@ -2,10 +2,11 @@ import dataclasses from collections import defaultdict -from collections.abc import Mapping, MutableMapping, MutableSequence, Sequence +from collections.abc import Mapping, Sequence from datetime import datetime -from typing import TYPE_CHECKING, AbstractSet, Any, TypedDict +from typing import TYPE_CHECKING, Any, TypedDict +from django.contrib.auth.models import AnonymousUser from django.db.models import Count from sentry import roles @@ -39,9 +40,9 @@ def _get_team_memberships( team_list: Sequence[Team], - user: User, + user: User | AnonymousUser, optimization: SingularRpcAccessOrgOptimization | None = None, -) -> Mapping[int, str | None]: +) -> dict[int, str | None]: """Get memberships the user has in the provided team list""" if not user.is_authenticated: return {} @@ -62,7 +63,7 @@ def _get_team_memberships( } -def get_member_totals(team_list: Sequence[Team], user: User) -> Mapping[str, int]: +def get_member_totals(team_list: Sequence[Team], user: User | AnonymousUser) -> dict[int, int]: """Get the total number of members in each team""" if not user.is_authenticated: return {} @@ -79,8 +80,10 @@ def get_member_totals(team_list: Sequence[Team], user: User) -> Mapping[str, int def get_org_roles( - org_ids: set[int], user: User, optimization: SingularRpcAccessOrgOptimization | None = None -) -> Mapping[int, str]: + org_ids: set[int], + user: User | AnonymousUser, + optimization: SingularRpcAccessOrgOptimization | None = None, +) -> dict[int, str]: """ Get the roles the user has in each org """ @@ -103,7 +106,7 @@ def get_org_roles( } -def get_access_requests(item_list: Sequence[Team], user: User) -> AbstractSet[Team]: +def get_access_requests(item_list: Sequence[Team], user: User | AnonymousUser) -> frozenset[int]: if user.is_authenticated: return frozenset( OrganizationAccessRequest.objects.filter( @@ -123,7 +126,7 @@ class BaseTeamSerializerResponse(TypedDict): id: str slug: str name: str - dateCreated: datetime + dateCreated: datetime | None isMember: bool teamRole: str | None flags: dict[str, Any] @@ -179,9 +182,11 @@ def _collapse(self, key: str) -> bool: def get_attrs( self, item_list: Sequence[Team], user: User, **kwargs: Any - ) -> MutableMapping[Team, MutableMapping[str, Any]]: + ) -> dict[Team, dict[str, Any]]: + from sentry.api.serializers.models.project import ProjectSerializer + request = env.request - org_ids: set[int] = {t.organization_id for t in item_list} + org_ids = {t.organization_id for t in item_list} assert len(org_ids) == 1, 
"Cross organization query for teams" @@ -195,13 +200,14 @@ def get_attrs( access_requests = get_access_requests(item_list, user) is_superuser = request and is_active_superuser(request) and request.user == user - result: MutableMapping[Team, MutableMapping[str, Any]] = {} + result: dict[Team, dict[str, Any]] = {} organization = Organization.objects.get_from_cache(id=list(org_ids)[0]) for team in item_list: is_member = team.id in team_memberships org_role = roles_by_org.get(team.organization_id) - team_role_id, team_role_scopes = team_memberships.get(team.id), set() + team_role_id = team_memberships.get(team.id) + team_role_scopes: frozenset[str] = frozenset() has_access = bool( is_member @@ -240,7 +246,11 @@ def get_attrs( projects = [pt.project for pt in project_teams] projects_by_id = { - project.id: data for project, data in zip(projects, serialize(projects, user)) + project.id: data + for project, data in zip( + projects, + serialize(projects, user, ProjectSerializer(collapse=["unusedFeatures"])), + ) } project_map = defaultdict(list) @@ -268,7 +278,7 @@ def get_attrs( def serialize( self, obj: Team, attrs: Mapping[str, Any], user: Any, **kwargs: Any ) -> BaseTeamSerializerResponse: - result: BaseTeamSerializerResponse = { + return { "id": str(obj.id), "slug": obj.slug, "name": obj.name, @@ -284,8 +294,6 @@ def serialize( "avatar": {"avatarType": "letter_avatar", "avatarUuid": None}, } - return result - # See TeamSerializerResponse for explanation as to why this is needed class TeamSerializer(BaseTeamSerializer): @@ -294,17 +302,19 @@ def serialize( ) -> TeamSerializerResponse: result = super().serialize(obj, attrs, user, **kwargs) + opt: _TeamSerializerResponseOptional = {} + # Expandable attributes. if self._expand("externalTeams"): - result["externalTeams"] = attrs["externalTeams"] + opt["externalTeams"] = attrs["externalTeams"] if self._expand("organization"): - result["organization"] = serialize(obj.organization, user) + opt["organization"] = serialize(obj.organization, user) if self._expand("projects"): - result["projects"] = attrs["projects"] + opt["projects"] = attrs["projects"] - return result + return {**result, **opt} class TeamWithProjectsSerializer(TeamSerializer): @@ -316,14 +326,14 @@ def __init__(self) -> None: def get_scim_teams_members( team_list: Sequence[Team], -) -> MutableMapping[Team, MutableSequence[MutableMapping[str, Any]]]: +) -> dict[Team, list[dict[str, Any]]]: members = RangeQuerySetWrapper( OrganizationMember.objects.filter(teams__in=team_list) .prefetch_related("teams") .distinct("id"), limit=10000, ) - member_map: MutableMapping[Team, MutableSequence[MutableMapping[str, Any]]] = defaultdict(list) + member_map: dict[Team, list[dict[str, Any]]] = defaultdict(list) for member in members: for team in member.teams.all(): member_map[team].append({"value": str(member.id), "display": member.get_email()}) @@ -382,16 +392,16 @@ def __init__( def get_attrs( self, item_list: Sequence[Team], user: Any, **kwargs: Any - ) -> Mapping[Team, MutableMapping[str, Any]]: + ) -> dict[Team, dict[str, Any]]: - result: MutableMapping[int, MutableMapping[str, Any]] = { + result: dict[int, dict[str, Any]] = { team.id: ({"members": []} if "members" in self.expand else {}) for team in item_list } - teams_by_id: Mapping[int, Team] = {t.id: t for t in item_list} + teams_by_id = {t.id: t for t in item_list} if teams_by_id and "members" in self.expand: - team_ids: list[int] = [t.id for t in item_list] - team_memberships: list[TeamMembership] = get_team_memberships(team_ids=team_ids) + 
team_ids = [t.id for t in item_list] + team_memberships = get_team_memberships(team_ids=team_ids) for team_member in team_memberships: for team_id in team_member.team_ids: diff --git a/src/sentry/api/urls.py b/src/sentry/api/urls.py index ab5ea0cba6d58a..ab1953a40c79e0 100644 --- a/src/sentry/api/urls.py +++ b/src/sentry/api/urls.py @@ -178,7 +178,6 @@ GroupHashesEndpoint, GroupNotesDetailsEndpoint, GroupNotesEndpoint, - GroupParticipantsEndpoint, GroupSimilarIssuesEmbeddingsEndpoint, GroupSimilarIssuesEndpoint, GroupTombstoneDetailsEndpoint, @@ -313,6 +312,11 @@ from sentry.sentry_apps.api.endpoints.sentry_internal_app_tokens import ( SentryInternalAppTokensEndpoint, ) +from sentry.tempest.endpoints.tempest_credentials import TempestCredentialsEndpoint +from sentry.tempest.endpoints.tempest_credentials_details import TempestCredentialsDetailsEndpoint +from sentry.uptime.endpoints.organiation_uptime_alert_index import ( + OrganizationUptimeAlertIndexEndpoint, +) from sentry.uptime.endpoints.project_uptime_alert_details import ProjectUptimeAlertDetailsEndpoint from sentry.uptime.endpoints.project_uptime_alert_index import ProjectUptimeAlertIndexEndpoint from sentry.users.api.endpoints.authenticator_index import AuthenticatorIndexEndpoint @@ -792,11 +796,6 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: GroupFirstLastReleaseEndpoint.as_view(), name=f"{name_prefix}-group-first-last-release", ), - re_path( - r"^(?P[^\/]+)/participants/$", - GroupParticipantsEndpoint.as_view(), - name=f"{name_prefix}-group-participants", - ), re_path( r"^(?P[^\/]+)/autofix/$", GroupAutofixEndpoint.as_view(), @@ -2197,6 +2196,12 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: OrganizationForkEndpoint.as_view(), name="sentry-api-0-organization-fork", ), + # Uptime + re_path( + r"^(?P[^\/]+)/uptime/$", + OrganizationUptimeAlertIndexEndpoint.as_view(), + name="sentry-api-0-organization-uptime-alert-index", + ), ] PROJECT_URLS: list[URLPattern | URLResolver] = [ @@ -2787,6 +2792,17 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: ProjectUptimeAlertIndexEndpoint.as_view(), name="sentry-api-0-project-uptime-alert-index", ), + # Tempest + re_path( + r"^(?P[^\/]+)/(?P[^\/]+)/tempest-credentials/$", + TempestCredentialsEndpoint.as_view(), + name="sentry-api-0-project-tempest-credentials", + ), + re_path( + r"^(?P[^\/]+)/(?P[^\/]+)/tempest-credentials/(?P\d+)/$", + TempestCredentialsDetailsEndpoint.as_view(), + name="sentry-api-0-project-tempest-credentials-details", + ), *workflow_urls.urlpatterns, ] diff --git a/src/sentry/apidocs/examples/tags_examples.py b/src/sentry/apidocs/examples/tags_examples.py index 08325a83443976..de4cf843ea0dc5 100644 --- a/src/sentry/apidocs/examples/tags_examples.py +++ b/src/sentry/apidocs/examples/tags_examples.py @@ -7,17 +7,17 @@ "totalValues": 3, "topValues": [ { - "key": "chunkymonkey", - "name": "Chunky Monkey", - "value": "chunkymonkey", + "key": "strawberry", + "name": "Strawberry", + "value": "strawberry", "count": 2, "lastSeen": "2024-01-01T00:00:00Z", "firstSeen": "2024-01-01T00:00:00Z", }, { - "key": "halfbaked", - "name": "Half Baked", - "value": "halfbaked", + "key": "vanilla", + "name": "Vanilla", + "value": "vanilla", "count": 1, "lastSeen": "2024-01-01T00:00:00Z", "firstSeen": "2024-01-01T00:00:00Z", @@ -25,6 +25,41 @@ ], } +SIMPLE_TAG_VALUES = [ + { + "key": "strawberry", + "name": "Strawberry", + "value": "strawberry", + "count": 2, + "lastSeen": "2024-01-01T00:00:00Z", + 
"firstSeen": "2024-01-01T00:00:00Z", + }, + { + "key": "vanilla", + "name": "Vanilla", + "value": "vanilla", + "count": 1, + "lastSeen": "2024-01-01T00:00:00Z", + "firstSeen": "2024-01-01T00:00:00Z", + }, + { + "key": "chocolate", + "name": "Chocolate", + "value": "chocolate", + "count": 1, + "lastSeen": "2024-01-01T00:00:00Z", + "firstSeen": "2024-01-01T00:00:00Z", + }, + { + "key": "Neopolitan", + "name": "Neopolitan", + "value": "neopolitan", + "count": 1, + "lastSeen": "2024-01-01T00:00:00Z", + "firstSeen": "2024-01-01T00:00:00Z", + }, +] + class TagsExamples: GROUP_TAGKEY_DETAILS = OpenApiExample( @@ -33,3 +68,10 @@ class TagsExamples: response_only=True, status_codes=["200"], ) + + GROUP_TAGKEY_VALUES = OpenApiExample( + "Return all tag values for a specific tag", + value=SIMPLE_TAG_VALUES, + response_only=True, + status_codes=["200"], + ) diff --git a/src/sentry/apidocs/parameters.py b/src/sentry/apidocs/parameters.py index 863f74aa30d05e..2e77189235a0d9 100644 --- a/src/sentry/apidocs/parameters.py +++ b/src/sentry/apidocs/parameters.py @@ -284,6 +284,14 @@ class SCIMParams: class IssueParams: + KEY = OpenApiParameter( + name="key", + location=OpenApiParameter.PATH, + type=OpenApiTypes.STR, + description="The tag key to look the values up for.", + required=True, + ) + ISSUES_OR_GROUPS = OpenApiParameter( name="var", location="path", @@ -299,6 +307,15 @@ class IssueParams: description="The ID of the issue you'd like to query.", ) + SORT = OpenApiParameter( + name="sort", + location="query", + required=False, + type=str, + description="Sort order of the resulting tag values. Prefix with '-' for descending order. Default is '-id'.", + enum=["id", "date", "age", "count"], + ) + class IssueAlertParams: ISSUE_RULE_ID = OpenApiParameter( @@ -418,6 +435,13 @@ class UptimeParams: type=int, description="The ID of the uptime alert rule you'd like to query.", ) + OWNER = OpenApiParameter( + name="owner", + location="query", + required=False, + type=str, + description="The owner of the uptime alert, in the format `user:id` or `team:id`. May be specified multiple times.", + ) class EventParams: @@ -445,6 +469,41 @@ class EventParams: description="Index of the exception that should be used for source map resolution.", ) + EVENT_ID_EXTENDED = OpenApiParameter( + name="event_id", + type=OpenApiTypes.STR, + location=OpenApiParameter.PATH, + description="The ID of the event to retrieve, or 'latest', 'oldest', or 'recommended'.", + required=True, + enum=["latest", "oldest", "recommended"], + ) + + FULL_PAYLOAD = OpenApiParameter( + name="full", + type=OpenApiTypes.BOOL, + location=OpenApiParameter.QUERY, + description="Specify true to include the full event body, including the stacktrace, in the event payload.", + required=False, + default=False, + ) + + SAMPLE = OpenApiParameter( + name="sample", + type=OpenApiTypes.BOOL, + location=OpenApiParameter.QUERY, + description="Return events in pseudo-random order. 
This is deterministic so an identical query will always return the same events in the same order.", + required=False, + default=False, + ) + + QUERY = OpenApiParameter( + name="query", + location=OpenApiParameter.QUERY, + type=OpenApiTypes.STR, + description="An optional search query for filtering events.", + required=False, + ) + class ProjectParams: FILTER_ID = OpenApiParameter( diff --git a/src/sentry/auth/services/auth/model.py b/src/sentry/auth/services/auth/model.py index d0bbd928b60133..82254cc4bad5ad 100644 --- a/src/sentry/auth/services/auth/model.py +++ b/src/sentry/auth/services/auth/model.py @@ -22,7 +22,7 @@ class RpcApiKey(RpcModel): id: int = -1 organization_id: int = -1 - key: str = "" + key: str = Field(repr=False, default="") status: int = 0 allowed_origins: list[str] = Field(default_factory=list) label: str = "" @@ -35,8 +35,8 @@ class RpcApiToken(RpcModel): organization_id: int | None = None application_id: int | None = None application_is_active: bool = False - token: str = "" - hashed_token: str | None = None + token: str = Field(repr=False, default="") + hashed_token: str | None = Field(repr=False, default=None) expires_at: datetime.datetime | None = None allowed_origins: list[str] = Field(default_factory=list) scope_list: list[str] = Field(default_factory=list) diff --git a/src/sentry/auth/superuser.py b/src/sentry/auth/superuser.py index bdf0921bdcd823..fa48351cad52e6 100644 --- a/src/sentry/auth/superuser.py +++ b/src/sentry/auth/superuser.py @@ -155,12 +155,6 @@ class SuperuserAccessFormInvalidJson(SentryAPIException): message = "The request contains invalid json" -class EmptySuperuserAccessForm(SentryAPIException): - status_code = status.HTTP_400_BAD_REQUEST - code = "empty-superuser-access-form" - message = "The request contains an empty superuser access form data" - - class Superuser(ElevatedMode): allowed_ips = frozenset(ipaddress.ip_network(str(v), strict=False) for v in ALLOWED_IPS) org_id = SUPERUSER_ORG_ID @@ -456,13 +450,6 @@ def enable_and_log_superuser_access(): tags={"reason": SuperuserAccessFormInvalidJson.code}, ) raise SuperuserAccessFormInvalidJson() - except AttributeError: - metrics.incr( - "superuser.failure", - sample_rate=1.0, - tags={"reason": EmptySuperuserAccessForm.code}, - ) - raise EmptySuperuserAccessForm() su_access_info = SuperuserAccessSerializer(data=su_access_json) diff --git a/src/sentry/backup/comparators.py b/src/sentry/backup/comparators.py index 840c3d85a73e53..3d4cfc0eca942a 100644 --- a/src/sentry/backup/comparators.py +++ b/src/sentry/backup/comparators.py @@ -903,6 +903,9 @@ def get_default_comparators() -> dict[str, list[JSONScrubbingComparator]]: "workflow_engine.alertruletriggerdatacondition": [ DateUpdatedComparator("date_updated", "date_added") ], + "tempest.tempestcredentials": [ + DateUpdatedComparator("date_updated", "date_added"), + ], }, ) diff --git a/src/sentry/backup/dependencies.py b/src/sentry/backup/dependencies.py index 4a7184e1187993..e0cf4a543dd356 100644 --- a/src/sentry/backup/dependencies.py +++ b/src/sentry/backup/dependencies.py @@ -95,28 +95,23 @@ def __repr__(self) -> str: # # TODO(getsentry/team-ospo#190): We should find a better way to store this information than a magic # list in this file. We should probably make a field (or method?) on `BaseModel` instead. -@unique -class RelocationRootModels(Enum): - """ - Record the "root" models for a given `RelocationScope`. 
- """ - - Excluded: list[NormalizedModelName] = [] - User = [NormalizedModelName("sentry.user")] - Organization = [NormalizedModelName("sentry.organization")] - Config = [ - NormalizedModelName("sentry.controloption"), - NormalizedModelName("sentry.option"), - NormalizedModelName("sentry.relay"), - NormalizedModelName("sentry.relayusage"), - NormalizedModelName("sentry.userrole"), - ] +_ROOT_MODELS: tuple[NormalizedModelName, ...] = ( + # RelocationScope.User + NormalizedModelName("sentry.user"), + # RelocationScope.Organization + NormalizedModelName("sentry.organization"), + # RelocationScope.Config + NormalizedModelName("sentry.controloption"), + NormalizedModelName("sentry.option"), + NormalizedModelName("sentry.relay"), + NormalizedModelName("sentry.relayusage"), + NormalizedModelName("sentry.userrole"), + # RelocationScope.Global # TODO(getsentry/team-ospo#188): Split out extension scope root models from this list. - Global = [ - NormalizedModelName("sentry.apiapplication"), - NormalizedModelName("sentry.integration"), - NormalizedModelName("sentry.sentryapp"), - ] + NormalizedModelName("sentry.apiapplication"), + NormalizedModelName("sentry.integration"), + NormalizedModelName("sentry.sentryapp"), +) @unique @@ -433,6 +428,10 @@ def dependencies() -> dict[NormalizedModelName, ModelRelations]: if model._meta.app_label in {"sessions", "sites", "test", "getsentry"}: continue + # exclude proxy models since the backup test is already done on a parent if needed + if model._meta.proxy: + continue + foreign_keys: dict[str, ForeignField] = dict() uniques: set[frozenset[str]] = { frozenset(combo) for combo in model._meta.unique_together @@ -538,10 +537,7 @@ def dependencies() -> dict[NormalizedModelName, ModelRelations]: ) # Get a flat list of "root" models, then mark all of them as non-dangling. 
- relocation_root_models: list[NormalizedModelName] = [] - for root_models in RelocationRootModels: - relocation_root_models.extend(root_models.value) - for model_name in relocation_root_models: + for model_name in _ROOT_MODELS: model_dependencies_dict[model_name].dangling = False # TODO(getsentry/team-ospo#190): In practice, we can treat `AlertRule`'s dependency on diff --git a/src/sentry/conf/api_pagination_allowlist_do_not_modify.py b/src/sentry/conf/api_pagination_allowlist_do_not_modify.py index d3ad9bce77bc6a..ab3353d9b670b1 100644 --- a/src/sentry/conf/api_pagination_allowlist_do_not_modify.py +++ b/src/sentry/conf/api_pagination_allowlist_do_not_modify.py @@ -16,7 +16,6 @@ "GitlabIssueSearchEndpoint", "GroupEventsEndpoint", "GroupIntegrationsEndpoint", - "GroupParticipantsEndpoint", "GroupSimilarIssuesEmbeddingsEndpoint", "GroupStatsEndpoint", "GroupTagsEndpoint", diff --git a/src/sentry/conf/server.py b/src/sentry/conf/server.py index a0ea57ef0ad929..0d0ed2a0d40e50 100644 --- a/src/sentry/conf/server.py +++ b/src/sentry/conf/server.py @@ -21,12 +21,13 @@ SENTRY_API_PAGINATION_ALLOWLIST_DO_NOT_MODIFY, ) from sentry.conf.types.celery import SplitQueueSize, SplitQueueTaskRoute -from sentry.conf.types.kafka_definition import ConsumerDefinition +from sentry.conf.types.kafka_definition import ConsumerDefinition, Topic from sentry.conf.types.logging_config import LoggingConfig from sentry.conf.types.role_dict import RoleDict from sentry.conf.types.sdk_config import ServerSdkConfig from sentry.conf.types.sentry_config import SentryMode from sentry.conf.types.service_options import ServiceOptions +from sentry.conf.types.uptime import UptimeRegionConfig from sentry.utils import json # NOQA (used in getsentry config) from sentry.utils.celery import crontab_with_minute_jitter, make_split_task_queues from sentry.utils.types import Type, type_from_value @@ -405,6 +406,7 @@ def env( "sentry.flags", "sentry.monitors", "sentry.uptime", + "sentry.tempest", "sentry.replays", "sentry.release_health", "sentry.search", @@ -835,9 +837,6 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: "sentry.integrations.tasks", ) -# tmp(michal): Default configuration for post_process* queues split -SENTRY_POST_PROCESS_QUEUE_SPLIT_ROUTER: dict[str, Callable[[], str]] = {} - # Enable split queue routing CELERY_ROUTES = ("sentry.queue.routers.SplitQueueTaskRouter",) @@ -854,7 +853,14 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: "total": 3, "in_use": 3, }, - } + }, + "sentry.profiles.task.process_profile": { + "default_queue": "profiles.process", + "queues_config": { + "total": 3, + "in_use": 3, + }, + }, } CELERY_SPLIT_TASK_QUEUES_REGION = make_split_task_queues(CELERY_SPLIT_QUEUE_TASK_ROUTES_REGION) @@ -945,6 +951,7 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: "dynamicsampling", routing_key="dynamicsampling", ), + Queue("tempest", routing_key="tempest"), Queue("incidents", routing_key="incidents"), Queue("incident_snapshots", routing_key="incident_snapshots"), Queue("incidents", routing_key="incidents"), @@ -1225,8 +1232,8 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: }, "weekly-escalating-forecast": { "task": "sentry.tasks.weekly_escalating_forecast.run_escalating_forecast", - # Run every 6 hours - "schedule": crontab(minute="0", hour="*/6"), + # Run once a day at 00:00 + "schedule": crontab(minute="0", hour="0"), "options": {"expires": 60 * 60 * 3}, }, "schedule_auto_transition_to_ongoing": { @@ -1332,7 +1339,10 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: # The list of modules that workers will import after starting 
up # Like celery, taskworkers need to import task modules to make tasks # accessible to the worker. -TASKWORKER_IMPORTS: tuple[str, ...] = () +TASKWORKER_IMPORTS: tuple[str, ...] = ( + # Used for tests + "sentry.taskworker.tasks.examples", +) TASKWORKER_ROUTER: str = "sentry.taskworker.router.DefaultRouter" TASKWORKER_ROUTES: dict[str, str] = {} @@ -1461,7 +1471,18 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: "PARSER_WHITELIST": ["rest_framework.parsers.JSONParser"], "POSTPROCESSING_HOOKS": ["sentry.apidocs.hooks.custom_postprocessing_hook"], "PREPROCESSING_HOOKS": ["sentry.apidocs.hooks.custom_preprocessing_hook"], - "SERVERS": [{"url": "https://us.sentry.io"}, {"url": "https://de.sentry.io"}], + "SERVERS": [ + { + "url": "https://{region}.sentry.io", + "variables": { + "region": { + "default": "us", + "description": "The data-storage-location for an organization", + "enum": ["us", "de"], + }, + }, + }, + ], "SORT_OPERATION_PARAMETERS": custom_parameter_sort, "TAGS": OPENAPI_TAGS, "TITLE": "API Reference", @@ -2232,6 +2253,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: # This flag activates uptime checks in the developemnt environment SENTRY_USE_UPTIME = False +# This flag activates the taskbroker in devservices +SENTRY_USE_TASKBROKER = False + # SENTRY_DEVSERVICES = { # "service-name": lambda settings, options: ( # { @@ -2412,6 +2436,21 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: "platform": "linux/amd64", } ), + "taskbroker": lambda settings, options: ( + { + "image": "ghcr.io/getsentry/taskbroker:latest", + "ports": {"50051/tcp": 50051}, + "environment": { + "TASKBROKER_KAFKA_CLUSTER": ( + "kafka-kafka-1" + if os.environ.get("USE_NEW_DEVSERVICES") == "1" + else "sentry_kafka" + ), + }, + "only_if": settings.SENTRY_USE_TASKBROKER, + "platform": "linux/amd64", + } + ), "bigtable": lambda settings, options: ( { "image": "ghcr.io/getsentry/cbtemulator:d28ad6b63e461e8c05084b8c83f1c06627068c04", @@ -2493,7 +2532,7 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_SELF_HOSTED_ERRORS_ONLY = False # only referenced in getsentry to provide the stable beacon version # updated with scripts/bump-version.sh -SELF_HOSTED_STABLE_VERSION = "24.11.1" +SELF_HOSTED_STABLE_VERSION = "24.12.1" # Whether we should look at X-Forwarded-For header or not # when checking REMOTE_ADDR ip addresses @@ -2940,6 +2979,7 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: "remote_subscriptions", "uptime", "workflow_engine", + "tempest", ) # Where to write the lockfile to. MIGRATIONS_LOCKFILE_PATH = os.path.join(PROJECT_ROOT, os.path.pardir, os.path.pardir) @@ -3354,7 +3394,6 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: "sentry-api-0-group-integrations", "sentry-api-0-group-integration-details", "sentry-api-0-group-current-release", - "sentry-api-0-group-participants", "sentry-api-0-shared-group-details", # Unscoped profiling URLs "sentry-api-0-profiling-project-profile", @@ -3408,6 +3447,16 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SIMILARITY_BACKFILL_COHORT_MAP: dict[str, list[int]] = {} +UPTIME_REGIONS = [ + UptimeRegionConfig( + slug="default", + name="Default Region", + config_topic=Topic.UPTIME_CONFIGS, + enabled=True, + ), +] + + # Devserver configuration overrides. 
ngrok_host = os.environ.get("SENTRY_DEVSERVER_NGROK") if ngrok_host: diff --git a/src/sentry/conf/types/kafka_definition.py b/src/sentry/conf/types/kafka_definition.py index 59ae1228343494..f0013025c3de45 100644 --- a/src/sentry/conf/types/kafka_definition.py +++ b/src/sentry/conf/types/kafka_definition.py @@ -86,6 +86,8 @@ class ConsumerDefinition(TypedDict, total=False): dlq_max_invalid_ratio: float | None dlq_max_consecutive_count: int | None + stale_topic: Topic + def validate_consumer_definition(consumer_definition: ConsumerDefinition) -> None: if "dlq_topic" not in consumer_definition and ( diff --git a/src/sentry/conf/types/uptime.py b/src/sentry/conf/types/uptime.py new file mode 100644 index 00000000000000..2e645848a70ae4 --- /dev/null +++ b/src/sentry/conf/types/uptime.py @@ -0,0 +1,15 @@ +import dataclasses + +from sentry.conf.types.kafka_definition import Topic + + +@dataclasses.dataclass +class UptimeRegionConfig: + """ + Defines a region which uptime checks can be run in. + """ + + slug: str + name: str + config_topic: Topic + enabled: bool diff --git a/src/sentry/consumers/__init__.py b/src/sentry/consumers/__init__.py index bb21f85fadb759..cbd369fcc298fc 100644 --- a/src/sentry/consumers/__init__.py +++ b/src/sentry/consumers/__init__.py @@ -6,11 +6,10 @@ import click from arroyo.backends.abstract import Consumer -from arroyo.backends.kafka import KafkaProducer from arroyo.backends.kafka.configuration import build_kafka_consumer_configuration from arroyo.backends.kafka.consumer import KafkaConsumer from arroyo.commit import ONCE_PER_SECOND -from arroyo.dlq import DlqLimit, DlqPolicy, KafkaDlqProducer +from arroyo.dlq import DlqPolicy from arroyo.processing.processor import StreamProcessor from arroyo.processing.strategies import Healthcheck from arroyo.processing.strategies.abstract import ProcessingStrategy, ProcessingStrategyFactory @@ -22,11 +21,12 @@ Topic, validate_consumer_definition, ) +from sentry.consumers.dlq import DlqStaleMessagesStrategyFactoryWrapper, maybe_build_dlq_producer from sentry.consumers.validate_schema import ValidateSchema from sentry.eventstream.types import EventStreamEventType from sentry.ingest.types import ConsumerType from sentry.utils.imports import import_string -from sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition +from sentry.utils.kafka_config import get_topic_definition logger = logging.getLogger(__name__) @@ -135,6 +135,37 @@ def ingest_monitors_options() -> list[click.Option]: return options +def uptime_options() -> list[click.Option]: + """Return a list of uptime-results options.""" + options = [ + click.Option( + ["--mode", "mode"], + type=click.Choice(["serial", "parallel"]), + default="serial", + help="The mode to process results in. Parallel uses multithreading.", + ), + click.Option( + ["--max-batch-size", "max_batch_size"], + type=int, + default=500, + help="Maximum number of results to batch before processing in parallel.", + ), + click.Option( + ["--max-batch-time", "max_batch_time"], + type=int, + default=1, + help="Maximum time spent batching results to batch before processing in parallel.", + ), + click.Option( + ["--max-workers", "max_workers"], + type=int, + default=None, + help="The maximum number of threads to spawn in parallel mode.", + ), + ] + return options + + def ingest_events_options() -> list[click.Option]: """ Options for the "events"-like consumers: `events`, `attachments`, `transactions`. 
@@ -263,6 +294,7 @@ def ingest_transactions_options() -> list[click.Option]: "uptime-results": { "topic": Topic.UPTIME_RESULTS, "strategy_factory": "sentry.uptime.consumers.results_consumer.UptimeResultsStrategyFactory", + "click_options": uptime_options(), }, "billing-metrics-consumer": { "topic": Topic.SNUBA_GENERIC_METRICS, @@ -315,6 +347,7 @@ def ingest_transactions_options() -> list[click.Option]: "consumer_type": ConsumerType.Events, }, "dlq_topic": Topic.INGEST_EVENTS_DLQ, + "stale_topic": Topic.INGEST_EVENTS_DLQ, }, "ingest-feedback-events": { "topic": Topic.INGEST_FEEDBACK_EVENTS, @@ -339,6 +372,7 @@ def ingest_transactions_options() -> list[click.Option]: "strategy_factory": "sentry.ingest.consumer.factory.IngestTransactionsStrategyFactory", "click_options": ingest_transactions_options(), "dlq_topic": Topic.INGEST_TRANSACTIONS_DLQ, + "stale_topic": Topic.INGEST_TRANSACTIONS_DLQ, }, "ingest-metrics": { "topic": Topic.INGEST_METRICS, @@ -348,8 +382,6 @@ def ingest_transactions_options() -> list[click.Option]: "ingest_profile": "release-health", }, "dlq_topic": Topic.INGEST_METRICS_DLQ, - "dlq_max_invalid_ratio": 0.01, - "dlq_max_consecutive_count": 1000, }, "ingest-generic-metrics": { "topic": Topic.INGEST_PERFORMANCE_METRICS, @@ -359,8 +391,6 @@ def ingest_transactions_options() -> list[click.Option]: "ingest_profile": "performance", }, "dlq_topic": Topic.INGEST_GENERIC_METRICS_DLQ, - "dlq_max_invalid_ratio": None, - "dlq_max_consecutive_count": None, }, "generic-metrics-last-seen-updater": { "topic": Topic.SNUBA_GENERIC_METRICS, @@ -423,15 +453,6 @@ def ingest_transactions_options() -> list[click.Option]: } -def print_deprecation_warning(name, group_id): - import click - - click.echo( - f"WARNING: Deprecated command, use sentry run consumer {name} " - f"--consumer-group {group_id} ..." 
- ) - - def get_stream_processor( consumer_name: str, consumer_args: Sequence[str], @@ -446,6 +467,8 @@ def get_stream_processor( synchronize_commit_group: str | None = None, healthcheck_file_path: str | None = None, enable_dlq: bool = True, + # If set, messages above this age will be rerouted to the stale topic if one is configured + stale_threshold_sec: int | None = None, enforce_schema: bool = False, group_instance_id: str | None = None, ) -> StreamProcessor: @@ -555,37 +578,35 @@ def build_consumer_config(group_id: str): consumer_topic.value, enforce_schema, strategy_factory ) + if stale_threshold_sec: + strategy_factory = DlqStaleMessagesStrategyFactoryWrapper( + stale_threshold_sec, strategy_factory + ) + if healthcheck_file_path is not None: strategy_factory = HealthcheckStrategyFactoryWrapper( healthcheck_file_path, strategy_factory ) if enable_dlq and consumer_definition.get("dlq_topic"): - try: - dlq_topic = consumer_definition["dlq_topic"] - except KeyError as e: - raise click.BadParameter( - f"Cannot enable DLQ for consumer: {consumer_name}, no DLQ topic has been defined for it" - ) from e - try: - dlq_topic_defn = get_topic_definition(dlq_topic) - cluster_setting = dlq_topic_defn["cluster"] - except ValueError as e: - raise click.BadParameter( - f"Cannot enable DLQ for consumer: {consumer_name}, DLQ topic {dlq_topic} is not configured in this environment" - ) from e + dlq_topic = consumer_definition["dlq_topic"] + else: + dlq_topic = None - producer_config = get_kafka_producer_cluster_options(cluster_setting) - dlq_producer = KafkaProducer(producer_config) + if stale_threshold_sec and consumer_definition.get("stale_topic"): + stale_topic = consumer_definition["stale_topic"] + else: + stale_topic = None + dlq_producer = maybe_build_dlq_producer(dlq_topic=dlq_topic, stale_topic=stale_topic) + + if dlq_producer: dlq_policy = DlqPolicy( - KafkaDlqProducer(dlq_producer, ArroyoTopic(dlq_topic_defn["real_topic_name"])), - DlqLimit( - max_invalid_ratio=consumer_definition.get("dlq_max_invalid_ratio"), - max_consecutive_count=consumer_definition.get("dlq_max_consecutive_count"), - ), + dlq_producer, + None, None, ) + else: dlq_policy = None diff --git a/src/sentry/consumers/dlq.py b/src/sentry/consumers/dlq.py new file mode 100644 index 00000000000000..4e8ea5f7939d25 --- /dev/null +++ b/src/sentry/consumers/dlq.py @@ -0,0 +1,163 @@ +import logging +import time +from collections.abc import Mapping, MutableMapping +from concurrent.futures import Future +from datetime import datetime, timedelta, timezone +from enum import Enum + +from arroyo.backends.kafka import KafkaPayload, KafkaProducer +from arroyo.dlq import InvalidMessage, KafkaDlqProducer +from arroyo.processing.strategies.abstract import ProcessingStrategy, ProcessingStrategyFactory +from arroyo.types import FILTERED_PAYLOAD, BrokerValue, Commit, FilteredPayload, Message, Partition +from arroyo.types import Topic as ArroyoTopic +from arroyo.types import Value + +from sentry.conf.types.kafka_definition import Topic +from sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition + +logger = logging.getLogger(__name__) + + +class RejectReason(Enum): + STALE = "stale" + INVALID = "invalid" + + +class MultipleDestinationDlqProducer(KafkaDlqProducer): + """ + Produces to either the DLQ or stale message topic depending on the reason. 
+ """ + + def __init__( + self, + producers: Mapping[RejectReason, KafkaDlqProducer | None], + ) -> None: + self.producers = producers + + def produce( + self, + value: BrokerValue[KafkaPayload], + reason: str | None = None, + ) -> Future[BrokerValue[KafkaPayload]]: + + reject_reason = RejectReason(reason) if reason else RejectReason.INVALID + producer = self.producers.get(reject_reason) + + if producer: + return producer.produce(value) + else: + # No DLQ producer configured for the reason. + logger.error("No DLQ producer configured for reason %s", reason) + future: Future[BrokerValue[KafkaPayload]] = Future() + future.set_running_or_notify_cancel() + future.set_result(value) + return future + + +def _get_dlq_producer(topic: Topic | None) -> KafkaDlqProducer | None: + if topic is None: + return None + + topic_defn = get_topic_definition(topic) + config = get_kafka_producer_cluster_options(topic_defn["cluster"]) + real_topic = topic_defn["real_topic_name"] + return KafkaDlqProducer(KafkaProducer(config), ArroyoTopic(real_topic)) + + +def maybe_build_dlq_producer( + dlq_topic: Topic | None, + stale_topic: Topic | None, +) -> MultipleDestinationDlqProducer | None: + if dlq_topic is None and stale_topic is None: + return None + + producers = { + RejectReason.INVALID: _get_dlq_producer(dlq_topic), + RejectReason.STALE: _get_dlq_producer(stale_topic), + } + + return MultipleDestinationDlqProducer(producers) + + +class DlqStaleMessages(ProcessingStrategy[KafkaPayload]): + def __init__( + self, + stale_threshold_sec: int, + next_step: ProcessingStrategy[KafkaPayload | FilteredPayload], + ) -> None: + self.stale_threshold_sec = stale_threshold_sec + self.next_step = next_step + + # A filtered message is created so we commit periodically if all are stale. + self.last_forwarded_offsets = time.time() + self.offsets_to_forward: MutableMapping[Partition, int] = {} + + def submit(self, message: Message[KafkaPayload]) -> None: + min_accepted_timestamp = datetime.now(timezone.utc) - timedelta( + seconds=self.stale_threshold_sec + ) + + if isinstance(message.value, BrokerValue): + # Normalize the message timezone to be UTC + if message.value.timestamp.tzinfo is None: + message_timestamp = message.value.timestamp.replace(tzinfo=timezone.utc) + else: + message_timestamp = message.value.timestamp + + if message_timestamp < min_accepted_timestamp: + self.offsets_to_forward[message.value.partition] = message.value.next_offset + raise InvalidMessage( + message.value.partition, + message.value.offset, + reason=RejectReason.STALE.value, + log_exception=False, + ) + + # If we get a valid message for a partition later, don't emit a filtered message for it + if self.offsets_to_forward: + for partition in message.committable: + self.offsets_to_forward.pop(partition, None) + + self.next_step.submit(message) + + def poll(self) -> None: + self.next_step.poll() + + # Ensure we commit frequently even if all messages are invalid + if self.offsets_to_forward: + if time.time() > self.last_forwarded_offsets + 1: + filtered_message = Message(Value(FILTERED_PAYLOAD, self.offsets_to_forward)) + self.next_step.submit(filtered_message) + self.offsets_to_forward = {} + self.last_forwarded_offsets = time.time() + + def join(self, timeout: float | None = None) -> None: + self.next_step.join(timeout) + + def close(self) -> None: + self.next_step.close() + + def terminate(self) -> None: + self.next_step.terminate() + + +class DlqStaleMessagesStrategyFactoryWrapper(ProcessingStrategyFactory[KafkaPayload]): + """ + Wrapper used to dlq a 
message with a stale timestamp before it is passed to + the rest of the pipeline. The InvalidMessage is raised with a + "stale" reason so it can be routed to a separate stale topic. + """ + + def __init__( + self, + stale_threshold_sec: int, + inner: ProcessingStrategyFactory[KafkaPayload | FilteredPayload], + ) -> None: + self.stale_threshold_sec = stale_threshold_sec + self.inner = inner + + def create_with_partitions( + self, commit: Commit, partitions: Mapping[Partition, int] + ) -> ProcessingStrategy[KafkaPayload]: + rv = self.inner.create_with_partitions(commit, partitions) + return DlqStaleMessages(self.stale_threshold_sec, rv) diff --git a/src/sentry/data_secrecy/api/waive_data_secrecy.py b/src/sentry/data_secrecy/api/waive_data_secrecy.py index 5f59ba863423cf..995f3337fa5934 100644 --- a/src/sentry/data_secrecy/api/waive_data_secrecy.py +++ b/src/sentry/data_secrecy/api/waive_data_secrecy.py @@ -150,6 +150,5 @@ def delete(self, request: Request, organization: Organization): event=audit_log.get_event_id("DATA_SECRECY_REINSTATED"), ) return Response( - {"detail": "Data secrecy has been reinstated."}, status=status.HTTP_204_NO_CONTENT, ) diff --git a/src/sentry/debug/utils/function_wrapper.py b/src/sentry/debug/utils/function_wrapper.py deleted file mode 100644 index 433373faf9cfbf..00000000000000 --- a/src/sentry/debug/utils/function_wrapper.py +++ /dev/null @@ -1,32 +0,0 @@ -from time import time - - -class FunctionWrapper: - def __init__(self, collector): - self.collector = collector - - def __call__(self, func, *args, **kwargs): - __traceback_hide__ = True # NOQA - - start = time() - try: - return func(*args, **kwargs) - finally: - end = time() - - if getattr(func, "im_class", None): - arg_str = repr(args[1:]) - else: - arg_str = repr(args) - - data = { - "name": func.__name__, - "args": arg_str, - "kwargs": repr(kwargs), - "start": start, - "end": end, - } - self.record(data) - - def record(self, data): - self.collector.append(data) diff --git a/src/sentry/dynamic_sampling/tasks/custom_rule_notifications.py b/src/sentry/dynamic_sampling/tasks/custom_rule_notifications.py index b4e7c79c37608c..c7de2adb66c0f0 100644 --- a/src/sentry/dynamic_sampling/tasks/custom_rule_notifications.py +++ b/src/sentry/dynamic_sampling/tasks/custom_rule_notifications.py @@ -167,6 +167,8 @@ def create_discover_link(rule: CustomDynamicSamplingRule, projects: list[int]) - q["utc"] = "true" q["yAxis"] = "count()" q["sort"] = "-timestamp" + q["queryDataset"] = "transaction-like" + q["dataset"] = "transactions" query_string = q.urlencode() discover_url = rule.organization.absolute_url( diff --git a/src/sentry/event_manager.py b/src/sentry/event_manager.py index bbb6d1dd9b3a07..790433a11f895a 100644 --- a/src/sentry/event_manager.py +++ b/src/sentry/event_manager.py @@ -185,10 +185,6 @@ def get_tag(data: dict[str, Any], key: str) -> Any | None: return None -def is_sample_event(job): - return get_tag(job["data"], "sample_event") == "yes" - - def sdk_metadata_from_event(event: Event) -> Mapping[str, Any]: """ Returns a metadata dictionary with "sdk" field populated, including a normalized name @@ -350,7 +346,7 @@ def __init__( grouping_config = config.get("grouping_config") # if we still don't have a grouping also try the project if grouping_config is None and project is not None: - grouping_config = get_grouping_config_dict_for_project(self._project) + grouping_config = get_grouping_config_dict_for_project(project) self._grouping_config = grouping_config self._client_ip = client_ip self._user_agent = 
user_agent @@ -2380,7 +2376,7 @@ def save_attachment( return from sentry import ratelimits as ratelimiter - is_limited, num_requests, reset_time = ratelimiter.backend.is_limited_with_value( + is_limited, _, _ = ratelimiter.backend.is_limited_with_value( key="event_attachment.save_per_sec", limit=options.get("sentry.save-event-attachments.project-per-sec-limit"), project=project, @@ -2388,7 +2384,7 @@ def save_attachment( ) rate_limit_tag = "per_sec" if not is_limited: - is_limited, num_requests, reset_time = ratelimiter.backend.is_limited_with_value( + is_limited, _, _ = ratelimiter.backend.is_limited_with_value( key="event_attachment.save_5_min", limit=options.get("sentry.save-event-attachments.project-per-5-minute-limit"), project=project, diff --git a/src/sentry/eventstore/base.py b/src/sentry/eventstore/base.py index e645cade39e658..f1a375f1cc80cb 100644 --- a/src/sentry/eventstore/base.py +++ b/src/sentry/eventstore/base.py @@ -292,7 +292,7 @@ def get_adjacent_event_ids(self, event, filter): """ raise NotImplementedError - def create_event(self, project_id=None, event_id=None, group_id=None, data=None): + def create_event(self, *, project_id: int, event_id=None, group_id=None, data=None): """ Returns an Event from processed data """ diff --git a/src/sentry/eventstore/models.py b/src/sentry/eventstore/models.py index 67a49708ce2a03..67c912449ef825 100644 --- a/src/sentry/eventstore/models.py +++ b/src/sentry/eventstore/models.py @@ -294,10 +294,7 @@ def project(self) -> Project: @project.setter def project(self, project: Project) -> None: - if project is None: - self.project_id = None - else: - self.project_id = project.id + self.project_id = project.id self._project_cache = project @cached_property @@ -339,14 +336,23 @@ def get_hashes_and_variants( """ variants = self.get_grouping_variants(config) + hashes_by_variant = { + variant_name: variant.get_hash() for variant_name, variant in variants.items() + } + # Sort the variants so that the system variant (if any) is always last, in order to resolve # ambiguities when choosing primary_hash for Snuba - sorted_variants = sorted( - variants.items(), - key=lambda name_and_variant: 1 if name_and_variant[0] == "system" else 0, + sorted_variant_names = sorted( + variants, + key=lambda variant_name: 1 if variant_name == "system" else 0, ) + # Get each variant's hash value, filtering out Nones - hashes = list({variant.get_hash() for _, variant in sorted_variants} - {None}) + hashes = [ + hashes_by_variant[variant_name] + for variant_name in sorted_variant_names + if hashes_by_variant[variant_name] is not None + ] # Write to event before returning self.data["hashes"] = hashes diff --git a/src/sentry/eventstream/kafka/backend.py b/src/sentry/eventstream/kafka/backend.py index 02954e35ce7588..e9b6e8bda5b1e5 100644 --- a/src/sentry/eventstream/kafka/backend.py +++ b/src/sentry/eventstream/kafka/backend.py @@ -128,17 +128,6 @@ def insert( ) -> None: event_type = self._get_event_type(event) - if event.get_tag("sample_event"): - logger.info( - "insert: inserting event in KafkaEventStream", - extra={ - "event.id": event.event_id, - "project_id": event.project_id, - "sample_event": True, - "event_type": event_type.value, - }, - ) - assign_partitions_randomly = ( (event_type == EventStreamEventType.Generic) or (event_type == EventStreamEventType.Transaction) @@ -152,14 +141,6 @@ def insert( kwargs[KW_SKIP_SEMANTIC_PARTITIONING] = True if event.get_tag("sample_event"): - logger.info( - "insert: inserting event in SnubaProtocolEventStream", - extra={ - 
"event.id": event.event_id, - "project_id": event.project_id, - "sample_event": True, - }, - ) kwargs["asynchronous"] = False super().insert( diff --git a/src/sentry/eventstream/snuba.py b/src/sentry/eventstream/snuba.py index bdc1835724c751..c3aa891cbf868b 100644 --- a/src/sentry/eventstream/snuba.py +++ b/src/sentry/eventstream/snuba.py @@ -112,15 +112,6 @@ def insert( eventstream_type: str | None = None, **kwargs: Any, ) -> None: - if event.get_tag("sample_event") == "true": - logger.info( - "insert: attempting to insert event in SnubaProtocolEventStream", - extra={ - "event.id": event.event_id, - "project_id": event.project_id, - "sample_event": True, - }, - ) if isinstance(event, GroupEvent) and not event.occurrence: logger.error( "`GroupEvent` passed to `EventStream.insert`. `GroupEvent` may only be passed when " diff --git a/src/sentry/features/temporary.py b/src/sentry/features/temporary.py index 8f688d52cd6f68..8e3241aa02668e 100644 --- a/src/sentry/features/temporary.py +++ b/src/sentry/features/temporary.py @@ -61,8 +61,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:anomaly-detection-alerts-charts", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable anr frame analysis manager.add("organizations:anr-analyze-frames", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable auth provider configuration through api - manager.add("organizations:api-auth-provider", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) manager.add("organizations:api-keys", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, default=False, api_expose=True) # Rollout of the new API rate limits for organization events manager.add("organizations:api-organization_events-rate-limit-reduced-rollout", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) @@ -111,8 +109,6 @@ def register_temporary_features(manager: FeatureManager): # Enable the dev toolbar PoC code for employees # Data Secrecy manager.add("organizations:data-secrecy", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable default metric alerts for new projects - manager.add("organizations:default-metric-alerts-new-projects", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:devtoolbar", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, default=False, api_expose=True) manager.add("organizations:email-performance-regression-image", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) # Enables automatically deriving of code mappings @@ -147,8 +143,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:increased-issue-owners-rate-limit", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Starfish: extract metrics from the spans manager.add("organizations:indexed-spans-extraction", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) - # Enable custom alert priorities for Pagerduty and Opsgenie - manager.add("organizations:integrations-custom-alert-priorities", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable integration functionality to work deployment integrations like Vercel manager.add("organizations:integrations-deployment", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, default=True, api_expose=True) manager.add("organizations:integrations-feature-flag-integration", OrganizationFeature, 
FeatureHandlerStrategy.INTERNAL, api_expose=False) @@ -193,14 +187,12 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:issue-stream-search-query-builder", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable issue stream table layout changes manager.add("organizations:issue-stream-table-layout", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # When enabled, uses the functional issue stream component + manager.add("organizations:issue-stream-functional-refactor", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:large-debug-files", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) manager.add("organizations:metric-issue-poc", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable members to invite teammates to organizations - manager.add("organizations:members-invite-teammates", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:mep-rollout-flag", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:mep-use-default-tags", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable messaging-integration onboarding when creating a new project - manager.add("organizations:messaging-integration-onboarding-project-creation", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable threshold period in metric alert rule builder manager.add("organizations:metric-alert-threshold-period", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Migrate Orgs to new Azure DevOps Integration @@ -327,6 +319,8 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:performance-use-metrics", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) # Enable showing INP web vital in default views manager.add("organizations:performance-vitals-inp", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + # Enable handling missing webvitals in performance score + manager.add("organizations:performance-vitals-handle-missing-webvitals", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable profiling manager.add("organizations:profiling", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) # Enabled for those orgs who participated in the profiling Beta program @@ -360,10 +354,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:release-comparison-performance", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # enable new release set_commits functionality manager.add("organizations:set-commits-updated", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable new release UI - manager.add("organizations:releases-v2", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - manager.add("organizations:releases-v2-internal", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - manager.add("organizations:releases-v2-st", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable playing replays from the replay tab manager.add("organizations:replay-play-from-replay-tab", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable version 2 of reprocessing (completely distinct from v1) @@ -424,6 +414,8 @@ def 
register_temporary_features(manager: FeatureManager): manager.add("organizations:session-replay-ui", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, default=True, api_expose=True) # Enable replay web vital breadcrumbs manager.add("organizations:session-replay-web-vitals", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, default=False, api_expose=True) + # Enable GA banner for mobile replay beta orgs about the grace period that will last 2 months. Flag can be removed after March 7th 2024. + manager.add("organizations:mobile-replay-beta-orgs", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, default=False, api_expose=False) # Enable Dev Toolbar frontend features (ex project settings page) manager.add("organizations:dev-toolbar-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, default=False, api_expose=True) # Lets organizations manage grouping configs @@ -433,8 +425,6 @@ def register_temporary_features(manager: FeatureManager): # Add regression chart as image to slack message manager.add("organizations:slack-endpoint-regression-image", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) manager.add("organizations:slack-function-regression-image", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) - # Enable linking to Slack alerts from multiple teams to a single channel - manager.add("organizations:slack-multiple-team-single-channel-linking", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) manager.add("organizations:stacktrace-processing-caching", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable SAML2 Single-logout manager.add("organizations:sso-saml2-slo", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) @@ -456,12 +446,14 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:insights-related-issues-table", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable access to Mobile Screens insights module manager.add("organizations:insights-mobile-screens-module", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable access to insights crons view (moved from crons sidebar) + manager.add("organizations:insights-crons", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable access to insights uptime view + manager.add("organizations:insights-uptime", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable standalone span ingestion manager.add("organizations:standalone-span-ingestion", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable the aggregate span waterfall view manager.add("organizations:starfish-aggregate-span-waterfall", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable bundle analysis ui and endpoint - manager.add("organizations:starfish-browser-resource-module-bundle-analysis", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables the resource module ui manager.add("organizations:starfish-browser-resource-module-image-view", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enables the resource module ui @@ -535,14 +527,16 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:visibility-explore-view", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable the dataset toggle on the new explore page 
manager.add("organizations:visibility-explore-dataset", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable minimap in the widget viewer modal in dashboards - manager.add("organizations:widget-viewer-modal-minimap", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable RPC on the new explore page + manager.add("organizations:visibility-explore-rpc", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enabled unresolved issue webhook for organization manager.add("organizations:webhooks-unresolved", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=True) # Enable dual writing for metric alert issues (see: alerts create issues) - manager.add("organizations:workflow-engine-m3-dual-write", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + manager.add("organizations:workflow-engine-metric-alert-dual-write", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + # Enable Processing for Metric Alerts in the workflow_engine + manager.add("organizations:workflow-engine-metric-alert-processing", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable reading from new ACI tables for metric alert issues (see: alerts create issues) - manager.add("organizations:workflow-engine-m3-read", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + manager.add("organizations:workflow-engine-metric-alert-read", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable new workflow_engine UI (see: alerts create issues) manager.add("organizations:workflow-engine-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable EventUniqueUserFrequencyConditionWithConditions special alert condition @@ -570,16 +564,12 @@ def register_temporary_features(manager: FeatureManager): manager.add("projects:first-event-severity-calculation", ProjectFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable escalation detection for new issues manager.add("projects:first-event-severity-new-escalation", ProjectFeature, FeatureHandlerStrategy.INTERNAL, default=True, api_expose=False) - # Enable functionality for attaching minidumps to events and displaying - # them in the group UI. - manager.add("projects:minidump", ProjectFeature, FeatureHandlerStrategy.INTERNAL, default=True, api_expose=True) # Enable alternative version of group creation that is supposed to be less racy. 
manager.add("projects:race-free-group-creation", ProjectFeature, FeatureHandlerStrategy.INTERNAL, default=True, api_expose=False) # Enable similarity embeddings API call # This feature is only available on the frontend using project details since the handler gets # project options and this is slow in the project index endpoint feature flag serialization manager.add("projects:similarity-embeddings", ProjectFeature, FeatureHandlerStrategy.INTERNAL, default=False, api_expose=True) - manager.add("projects:similarity-embeddings-backfill", ProjectFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) manager.add("projects:similarity-embeddings-delete-by-hash", ProjectFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) manager.add("projects:similarity-indexing", ProjectFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) manager.add("projects:similarity-view", ProjectFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) @@ -623,3 +613,11 @@ def register_temporary_features(manager: FeatureManager): FeatureHandlerStrategy.FLAGPOLE, api_expose=False, ) + + # Controls access to tempest features + manager.add( + "organizations:tempest-access", + OrganizationFeature, + FeatureHandlerStrategy.FLAGPOLE, + api_expose=True, + ) diff --git a/src/sentry/feedback/usecases/create_feedback.py b/src/sentry/feedback/usecases/create_feedback.py index 3a9fb3b267c475..b1cfc6ffb34ba7 100644 --- a/src/sentry/feedback/usecases/create_feedback.py +++ b/src/sentry/feedback/usecases/create_feedback.py @@ -230,7 +230,6 @@ def create_feedback_issue(event, project_id: int, source: FeedbackCreationSource "feedback.create_feedback_issue.entered", tags={ "referrer": source.value, - "client_source": get_path(event, "contexts", "feedback", "source"), }, ) @@ -257,7 +256,6 @@ def create_feedback_issue(event, project_id: int, source: FeedbackCreationSource tags={ "is_spam": is_message_spam, "referrer": source.value, - "client_source": event["contexts"]["feedback"].get("source"), }, sample_rate=1.0, ) @@ -345,7 +343,6 @@ def create_feedback_issue(event, project_id: int, source: FeedbackCreationSource "feedback.create_feedback_issue.produced_occurrence", tags={ "referrer": source.value, - "client_source": event["contexts"]["feedback"].get("source"), }, sample_rate=1.0, ) diff --git a/src/sentry/feedback/usecases/spam_detection.py b/src/sentry/feedback/usecases/spam_detection.py index e567d130fdece1..40f88332a015fc 100644 --- a/src/sentry/feedback/usecases/spam_detection.py +++ b/src/sentry/feedback/usecases/spam_detection.py @@ -34,8 +34,8 @@ def make_input_prompt(message: str): @metrics.wraps("feedback.spam_detection", sample_rate=1.0) def is_spam(message: str): - is_spam = False - trimmed_response = "" + labeled_spam = False + _trimmed_response = "" response = complete_prompt( usecase=LLMUseCase.SPAM_DETECTION, message=make_input_prompt(message), @@ -43,18 +43,9 @@ def is_spam(message: str): max_output_tokens=20, ) if response: - is_spam, trimmed_response = trim_response(response) - - logger.info( - "Spam detection", - extra={ - "feedback_message": message, - "is_spam": is_spam, - "response": response, - "trimmed_response": trimmed_response, - }, - ) - return is_spam + labeled_spam, _trimmed_response = trim_response(response) + + return labeled_spam def trim_response(text): diff --git a/src/sentry/flags/docs/api.md b/src/sentry/flags/docs/api.md index 5e63588b40142c..11a8313b8328ca 100644 --- a/src/sentry/flags/docs/api.md +++ b/src/sentry/flags/docs/api.md @@ -147,7 +147,54 @@ Delete a signing secret. 
## Webhooks [/organizations//flags/hooks/provider//] -### Create Flag Log [POST] +### Create Generic Flag Log [POST] + +A flag log event must be emitted after every flag definition change which influences a flag's evaluation. Updates to a flag that do not change a flag's evaluation logic do not need to be emitted to this endpoint. We are only concerned with changes that could have influenced behavior. + +Sentry does not currently have a concept of disambiguating flag changes by project or environment. Everything is done at the organization level. Flag changes that are duplicated across projects, environments, or other groupings within the provider must be de-duplicated. To support this, the posted payload sets a "change_id" field for idempotency. In the presence of duplicate ids, only one audit-log record is written in Sentry. + +**Data Attributes** + +| Column | Type | Description | +| --------------- | ------ | -------------------------------------------------------------- | +| action | string | Enum of `created`, `updated`, or `deleted`. | +| change_id | number | A 64-bit idempotency token representing a unique change group. | +| created_at | string | String formatted UTC date time: YYYY-MM-DDTHH:MM:SS. | +| created_by | object | Created-by object. | +| created_by.id | string | User identifier which made the change. | +| created_by.type | string | Enum of `email`, `id`, or `name`. | +| flag | string | The name of the flag changed. | + +**Meta Attributes** + +| Column | Type | Description | +| ------- | ---- | --------------------- | +| version | int | The protocol version. | + +- Request (application/json) + + ```json + { + "data": [ + { + "action": "created", + "change_id": 2048, + "created_at": "2024-12-12T00:02:00+00:00", + "created_by": { + "id": "first.last@company.com", + "type": "email" + }, + "flag": "hello.world" + } + ], + "meta": { + "version": 1 + } + } + ``` + +- Response 201 + +### Create Provider-Specific Flag Log [POST] The shape of the request object varies by provider. The `` URI parameter informs the server of the shape of the request and it is on the server to handle the provider. The following providers are supported: LaunchDarkly. 
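Below is a minimal sketch of how a client could sign and deliver the generic payload documented above. It assumes the `requests` library plus an illustrative base URL, organization slug, and 32-character signing secret (created through the signing-secret endpoint); the signature is the HMAC-SHA256 hex digest of the raw request body, sent in the `X-Sentry-Signature` header, mirroring the validation the generic provider in this change performs.

```python
import hashlib
import hmac
import json

import requests  # illustrative HTTP client; any client works

SENTRY_BASE_URL = "https://sentry.io/api/0"  # assumed API prefix
ORG_SLUG = "my-org"  # hypothetical organization slug
SIGNING_SECRET = "f" * 32  # hypothetical 32-character signing secret

payload = {
    "data": [
        {
            "action": "updated",
            "change_id": 2048,
            "created_at": "2024-12-12T00:02:00+00:00",
            "created_by": {"id": "first.last@company.com", "type": "email"},
            "flag": "hello.world",
        }
    ],
    "meta": {"version": 1},
}

# Sign the exact bytes that are sent: the server recomputes the HMAC-SHA256
# hex digest of the raw body with each stored secret and compares it to the
# X-Sentry-Signature header.
body = json.dumps(payload).encode("utf-8")
signature = hmac.new(SIGNING_SECRET.encode(), body, hashlib.sha256).hexdigest()

response = requests.post(
    f"{SENTRY_BASE_URL}/organizations/{ORG_SLUG}/flags/hooks/provider/generic/",
    data=body,
    headers={"Content-Type": "application/json", "X-Sentry-Signature": signature},
)
response.raise_for_status()
```

Posting the already-encoded `body` (rather than letting the client re-serialize the dict) keeps the signed bytes identical to the delivered bytes, which is what the signature comparison requires.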
diff --git a/src/sentry/flags/endpoints/__init__.py b/src/sentry/flags/endpoints/__init__.py index be4cef59b09284..e69de29bb2d1d6 100644 --- a/src/sentry/flags/endpoints/__init__.py +++ b/src/sentry/flags/endpoints/__init__.py @@ -1,12 +0,0 @@ -from sentry.api.bases.organization import OrganizationEndpoint -from sentry.api.exceptions import ResourceDoesNotExist - -VALID_PROVIDERS = {"launchdarkly"} - - -class OrganizationFlagsEndpoint(OrganizationEndpoint): - - def convert_args(self, *args, **kwargs): - if kwargs.get("provider", "") not in VALID_PROVIDERS: - raise ResourceDoesNotExist - return super().convert_args(*args, **kwargs) diff --git a/src/sentry/flags/endpoints/hooks.py b/src/sentry/flags/endpoints/hooks.py index e223f602df2363..b948a88ec7ec5b 100644 --- a/src/sentry/flags/endpoints/hooks.py +++ b/src/sentry/flags/endpoints/hooks.py @@ -6,20 +6,14 @@ from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint +from sentry.api.bases.organization import OrganizationEndpoint from sentry.api.exceptions import ResourceDoesNotExist -from sentry.flags.endpoints import OrganizationFlagsEndpoint -from sentry.flags.providers import ( - DeserializationError, - InvalidProvider, - handle_provider_event, - validate_provider_event, - write, -) +from sentry.flags.providers import DeserializationError, get_provider, write from sentry.models.organization import Organization @region_silo_endpoint -class OrganizationFlagsHooksEndpoint(OrganizationFlagsEndpoint): +class OrganizationFlagsHooksEndpoint(OrganizationEndpoint): authentication_classes = () owner = ApiOwner.REPLAY permission_classes = () @@ -32,18 +26,14 @@ def post(self, request: Request, organization: Organization, provider: str) -> R return Response("Not enabled.", status=404) try: - if not validate_provider_event( - provider, - request.body, - request.headers, - organization.id, - ): + provider_cls = get_provider(organization.id, provider, request.headers) + if provider_cls is None: + raise ResourceDoesNotExist + elif not provider_cls.validate(request.body): return Response("Not authorized.", status=401) - - write(handle_provider_event(provider, request.data, organization.id)) - return Response(status=200) - except InvalidProvider: - raise ResourceDoesNotExist + else: + write(provider_cls.handle(request.data)) + return Response(status=200) except DeserializationError as exc: sentry_sdk.capture_exception() return Response(exc.errors, status=200) diff --git a/src/sentry/flags/endpoints/secrets.py b/src/sentry/flags/endpoints/secrets.py index 17a03d98b3a045..d572c9d5efb805 100644 --- a/src/sentry/flags/endpoints/secrets.py +++ b/src/sentry/flags/endpoints/secrets.py @@ -11,7 +11,10 @@ from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint -from sentry.api.bases.organization import OrganizationEndpoint, OrgAuthTokenPermission +from sentry.api.bases.organization import ( + OrganizationEndpoint, + OrganizationFlagWebHookSigningSecretPermission, +) from sentry.api.paginator import OffsetPaginator from sentry.api.serializers import Serializer, register, serialize from sentry.flags.models import FlagWebHookSigningSecretModel @@ -39,14 +42,16 @@ def serialize(self, obj, attrs, user, **kwargs) -> FlagWebhookSigningSecretRespo class FlagWebhookSigningSecretValidator(serializers.Serializer): - provider = serializers.ChoiceField(choices=[("launchdarkly", 
"launchdarkly")], required=True) + provider = serializers.ChoiceField( + choices=["launchdarkly", "generic", "unleash"], required=True + ) secret = serializers.CharField(required=True, max_length=32, min_length=32) @region_silo_endpoint class OrganizationFlagsWebHookSigningSecretsEndpoint(OrganizationEndpoint): owner = ApiOwner.REPLAY - permission_classes = (OrgAuthTokenPermission,) + permission_classes = (OrganizationFlagWebHookSigningSecretPermission,) publish_status = { "GET": ApiPublishStatus.PRIVATE, "POST": ApiPublishStatus.PRIVATE, @@ -95,7 +100,7 @@ def post(self, request: Request, organization: Organization) -> Response: @region_silo_endpoint class OrganizationFlagsWebHookSigningSecretEndpoint(OrganizationEndpoint): owner = ApiOwner.REPLAY - permission_classes = (OrgAuthTokenPermission,) + permission_classes = (OrganizationFlagWebHookSigningSecretPermission,) publish_status = {"DELETE": ApiPublishStatus.PRIVATE} def delete( diff --git a/src/sentry/flags/providers.py b/src/sentry/flags/providers.py index 8232a6cf00be58..6b7e8077df4c36 100644 --- a/src/sentry/flags/providers.py +++ b/src/sentry/flags/providers.py @@ -1,10 +1,12 @@ import datetime import hashlib import hmac -from typing import Any, TypedDict +from collections.abc import Callable, Iterator +from typing import Any, Protocol, TypedDict, TypeVar from django.http.request import HttpHeaders from rest_framework import serializers +from rest_framework.exceptions import ValidationError from sentry.flags.models import ( ACTION_MAP, @@ -32,6 +34,8 @@ def write(rows: list["FlagAuditLogRow"]) -> None: the underlying systems. """ +T = TypeVar("T", contravariant=True) + class FlagAuditLogRow(TypedDict): """A complete flag audit log row instance.""" @@ -45,6 +49,16 @@ class FlagAuditLogRow(TypedDict): tags: dict[str, Any] +class ProviderProtocol(Protocol[T]): + organization_id: int + provider_name: str + signature: str | None + + def __init__(self, organization_id: int, signature: str | None) -> None: ... + def handle(self, message: T) -> list[FlagAuditLogRow]: ... + def validate(self, message_bytes: bytes) -> bool: ... + + class DeserializationError(Exception): """The request body could not be deserialized.""" @@ -58,29 +72,18 @@ class InvalidProvider(Exception): ... 
-def handle_provider_event( - provider: str, - request_data: dict[str, Any], - organization_id: int, -) -> list[FlagAuditLogRow]: - match provider: +def get_provider( + organization_id: int, provider_name: str, headers: HttpHeaders +) -> ProviderProtocol[dict[str, Any]] | None: + match provider_name: case "launchdarkly": - return handle_launchdarkly_event(request_data, organization_id) + return LaunchDarklyProvider(organization_id, signature=headers.get("X-LD-Signature")) + case "generic": + return GenericProvider(organization_id, signature=headers.get("X-Sentry-Signature")) + case "unleash": + return UnleashProvider(organization_id, signature=headers.get("Authorization")) case _: - raise InvalidProvider(provider) - - -def validate_provider_event( - provider: str, - request_data: bytes, - request_headers: HttpHeaders, - organization_id: int, -) -> bool: - match provider: - case "launchdarkly": - return validate_launchdarkly_event(request_data, request_headers, organization_id) - case _: - raise InvalidProvider(provider) + return None """LaunchDarkly provider.""" @@ -115,7 +118,49 @@ class LaunchDarklyItemSerializer(serializers.Serializer): } -def handle_launchdarkly_actions(action: str) -> int: +class LaunchDarklyProvider: + provider_name = "launchdarkly" + + def __init__(self, organization_id: int, signature: str | None) -> None: + self.organization_id = organization_id + self.signature = signature + + def handle(self, message: dict[str, Any]) -> list[FlagAuditLogRow]: + serializer = LaunchDarklyItemSerializer(data=message) + if not serializer.is_valid(): + raise DeserializationError(serializer.errors) + + result = serializer.validated_data + + access = result["accesses"][0] + if access["action"] not in SUPPORTED_LAUNCHDARKLY_ACTIONS: + return [] + + return [ + { + "action": _handle_launchdarkly_actions(access["action"]), + "created_at": datetime.datetime.fromtimestamp( + result["date"] / 1000.0, datetime.UTC + ), + "created_by": result["member"]["email"], + "created_by_type": CREATED_BY_TYPE_MAP["email"], + "flag": result["name"], + "organization_id": self.organization_id, + "tags": {"description": result["description"]}, + } + ] + + def validate(self, message_bytes: bytes) -> bool: + validator = PayloadSignatureValidator( + self.organization_id, + self.provider_name, + message_bytes, + self.signature, + ) + return validator.validate() + + +def _handle_launchdarkly_actions(action: str) -> int: if action == "createFlag" or action == "cloneFlag": return ACTION_MAP["created"] if action == "deleteFlag": @@ -124,60 +169,188 @@ def handle_launchdarkly_actions(action: str) -> int: return ACTION_MAP["updated"] -def handle_launchdarkly_event( - request_data: dict[str, Any], organization_id: int -) -> list[FlagAuditLogRow]: - serializer = LaunchDarklyItemSerializer(data=request_data) - if not serializer.is_valid(): - raise DeserializationError(serializer.errors) - - result = serializer.validated_data - - access = result["accesses"][0] - if access["action"] not in SUPPORTED_LAUNCHDARKLY_ACTIONS: - return [] - - return [ - { - "action": handle_launchdarkly_actions(access["action"]), - "created_at": datetime.datetime.fromtimestamp(result["date"] / 1000.0, datetime.UTC), - "created_by": result["member"]["email"], - "created_by_type": CREATED_BY_TYPE_MAP["email"], - "flag": result["name"], - "organization_id": organization_id, - "tags": {"description": result["description"]}, - } - ] - - -def validate_launchdarkly_event( - request_data: bytes, - request_headers: HttpHeaders, - organization_id: int, -) 
-> bool: - """Return "true" if the launchdarkly payload is valid.""" - signature = request_headers.get("X-LD-Signature") - if signature is None: - return False +"""Generic provider. - models = FlagWebHookSigningSecretModel.objects.filter( - organization_id=organization_id, - provider="launchdarkly", - ).all() - for model in models: - if hmac_sha256_hex_digest(model.secret, request_data) == signature: - return True - return False +The generic provider represents a Sentry-defined generic web hook +interface that anyone can integrate with. +""" -def hmac_sha256_hex_digest(key: str, message: bytes): - return hmac.new(key.encode(), message, hashlib.sha256).hexdigest() +class GenericItemCreatedBySerializer(serializers.Serializer): + id = serializers.CharField(required=True, max_length=100) + type = serializers.ChoiceField(choices=(("email", 0), ("id", 1), ("name", 2)), required=True) -"""Internal flag-pole provider. +class GenericItemSerializer(serializers.Serializer): + action = serializers.ChoiceField( + choices=(("created", 0), ("updated", 1), ("deleted", 2)), required=True + ) + change_id = serializers.IntegerField(required=True) + created_at = serializers.DateTimeField(required=True) + created_by = GenericItemCreatedBySerializer(required=True) + flag = serializers.CharField(required=True, max_length=100) + + +class GenericMetaSerializer(serializers.Serializer): + version = serializers.IntegerField(required=True) + + +class GenericRequestSerializer(serializers.Serializer): + data = GenericItemSerializer(many=True, required=True) # type: ignore[assignment] + meta = GenericMetaSerializer(required=True) + + +class GenericProvider: + provider_name = "generic" + + def __init__(self, organization_id: int, signature: str | None) -> None: + self.organization_id = organization_id + self.signature = signature + + def handle(self, message: dict[str, Any]) -> list[FlagAuditLogRow]: + serializer = GenericRequestSerializer(data=message) + if not serializer.is_valid(): + raise DeserializationError(serializer.errors) + + seen = set() + result: list[FlagAuditLogRow] = [] + for item in serializer.validated_data["data"]: + if item["change_id"] not in seen: + seen.add(item["change_id"]) + result.append( + { + "action": ACTION_MAP[item["action"]], + "created_at": item["created_at"], + "created_by": item["created_by"]["id"], + "created_by_type": CREATED_BY_TYPE_MAP[item["created_by"]["type"]], + "flag": item["flag"], + "organization_id": self.organization_id, + "tags": {}, + } + ) + + return result + + def validate(self, message_bytes: bytes) -> bool: + validator = PayloadSignatureValidator( + self.organization_id, + self.provider_name, + message_bytes, + self.signature, + ) + return validator.validate() + + +"""Unleash provider.""" + +SUPPORTED_UNLEASH_ACTIONS = { + "feature-created", + "feature-archived", + "feature-revived", + "feature-updated", + "feature-strategy-update", + "feature-strategy-add", + "feature-strategy-remove", + "feature-stale-on", + "feature-stale-off", + "feature-completed", + "feature-environment-enabled", + "feature-environment-disabled", +} -Allows us to skip the HTTP endpoint. -""" + +class UnleashItemSerializer(serializers.Serializer): + # Technically featureName is not required by Unleash, but for all the actions we care about, it should exist. 
+ featureName = serializers.CharField(max_length=100, required=True) + createdAt = serializers.DateTimeField( + required=True, + input_formats=["iso-8601"], + format=None, + default_timezone=datetime.UTC, + ) + createdBy = serializers.CharField(required=True) + createdByUserId = serializers.IntegerField(required=False, allow_null=True) + type = serializers.CharField(allow_blank=True, required=True) + tags = serializers.ListField( + child=serializers.DictField(child=serializers.CharField()), required=False, allow_null=True + ) + project = serializers.CharField(required=False, allow_null=True) + environment = serializers.CharField(required=False, allow_null=True) + + +def _get_user(validated_event: dict[str, Any]) -> tuple[str, int]: + """If the email is not valid, default to the user ID sent by Unleash.""" + created_by = validated_event["createdBy"] + try: + serializers.EmailField().run_validation(created_by) + return created_by, CREATED_BY_TYPE_MAP["email"] + except ValidationError: + pass + + if "createdByUserId" in validated_event: + return validated_event["createdByUserId"], CREATED_BY_TYPE_MAP["id"] + return created_by, CREATED_BY_TYPE_MAP["name"] + + +class UnleashProvider: + provider_name = "unleash" + + def __init__(self, organization_id: int, signature: str | None) -> None: + self.organization_id = organization_id + self.signature = signature + + def handle(self, message: dict[str, Any]) -> list[FlagAuditLogRow]: + serializer = UnleashItemSerializer(data=message) + if not serializer.is_valid(): + raise DeserializationError(serializer.errors) + + result = serializer.validated_data + action = result["type"] + + if action not in SUPPORTED_UNLEASH_ACTIONS: + return [] + + created_by, created_by_type = _get_user(result) + unleash_tags = result.get("tags") or [] + tags = {tag["type"]: tag["value"] for tag in unleash_tags} + + if result.get("project"): + tags["project"] = result.get("project") + + if result.get("environment"): + tags["environment"] = result.get("environment") + + return [ + { + "action": _handle_unleash_actions(action), + "created_at": result["createdAt"], + "created_by": created_by, + "created_by_type": created_by_type, + "flag": result["featureName"], + "organization_id": self.organization_id, + "tags": tags, + } + ] + + def validate(self, message_bytes: bytes) -> bool: + validator = AuthTokenValidator( + self.organization_id, + self.provider_name, + self.signature, + ) + return validator.validate() + + +def _handle_unleash_actions(action: str) -> int: + if action == "feature-created": + return ACTION_MAP["created"] + if action == "feature-archived": + return ACTION_MAP["deleted"] + else: + return ACTION_MAP["updated"] + + +"""Flagpole provider.""" class FlagAuditLogItem(TypedDict): @@ -205,3 +378,81 @@ def handle_flag_pole_event_internal(items: list[FlagAuditLogItem], organization_ for item in items ] ) + + +"""Helpers.""" + + +class AuthTokenValidator: + """Abstract payload validator. + + Similar to the SecretValidator class below, except we do not need + to validate the authorization string. 
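For illustration only (the helper names below are not part of this diff): the two validation styles these providers rely on come down to comparing a stored secret directly against an auth header (Unleash's Authorization header) versus using the secret as an HMAC-SHA256 key over the raw request body and comparing the hex digest against a signature header (LaunchDarkly's X-LD-Signature, the generic provider's X-Sentry-Signature). A minimal self-contained sketch:

import hashlib
import hmac


def validate_token(secrets: list[str], authorization: str | None) -> bool:
    # Token-style check: the auth header must equal one of the stored signing secrets.
    return authorization is not None and any(secret == authorization for secret in secrets)


def validate_signature(secrets: list[str], request_body: bytes, signature: str | None) -> bool:
    # Signature-style check: the signature header must equal the HMAC-SHA256 hex digest
    # of the raw request body, keyed with one of the stored signing secrets.
    if signature is None:
        return False
    return any(
        hmac.new(secret.encode(), request_body, hashlib.sha256).hexdigest() == signature
        for secret in secrets
    )


body = b'{"data": [], "meta": {"version": 1}}'
expected = hmac.new(b"shhh", body, hashlib.sha256).hexdigest()
assert validate_signature(["shhh"], body, expected)
assert validate_token(["shhh"], "shhh")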
+ """ + + def __init__( + self, + organization_id: int, + provider: str, + signature: str | None, + secret_finder: Callable[[int, str], Iterator[str]] | None = None, + ) -> None: + self.organization_id = organization_id + self.provider = provider + self.signature = signature + self.secret_finder = secret_finder or _query_signing_secrets + + def validate(self) -> bool: + if self.signature is None: + return False + + for secret in self.secret_finder(self.organization_id, self.provider): + if secret == self.signature: + return True + + return False + + +class PayloadSignatureValidator: + """Abstract payload validator. + + Allows us to inject dependencies for differing use cases. Specifically + the test suite. + """ + + def __init__( + self, + organization_id: int, + provider: str, + request_body: bytes, + signature: str | None, + secret_finder: Callable[[int, str], Iterator[str]] | None = None, + secret_validator: Callable[[str, bytes], str] | None = None, + ) -> None: + self.organization_id = organization_id + self.provider = provider + self.request_body = request_body + self.signature = signature + self.secret_finder = secret_finder or _query_signing_secrets + self.secret_validator = secret_validator or hmac_sha256_hex_digest + + def validate(self) -> bool: + if self.signature is None: + return False + + for secret in self.secret_finder(self.organization_id, self.provider): + if self.secret_validator(secret, self.request_body) == self.signature: + return True + return False + + +def _query_signing_secrets(organization_id: int, provider: str) -> Iterator[str]: + for model in FlagWebHookSigningSecretModel.objects.filter( + organization_id=organization_id, + provider=provider, + ).all(): + yield model.secret + + +def hmac_sha256_hex_digest(key: str, message: bytes): + return hmac.new(key.encode(), message, hashlib.sha256).hexdigest() diff --git a/src/sentry/grouping/api.py b/src/sentry/grouping/api.py index afebb8edb6fc62..9bd0488908593c 100644 --- a/src/sentry/grouping/api.py +++ b/src/sentry/grouping/api.py @@ -8,9 +8,11 @@ import sentry_sdk from sentry import options +from sentry.db.models.fields.node import NodeData from sentry.grouping.component import ( AppGroupingComponent, BaseGroupingComponent, + ContributingComponent, DefaultGroupingComponent, SystemGroupingComponent, ) @@ -20,6 +22,7 @@ from sentry.grouping.strategies.configurations import CONFIGURATIONS from sentry.grouping.utils import ( expand_title_template, + get_fingerprint_type, hash_from_values, is_default_fingerprint_var, resolve_fingerprint_values, @@ -83,7 +86,7 @@ def get_config_dict(self, project: Project) -> GroupingConfig: "enhancements": self._get_enhancements(project), } - def _get_enhancements(self, project) -> str: + def _get_enhancements(self, project: Project) -> str: project_enhancements = project.get_option("sentry:grouping_enhancements") config_id = self._get_config_id(project) @@ -112,14 +115,14 @@ def _get_enhancements(self, project) -> str: cache.set(cache_key, enhancements) return enhancements - def _get_config_id(self, project): + def _get_config_id(self, project: Project) -> str: raise NotImplementedError class ProjectGroupingConfigLoader(GroupingConfigLoader): option_name: str # Set in subclasses - def _get_config_id(self, project): + def _get_config_id(self, project: Project) -> str: return project.get_option( self.option_name, validate=lambda x: x in CONFIGURATIONS, @@ -145,29 +148,29 @@ class BackgroundGroupingConfigLoader(GroupingConfigLoader): cache_prefix = "background-grouping-enhancements:" - 
def _get_config_id(self, project): + def _get_config_id(self, _project: Project) -> str: return options.get("store.background-grouping-config-id") @sentry_sdk.tracing.trace -def get_grouping_config_dict_for_project(project) -> GroupingConfig: +def get_grouping_config_dict_for_project(project: Project) -> GroupingConfig: """Fetches all the information necessary for grouping from the project settings. The return value of this is persisted with the event on ingestion so that the grouping algorithm can be re-run later. This is called early on in normalization so that everything that is needed - to group the project is pulled into the event. + to group the event is pulled into the event data. """ loader = PrimaryGroupingConfigLoader() return loader.get_config_dict(project) -def get_grouping_config_dict_for_event_data(data, project) -> GroupingConfig: +def get_grouping_config_dict_for_event_data(data: NodeData, project: Project) -> GroupingConfig: """Returns the grouping config for an event dictionary.""" return data.get("grouping_config") or get_grouping_config_dict_for_project(project) -def get_default_enhancements(config_id=None) -> str: +def get_default_enhancements(config_id: str | None = None) -> str: base: str | None = DEFAULT_GROUPING_ENHANCEMENTS_BASE if config_id is not None: base = CONFIGURATIONS[config_id].enhancements_base @@ -191,7 +194,7 @@ def get_projects_default_fingerprinting_bases( return bases -def get_default_grouping_config_dict(config_id=None) -> GroupingConfig: +def get_default_grouping_config_dict(config_id: str | None = None) -> GroupingConfig: """Returns the default grouping config.""" if config_id is None: from sentry.projectoptions.defaults import DEFAULT_GROUPING_CONFIG @@ -200,17 +203,16 @@ def get_default_grouping_config_dict(config_id=None) -> GroupingConfig: return {"id": config_id, "enhancements": get_default_enhancements(config_id)} -def load_grouping_config(config_dict=None) -> StrategyConfiguration: +def load_grouping_config(config_dict: GroupingConfig | None = None) -> StrategyConfiguration: """Loads the given grouping config.""" if config_dict is None: config_dict = get_default_grouping_config_dict() elif "id" not in config_dict: raise ValueError("Malformed configuration dictionary") - config_dict = dict(config_dict) - config_id = config_dict.pop("id") + config_id = config_dict["id"] if config_id not in CONFIGURATIONS: raise GroupingConfigNotFound(config_id) - return CONFIGURATIONS[config_id](**config_dict) + return CONFIGURATIONS[config_id](enhancements=config_dict["enhancements"]) def load_default_grouping_config() -> StrategyConfiguration: @@ -249,9 +251,7 @@ def get_fingerprinting_config_for_project( def apply_server_fingerprinting( - event: MutableMapping[str, Any], - fingerprinting_config: FingerprintingRules, - allow_custom_title: bool = True, + event: MutableMapping[str, Any], fingerprinting_config: FingerprintingRules ) -> None: fingerprint_info = {} @@ -268,7 +268,7 @@ def apply_server_fingerprinting( # A custom title attribute is stored in the event to override the # default title. 
- if "title" in attributes and allow_custom_title: + if "title" in attributes: event["title"] = expand_title_template(attributes["title"], event) event["fingerprint"] = new_fingerprint @@ -280,23 +280,35 @@ def apply_server_fingerprinting( event["_fingerprint_info"] = fingerprint_info -def _get_component_trees_for_variants( +def _get_variants_from_strategies( event: Event, context: GroupingContext -) -> dict[str, AppGroupingComponent | SystemGroupingComponent | DefaultGroupingComponent]: +) -> dict[str, ComponentVariant]: winning_strategy: str | None = None precedence_hint: str | None = None - all_strategies_components_by_variant: dict[str, list[BaseGroupingComponent]] = {} + all_strategies_components_by_variant: dict[str, list[BaseGroupingComponent[Any]]] = {} + winning_strategy_components_by_variant = {} + # `iter_strategies` presents strategies in priority order, which allows us to go with the first + # one which produces a result. (See `src/sentry/grouping/strategies/configurations.py` for the + # strategies used by each config.) for strategy in context.config.iter_strategies(): - # Defined in src/sentry/grouping/strategies/base.py current_strategy_components_by_variant = strategy.get_grouping_components( event, context=context ) for variant_name, component in current_strategy_components_by_variant.items(): all_strategies_components_by_variant.setdefault(variant_name, []).append(component) - if winning_strategy is None: - if component.contributes: + if component.contributes: + if winning_strategy is None: + # If we haven't yet found a winner.. now we have! + # + # The value of `current_strategy_components_by_variant` will change with each + # strategy, so grab a separate reference to the winning ones so we don't lose + # track of them + # + # Also, create a hint we can add to components from other strategies indicating + # that this one took precedence + winning_strategy_components_by_variant = current_strategy_components_by_variant winning_strategy = strategy.name variant_descriptor = "/".join( sorted( @@ -313,10 +325,13 @@ def _get_component_trees_for_variants( ), "" if strategy.name.endswith("s") else "s", ) - elif component.contributes and winning_strategy != strategy.name: - component.update(contributes=False, hint=precedence_hint) + # On the other hand, if another strategy before this one was already the winner, we + # don't want any of this strategy's components to contribute to grouping + elif strategy.name != winning_strategy: + component.update(contributes=False, hint=precedence_hint) + + variants = {} - component_trees_by_variant = {} for variant_name, components in all_strategies_components_by_variant.items(): component_class_by_variant = { "app": AppGroupingComponent, @@ -324,11 +339,28 @@ def _get_component_trees_for_variants( "system": SystemGroupingComponent, } root_component = component_class_by_variant[variant_name](values=components) + + # The root component will pull its `contributes` value from the components it wraps - if + # none of them contributes, it will also be marked as non-contributing. But those components + # might not have the same reasons for not contributing (`hint` values), so it can't pull + # that them - it's gotta be set here. 
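A minimal sketch (plain dicts rather than the real component classes) of the rule the comment above describes: a root component contributes only if at least one wrapped component does, and when none do it carries the precedence hint itself, since the wrapped components may each have different hints:

def summarize_root(children: list[dict], precedence_hint: str | None) -> dict:
    # The wrapper contributes if any wrapped component does; when none do, attach the
    # precedence hint at the wrapper level.
    contributes = any(child["contributes"] for child in children)
    return {
        "contributes": contributes,
        "hint": None if contributes else precedence_hint,
        "values": children,
    }


assert summarize_root([{"contributes": False}, {"contributes": True}], "message takes precedence")["contributes"]
assert summarize_root([{"contributes": False}], "message takes precedence")["hint"] == "message takes precedence"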
if not root_component.contributes and precedence_hint: root_component.update(hint=precedence_hint) - component_trees_by_variant[variant_name] = root_component - return component_trees_by_variant + winning_strategy_component = winning_strategy_components_by_variant.get(variant_name) + contributing_component = ( + winning_strategy_component + if winning_strategy_component and winning_strategy_component.contributes + else None + ) + + variants[variant_name] = ComponentVariant( + component=root_component, + contributing_component=contributing_component, + strategy_config=context.config, + ) + + return variants # This is called by the Event model in get_grouping_variants() @@ -336,84 +368,97 @@ def get_grouping_variants_for_event( event: Event, config: StrategyConfiguration | None = None ) -> dict[str, BaseVariant]: """Returns a dict of all grouping variants for this event.""" - # If a checksum is set the only variant that comes back from this - # event is the checksum variant. + # If a checksum is set the only variant that comes back from this event is the checksum variant. # # TODO: Is there a reason we don't treat a checksum like a custom fingerprint, and run the other # strategies but mark them as non-contributing, with explanations why? - # - # TODO: In the case where we have to hash the checksum to get a value in the right format, we - # store the raw value as well (provided it's not so long that it will overflow the DB field). - # Even when we do this, though, we don't set the raw value as non-cotributing, and we don't add - # an "ignored because xyz" hint on the variant, which we should. checksum = event.data.get("checksum") if checksum: if HASH_RE.match(checksum): return {"checksum": ChecksumVariant(checksum)} + else: + return { + "hashed_checksum": HashedChecksumVariant(hash_from_values(checksum), checksum), + } - variants: dict[str, BaseVariant] = { - "hashed_checksum": HashedChecksumVariant(hash_from_values(checksum), checksum), - } + # Otherwise we go to the various forms of grouping based on fingerprints and/or event data + # (stacktrace, message, etc.) + raw_fingerprint = event.data.get("fingerprint") or ["{{ default }}"] + fingerprint_info = event.data.get("_fingerprint_info", {}) + fingerprint_type = get_fingerprint_type(raw_fingerprint) + resolved_fingerprint = ( + raw_fingerprint + if fingerprint_type == "default" + else resolve_fingerprint_values(raw_fingerprint, event.data) + ) - # The legacy code path also supported arbitrary values here but - # it will blow up if it results in more than 32 bytes of data - # as this cannot be inserted into the database. (See GroupHash.hash) - if len(checksum) <= 32: - variants["checksum"] = ChecksumVariant(checksum) + # Run all of the event-data-based grouping strategies. Any which apply will create grouping + # components, which will then be grouped into variants by variant type (system, app, default). + context = GroupingContext(config or load_default_grouping_config()) + strategy_component_variants: dict[str, ComponentVariant] = _get_variants_from_strategies( + event, context + ) - return variants + # Create a separate container for these for now to preserve the typing of + # `strategy_component_variants` + additional_variants: dict[str, BaseVariant] = {} - # Otherwise we go to the various forms of fingerprint handling. If the event carries - # a materialized fingerprint info from server side fingerprinting we forward it to the - # variants which can export additional information about them. 
- fingerprint = event.data.get("fingerprint") or ["{{ default }}"] - fingerprint_info = event.data.get("_fingerprint_info", {}) - defaults_referenced = sum(1 if is_default_fingerprint_var(d) else 0 for d in fingerprint) - - if config is None: - config = load_default_grouping_config() - context = GroupingContext(config) - - # At this point we need to calculate the default event values. If the - # fingerprint is salted we will wrap it. - component_trees_by_variant = _get_component_trees_for_variants(event, context) - - # If no defaults are referenced we produce a single completely custom - # fingerprint and mark all other variants as non-contributing - if defaults_referenced == 0: - variants = {} - for variant_name, root_component in component_trees_by_variant.items(): - root_component.update( - contributes=False, - hint="custom fingerprint takes precedence", - ) - variants[variant_name] = ComponentVariant(root_component, context.config) + # If the fingerprint is the default fingerprint, we can use the variants as is. If it's custom, + # we need to create an additional fingerprint variant and mark the existing variants as + # non-contributing. And if it's hybrid, we'll replace the existing variants with "salted" + # versions which include the fingerprint. + if fingerprint_type == "custom": + for variant in strategy_component_variants.values(): + variant.component.update(contributes=False, hint="custom fingerprint takes precedence") - fingerprint = resolve_fingerprint_values(fingerprint, event.data) if fingerprint_info.get("matched_rule", {}).get("is_builtin") is True: - variants["built_in_fingerprint"] = BuiltInFingerprintVariant( - fingerprint, fingerprint_info + additional_variants["built_in_fingerprint"] = BuiltInFingerprintVariant( + resolved_fingerprint, fingerprint_info ) else: - variants["custom_fingerprint"] = CustomFingerprintVariant(fingerprint, fingerprint_info) - - # If only the default is referenced, we can use the variants as is - elif defaults_referenced == 1 and len(fingerprint) == 1: - variants = {} - for variant_name, root_component in component_trees_by_variant.items(): - variants[variant_name] = ComponentVariant(root_component, context.config) - - # Otherwise we need to "salt" our variants with the custom fingerprint value(s) - else: - variants = {} - fingerprint = resolve_fingerprint_values(fingerprint, event.data) - for variant_name, root_component in component_trees_by_variant.items(): - variants[variant_name] = SaltedComponentVariant( - fingerprint, root_component, context.config, fingerprint_info + additional_variants["custom_fingerprint"] = CustomFingerprintVariant( + resolved_fingerprint, fingerprint_info ) + elif fingerprint_type == "hybrid": + for variant_name, variant in strategy_component_variants.items(): + # Since we're reusing the variant names, when all of the variants are combined, these + # salted versions will replace the unsalted versions + additional_variants[variant_name] = SaltedComponentVariant.from_component_variant( + variant, resolved_fingerprint, fingerprint_info + ) + + final_variants = { + **strategy_component_variants, + # Add these in second, so the salted versions of any variants replace the unsalted versions + **additional_variants, + } # Ensure we have a fallback hash if nothing else works out - if not any(x.contributes for x in variants.values()): - variants["fallback"] = FallbackVariant() + if not any(x.contributes for x in final_variants.values()): + final_variants["fallback"] = FallbackVariant() - return variants + return 
final_variants + + +def get_contributing_variant_and_component( + variants: dict[str, BaseVariant] +) -> tuple[BaseVariant, ContributingComponent | None]: + if len(variants) == 1: + contributing_variant = list(variants.values())[0] + else: + contributing_variant = ( + variants["app"] + # TODO: We won't need this 'if' once we stop returning both app and system contributing + # variants + if "app" in variants and variants["app"].contributes + # Other than in the broken app/system case, there should only ever be a single + # contributing variant + else [variant for variant in variants.values() if variant.contributes][0] + ) + contributing_component = ( + contributing_variant.contributing_component + if hasattr(contributing_variant, "contributing_component") + else None + ) + + return (contributing_variant, contributing_component) diff --git a/src/sentry/grouping/component.py b/src/sentry/grouping/component.py index 6a5fe7b28e8092..cfca678fe3a0e3 100644 --- a/src/sentry/grouping/component.py +++ b/src/sentry/grouping/component.py @@ -31,8 +31,13 @@ def _calculate_contributes[ValuesType](values: Sequence[ValuesType]) -> bool: class BaseGroupingComponent[ValuesType: str | int | BaseGroupingComponent[Any]](ABC): - """A grouping component is a recursive structure that is flattened - into components to make a hash for grouping purposes. + """ + A grouping component is a node in a tree describing the event data (exceptions, stacktraces, + messages, etc.) which can contribute to grouping. Each node's children, stored in the `values` + attribute, are either other grouping components or primitives representing the actual data. + + For example, an exception component might have type, value, and stacktrace components as + children, and the type component might have the string "KeyError" as its child. """ hint: str | None = None @@ -44,10 +49,9 @@ def __init__( hint: str | None = None, contributes: bool | None = None, values: Sequence[ValuesType] | None = None, - variant_provider: bool = False, ): - self.variant_provider = variant_provider - + # Use `update` to set attribute values because it ensures `contributes` is set (if + # `contributes` is not provided, `update` will derive it from the `values` argument) self.update( hint=hint, contributes=contributes, @@ -146,8 +150,9 @@ def shallow_copy(self) -> Self: return copy def iter_values(self) -> Generator[str | int]: - """Recursively walks the component and flattens it into a list of - values. + """ + Recursively walks the component tree, gathering literal values from contributing + branches into a flat list. 
""" if self.contributes: for value in self.values: @@ -336,6 +341,17 @@ def __init__( class ThreadsGroupingComponent(BaseGroupingComponent[StacktraceGroupingComponent]): id: str = "threads" + frame_counts: Counter[str] + + def __init__( + self, + values: Sequence[StacktraceGroupingComponent] | None = None, + hint: str | None = None, + contributes: bool | None = None, + frame_counts: Counter[str] | None = None, + ): + super().__init__(hint=hint, contributes=contributes, values=values) + self.frame_counts = frame_counts or Counter() class CSPGroupingComponent( @@ -404,3 +420,17 @@ class SystemGroupingComponent( ] ): id: str = "system" + + +ContributingComponent = ( + ChainedExceptionGroupingComponent + | ExceptionGroupingComponent + | StacktraceGroupingComponent + | ThreadsGroupingComponent + | CSPGroupingComponent + | ExpectCTGroupingComponent + | ExpectStapleGroupingComponent + | HPKPGroupingComponent + | MessageGroupingComponent + | TemplateGroupingComponent +) diff --git a/src/sentry/grouping/enhancer/__init__.py b/src/sentry/grouping/enhancer/__init__.py index 89853ac57c0253..1966ab111f6f66 100644 --- a/src/sentry/grouping/enhancer/__init__.py +++ b/src/sentry/grouping/enhancer/__init__.py @@ -173,7 +173,7 @@ def assemble_stacktrace_component( frames: list[dict[str, Any]], platform: str | None, exception_data: dict[str, Any] | None = None, - ) -> tuple[StacktraceGroupingComponent, bool]: + ) -> StacktraceGroupingComponent: """ This assembles a `stacktrace` grouping component out of the given `frame` components and source frames. @@ -205,7 +205,7 @@ def assemble_stacktrace_component( frame_counts=frame_counts, ) - return stacktrace_component, rust_results.invert_stacktrace + return stacktrace_component def as_dict(self, with_rules=False): rv = { @@ -265,7 +265,7 @@ def loads(cls, data) -> Enhancements: @classmethod @sentry_sdk.tracing.trace - def from_config_string(self, s, bases=None, id=None) -> Enhancements: + def from_config_string(cls, s, bases=None, id=None) -> Enhancements: rust_enhancements = parse_rust_enhancements("config_string", s) rules = parse_enhancements(s) diff --git a/src/sentry/grouping/enhancer/actions.py b/src/sentry/grouping/enhancer/actions.py index 10911971a0b23f..988a514b2fd576 100644 --- a/src/sentry/grouping/enhancer/actions.py +++ b/src/sentry/grouping/enhancer/actions.py @@ -56,8 +56,8 @@ def is_updater(self) -> bool: def _from_config_structure(cls, val, version: int): if isinstance(val, list): return VarAction(val[0], val[1]) - flag, range = REVERSE_ACTION_FLAGS[val >> ACTION_BITSIZE] - return FlagAction(ACTIONS[val & 0xF], flag, range) + flag, range_direction = REVERSE_ACTION_FLAGS[val >> ACTION_BITSIZE] + return FlagAction(ACTIONS[val & 0xF], flag, range_direction) class FlagAction(EnhancementAction): diff --git a/src/sentry/grouping/enhancer/matchers.py b/src/sentry/grouping/enhancer/matchers.py index a86b9afa2f09a0..8a411da381fe96 100644 --- a/src/sentry/grouping/enhancer/matchers.py +++ b/src/sentry/grouping/enhancer/matchers.py @@ -202,7 +202,8 @@ def _to_config_structure(self, version): if self.key == "family": arg = "".join(_f for _f in [FAMILIES.get(x) for x in self.pattern.split(",")] if _f) elif self.key == "app": - arg = {True: "1", False: "0"}.get(bool_from_string(self.pattern), "") + boolified_pattern = bool_from_string(self.pattern) + arg = "1" if boolified_pattern is True else "0" if boolified_pattern is False else "" else: arg = self.pattern return ("!" 
if self.negated else "") + MATCH_KEYS[self.key] + arg diff --git a/src/sentry/grouping/fingerprinting/__init__.py b/src/sentry/grouping/fingerprinting/__init__.py index 5bf069ee7da605..003083263266b8 100644 --- a/src/sentry/grouping/fingerprinting/__init__.py +++ b/src/sentry/grouping/fingerprinting/__init__.py @@ -240,6 +240,9 @@ def _get_release(self) -> list[_ReleaseInfo]: return self._release def get_values(self, match_type: str) -> list[dict[str, Any]]: + """ + Pull values from all the spots in the event appropriate to the given match type. + """ return getattr(self, "_get_" + match_type)() @@ -375,7 +378,12 @@ def _from_config_structure( class FingerprintMatcher: - def __init__(self, key: str, pattern: str, negated: bool = False) -> None: + def __init__( + self, + key: str, # The event attribute on which to match + pattern: str, # The value to match (or to not match, depending on `negated`) + negated: bool = False, # If True, match when `event[key]` does NOT equal `pattern` + ) -> None: if key.startswith("tags."): self.key = key else: @@ -422,7 +430,7 @@ def _positive_path_match(self, value: str | None) -> bool: return False def _positive_match(self, values: dict[str, Any]) -> bool: - # path is special in that it tests against two values (abs_path and path) + # `path` is special in that it tests against two values (`abs_path` and `filename`) if self.key == "path": value = values.get("abs_path") if self._positive_path_match(value): @@ -433,7 +441,7 @@ def _positive_match(self, values: dict[str, Any]) -> bool: return True return False - # message tests against value as well as this is what users expect + # message tests against exception value also, as this is what users expect if self.key == "message": for key in ("message", "value"): value = values.get(key) @@ -444,20 +452,13 @@ def _positive_match(self, values: dict[str, Any]) -> bool: value = values.get(self.key) if value is None: return False - elif self.key == "package": + elif self.key in ["package", "release"]: if self._positive_path_match(value): return True - elif self.key == "family": - flags = self.pattern.split(",") - if "all" in flags or value in flags: - return True - elif self.key == "sdk": + elif self.key in ["family", "sdk"]: flags = self.pattern.split(",") if "all" in flags or value in flags: return True - elif self.key == "release": - if self._positive_path_match(value): - return True elif self.key == "app": ref_val = bool_from_string(self.pattern) if ref_val is not None and ref_val == value: @@ -583,7 +584,7 @@ def visit_fingerprinting_rules( in_header = True for child in children: if isinstance(child, str): - if in_header and child[:2] == "##": + if in_header and child.startswith("##"): changelog.append(child[2:].rstrip()) else: in_header = False diff --git a/src/sentry/grouping/grouping_info.py b/src/sentry/grouping/grouping_info.py index 5e7ff6e9f695da..92e4d60e6bc8ac 100644 --- a/src/sentry/grouping/grouping_info.py +++ b/src/sentry/grouping/grouping_info.py @@ -1,5 +1,4 @@ import logging -from collections.abc import Mapping from typing import Any from sentry.api.exceptions import ResourceDoesNotExist @@ -89,7 +88,7 @@ def _check_for_mismatched_hashes( def get_grouping_info_from_variants( - variants: Mapping[str, BaseVariant], + variants: dict[str, BaseVariant], ) -> dict[str, dict[str, Any]]: """ Given a dictionary of variant objects, create and return a copy of the dictionary in which each diff --git a/src/sentry/grouping/ingest/grouphash_metadata.py b/src/sentry/grouping/ingest/grouphash_metadata.py index 
0ecbbf0aec307f..94dc3251c99766 100644 --- a/src/sentry/grouping/ingest/grouphash_metadata.py +++ b/src/sentry/grouping/ingest/grouphash_metadata.py @@ -6,6 +6,7 @@ from typing_extensions import TypeIs from sentry.eventstore.models import Event +from sentry.grouping.api import get_contributing_variant_and_component from sentry.grouping.component import ( ChainedExceptionGroupingComponent, CSPGroupingComponent, @@ -135,32 +136,13 @@ def get_hash_basis_and_metadata( metrics_timer_tags: MutableTags, ) -> tuple[HashBasis, HashingMetadata]: hashing_metadata: HashingMetadata = {} - - # TODO: This (and `contributing_variant` below) are typed as `Any` so that we don't have to cast - # them to whatever specific subtypes of `BaseVariant` and `GroupingComponent` (respectively) - # each of the helper calls below requires. Casting once, to a type retrieved from a look-up, - # doesn't work, but maybe there's a better way? - contributing_variant: Any = ( - variants["app"] - # TODO: We won't need this 'if' once we stop returning both app and system contributing - # variants - if "app" in variants and variants["app"].contributes - else ( - variants["hashed_checksum"] - # TODO: We won't need this 'if' once we stop returning both hashed and non-hashed - # checksum contributing variants - if "hashed_checksum" in variants - # Other than in the broken app/system and hashed/raw checksum cases, there should only - # ever be a single contributing variant - else [variant for variant in variants.values() if variant.contributes][0] - ) - ) - contributing_component: Any = ( - # There should only ever be a single contributing component here at the top level - [value for value in contributing_variant.component.values if value.contributes][0] - if hasattr(contributing_variant, "component") - else None - ) + # TODO: These are typed as `Any` so that we don't have to cast them to whatever specific + # subtypes of `BaseVariant` and `GroupingComponent` (respectively) each of the helper calls + # below requires. Casting once, to a type retrieved from a look-up, doesn't work, but maybe + # there's a better way? + contributors = get_contributing_variant_and_component(variants) + contributing_variant: Any = contributors[0] + contributing_component: Any = contributors[1] # Hybrid fingerprinting adds 'modified' to the beginning of the description of whatever method # was used before the extra fingerprint was added. We classify events with hybrid fingerprints @@ -278,7 +260,7 @@ def _get_stacktrace_hashing_metadata( ), ) -> StacktraceHashingMetadata: return { - "stacktrace_type": "in_app" if "in-app" in contributing_variant.description else "system", + "stacktrace_type": "in_app" if contributing_variant.variant_name == "app" else "system", "stacktrace_location": ( "exception" if "exception" in contributing_variant.description diff --git a/src/sentry/grouping/ingest/hashing.py b/src/sentry/grouping/ingest/hashing.py index 93b0c6e4865f61..0aa5d76f90b2f1 100644 --- a/src/sentry/grouping/ingest/hashing.py +++ b/src/sentry/grouping/ingest/hashing.py @@ -67,9 +67,7 @@ def _calculate_event_grouping( # look at `grouping_config` to pick the right parameters. 
event.data["fingerprint"] = event.data.data.get("fingerprint") or ["{{ default }}"] apply_server_fingerprinting( - event.data.data, - get_fingerprinting_config_for_project(project), - allow_custom_title=True, + event.data.data, get_fingerprinting_config_for_project(project) ) with metrics.timer("event_manager.event.get_hashes", tags=metric_tags): diff --git a/src/sentry/grouping/ingest/seer.py b/src/sentry/grouping/ingest/seer.py index 234759e73ddd8a..2335348ef26e22 100644 --- a/src/sentry/grouping/ingest/seer.py +++ b/src/sentry/grouping/ingest/seer.py @@ -1,5 +1,4 @@ import logging -from collections.abc import Mapping from dataclasses import asdict from typing import Any @@ -17,11 +16,14 @@ from sentry.seer.similarity.similar_issues import get_similarity_data_from_seer from sentry.seer.similarity.types import SimilarIssuesEmbeddingsRequest from sentry.seer.similarity.utils import ( + SEER_INELIGIBLE_EVENT_PLATFORMS, ReferrerOptions, - event_content_is_seer_eligible, + event_content_has_stacktrace, filter_null_from_string, get_stacktrace_string_with_metrics, + has_too_many_contributing_frames, killswitch_enabled, + record_did_call_seer_metric, ) from sentry.utils import metrics from sentry.utils.circuit_breaker2 import CircuitBreaker @@ -30,7 +32,7 @@ logger = logging.getLogger("sentry.events.grouping") -def should_call_seer_for_grouping(event: Event, variants: Mapping[str, BaseVariant]) -> bool: +def should_call_seer_for_grouping(event: Event, variants: dict[str, BaseVariant]) -> bool: """ Use event content, feature flags, rate limits, killswitches, seer health, etc. to determine whether a call to Seer should be made. @@ -39,14 +41,15 @@ def should_call_seer_for_grouping(event: Event, variants: Mapping[str, BaseVaria project = event.project # Check both of these before returning based on either so we can gather metrics on their results - content_is_eligible = event_content_is_seer_eligible(event) + content_is_eligible = _event_content_is_seer_eligible(event) seer_enabled_for_project = _project_has_similarity_grouping_enabled(project) if not (content_is_eligible and seer_enabled_for_project): return False if ( _has_customized_fingerprint(event, variants) - or killswitch_enabled(project.id, event) + or _has_too_many_contributing_frames(event, variants) + or killswitch_enabled(project.id, ReferrerOptions.INGEST, event) or _circuit_breaker_broken(event, project) # The rate limit check has to be last (see below) but rate-limiting aside, call this after other checks # because it calculates the stacktrace string, which we only want to spend the time to do if we already @@ -66,6 +69,42 @@ def should_call_seer_for_grouping(event: Event, variants: Mapping[str, BaseVaria return True +def _event_content_is_seer_eligible(event: Event) -> bool: + """ + Determine if an event's contents makes it fit for using with Seer's similar issues model. 
+ """ + if not event_content_has_stacktrace(event): + metrics.incr( + "grouping.similarity.event_content_seer_eligible", + sample_rate=options.get("seer.similarity.metrics_sample_rate"), + tags={"eligible": False, "blocker": "no-stacktrace"}, + ) + return False + + if event.platform in SEER_INELIGIBLE_EVENT_PLATFORMS: + metrics.incr( + "grouping.similarity.event_content_seer_eligible", + sample_rate=options.get("seer.similarity.metrics_sample_rate"), + tags={"eligible": False, "blocker": "unsupported-platform"}, + ) + return False + + metrics.incr( + "grouping.similarity.event_content_seer_eligible", + sample_rate=options.get("seer.similarity.metrics_sample_rate"), + tags={"eligible": True, "blocker": "none"}, + ) + return True + + +def _has_too_many_contributing_frames(event: Event, variants: dict[str, BaseVariant]) -> bool: + if has_too_many_contributing_frames(event, variants, ReferrerOptions.INGEST): + record_did_call_seer_metric(call_made=False, blocker="excess-frames") + return True + + return False + + def _project_has_similarity_grouping_enabled(project: Project) -> bool: # TODO: This is a hack to get ingest to turn on for projects as soon as they're backfilled. When # the backfill script completes, we turn on this option, enabling ingest immediately rather than @@ -86,7 +125,7 @@ def _project_has_similarity_grouping_enabled(project: Project) -> bool: # combined with some other value). To the extent to which we're then using this function to decide # whether or not to call Seer, this means that the calculations giving rise to the default part of # the value never involve Seer input. In the long run, we probably want to change that. -def _has_customized_fingerprint(event: Event, variants: Mapping[str, BaseVariant]) -> bool: +def _has_customized_fingerprint(event: Event, variants: dict[str, BaseVariant]) -> bool: fingerprint = event.data.get("fingerprint", []) if "{{ default }}" in fingerprint: @@ -96,22 +135,14 @@ def _has_customized_fingerprint(event: Event, variants: Mapping[str, BaseVariant # Hybrid fingerprinting ({{ default }} + some other value(s)) else: - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": "hybrid-fingerprint"}, - ) + record_did_call_seer_metric(call_made=False, blocker="hybrid-fingerprint") return True # Fully customized fingerprint (from either us or the user) fingerprint_variant = variants.get("custom_fingerprint") or variants.get("built_in_fingerprint") if fingerprint_variant: - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": fingerprint_variant.type}, - ) + record_did_call_seer_metric(call_made=False, blocker=fingerprint_variant.type) return True return False @@ -133,12 +164,7 @@ def _ratelimiting_enabled(event: Event, project: Project) -> bool: if ratelimiter.backend.is_limited("seer:similarity:global-limit", **global_ratelimit): logger_extra["limit_per_sec"] = global_limit_per_sec logger.warning("should_call_seer_for_grouping.global_ratelimit_hit", extra=logger_extra) - - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": "global-rate-limit"}, - ) + record_did_call_seer_metric(call_made=False, blocker="global-rate-limit") return True @@ -147,12 +173,7 @@ def _ratelimiting_enabled(event: Event, project: Project) -> bool: ): 
logger_extra["limit_per_sec"] = project_limit_per_sec logger.warning("should_call_seer_for_grouping.project_ratelimit_hit", extra=logger_extra) - - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": "project-rate-limit"}, - ) + record_did_call_seer_metric(call_made=False, blocker="project-rate-limit") return True @@ -173,29 +194,18 @@ def _circuit_breaker_broken(event: Event, project: Project) -> bool: **breaker_config, }, ) - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": "circuit-breaker"}, - ) + record_did_call_seer_metric(call_made=False, blocker="circuit-breaker") return circuit_broken -def _has_empty_stacktrace_string(event: Event, variants: Mapping[str, BaseVariant]) -> bool: +def _has_empty_stacktrace_string(event: Event, variants: dict[str, BaseVariant]) -> bool: stacktrace_string = get_stacktrace_string_with_metrics( get_grouping_info_from_variants(variants), event.platform, ReferrerOptions.INGEST ) if not stacktrace_string: if stacktrace_string == "": - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={ - "call_made": False, - "blocker": "empty-stacktrace-string", - }, - ) + record_did_call_seer_metric(call_made=False, blocker="empty-stacktrace-string") return True # Store the stacktrace string in the event so we only calculate it once. We need to pop it # later so it isn't stored in the database. @@ -205,7 +215,7 @@ def _has_empty_stacktrace_string(event: Event, variants: Mapping[str, BaseVarian def get_seer_similar_issues( event: Event, - variants: Mapping[str, BaseVariant], + variants: dict[str, BaseVariant], num_neighbors: int = 1, ) -> tuple[dict[str, Any], GroupHash | None]: """ @@ -281,16 +291,12 @@ def get_seer_similar_issues( def maybe_check_seer_for_matching_grouphash( - event: Event, variants: Mapping[str, BaseVariant], all_grouphashes: list[GroupHash] + event: Event, variants: dict[str, BaseVariant], all_grouphashes: list[GroupHash] ) -> GroupHash | None: seer_matched_grouphash = None if should_call_seer_for_grouping(event, variants): - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": True, "blocker": "none"}, - ) + record_did_call_seer_metric(call_made=True, blocker="none") try: # If no matching group is found in Seer, we'll still get back result diff --git a/src/sentry/grouping/parameterization.py b/src/sentry/grouping/parameterization.py index 49309f7d4ca91c..412b254ccc6ac2 100644 --- a/src/sentry/grouping/parameterization.py +++ b/src/sentry/grouping/parameterization.py @@ -20,19 +20,19 @@ @dataclasses.dataclass class ParameterizationRegex: - name: str # name of the pattern also used as group name in combined regex + name: str # name of the pattern (also used as group name in combined regex) raw_pattern: str # regex pattern w/o matching group name lookbehind: str | None = None # positive lookbehind prefix if needed lookahead: str | None = None # positive lookahead postfix if needed counter: int = 0 - # These need to be used with `(?x)` tells the regex compiler to ignore comments + # These need to be used with `(?x)`, to tell the regex compiler to ignore comments # and unescaped whitespace, so we can use newlines and indentation for better legibility. 
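For reference, a tiny standalone example (not Sentry code) of what the `(?x)` flag mentioned above buys: unescaped whitespace and `#` comments inside the pattern are ignored, so a named matching group can be laid out across lines for readability.

import re

uuid_regex = re.compile(
    r"""(?x)
    (?P<uuid>
        [0-9a-f]{8} - [0-9a-f]{4} - [0-9a-f]{4} - [0-9a-f]{4} - [0-9a-f]{12}  # canonical form
    )
    """
)
assert uuid_regex.search("user_id=123e4567-e89b-12d3-a456-426614174000")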
@property def pattern(self) -> str: """ - Returns the regex pattern for with as a named matching group and lookbehind/lookahead if needed. + Returns the regex pattern with a named matching group and lookbehind/lookahead if needed. """ prefix = rf"(?<={self.lookbehind})" if self.lookbehind else "" postfix = rf"(?={self.lookahead})" if self.lookahead else "" @@ -41,7 +41,7 @@ def pattern(self) -> str: @property def compiled_pattern(self) -> re.Pattern[str]: """ - Returns the compiled regex pattern for with as a named matching group and lookbehind/lookahead if needed. + Returns the compiled regex pattern with a named matching group and lookbehind/lookahead if needed. """ if not hasattr(self, "_compiled_pattern"): self._compiled_pattern = re.compile(rf"(?x){self.pattern}") @@ -189,8 +189,8 @@ class ParameterizationCallable: us more flexibility than just using regex. """ - name: str # name of the pattern also used as group name in combined regex - apply: Callable[[str], tuple[str, int]] # function to modifying the input string + name: str # name of the pattern (also used as group name in combined regex) + apply: Callable[[str], tuple[str, int]] # function for modifying the input string counter: int = 0 diff --git a/src/sentry/grouping/strategies/base.py b/src/sentry/grouping/strategies/base.py index 8157f6232d364a..a974391cdc60ba 100644 --- a/src/sentry/grouping/strategies/base.py +++ b/src/sentry/grouping/strategies/base.py @@ -1,6 +1,6 @@ import inspect from collections.abc import Callable, Iterator, Sequence -from typing import Any, Generic, Protocol, TypeVar, overload +from typing import Any, Generic, Protocol, Self, TypeVar, overload from sentry import projectoptions from sentry.eventstore.models import Event @@ -62,7 +62,7 @@ def strategy( """ Registers a strategy - :param ids: The strategy/delegate IDs with which to register + :param ids: The strategy/delegate IDs to register :param interface: Which interface type should be dispatched to this strategy :param score: Determines precedence of strategies. For example exception strategy scores higher than message strategy, so if both interfaces are @@ -90,6 +90,7 @@ def decorator(f: StrategyFunc[ConcreteInterface]) -> Strategy[ConcreteInterface] class GroupingContext: def __init__(self, strategy_config: "StrategyConfiguration"): + # The initial context is essentially the grouping config options self._stack = [strategy_config.initial_context] self.config = strategy_config self.push() @@ -99,12 +100,13 @@ def __setitem__(self, key: str, value: ContextValue) -> None: self._stack[-1][key] = value def __getitem__(self, key: str) -> ContextValue: + # Walk down the stack from the top and return the first instance of `key` found for d in reversed(self._stack): if key in d: return d[key] raise KeyError(key) - def __enter__(self) -> "GroupingContext": + def __enter__(self) -> Self: self.push() return self @@ -142,7 +144,7 @@ def get_single_grouping_component( def get_single_grouping_component( self, interface: Interface, *, event: Event, **kwargs: Any - ) -> BaseGroupingComponent: + ) -> FrameGroupingComponent | ExceptionGroupingComponent | StacktraceGroupingComponent: """Invokes a delegate grouping strategy. If no such delegate is configured a fallback grouping component is returned. """ @@ -200,9 +202,9 @@ def __repr__(self) -> str: def _invoke( self, func: Callable[..., ReturnedVariants], *args: Any, **kwargs: Any ) -> ReturnedVariants: - # We forcefully override strategy here. This lets a strategy + # We forcefully override strategy here. 
This lets a strategy # function always access its metadata and directly forward it to - # subcomponents without having to filter out strategy. + # subcomponents. kwargs["strategy"] = self return func(*args, **kwargs) @@ -218,7 +220,7 @@ def variant_processor(self, func: VariantProcessor) -> VariantProcessor: def get_grouping_component( self, event: Event, context: GroupingContext, variant: str | None = None - ) -> None | BaseGroupingComponent | ReturnedVariants: + ) -> None | BaseGroupingComponent[Any] | ReturnedVariants: """Given a specific variant this calculates the grouping component.""" args = [] iface = event.interfaces.get(self.interface_name) @@ -244,54 +246,33 @@ def get_grouping_components(self, event: Event, context: GroupingContext) -> Ret assert isinstance(components_by_variant, dict) final_components_by_variant = {} - has_mandatory_hashes = False - mandatory_contributing_variants_by_hash = {} - optional_contributing_variants = [] - prevent_contribution = None + priority_contributing_variants_by_hash = {} + non_priority_contributing_variants = [] for variant_name, component in components_by_variant.items(): - is_mandatory = variant_name.startswith("!") + is_priority = variant_name.startswith("!") variant_name = variant_name.lstrip("!") - if is_mandatory: - has_mandatory_hashes = True - if component.contributes: - if is_mandatory: - mandatory_contributing_variants_by_hash[component.get_hash()] = variant_name + # Track priority and non-priority contributing hashes separately, so the latter can + # be deduped against the former + if is_priority: + priority_contributing_variants_by_hash[component.get_hash()] = variant_name else: - optional_contributing_variants.append(variant_name) + non_priority_contributing_variants.append(variant_name) final_components_by_variant[variant_name] = component - prevent_contribution = has_mandatory_hashes and not mandatory_contributing_variants_by_hash - - for variant_name in optional_contributing_variants: + # Mark any non-priority duplicates of priority hashes as non-contributing + for variant_name in non_priority_contributing_variants: component = final_components_by_variant[variant_name] - - # In case this variant contributes we need to check two things - # here: if we did not have a system match we need to prevent - # it from contributing. Additionally if it matches the system - # component we also do not want the variant to contribute but - # with a different message. 
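A rough sketch, using plain dicts in place of the real component objects, of the dedup rule this hunk introduces: variant names carrying a leading "!" are treated as priority, and any non-priority variant whose hash matches a priority variant's hash is marked as non-contributing.

def dedupe_variants(components_by_variant: dict[str, dict]) -> dict[str, dict]:
    # Each value is a stand-in component: {"hash": ..., "contributes": ..., "hint": ...}.
    priority_variants_by_hash: dict[str, str] = {}
    non_priority_variants: list[str] = []
    final: dict[str, dict] = {}

    for raw_name, component in components_by_variant.items():
        is_priority = raw_name.startswith("!")
        name = raw_name.lstrip("!")
        if component["contributes"]:
            if is_priority:
                priority_variants_by_hash[component["hash"]] = name
            else:
                non_priority_variants.append(name)
        final[name] = component

    for name in non_priority_variants:
        duplicate_of = priority_variants_by_hash.get(final[name]["hash"])
        if duplicate_of is not None:
            final[name]["contributes"] = False
            final[name]["hint"] = f"ignored because hash matches {duplicate_of} variant"

    return final


result = dedupe_variants(
    {
        "!app": {"hash": "abc123", "contributes": True, "hint": None},
        "system": {"hash": "abc123", "contributes": True, "hint": None},
    }
)
assert result["system"]["contributes"] is False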
- if prevent_contribution: + hash_value = component.get_hash() + duplicate_of = priority_contributing_variants_by_hash.get(hash_value) + if duplicate_of is not None: component.update( contributes=False, - hint="ignored because %s variant is not used" - % ( - list(mandatory_contributing_variants_by_hash.values())[0] - if len(mandatory_contributing_variants_by_hash) == 1 - else "other mandatory" - ), + hint="ignored because hash matches %s variant" % duplicate_of, ) - else: - hash_value = component.get_hash() - duplicate_of = mandatory_contributing_variants_by_hash.get(hash_value) - if duplicate_of is not None: - component.update( - contributes=False, - hint="ignored because hash matches %s variant" % duplicate_of, - ) if self.variant_processor_func is not None: final_components_by_variant = self._invoke( diff --git a/src/sentry/grouping/strategies/legacy.py b/src/sentry/grouping/strategies/legacy.py index d1c302f5f3a375..d1cd5232fef73c 100644 --- a/src/sentry/grouping/strategies/legacy.py +++ b/src/sentry/grouping/strategies/legacy.py @@ -111,9 +111,9 @@ def is_recursion_legacy(frame1: Frame, frame2: Frame) -> bool: def remove_module_outliers_legacy(module: str, platform: str) -> tuple[str, str | None]: """Remove things that augment the module but really should not.""" if platform == "java": - if module[:35] == "sun.reflect.GeneratedMethodAccessor": + if module.startswith("sun.reflect.GeneratedMethodAccessor"): return "sun.reflect.GeneratedMethodAccessor", "removed reflection marker" - if module[:44] == "jdk.internal.reflect.GeneratedMethodAccessor": + if module.startswith("jdk.internal.reflect.GeneratedMethodAccessor"): return "jdk.internal.reflect.GeneratedMethodAccessor", "removed reflection marker" old_module = module module = _java_reflect_enhancer_re.sub(r"\1", module) @@ -447,7 +447,7 @@ def stacktrace_legacy( frames_for_filtering.append(frame.get_raw_data()) prev_frame = frame - stacktrace_component, _ = context.config.enhancements.assemble_stacktrace_component( + stacktrace_component = context.config.enhancements.assemble_stacktrace_component( frame_components, frames_for_filtering, event.platform ) stacktrace_component.update(contributes=contributes, hint=hint) diff --git a/src/sentry/grouping/strategies/newstyle.py b/src/sentry/grouping/strategies/newstyle.py index bd2dc77655d8ae..d32174de0bf638 100644 --- a/src/sentry/grouping/strategies/newstyle.py +++ b/src/sentry/grouping/strategies/newstyle.py @@ -152,7 +152,6 @@ def get_filename_component( new_filename = _java_assist_enhancer_re.sub(r"\1", filename) if new_filename != filename: filename_component.update(values=[new_filename], hint="cleaned javassist parts") - filename = new_filename return filename_component @@ -176,11 +175,11 @@ def get_module_component( elif platform == "java": if "$$Lambda$" in module: module_component.update(contributes=False, hint="ignored java lambda") - if module[:35] == "sun.reflect.GeneratedMethodAccessor": + if module.startswith("sun.reflect.GeneratedMethodAccessor"): module_component.update( values=["sun.reflect.GeneratedMethodAccessor"], hint="removed reflection marker" ) - elif module[:44] == "jdk.internal.reflect.GeneratedMethodAccessor": + elif module.startswith("jdk.internal.reflect.GeneratedMethodAccessor"): module_component.update( values=["jdk.internal.reflect.GeneratedMethodAccessor"], hint="removed reflection marker", @@ -351,8 +350,9 @@ def frame( if context["javascript_fuzzing"] and get_behavior_family_for_platform(platform) == "javascript": func = frame.raw_function or frame.function 
if func: + # Strip leading namespacing, i.e., turn `some.module.path.someFunction` into + # `someFunction` and `someObject.someMethod` into `someMethod` func = func.rsplit(".", 1)[-1] - # special case empty functions not to have a hint if not func: function_component.update(contributes=False) elif func in ( @@ -412,7 +412,7 @@ def stacktrace( return call_with_variants( _single_stacktrace_variant, - ["!system", "app"], + ["!app", "system"], interface, event=event, context=context, @@ -463,7 +463,7 @@ def _single_stacktrace_variant( contributes=False, hint="ignored single non-URL JavaScript frame" ) - stacktrace_component, _ = context.config.enhancements.assemble_stacktrace_component( + stacktrace_component = context.config.enhancements.assemble_stacktrace_component( frame_components, frames_for_filtering, event.platform, @@ -518,15 +518,9 @@ def single_exception( if exception.mechanism: if exception.mechanism.synthetic: - # Ignore synthetic exceptions as they are produced from platform - # specific error codes. - # - # For example there can be crashes with EXC_ACCESS_VIOLATION_* on Windows with - # the same exact stacktrace as a crash with EXC_BAD_ACCESS on macOS. - # - # Do not update type component of system variant, such that regex - # can be continuously modified without unnecessarily creating new - # groups. + # Ignore the error type for synthetic exceptions as it can vary by platform and doesn't + # actually carry any meaning with respect to what went wrong. (Synthetic exceptions + # are dummy exceptions created by the SDK in order to harvest a stacktrace.) type_component.update(contributes=False, hint="ignored because exception is synthetic") system_type_component.update( contributes=False, hint="ignored because exception is synthetic" @@ -549,7 +543,7 @@ def single_exception( ) else: stacktrace_components_by_variant = { - "app": StacktraceGroupingComponent(), + "!app": StacktraceGroupingComponent(), } exception_components_by_variant = {} @@ -615,7 +609,7 @@ def chained_exception( # Get all the exceptions to consider. all_exceptions = interface.exceptions() - # Get the grouping components for all exceptions up front, as we'll need them in a few places and only want to compute them once. + # For each exception, create a dictionary of grouping components by variant name exception_components_by_exception = { id(exception): context.get_grouping_components_by_variant(exception, event=event, **meta) for exception in all_exceptions } @@ -638,12 +632,16 @@ if main_exception_id: event.data["main_exception_id"] = main_exception_id - # Case 1: we have a single exception, use the single exception - # component directly to avoid a level of nesting + # Cases 1 and 2: Either this never was a chained exception (this is our entry point for single + # exceptions, too), or this is a chained exception consisting solely of an exception group and a + # single inner exception. In the former case, all we have is the single exception component, so + # return it. In the latter case, there's no value-add to the wrapper, so discard it and just + # return the component for the inner exception. if len(exceptions) == 1: return exception_components_by_exception[id(exceptions[0])] - # Case 2: produce a component for each chained exception + # Case 3: This is either a chained exception or an exception group containing at least two inner + # exceptions. Either way, we need to wrap our exception components in a chained exception component. 
exception_components_by_variant: dict[str, list[ExceptionGroupingComponent]] = {} for exception in exceptions: @@ -670,7 +668,7 @@ def chained_exception( # See https://github.com/getsentry/rfcs/blob/main/text/0079-exception-groups.md#sentry-issue-grouping def filter_exceptions_for_exception_groups( exceptions: list[SingleException], - exception_components: dict[int, ReturnedVariants], + exception_components: dict[int, dict[str, ExceptionGroupingComponent]], event: Event, ) -> list[SingleException]: # This function only filters exceptions if there are at least two exceptions. @@ -710,8 +708,8 @@ def get_child_exceptions(exception: SingleException) -> list[SingleException]: node = exception_tree.get(exception_id) return node.children if node else [] - # This recursive generator gets the "top-level exceptions", and is used below. - # "Top-level exceptions are those that are the first descendants of the root that are not exception groups. + # This recursive generator gets the "top-level exceptions," and is used below. + # Top-level exceptions are those that are the first descendants of the root that are not exception groups. # For examples, see https://github.com/getsentry/rfcs/blob/main/text/0079-exception-groups.md#sentry-issue-grouping def get_top_level_exceptions( exception: SingleException, @@ -753,8 +751,8 @@ def get_first_path(exception: SingleException) -> Generator[SingleException]: # If there's only one distinct top-level exception in the group, # use it and its first-path children, but throw out the exception group and any copies. # For example, Group<['Da', 'Da', 'Da']> should just be treated as a single 'Da'. - # We'll also set the main_exception_id, which is used in the extract_metadata function - # in src/sentry/eventtypes/error.py - which will ensure the issue is titled by this + # We'll also set `main_exception_id`, which is used in the `extract_metadata` function + # in `src/sentry/eventtypes/error.py`, in order to ensure the issue is titled by this # item rather than the exception group. if len(distinct_top_level_exceptions) == 1: main_exception = distinct_top_level_exceptions[0] @@ -826,7 +824,7 @@ def _filtered_threads( stacktrace, event=event, **meta ).items(): thread_components_by_variant[variant_name] = ThreadsGroupingComponent( - values=[stacktrace_component] + values=[stacktrace_component], frame_counts=stacktrace_component.frame_counts ) return thread_components_by_variant diff --git a/src/sentry/grouping/strategies/utils.py b/src/sentry/grouping/strategies/utils.py index 68d12d6a4101cc..0aace20dbdf108 100644 --- a/src/sentry/grouping/strategies/utils.py +++ b/src/sentry/grouping/strategies/utils.py @@ -12,8 +12,8 @@ def remove_non_stacktrace_variants(variants: ReturnedVariants) -> ReturnedVarian non_contributing_components = [] stacktrace_variants = set() - # In case any of the variants has a contributing stacktrace, we want - # to make all other variants non contributing. + # If at least one variant has a contributing stacktrace, we want to mark all variants without a + # stacktrace as non-contributing. 
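A small illustrative sketch (plain dicts, and the hint text here is made up rather than taken from the real helper) of the behavior described in the comment above:

def drop_non_stacktrace_variants(variants: dict[str, dict]) -> dict[str, dict]:
    # Stand-in components: {"has_contributing_stacktrace": ..., "contributes": ..., "hint": ...}.
    if any(v["has_contributing_stacktrace"] for v in variants.values()):
        for component in variants.values():
            if not component["has_contributing_stacktrace"]:
                component["contributes"] = False
                component["hint"] = "ignored because it contains no contributing stacktrace"
    return variants


variants = {
    "app": {"has_contributing_stacktrace": True, "contributes": True, "hint": None},
    "default": {"has_contributing_stacktrace": False, "contributes": True, "hint": None},
}
assert drop_non_stacktrace_variants(variants)["default"]["contributes"] is False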
for variant_name, component in variants.items(): stacktrace_iter = component.iter_subcomponents( id="stacktrace", recursive=True, only_contributing=True diff --git a/src/sentry/grouping/utils.py b/src/sentry/grouping/utils.py index f94a6fe16399a4..b5952f68a828c0 100644 --- a/src/sentry/grouping/utils.py +++ b/src/sentry/grouping/utils.py @@ -1,32 +1,65 @@ +from __future__ import annotations + import re +from collections.abc import Iterable, Mapping from hashlib import md5 +from re import Match +from typing import TYPE_CHECKING, Any, Literal +from uuid import UUID from django.utils.encoding import force_bytes +from sentry.db.models.fields.node import NodeData from sentry.stacktraces.processing import get_crash_frame_from_event_data from sentry.utils.safe import get_path +if TYPE_CHECKING: + from sentry.grouping.component import ExceptionGroupingComponent + + _fingerprint_var_re = re.compile(r"\{\{\s*(\S+)\s*\}\}") -def parse_fingerprint_var(value): +def parse_fingerprint_var(value: str) -> str | None: match = _fingerprint_var_re.match(value) if match is not None and match.end() == len(value): return match.group(1) + return None -def is_default_fingerprint_var(value): +def is_default_fingerprint_var(value: str) -> bool: return parse_fingerprint_var(value) == "default" -def hash_from_values(values): +def hash_from_values(values: Iterable[str | int | UUID | ExceptionGroupingComponent]) -> str: + """ + Primarily used at the end of the grouping process, to get a final hash value once all of the + variants have been constructed, but also used as a hack to compare exception components (by + stringifying their reprs) when calculating variants for chained exceptions. + """ result = md5() for value in values: result.update(force_bytes(value, errors="replace")) return result.hexdigest() -def bool_from_string(value): +def get_fingerprint_type(fingerprint: list[str]) -> Literal["default", "hybrid", "custom"]: + return ( + "default" + if len(fingerprint) == 1 and is_default_fingerprint_var(fingerprint[0]) + else ( + "hybrid" + if any(is_default_fingerprint_var(entry) for entry in fingerprint) + else "custom" + ) + ) + + +def bool_from_string(value: str) -> bool | None: + """ + Convert various string representations of boolean values ("1", "yes", "true", "0", "no", + "false") into actual booleans. Return `None` for all other inputs.
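+ For example, `bool_from_string("yes")` returns `True`, while `bool_from_string("maybe")` returns `None`.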
+ """ if value: value = value.lower() if value in ("1", "yes", "true"): @@ -34,8 +67,10 @@ def bool_from_string(value): elif value in ("0", "no", "false"): return False + return None + -def get_fingerprint_value(var, data): +def get_fingerprint_value(var: str, data: NodeData | Mapping[str, Any]) -> str | None: if var == "transaction": return data.get("transaction") or "" elif var == "message": @@ -78,15 +113,18 @@ def get_fingerprint_value(var, data): elif var == "logger": return data.get("logger") or "" elif var.startswith("tags."): + # Turn "tags.some_tag" into just "some_tag" tag = var[5:] for t, value in data.get("tags") or (): if t == tag: return value return "" % tag + else: + return None -def resolve_fingerprint_values(values, event_data): - def _get_fingerprint_value(value): +def resolve_fingerprint_values(values: list[str], event_data: NodeData) -> list[str]: + def _get_fingerprint_value(value: str) -> str: var = parse_fingerprint_var(value) if var is None: return value @@ -98,8 +136,8 @@ def _get_fingerprint_value(value): return [_get_fingerprint_value(x) for x in values] -def expand_title_template(template, event_data): - def _handle_match(match): +def expand_title_template(template: str, event_data: Mapping[str, Any]) -> str: + def _handle_match(match: Match[str]) -> str: var = match.group(1) rv = get_fingerprint_value(var, event_data) if rv is not None: diff --git a/src/sentry/grouping/variants.py b/src/sentry/grouping/variants.py index 9be16481967921..f25322e66ade9b 100644 --- a/src/sentry/grouping/variants.py +++ b/src/sentry/grouping/variants.py @@ -1,10 +1,12 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, NotRequired, TypedDict +from collections.abc import Mapping +from typing import TYPE_CHECKING, Any, NotRequired, Self, TypedDict from sentry.grouping.component import ( AppGroupingComponent, + ContributingComponent, DefaultGroupingComponent, SystemGroupingComponent, ) @@ -24,8 +26,11 @@ class FingerprintVariantMetadata(TypedDict): class BaseVariant(ABC): - # This is true if `get_hash` does not return `None`. - contributes = True + variant_name: str | None = None + + @property + def contributes(self) -> bool: + return True @property @abstractmethod @@ -35,18 +40,20 @@ def get_hash(self) -> str | None: return None @property - def description(self): + def description(self) -> str: return self.type - def _get_metadata_as_dict(self): + # This has to return `Mapping` rather than `dict` so that subtypes can override the return value + # with a TypedDict if they choose. See https://github.com/python/mypy/issues/4976. 
+ def _get_metadata_as_dict(self) -> Mapping[str, Any]: return {} - def as_dict(self): + def as_dict(self) -> dict[str, Any]: rv = {"type": self.type, "description": self.description, "hash": self.get_hash()} rv.update(self._get_metadata_as_dict()) return rv - def __repr__(self): + def __repr__(self) -> str: return f"<{self.__class__.__name__} {self.get_hash()!r} ({self.type})>" def __eq__(self, other: object) -> bool: @@ -70,7 +77,7 @@ def __init__(self, checksum: str): def get_hash(self) -> str | None: return self.checksum - def _get_metadata_as_dict(self): + def _get_metadata_as_dict(self) -> Mapping[str, str]: return {"checksum": self.checksum} @@ -82,7 +89,7 @@ def __init__(self, checksum: str, raw_checksum: str): self.checksum = checksum self.raw_checksum = raw_checksum - def _get_metadata_as_dict(self): + def _get_metadata_as_dict(self) -> Mapping[str, str]: return {"checksum": self.checksum, "raw_checksum": self.raw_checksum} @@ -109,14 +116,14 @@ class PerformanceProblemVariant(BaseVariant): description = "performance problem" contributes = True - def __init__(self, event_performance_problem): + def __init__(self, event_performance_problem: Any): self.event_performance_problem = event_performance_problem self.problem = event_performance_problem.problem def get_hash(self) -> str | None: return self.problem.fingerprint - def _get_metadata_as_dict(self): + def _get_metadata_as_dict(self) -> Mapping[str, Any]: problem_data = self.problem.to_dict() evidence_hashes = self.event_performance_problem.evidence_hashes @@ -124,35 +131,40 @@ def _get_metadata_as_dict(self): class ComponentVariant(BaseVariant): - """A component variant is a variant that produces a hash from the - `BaseGroupingComponent` it encloses. - """ + """A variant that produces a hash from the `BaseGroupingComponent` it encloses.""" type = "component" def __init__( self, + # The root of the component tree component: AppGroupingComponent | SystemGroupingComponent | DefaultGroupingComponent, + # The highest non-root contributing component in the tree, representing the overall grouping + # method (exception, threads, message, etc.). For non-contributing variants, this will be + # None. 
+ contributing_component: ContributingComponent | None, strategy_config: StrategyConfiguration, ): self.component = component self.config = strategy_config + self.contributing_component = contributing_component + self.variant_name = self.component.id # "app", "system", or "default" @property - def description(self): + def description(self) -> str: return self.component.description @property - def contributes(self): + def contributes(self) -> bool: return self.component.contributes def get_hash(self) -> str | None: return self.component.get_hash() - def _get_metadata_as_dict(self): + def _get_metadata_as_dict(self) -> Mapping[str, Any]: return {"component": self.component.as_dict(), "config": self.config.as_dict()} - def __repr__(self): + def __repr__(self) -> str: return super().__repr__() + f" contributes={self.contributes} ({self.description})" @@ -190,7 +202,7 @@ def __init__(self, fingerprint: list[str], fingerprint_info: FingerprintInfo): self.info = fingerprint_info @property - def description(self): + def description(self) -> str: return "custom fingerprint" def get_hash(self) -> str | None: @@ -201,12 +213,12 @@ def _get_metadata_as_dict(self) -> FingerprintVariantMetadata: class BuiltInFingerprintVariant(CustomFingerprintVariant): - """A built-in, Sentry defined fingerprint.""" + """A built-in, Sentry-defined fingerprint.""" type = "built_in_fingerprint" @property - def description(self): + def description(self) -> str: return "Sentry defined fingerprint" @@ -215,19 +227,39 @@ class SaltedComponentVariant(ComponentVariant): type = "salted_component" + @classmethod + def from_component_variant( + cls, + component_variant: ComponentVariant, + fingerprint: list[str], + fingerprint_info: FingerprintInfo, + ) -> Self: + return cls( + fingerprint=fingerprint, + component=component_variant.component, + contributing_component=component_variant.contributing_component, + strategy_config=component_variant.config, + fingerprint_info=fingerprint_info, + ) + def __init__( self, fingerprint: list[str], + # The root of the component tree component: AppGroupingComponent | SystemGroupingComponent | DefaultGroupingComponent, + # The highest non-root contributing component in the tree, representing the overall grouping + # method (exception, threads, message, etc.). For non-contributing variants, this will be + # None. + contributing_component: ContributingComponent | None, strategy_config: StrategyConfiguration, fingerprint_info: FingerprintInfo, ): - ComponentVariant.__init__(self, component, strategy_config) + ComponentVariant.__init__(self, component, contributing_component, strategy_config) self.values = fingerprint self.info = fingerprint_info @property - def description(self): + def description(self) -> str: return "modified " + self.component.description def get_hash(self) -> str | None: @@ -235,16 +267,19 @@ def get_hash(self) -> str | None: return None final_values: list[str | int] = [] for value in self.values: + # If we've hit the `{{ default }}` part of the fingerprint, pull in values from the + # original grouping method (message, stacktrace, etc.) 
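+ # (For a hypothetical hybrid fingerprint such as ["{{ default }}", "payment-errors"], this means hashing + # the component's own values followed by the literal string "payment-errors".)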
if is_default_fingerprint_var(value): final_values.extend(self.component.iter_values()) else: final_values.append(value) return hash_from_values(final_values) - def _get_metadata_as_dict(self): - rv = ComponentVariant._get_metadata_as_dict(self) - rv.update(expose_fingerprint_dict(self.values, self.info)) - return rv + def _get_metadata_as_dict(self) -> Mapping[str, Any]: + return { + **ComponentVariant._get_metadata_as_dict(self), + **expose_fingerprint_dict(self.values, self.info), + } class VariantsByDescriptor(TypedDict, total=False): diff --git a/src/sentry/hybridcloud/models/apikeyreplica.py b/src/sentry/hybridcloud/models/apikeyreplica.py index a13a967f492918..7538aae8ad5a54 100644 --- a/src/sentry/hybridcloud/models/apikeyreplica.py +++ b/src/sentry/hybridcloud/models/apikeyreplica.py @@ -35,6 +35,9 @@ class Meta: __repr__ = sane_repr("organization_id", "key") + def __str__(self) -> str: + return f"replica_id={self.id}, status={self.status}" + @property def entity_id(self) -> int: return self.apikey_id diff --git a/src/sentry/hybridcloud/models/apitokenreplica.py b/src/sentry/hybridcloud/models/apitokenreplica.py index d9ee2be13040dc..c9f4402379f536 100644 --- a/src/sentry/hybridcloud/models/apitokenreplica.py +++ b/src/sentry/hybridcloud/models/apitokenreplica.py @@ -39,7 +39,7 @@ class Meta: __repr__ = sane_repr("user_id", "token", "application_id") def __str__(self) -> str: - return force_str(self.token) + return f"replica_token_id={self.id}, token_id={force_str(self.apitoken_id)}" @property def entity_id(self) -> int: diff --git a/src/sentry/incidents/endpoints/validators.py b/src/sentry/incidents/endpoints/validators.py index 382484a21e361c..de938853f8b568 100644 --- a/src/sentry/incidents/endpoints/validators.py +++ b/src/sentry/incidents/endpoints/validators.py @@ -89,7 +89,6 @@ class MetricAlertComparisonConditionValidator(NumericComparisonConditionValidato supported_conditions = frozenset((Condition.GREATER, Condition.LESS)) supported_results = frozenset((DetectorPriorityLevel.HIGH, DetectorPriorityLevel.MEDIUM)) - type = "metric_alert" class MetricAlertsDetectorValidator(BaseGroupTypeDetectorValidator): diff --git a/src/sentry/incidents/grouptype.py b/src/sentry/incidents/grouptype.py index b7078c7c2f7a40..1d0873ddb8588c 100644 --- a/src/sentry/incidents/grouptype.py +++ b/src/sentry/incidents/grouptype.py @@ -1,15 +1,68 @@ +from __future__ import annotations + from dataclasses import dataclass +from datetime import UTC, datetime +from typing import Any +from uuid import uuid4 +from sentry import features from sentry.incidents.endpoints.validators import MetricAlertsDetectorValidator from sentry.incidents.utils.types import QuerySubscriptionUpdate from sentry.issues.grouptype import GroupCategory, GroupType +from sentry.issues.issue_occurrence import IssueOccurrence +from sentry.models.organization import Organization from sentry.ratelimits.sliding_windows import Quota from sentry.types.group import PriorityLevel from sentry.workflow_engine.handlers.detector import StatefulDetectorHandler +from sentry.workflow_engine.models.data_source import DataPacket +from sentry.workflow_engine.types import DetectorGroupKey class MetricAlertDetectorHandler(StatefulDetectorHandler[QuerySubscriptionUpdate]): - pass + def build_occurrence_and_event_data( + self, group_key: DetectorGroupKey, value: int, new_status: PriorityLevel + ) -> tuple[IssueOccurrence, dict[str, Any]]: + # Returning a placeholder for now, this may require us passing more info + + occurrence = IssueOccurrence( + 
id=str(uuid4()), + project_id=self.detector.project_id, + event_id=str(uuid4()), + fingerprint=self.build_fingerprint(group_key), + issue_title="Some Issue", + subtitle="Some subtitle", + resource_id=None, + evidence_data={"detector_id": self.detector.id, "value": value}, + evidence_display=[], + type=MetricAlertFire, + detection_time=datetime.now(UTC), + level="error", + culprit="Some culprit", + initial_issue_priority=new_status.value, + ) + event_data = { + "timestamp": occurrence.detection_time, + "project_id": occurrence.project_id, + "event_id": occurrence.event_id, + "platform": "python", + "received": occurrence.detection_time, + "tags": {}, + } + return occurrence, event_data + + @property + def counter_names(self) -> list[str]: + # Placeholder for now, this should be a list of counters that we want to update as we go above warning / critical + return [] + + def get_dedupe_value(self, data_packet: DataPacket[QuerySubscriptionUpdate]) -> int: + return int(data_packet.packet.get("timestamp", datetime.now(UTC)).timestamp()) + + def get_group_key_values( + self, data_packet: DataPacket[QuerySubscriptionUpdate] + ) -> dict[DetectorGroupKey, int]: + # This is for testing purposes, we'll need to update the values inspected. + return {None: data_packet.packet["values"]["foo"]} # Example GroupType and detector handler for metric alerts. We don't create these issues yet, but we'll use something @@ -26,3 +79,8 @@ class MetricAlertFire(GroupType): enable_escalation_detection = False detector_handler = MetricAlertDetectorHandler detector_validator = MetricAlertsDetectorValidator + detector_config_schema = {} # TODO(colleen): update this + + @classmethod + def allow_post_process_group(cls, organization: Organization) -> bool: + return features.has("organizations:workflow-engine-metric-alert-processing", organization) diff --git a/src/sentry/incidents/subscription_processor.py b/src/sentry/incidents/subscription_processor.py index 4654948204437a..88953fd133def5 100644 --- a/src/sentry/incidents/subscription_processor.py +++ b/src/sentry/incidents/subscription_processor.py @@ -43,8 +43,16 @@ ) from sentry.incidents.tasks import handle_trigger_action from sentry.incidents.utils.metric_issue_poc import create_or_update_metric_issue -from sentry.incidents.utils.types import QuerySubscriptionUpdate +from sentry.incidents.utils.process_update_helpers import ( + get_aggregation_value_helper, + get_crash_rate_alert_metrics_aggregation_value_helper, +) +from sentry.incidents.utils.types import ( + DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION, + QuerySubscriptionUpdate, +) from sentry.models.project import Project +from sentry.search.eap.utils import add_start_end_conditions from sentry.seer.anomaly_detection.get_anomaly_data import get_anomaly_data_from_seer from sentry.seer.anomaly_detection.utils import anomaly_has_confidence, has_anomaly from sentry.snuba.dataset import Dataset @@ -53,10 +61,12 @@ get_entity_key_from_query_builder, get_entity_subscription_from_snuba_query, ) -from sentry.snuba.models import QuerySubscription +from sentry.snuba.models import QuerySubscription, SnubaQuery from sentry.snuba.subscriptions import delete_snuba_subscription -from sentry.utils import metrics, redis +from sentry.utils import metrics, redis, snuba_rpc from sentry.utils.dates import to_datetime +from sentry.workflow_engine.models import DataPacket +from sentry.workflow_engine.processors.data_packet import process_data_packets logger = logging.getLogger(__name__) REDIS_TTL = int(timedelta(days=7).total_seconds()) @@ 
-221,41 +231,80 @@ def get_comparison_aggregation_value( snuba_query, self.subscription.project.organization_id, ) - try: - project_ids = [self.subscription.project_id] - # TODO: determine whether we need to include the subscription query_extra here - query_builder = entity_subscription.build_query_builder( - query=snuba_query.query, - project_ids=project_ids, - environment=snuba_query.environment, - params={ - "organization_id": self.subscription.project.organization.id, - "project_id": project_ids, - "start": start, - "end": end, - }, - ) - time_col = ENTITY_TIME_COLUMNS[get_entity_key_from_query_builder(query_builder)] - query_builder.add_conditions( - [ - Condition(Column(time_col), Op.GTE, start), - Condition(Column(time_col), Op.LT, end), - ] - ) - query_builder.limit = Limit(1) - results = query_builder.run_query(referrer="subscription_processor.comparison_query") - comparison_aggregate = list(results["data"][0].values())[0] + dataset = Dataset(snuba_query.dataset) + query_type = SnubaQuery.Type(snuba_query.type) + project_ids = [self.subscription.project_id] + + comparison_aggregate: None | float = None + if query_type == SnubaQuery.Type.PERFORMANCE and dataset == Dataset.EventsAnalyticsPlatform: + try: + rpc_time_series_request = entity_subscription.build_rpc_request( + query=snuba_query.query, + project_ids=project_ids, + environment=snuba_query.environment, + params={ + "organization_id": self.subscription.project.organization.id, + "project_id": project_ids, + }, + referrer="subscription_processor.comparison_query", + ) - except Exception: - logger.exception( - "Failed to run comparison query", - extra={ - "alert_rule_id": self.alert_rule.id, - "subscription_id": subscription_update.get("subscription_id"), - "organization_id": self.alert_rule.organization_id, - }, - ) - return None + rpc_time_series_request = add_start_end_conditions( + rpc_time_series_request, start, end + ) + + rpc_response = snuba_rpc.timeseries_rpc(rpc_time_series_request) + if len(rpc_response.result_timeseries): + comparison_aggregate = rpc_response.result_timeseries[0].data_points[0].data + + except Exception: + logger.exception( + "Failed to run RPC comparison query", + extra={ + "alert_rule_id": self.alert_rule.id, + "subscription_id": subscription_update.get("subscription_id"), + "organization_id": self.alert_rule.organization_id, + }, + ) + return None + + else: + try: + # TODO: determine whether we need to include the subscription query_extra here + query_builder = entity_subscription.build_query_builder( + query=snuba_query.query, + project_ids=project_ids, + environment=snuba_query.environment, + params={ + "organization_id": self.subscription.project.organization.id, + "project_id": project_ids, + "start": start, + "end": end, + }, + ) + time_col = ENTITY_TIME_COLUMNS[get_entity_key_from_query_builder(query_builder)] + query_builder.add_conditions( + [ + Condition(Column(time_col), Op.GTE, start), + Condition(Column(time_col), Op.LT, end), + ] + ) + query_builder.limit = Limit(1) + results = query_builder.run_query( + referrer="subscription_processor.comparison_query" + ) + comparison_aggregate = list(results["data"][0].values())[0] + + except Exception: + logger.exception( + "Failed to run comparison query", + extra={ + "alert_rule_id": self.alert_rule.id, + "subscription_id": subscription_update.get("subscription_id"), + "organization_id": self.alert_rule.organization_id, + }, + ) + return None if not comparison_aggregate: 
metrics.incr("incidents.alert_rules.skipping_update_comparison_value_invalid") @@ -282,24 +331,12 @@ def get_crash_rate_alert_metrics_aggregation_value( count is just ignored - `crashed` represents the total sessions or user counts that crashed. """ - row = subscription_update["values"]["data"][0] - total_session_count = row.get("count", 0) - crash_count = row.get("crashed", 0) - - if total_session_count == 0: + # NOTE (mifu67): we create this helper because we also use it in the new detector processing flow + aggregation_value = get_crash_rate_alert_metrics_aggregation_value_helper( + subscription_update + ) + if aggregation_value is None: self.reset_trigger_counts() - metrics.incr("incidents.alert_rules.ignore_update_no_session_data") - return None - - if CRASH_RATE_ALERT_MINIMUM_THRESHOLD is not None: - min_threshold = int(CRASH_RATE_ALERT_MINIMUM_THRESHOLD) - if total_session_count < min_threshold: - self.reset_trigger_counts() - metrics.incr("incidents.alert_rules.ignore_update_count_lower_than_min_threshold") - return None - - aggregation_value: int = round((1 - crash_count / total_session_count) * 100, 3) - return aggregation_value def get_aggregation_value(self, subscription_update: QuerySubscriptionUpdate) -> float | None: @@ -308,14 +345,8 @@ def get_aggregation_value(self, subscription_update: QuerySubscriptionUpdate) -> subscription_update ) else: - aggregation_value = list(subscription_update["values"]["data"][0].values())[0] - # In some cases Snuba can return a None value for an aggregation. This means - # there were no rows present when we made the query for certain types of aggregations - # like avg. Defaulting this to 0 for now. It might turn out that we'd prefer to skip - # the update in the future. - if aggregation_value is None: - aggregation_value = 0 - + # NOTE (mifu67): we create this helper because we also use it in the new detector processing flow + aggregation_value = get_aggregation_value_helper(subscription_update) if self.alert_rule.comparison_delta: aggregation_value = self.get_comparison_aggregation_value( subscription_update, aggregation_value @@ -358,6 +389,15 @@ def process_update(self, subscription_update: QuerySubscriptionUpdate) -> None: metrics.incr("incidents.alert_rules.skipping_already_processed_update") return + if features.has( + "organizations:workflow-engine-metric-alert-processing", + self.subscription.project.organization, + ): + data_packet = DataPacket[QuerySubscriptionUpdate]( + query_id=self.subscription.id, packet=subscription_update + ) + process_data_packets([data_packet], DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION) + self.last_update = subscription_update["timestamp"] if ( diff --git a/src/sentry/incidents/utils/process_update_helpers.py b/src/sentry/incidents/utils/process_update_helpers.py new file mode 100644 index 00000000000000..74b6526eb5218e --- /dev/null +++ b/src/sentry/incidents/utils/process_update_helpers.py @@ -0,0 +1,60 @@ +from sentry.incidents.utils.types import QuerySubscriptionUpdate +from sentry.utils import metrics + +""" +We pull these methods out of the subscription processor to be used by the +workflow engine data condition handlers. +""" + +# NOTE (mifu67): this is set to None in the subscription processor code and doesn't +# seem to be used. Maybe we don't need the logic gated by it? 
+CRASH_RATE_ALERT_MINIMUM_THRESHOLD: int | None = None + + +def get_crash_rate_alert_metrics_aggregation_value_helper( + subscription_update: QuerySubscriptionUpdate, +) -> float | None: + """ + Handles validation and extraction of Crash Rate Alerts subscription updates values over + metrics dataset. + The subscription update looks like + [ + {'project_id': 8, 'tags[5]': 6, 'count': 2.0, 'crashed': 1.0} + ] + - `count` represents sessions or users sessions that were started, hence to get the crash + free percentage, we would need to divide number of crashed sessions by that number, + and subtract that value from 1. This is also used when CRASH_RATE_ALERT_MINIMUM_THRESHOLD is + set in the sense that if the minimum threshold is greater than the session count, + then the update is dropped. If the minimum threshold is not set then the total sessions + count is just ignored + - `crashed` represents the total sessions or user counts that crashed. + """ + row = subscription_update["values"]["data"][0] + total_session_count = row.get("count", 0) + crash_count = row.get("crashed", 0) + + if total_session_count == 0: + metrics.incr("incidents.alert_rules.ignore_update_no_session_data") + return None + + if CRASH_RATE_ALERT_MINIMUM_THRESHOLD is not None: + min_threshold = int(CRASH_RATE_ALERT_MINIMUM_THRESHOLD) + if total_session_count < min_threshold: + metrics.incr("incidents.alert_rules.ignore_update_count_lower_than_min_threshold") + return None + + aggregation_value: int = round((1 - crash_count / total_session_count) * 100, 3) + + return aggregation_value + + +def get_aggregation_value_helper(subscription_update: QuerySubscriptionUpdate) -> float: + aggregation_value = list(subscription_update["values"]["data"][0].values())[0] + # In some cases Snuba can return a None value for an aggregation. This means + # there were no rows present when we made the query for certain types of aggregations + # like avg. Defaulting this to 0 for now. It might turn out that we'd prefer to skip + # the update in the future. 
+ if aggregation_value is None: + aggregation_value = 0 + + return aggregation_value diff --git a/src/sentry/incidents/utils/types.py b/src/sentry/incidents/utils/types.py index 4be43160dc8a73..572598108b39e5 100644 --- a/src/sentry/incidents/utils/types.py +++ b/src/sentry/incidents/utils/types.py @@ -13,3 +13,6 @@ class QuerySubscriptionUpdate(TypedDict): class AlertRuleActivationConditionType(Enum): RELEASE_CREATION = 0 DEPLOY_CREATION = 1 + + +DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION = "snuba_query_subscription" diff --git a/src/sentry/ingest/billing_metrics_consumer.py b/src/sentry/ingest/billing_metrics_consumer.py index 471855aa356f6d..8009d57a6cdf9e 100644 --- a/src/sentry/ingest/billing_metrics_consumer.py +++ b/src/sentry/ingest/billing_metrics_consumer.py @@ -16,13 +16,7 @@ from sentry.constants import DataCategory from sentry.models.project import Project -from sentry.sentry_metrics.indexer.strings import ( - SHARED_TAG_STRINGS, - SPAN_METRICS_NAMES, - TRANSACTION_METRICS_NAMES, -) -from sentry.sentry_metrics.use_case_id_registry import UseCaseID -from sentry.sentry_metrics.utils import reverse_resolve_tag_value +from sentry.sentry_metrics.indexer.strings import SPAN_METRICS_NAMES, TRANSACTION_METRICS_NAMES from sentry.signals import first_custom_metric_received from sentry.snuba.metrics import parse_mri from sentry.snuba.metrics.naming_layer.mri import is_custom_metric @@ -48,9 +42,11 @@ def create_with_partitions( class BillingTxCountMetricConsumerStrategy(ProcessingStrategy[KafkaPayload]): - """A metrics consumer that generates a billing outcome for each processed - transaction, processing a bucket at a time. The transaction count is - directly taken from the `c:transactions/usage@none` counter metric. + """A metrics consumer that generates an accepted outcome for each processed (as opposed to indexed) + transaction or span, processing a bucket at a time. The transaction / span count is + directly taken from the `c:transactions/usage@none` or `c:spans/usage@none` counter metric. + + See https://develop.sentry.dev/application-architecture/dynamic-sampling/outcomes/. 
""" #: The IDs of the metrics used to count transactions or spans @@ -58,7 +54,6 @@ class BillingTxCountMetricConsumerStrategy(ProcessingStrategy[KafkaPayload]): TRANSACTION_METRICS_NAMES["c:transactions/usage@none"]: DataCategory.TRANSACTION, SPAN_METRICS_NAMES["c:spans/usage@none"]: DataCategory.SPAN, } - profile_tag_key = str(SHARED_TAG_STRINGS["has_profile"]) def __init__(self, next_step: ProcessingStrategy[Any]) -> None: self.__next_step = next_step @@ -79,7 +74,7 @@ def submit(self, message: Message[KafkaPayload]) -> None: payload = self._get_payload(message) - self._produce_billing_outcomes(payload) + self._produce_outcomes(payload) self._flag_metric_received_for_project(payload) self.__next_step.submit(message) @@ -106,25 +101,16 @@ def _count_processed_items(self, generic_metric: GenericMetric) -> Mapping[DataC return items - def _has_profile(self, generic_metric: GenericMetric) -> bool: - return bool( - (tag_value := generic_metric["tags"].get(self.profile_tag_key)) - and "true" - == reverse_resolve_tag_value( - UseCaseID.TRANSACTIONS, generic_metric["org_id"], tag_value - ) - ) - - def _produce_billing_outcomes(self, generic_metric: GenericMetric) -> None: + def _produce_outcomes(self, generic_metric: GenericMetric) -> None: for category, quantity in self._count_processed_items(generic_metric).items(): - self._produce_billing_outcome( + self._produce_accepted_outcome( org_id=generic_metric["org_id"], project_id=generic_metric["project_id"], category=category, quantity=quantity, ) - def _produce_billing_outcome( + def _produce_accepted_outcome( self, *, org_id: int, project_id: int, category: DataCategory, quantity: int ) -> None: if quantity < 1: diff --git a/src/sentry/integrations/api/bases/doc_integrations.py b/src/sentry/integrations/api/bases/doc_integrations.py index a6141fbe3f1285..c7349143d9ce6e 100644 --- a/src/sentry/integrations/api/bases/doc_integrations.py +++ b/src/sentry/integrations/api/bases/doc_integrations.py @@ -68,7 +68,7 @@ class DocIntegrationsBaseEndpoint(Endpoint): permission_classes = (DocIntegrationsAndStaffPermission,) def generate_incoming_metadata(self, request: Request) -> Any: - return {k: v for k, v in request.json_body.items() if k in METADATA_PROPERTIES} + return {k: v for k, v in request.data.items() if k in METADATA_PROPERTIES} class DocIntegrationBaseEndpoint(DocIntegrationsBaseEndpoint): diff --git a/src/sentry/integrations/api/endpoints/doc_integration_details.py b/src/sentry/integrations/api/endpoints/doc_integration_details.py index e6974c13876c38..6d93dbde58634f 100644 --- a/src/sentry/integrations/api/endpoints/doc_integration_details.py +++ b/src/sentry/integrations/api/endpoints/doc_integration_details.py @@ -31,7 +31,7 @@ def get(self, request: Request, doc_integration: DocIntegration) -> Response: return self.respond(serialize(doc_integration, request.user), status=status.HTTP_200_OK) def put(self, request: Request, doc_integration: DocIntegration) -> Response: - data = request.json_body + data = request.data data["metadata"] = self.generate_incoming_metadata(request) serializer = DocIntegrationSerializer(doc_integration, data=data) diff --git a/src/sentry/integrations/api/endpoints/doc_integrations_index.py b/src/sentry/integrations/api/endpoints/doc_integrations_index.py index 869079209a13c1..e392a8eba0f7c8 100644 --- a/src/sentry/integrations/api/endpoints/doc_integrations_index.py +++ b/src/sentry/integrations/api/endpoints/doc_integrations_index.py @@ -42,7 +42,7 @@ def get(self, request: Request): def post(self, request: 
Request): # Override any incoming JSON for these fields - data = request.json_body + data = request.data data["is_draft"] = True data["metadata"] = self.generate_incoming_metadata(request) serializer = DocIntegrationSerializer(data=data) diff --git a/src/sentry/integrations/bitbucket/client.py b/src/sentry/integrations/bitbucket/client.py index 897e309ab7b308..09fee5b8305870 100644 --- a/src/sentry/integrations/bitbucket/client.py +++ b/src/sentry/integrations/bitbucket/client.py @@ -89,9 +89,6 @@ def finalize_request(self, prepared_request: PreparedRequest) -> PreparedRequest def get_issue(self, repo, issue_id): return self.get(BitbucketAPIPath.issue.format(repo=repo, issue_id=issue_id)) - def get_issues(self, repo): - return self.get(BitbucketAPIPath.issues.format(repo=repo)) - def create_issue(self, repo, data): return self.post(path=BitbucketAPIPath.issues.format(repo=repo), data=data) diff --git a/src/sentry/integrations/bitbucket/webhook.py b/src/sentry/integrations/bitbucket/webhook.py index 2460e7aca6c8c5..e03d4b632d38c9 100644 --- a/src/sentry/integrations/bitbucket/webhook.py +++ b/src/sentry/integrations/bitbucket/webhook.py @@ -1,6 +1,6 @@ import ipaddress import logging -from abc import ABC, abstractmethod +from abc import ABC from collections.abc import Mapping from datetime import timezone from typing import Any @@ -17,6 +17,7 @@ from sentry.api.base import Endpoint, region_silo_endpoint from sentry.integrations.base import IntegrationDomain from sentry.integrations.bitbucket.constants import BITBUCKET_IP_RANGES, BITBUCKET_IPS +from sentry.integrations.source_code_management.webhook import SCMWebhook from sentry.integrations.utils.metrics import IntegrationWebhookEvent, IntegrationWebhookEventType from sentry.models.commit import Commit from sentry.models.commitauthor import CommitAuthor @@ -30,17 +31,12 @@ PROVIDER_NAME = "integrations:bitbucket" -class Webhook(ABC): +class BitbucketWebhook(SCMWebhook, ABC): @property - @abstractmethod - def event_type(self) -> IntegrationWebhookEventType: - raise NotImplementedError - - @abstractmethod - def __call__(self, organization: Organization, event: Mapping[str, Any]): - raise NotImplementedError + def provider(self) -> str: + return "bitbucket" - def update_repo_data(self, repo, event): + def update_repo_data(self, repo: Repository, event: Mapping[str, Any]) -> None: """ Given a webhook payload, update stored repo data if needed. 
@@ -68,16 +64,19 @@ def update_repo_data(self, repo, event): ) -class PushEventWebhook(Webhook): +class PushEventWebhook(BitbucketWebhook): # https://confluence.atlassian.com/bitbucket/event-payloads-740262817.html#EventPayloads-Push @property def event_type(self) -> IntegrationWebhookEventType: return IntegrationWebhookEventType.PUSH - def __call__(self, organization: Organization, event: Mapping[str, Any]): + def __call__(self, event: Mapping[str, Any], **kwargs) -> None: authors = {} + if not (organization := kwargs.get("organization")): + raise ValueError("Missing organization") + try: repo = Repository.objects.get( organization_id=organization.id, @@ -131,9 +130,9 @@ class BitbucketWebhookEndpoint(Endpoint): "POST": ApiPublishStatus.PRIVATE, } permission_classes = () - _handlers: dict[str, type[Webhook]] = {"repo:push": PushEventWebhook} + _handlers: dict[str, type[BitbucketWebhook]] = {"repo:push": PushEventWebhook} - def get_handler(self, event_type) -> type[Webhook] | None: + def get_handler(self, event_type) -> type[BitbucketWebhook] | None: return self._handlers.get(event_type) @method_decorator(csrf_exempt) @@ -205,8 +204,8 @@ def post(self, request: HttpRequest, organization_id: int) -> HttpResponse: with IntegrationWebhookEvent( interaction_type=event_handler.event_type, domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT, - provider_key="bitbucket", + provider_key=event_handler.provider, ).capture(): - event_handler(organization, event) + event_handler(event, organization=organization) return HttpResponse(status=204) diff --git a/src/sentry/integrations/bitbucket_server/webhook.py b/src/sentry/integrations/bitbucket_server/webhook.py index f18c226c43a36d..0fdca807dd97a3 100644 --- a/src/sentry/integrations/bitbucket_server/webhook.py +++ b/src/sentry/integrations/bitbucket_server/webhook.py @@ -1,5 +1,5 @@ import logging -from abc import ABC, abstractmethod +from abc import ABC from collections.abc import Mapping from datetime import datetime, timezone from typing import Any @@ -7,14 +7,18 @@ import orjson import sentry_sdk from django.db import IntegrityError, router, transaction -from django.http import HttpRequest, HttpResponse +from django.http import Http404, HttpRequest, HttpResponse from django.http.response import HttpResponseBase from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt -from django.views.generic.base import View +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.base import Endpoint +from sentry.api.exceptions import BadRequest from sentry.integrations.base import IntegrationDomain from sentry.integrations.models.integration import Integration +from sentry.integrations.source_code_management.webhook import SCMWebhook from sentry.integrations.utils.metrics import IntegrationWebhookEvent, IntegrationWebhookEventType from sentry.models.commit import Commit from sentry.models.commitauthor import CommitAuthor @@ -29,15 +33,10 @@ PROVIDER_NAME = "integrations:bitbucket_server" -class Webhook(ABC): +class BitbucketServerWebhook(SCMWebhook, ABC): @property - @abstractmethod - def event_type(self) -> IntegrationWebhookEventType: - raise NotImplementedError - - @abstractmethod - def __call__(self, organization: Organization, integration_id: int, event: Mapping[str, Any]): - raise NotImplementedError + def provider(self): + return "bitbucket_server" def update_repo_data(self, repo, event): """ @@ -49,16 +48,20 @@ def update_repo_data(self, repo, 
event): repo.update(name=name_from_event, config=dict(repo.config, name=name_from_event)) -class PushEventWebhook(Webhook): +class PushEventWebhook(BitbucketServerWebhook): @property def event_type(self) -> IntegrationWebhookEventType: return IntegrationWebhookEventType.PUSH - def __call__( - self, organization: Organization, integration_id: int, event: Mapping[str, Any] - ) -> HttpResponse: + def __call__(self, event: Mapping[str, Any], **kwargs) -> None: authors = {} + if not ( + (organization := kwargs.get("organization")) + and (integration_id := kwargs.get("integration_id")) + ): + raise ValueError("Organization and integration_id must be provided") + try: repo = Repository.objects.get( organization_id=organization.id, @@ -66,18 +69,18 @@ def __call__( external_id=str(event["repository"]["id"]), ) except Repository.DoesNotExist: - return HttpResponse(status=404) + raise Http404() provider = repo.get_provider() try: installation = provider.get_installation(integration_id, organization.id) except Integration.DoesNotExist: - return HttpResponse(status=404) + raise Http404() try: client = installation.get_client() except IntegrationError: - return HttpResponse(status=400) + raise BadRequest() # while we're here, make sure repo data is up to date self.update_repo_data(repo, event) @@ -91,12 +94,12 @@ def __call__( project_name, repo_name, from_hash, change.get("toHash") ) except ApiHostError: - return HttpResponse(status=409) + raise BadRequest(detail="Unable to reach host") except ApiUnauthorized: - return HttpResponse(status=400) + raise BadRequest() except Exception as e: sentry_sdk.capture_exception(e) - return HttpResponse(status=400) + raise for commit in commits: if IntegrationRepositoryProvider.should_ignore_commit(commit["message"]): @@ -131,14 +134,19 @@ def __call__( except IntegrityError: pass - return HttpResponse(status=204) - @region_silo_view -class BitbucketServerWebhookEndpoint(View): - _handlers: dict[str, type[Webhook]] = {"repo:refs_changed": PushEventWebhook} +class BitbucketServerWebhookEndpoint(Endpoint): + authentication_classes = () + permission_classes = () + owner = ApiOwner.ECOSYSTEM + publish_status = { + "POST": ApiPublishStatus.PRIVATE, + } + + _handlers: dict[str, type[BitbucketServerWebhook]] = {"repo:refs_changed": PushEventWebhook} - def get_handler(self, event_type) -> type[Webhook] | None: + def get_handler(self, event_type) -> type[BitbucketServerWebhook] | None: return self._handlers.get(event_type) @method_decorator(csrf_exempt) @@ -150,7 +158,7 @@ def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase: def post(self, request: HttpRequest, organization_id, integration_id) -> HttpResponseBase: try: - organization = Organization.objects.get_from_cache(id=organization_id) + organization: Organization = Organization.objects.get_from_cache(id=organization_id) except Organization.DoesNotExist: logger.exception( "%s.webhook.invalid-organization", @@ -194,6 +202,8 @@ def post(self, request: HttpRequest, organization_id, integration_id) -> HttpRes with IntegrationWebhookEvent( interaction_type=event_handler.event_type, domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT, - provider_key="bitbucket-server", + provider_key=event_handler.provider, ).capture(): - return event_handler(organization, integration_id, event) + event_handler(event, organization=organization, integration_id=integration_id) + + return HttpResponse(status=204) diff --git a/src/sentry/integrations/discord/actions/issue_alert/notification.py 
b/src/sentry/integrations/discord/actions/issue_alert/notification.py index 27e3ac45a8f2be..66122003d18b73 100644 --- a/src/sentry/integrations/discord/actions/issue_alert/notification.py +++ b/src/sentry/integrations/discord/actions/issue_alert/notification.py @@ -5,6 +5,11 @@ from sentry.integrations.discord.actions.issue_alert.form import DiscordNotifyServiceForm from sentry.integrations.discord.client import DiscordClient from sentry.integrations.discord.message_builder.issues import DiscordIssuesMessageBuilder +from sentry.integrations.discord.spec import DiscordMessagingSpec +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.rules.actions import IntegrationEventAction from sentry.rules.base import CallbackFuture from sentry.types.rules import RuleFuture @@ -46,19 +51,27 @@ def send_notification(event: GroupEvent, futures: Sequence[RuleFuture]) -> None: message = DiscordIssuesMessageBuilder(event.group, event=event, tags=tags, rules=rules) client = DiscordClient() - try: - client.send_message(channel_id, message, notification_uuid=notification_uuid) - except Exception as e: - self.logger.error( - "discord.notification.message_send_failure", - extra={ - "error": str(e), - "project_id": event.project_id, - "event_id": event.event_id, - "guild_id": integration.external_id, - "channel_id": channel_id, - }, - ) + with MessagingInteractionEvent( + interaction_type=MessagingInteractionType.SEND_ISSUE_ALERT_NOTIFICATION, + spec=DiscordMessagingSpec(), + ).capture() as lifecycle: + try: + lifecycle.add_extras({"integration_id": integration.id, "channel": channel_id}) + client.send_message(channel_id, message, notification_uuid=notification_uuid) + except Exception as e: + # TODO(iamrajjoshi): Update some of these failures to halts + lifecycle.record_failure(e) + # TODO(iamrajjoshi): Remove the logger after we audit lifecycle + self.logger.error( + "discord.notification.message_send_failure", + extra={ + "error": str(e), + "project_id": event.project_id, + "event_id": event.event_id, + "guild_id": integration.external_id, + "channel_id": channel_id, + }, + ) rule = rules[0] if rules else None self.record_notification_sent(event, channel_id, rule, notification_uuid) diff --git a/src/sentry/integrations/discord/actions/metric_alert.py b/src/sentry/integrations/discord/actions/metric_alert.py index beff2896743e6f..e2e6f757389b52 100644 --- a/src/sentry/integrations/discord/actions/metric_alert.py +++ b/src/sentry/integrations/discord/actions/metric_alert.py @@ -10,6 +10,11 @@ from sentry.integrations.discord.message_builder.metric_alerts import ( DiscordMetricAlertMessageBuilder, ) +from sentry.integrations.discord.spec import DiscordMessagingSpec +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from ..utils import logger @@ -51,13 +56,19 @@ def send_incident_alert_notification( ) client = DiscordClient() - try: - client.send_message(channel, message) - except Exception as error: - logger.warning( - "discord.metric_alert.message_send_failure", - extra={"error": error, "incident_id": incident.id, "channel_id": channel}, - ) - return False - else: + with MessagingInteractionEvent( + interaction_type=MessagingInteractionType.SEND_INCIDENT_ALERT_NOTIFICATION, + spec=DiscordMessagingSpec(), + ).capture() as lifecycle: + try: + client.send_message(channel, message) + except Exception as error: + # TODO(iamrajjoshi): Update some of these failures to halts + 
lifecycle.record_failure(error) + # TODO(iamrajjoshi): Remove the logger after we audit lifecycle + logger.warning( + "discord.metric_alert.message_send_failure", + extra={"error": error, "incident_id": incident.id, "channel_id": channel}, + ) + return False return True diff --git a/src/sentry/integrations/discord/views/link_identity.py b/src/sentry/integrations/discord/views/link_identity.py index 2ceb879317c181..968fcdc96aec35 100644 --- a/src/sentry/integrations/discord/views/link_identity.py +++ b/src/sentry/integrations/discord/views/link_identity.py @@ -26,7 +26,13 @@ class DiscordLinkIdentityView(DiscordIdentityLinkageView, LinkIdentityView): def get_success_template_and_context( self, params: Mapping[str, Any], integration: Integration | None ) -> tuple[str, dict[str, Any]]: - return "sentry/integrations/discord/linked.html", {} + if integration is None: + raise ValueError( + 'integration is required for linking (params must include "integration_id")' + ) + return "sentry/integrations/discord/linked.html", { + "guild_id": integration.external_id, + } @property def analytics_operation_key(self) -> str | None: diff --git a/src/sentry/integrations/discord/webhooks/message_component.py b/src/sentry/integrations/discord/webhooks/message_component.py index 39d58be7d61ae2..04f4691617166c 100644 --- a/src/sentry/integrations/discord/webhooks/message_component.py +++ b/src/sentry/integrations/discord/webhooks/message_component.py @@ -247,13 +247,7 @@ def update_group(self, data: Mapping[str, object]) -> None: status=data, ) update_groups( - request=self.request.request, - group_ids=[self.group.id], - projects=[self.group.project], - organization_id=self.group.organization.id, - search_fn=None, - user=self.user, - data=data, + request=self.request.request, groups=[self.group], user=self.user, data=data ) diff --git a/src/sentry/integrations/github/client.py b/src/sentry/integrations/github/client.py index 98c189d50bbbfd..b1f831e9b42c8b 100644 --- a/src/sentry/integrations/github/client.py +++ b/src/sentry/integrations/github/client.py @@ -141,7 +141,7 @@ def _get_token(self, prepared_request: PreparedRequest) -> str | None: access_token: str | None = self.integration.metadata.get("access_token") expires_at: str | None = self.integration.metadata.get("expires_at") is_expired = ( - bool(expires_at) and datetime.strptime(cast(str, expires_at), "%Y-%m-%dT%H:%M:%S") < now + bool(expires_at) and datetime.fromisoformat(expires_at).replace(tzinfo=None) < now ) should_refresh = not access_token or not expires_at or is_expired @@ -587,10 +587,6 @@ def get_with_pagination( page_number += 1 return output - def get_issues(self, repo: str) -> Sequence[Any]: - issues: Sequence[Any] = self.get(f"/repos/{repo}/issues") - return issues - def search_issues(self, query: str) -> Mapping[str, Sequence[Mapping[str, Any]]]: """ https://docs.github.com/en/rest/search?#search-issues-and-pull-requests diff --git a/src/sentry/integrations/github/issues.py b/src/sentry/integrations/github/issues.py index cdd510da75cdaf..cc803cb9b40984 100644 --- a/src/sentry/integrations/github/issues.py +++ b/src/sentry/integrations/github/issues.py @@ -287,17 +287,6 @@ def get_allowed_assignees(self, repo: str) -> Sequence[tuple[str, str]]: return (("", "Unassigned"),) + users - def get_repo_issues(self, repo: str) -> Sequence[tuple[str, str]]: - client = self.get_client() - try: - response = client.get_issues(repo) - except Exception as e: - self.raise_error(e) - - issues = tuple((i["number"], "#{} {}".format(i["number"], 
i["title"])) for i in response) - - return issues - def get_repo_labels(self, repo: str) -> Sequence[tuple[str, str]]: client = self.get_client() try: diff --git a/src/sentry/integrations/github/webhook.py b/src/sentry/integrations/github/webhook.py index d5066d0e908673..24cd6fdfe02f9d 100644 --- a/src/sentry/integrations/github/webhook.py +++ b/src/sentry/integrations/github/webhook.py @@ -26,12 +26,10 @@ from sentry.integrations.base import IntegrationDomain from sentry.integrations.github.tasks.open_pr_comment import open_pr_comment_workflow from sentry.integrations.pipeline import ensure_integration -from sentry.integrations.services.integration.model import ( - RpcIntegration, - RpcOrganizationIntegration, -) +from sentry.integrations.services.integration.model import RpcIntegration from sentry.integrations.services.integration.service import integration_service from sentry.integrations.services.repository.service import repository_service +from sentry.integrations.source_code_management.webhook import SCMWebhook from sentry.integrations.utils.metrics import IntegrationWebhookEvent, IntegrationWebhookEventType from sentry.integrations.utils.scope import clear_tags_and_context from sentry.models.commit import Commit @@ -73,31 +71,21 @@ def get_file_language(filename: str) -> str | None: return language -class Webhook(ABC): +class GitHubWebhook(SCMWebhook, ABC): """ Base class for GitHub webhooks handled in region silos. """ - provider = "github" - @property - @abstractmethod - def event_type(self) -> IntegrationWebhookEventType: - raise NotImplementedError + def provider(self) -> str: + return "github" @abstractmethod - def _handle( - self, - integration: RpcIntegration, - event: Mapping[str, Any], - organization: Organization, - repo: Repository, - host: str | None = None, - ) -> None: - raise NotImplementedError + def _handle(self, integration: RpcIntegration, event: Mapping[str, Any], **kwargs) -> None: + pass - def __call__(self, event: Mapping[str, Any], host: str | None = None) -> None: - external_id = get_github_external_id(event=event, host=host) + def __call__(self, event: Mapping[str, Any], **kwargs) -> None: + external_id = get_github_external_id(event=event, host=kwargs.get("host")) result = integration_service.organization_contexts( external_id=external_id, provider=self.provider @@ -166,7 +154,12 @@ def __call__(self, event: Mapping[str, Any], host: str | None = None) -> None: for repo in repos.exclude(status=ObjectStatus.HIDDEN): self.update_repo_data(repo, event) - self._handle(integration, event, orgs[repo.organization_id], repo) + self._handle( + integration=integration, + event=event, + organization=orgs[repo.organization_id], + repo=repo, + ) def update_repo_data(self, repo: Repository, event: Mapping[str, Any]) -> None: """ @@ -208,21 +201,28 @@ def update_repo_data(self, repo: Repository, event: Mapping[str, Any]) -> None: ) pass + def is_anonymous_email(self, email: str) -> bool: + return email[-25:] == "@users.noreply.github.com" + + def get_external_id(self, username: str) -> str: + return f"github:{username}" -class InstallationEventWebhook: + def get_idp_external_id(self, integration: RpcIntegration, host: str | None = None) -> str: + return options.get("github-app.id") + + +class InstallationEventWebhook(GitHubWebhook): """ Unlike other GitHub webhooks, installation webhooks are handled in control silo. 
https://developer.github.com/v3/activity/events/types/#installationevent """ - provider = "github" - @property def event_type(self) -> IntegrationWebhookEventType: return IntegrationWebhookEventType.INSTALLATION - def __call__(self, event: Mapping[str, Any], host: str | None = None) -> None: + def __call__(self, event: Mapping[str, Any], **kwargs) -> None: installation = event["installation"] if not installation: @@ -241,7 +241,7 @@ def __call__(self, event: Mapping[str, Any], host: str | None = None) -> None: if event["action"] == "deleted": external_id = event["installation"]["id"] - if host: + if host := kwargs.get("host"): external_id = "{}:{}".format(host, event["installation"]["id"]) result = integration_service.organization_contexts( provider=self.provider, @@ -251,7 +251,7 @@ def __call__(self, event: Mapping[str, Any], host: str | None = None) -> None: org_integrations = result.organization_integrations if integration is not None: - self._handle_delete(event, integration, org_integrations) + self._handle(integration, event, org_integrations=org_integrations) else: # It seems possible for the GH or GHE app to be installed on their # end, but the integration to not exist. Possibly from deleting in @@ -267,13 +267,13 @@ def __call__(self, event: Mapping[str, Any], host: str | None = None) -> None: ) logger.error("Installation is missing.") - def _handle_delete( + def _handle( self, - event: Mapping[str, Any], integration: RpcIntegration, - org_integrations: list[RpcOrganizationIntegration], + event: Mapping[str, Any], + **kwargs, ) -> None: - org_ids = {oi.organization_id for oi in org_integrations} + org_ids = {oi.organization_id for oi in kwargs.get("org_integrations", [])} logger.info( "InstallationEventWebhook._handle_delete", @@ -294,22 +294,13 @@ def _handle_delete( ) -class PushEventWebhook(Webhook): +class PushEventWebhook(GitHubWebhook): """https://developer.github.com/v3/activity/events/types/#pushevent""" @property def event_type(self) -> IntegrationWebhookEventType: return IntegrationWebhookEventType.PUSH - def is_anonymous_email(self, email: str) -> bool: - return email[-25:] == "@users.noreply.github.com" - - def get_external_id(self, username: str) -> str: - return f"github:{username}" - - def get_idp_external_id(self, integration: RpcIntegration, host: str | None = None) -> str: - return options.get("github-app.id") - def should_ignore_commit(self, commit: Mapping[str, Any]) -> bool: return GitHubRepositoryProvider.should_ignore_commit(commit["message"]) @@ -317,11 +308,12 @@ def _handle( self, integration: RpcIntegration, event: Mapping[str, Any], - organization: Organization, - repo: Repository, - host: str | None = None, + **kwargs, ) -> None: authors = {} + if not ((organization := kwargs.get("organization")) and (repo := kwargs.get("repo"))): + raise ValueError("Missing organization and repo") + client = integration.get_installation(organization_id=organization.id).get_client() gh_username_cache: MutableMapping[str, str | None] = {} @@ -373,7 +365,7 @@ def _handle( "identity_ext_id": gh_user["id"], "provider_type": self.provider, "provider_ext_id": self.get_idp_external_id( - integration, host + integration, kwargs.get("host") ), } ) @@ -474,29 +466,18 @@ def _handle( repo.save() -class PullRequestEventWebhook(Webhook): +class PullRequestEventWebhook(GitHubWebhook): """https://developer.github.com/v3/activity/events/types/#pullrequestevent""" @property def event_type(self) -> IntegrationWebhookEventType: return IntegrationWebhookEventType.PULL_REQUEST - def 
is_anonymous_email(self, email: str) -> bool: - return email[-25:] == "@users.noreply.github.com" - - def get_external_id(self, username: str) -> str: - return f"github:{username}" - - def get_idp_external_id(self, integration: RpcIntegration, host: str | None = None) -> str: - return options.get("github-app.id") - def _handle( self, integration: RpcIntegration, event: Mapping[str, Any], - organization: Organization, - repo: Repository, - host: str | None = None, + **kwargs, ) -> None: pull_request = event["pull_request"] number = pull_request["number"] @@ -522,6 +503,10 @@ def _handle( merge_commit_sha = pull_request["merge_commit_sha"] if pull_request["merged"] else None author_email = "{}@localhost".format(user["login"][:65]) + + if not ((organization := kwargs.get("organization")) and (repo := kwargs.get("repo"))): + raise ValueError("Missing organization and repo") + try: commit_author = CommitAuthor.objects.get( external_id=self.get_external_id(user["login"]), organization_id=organization.id @@ -533,7 +518,7 @@ def _handle( filter={ "identity_ext_id": user["id"], "provider_type": self.provider, - "provider_ext_id": self.get_idp_external_id(integration, host), + "provider_ext_id": self.get_idp_external_id(integration, kwargs.get("host")), } ) if identity is not None: @@ -612,13 +597,13 @@ class GitHubIntegrationsWebhookEndpoint(Endpoint): "POST": ApiPublishStatus.PRIVATE, } - _handlers: dict[str, type[Webhook] | type[InstallationEventWebhook]] = { + _handlers: dict[str, type[GitHubWebhook]] = { "push": PushEventWebhook, "pull_request": PullRequestEventWebhook, "installation": InstallationEventWebhook, } - def get_handler(self, event_type: str) -> type[Webhook] | type[InstallationEventWebhook] | None: + def get_handler(self, event_type: str) -> type[GitHubWebhook] | None: return self._handlers.get(event_type) def is_valid_signature(self, method: str, body: bytes, secret: str, signature: str) -> bool: @@ -699,7 +684,7 @@ def handle(self, request: HttpRequest) -> HttpResponse: with IntegrationWebhookEvent( interaction_type=event_handler.event_type, domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT, - provider_key="github", + provider_key=event_handler.provider, ).capture(): event_handler(event) return HttpResponse(status=204) diff --git a/src/sentry/integrations/github_enterprise/webhook.py b/src/sentry/integrations/github_enterprise/webhook.py index 230ab9e069434f..8550214d531ff7 100644 --- a/src/sentry/integrations/github_enterprise/webhook.py +++ b/src/sentry/integrations/github_enterprise/webhook.py @@ -18,10 +18,10 @@ from sentry.constants import ObjectStatus from sentry.integrations.base import IntegrationDomain from sentry.integrations.github.webhook import ( + GitHubWebhook, InstallationEventWebhook, PullRequestEventWebhook, PushEventWebhook, - Webhook, get_github_external_id, ) from sentry.integrations.utils.metrics import IntegrationWebhookEvent @@ -29,8 +29,6 @@ from sentry.utils import metrics from sentry.utils.sdk import Scope -from .repository import GitHubEnterpriseRepositoryProvider - logger = logging.getLogger("sentry.webhooks") from sentry.api.base import Endpoint, region_silo_endpoint from sentry.integrations.services.integration import integration_service @@ -89,16 +87,10 @@ def get_installation_metadata(event, host): return integration.metadata["installation"] -class GitHubEnterpriseInstallationEventWebhook(InstallationEventWebhook): - provider = "github_enterprise" - - -class GitHubEnterprisePushEventWebhook(PushEventWebhook): - provider = "github_enterprise" - - # 
https://developer.github.com/v3/activity/events/types/#pushevent - def is_anonymous_email(self, email: str) -> bool: - return email[-25:] == "@users.noreply.github.com" +class GitHubEnterpriseWebhook: + @property + def provider(self) -> str: + return "github_enterprise" def get_external_id(self, username: str) -> str: return f"github_enterprise:{username}" @@ -106,29 +98,24 @@ def get_external_id(self, username: str) -> str: def get_idp_external_id(self, integration: RpcIntegration, host: str | None = None) -> str: return "{}:{}".format(host, integration.metadata["installation"]["id"]) - def should_ignore_commit(self, commit): - return GitHubEnterpriseRepositoryProvider.should_ignore_commit(commit["message"]) +class GitHubEnterpriseInstallationEventWebhook(GitHubEnterpriseWebhook, InstallationEventWebhook): + pass -class GitHubEnterprisePullRequestEventWebhook(PullRequestEventWebhook): - provider = "github_enterprise" - # https://developer.github.com/v3/activity/events/types/#pullrequestevent - def is_anonymous_email(self, email: str) -> bool: - return email[-25:] == "@users.noreply.github.com" +class GitHubEnterprisePushEventWebhook(GitHubEnterpriseWebhook, PushEventWebhook): + pass - def get_external_id(self, username: str) -> str: - return f"github_enterprise:{username}" - def get_idp_external_id(self, integration: RpcIntegration, host: str | None = None) -> str: - return "{}:{}".format(host, integration.metadata["installation"]["id"]) +class GitHubEnterprisePullRequestEventWebhook(GitHubEnterpriseWebhook, PullRequestEventWebhook): + pass class GitHubEnterpriseWebhookBase(Endpoint): authentication_classes = () permission_classes = () - _handlers: dict[str, type[InstallationEventWebhook] | type[Webhook]] = {} + _handlers: dict[str, type[GitHubWebhook]] = {} # https://developer.github.com/webhooks/ def get_handler(self, event_type): @@ -163,7 +150,7 @@ def get_secret(self, event, host): else: return None - def handle(self, request: HttpRequest) -> HttpResponse: + def _handle(self, request: HttpRequest) -> HttpResponse: clear_tags_and_context() scope = Scope.get_isolation_scope() @@ -301,9 +288,9 @@ def handle(self, request: HttpRequest) -> HttpResponse: with IntegrationWebhookEvent( interaction_type=event_handler.event_type, domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT, - provider_key="github-enterprise", + provider_key=event_handler.provider, ).capture(): - event_handler(event, host) + event_handler(event, host=host) return HttpResponse(status=204) @@ -329,4 +316,4 @@ def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: @method_decorator(csrf_exempt) def post(self, request: HttpRequest) -> HttpResponse: - return self.handle(request) + return self._handle(request) diff --git a/src/sentry/integrations/gitlab/webhooks.py b/src/sentry/integrations/gitlab/webhooks.py index 6d2f3cfd0f5c5f..0a0123c842f5f0 100644 --- a/src/sentry/integrations/gitlab/webhooks.py +++ b/src/sentry/integrations/gitlab/webhooks.py @@ -1,7 +1,7 @@ from __future__ import annotations import logging -from abc import ABC, abstractmethod +from abc import ABC from collections.abc import Mapping from datetime import timezone from typing import Any @@ -20,6 +20,7 @@ from sentry.integrations.base import IntegrationDomain from sentry.integrations.services.integration import integration_service from sentry.integrations.services.integration.model import RpcIntegration +from sentry.integrations.source_code_management.webhook import SCMWebhook from sentry.integrations.utils.metrics import 
IntegrationWebhookEvent, IntegrationWebhookEventType from sentry.integrations.utils.scope import clear_tags_and_context from sentry.models.commit import Commit @@ -36,17 +37,38 @@ GITHUB_WEBHOOK_SECRET_INVALID_ERROR = """Gitlab's webhook secret does not match. Refresh token (or re-install the integration) by following this https://docs.sentry.io/organization/integrations/integration-platform/public-integration/#refreshing-tokens.""" -class Webhook(ABC): +def get_gitlab_external_id(request, extra) -> tuple[str, str] | HttpResponse: + token = "" + try: + # Munge the token to extract the integration external_id. + # gitlab hook payloads don't give us enough unique context + # to find data on our side so we embed one in the token. + token = request.META["HTTP_X_GITLAB_TOKEN"] + # e.g. "example.gitlab.com:group-x:webhook_secret_from_sentry_integration_table" + instance, group_path, secret = token.split(":") + external_id = f"{instance}:{group_path}" + return (external_id, secret) + except KeyError: + logger.info("gitlab.webhook.missing-gitlab-token") + extra["reason"] = "The customer needs to set a Secret Token in their webhook." + logger.exception(extra["reason"]) + return HttpResponse(status=400, reason=extra["reason"]) + except ValueError: + logger.info("gitlab.webhook.malformed-gitlab-token", extra=extra) + extra["reason"] = "The customer's Secret Token is malformed." + logger.exception(extra["reason"]) + return HttpResponse(status=400, reason=extra["reason"]) + except Exception: + logger.info("gitlab.webhook.invalid-token", extra=extra) + extra["reason"] = "Generic catch-all error." + logger.exception(extra["reason"]) + return HttpResponse(status=400, reason=extra["reason"]) + + +class GitlabWebhook(SCMWebhook, ABC): @property - @abstractmethod - def event_type(self) -> IntegrationWebhookEventType: - raise NotImplementedError - - @abstractmethod - def __call__( - self, integration: RpcIntegration, organization: RpcOrganization, event: Mapping[str, Any] - ): - raise NotImplementedError + def provider(self) -> str: + return "gitlab" def get_repo( self, integration: RpcIntegration, organization: RpcOrganization, event: Mapping[str, Any] @@ -94,7 +116,7 @@ def update_repo_data(self, repo: Repository, event: Mapping[str, Any]): ) -class MergeEventWebhook(Webhook): +class MergeEventWebhook(GitlabWebhook): """ Handle Merge Request Hook @@ -105,9 +127,13 @@ class MergeEventWebhook(Webhook): def event_type(self) -> IntegrationWebhookEventType: return IntegrationWebhookEventType.PULL_REQUEST - def __call__( - self, integration: RpcIntegration, organization: RpcOrganization, event: Mapping[str, Any] - ): + def __call__(self, event: Mapping[str, Any], **kwargs): + if not ( + (organization := kwargs.get("organization")) + and (integration := kwargs.get("integration")) + ): + raise ValueError("Organization and integration must be provided") + repo = self.get_repo(integration, organization, event) if repo is None: return @@ -162,7 +188,7 @@ def __call__( pass -class PushEventWebhook(Webhook): +class PushEventWebhook(GitlabWebhook): """ Handle push hook @@ -173,9 +199,13 @@ class PushEventWebhook(Webhook): def event_type(self) -> IntegrationWebhookEventType: return IntegrationWebhookEventType.PUSH - def __call__( - self, integration: RpcIntegration, organization: RpcOrganization, event: Mapping[str, Any] - ): + def __call__(self, event: Mapping[str, Any], **kwargs): + if not ( + (organization := kwargs.get("organization")) + and (integration := kwargs.get("integration")) + ): + raise 
ValueError("Organization and integration must be provided") + repo = self.get_repo(integration, organization, event) if repo is None: return @@ -222,37 +252,8 @@ def __call__( pass -class GitlabWebhookMixin: - def _get_external_id(self, request, extra) -> tuple[str, str] | HttpResponse: - token = "" - try: - # Munge the token to extract the integration external_id. - # gitlab hook payloads don't give us enough unique context - # to find data on our side so we embed one in the token. - token = request.META["HTTP_X_GITLAB_TOKEN"] - # e.g. "example.gitlab.com:group-x:webhook_secret_from_sentry_integration_table" - instance, group_path, secret = token.split(":") - external_id = f"{instance}:{group_path}" - return (external_id, secret) - except KeyError: - logger.info("gitlab.webhook.missing-gitlab-token") - extra["reason"] = "The customer needs to set a Secret Token in their webhook." - logger.exception(extra["reason"]) - return HttpResponse(status=400, reason=extra["reason"]) - except ValueError: - logger.info("gitlab.webhook.malformed-gitlab-token", extra=extra) - extra["reason"] = "The customer's Secret Token is malformed." - logger.exception(extra["reason"]) - return HttpResponse(status=400, reason=extra["reason"]) - except Exception: - logger.info("gitlab.webhook.invalid-token", extra=extra) - extra["reason"] = "Generic catch-all error." - logger.exception(extra["reason"]) - return HttpResponse(status=400, reason=extra["reason"]) - - @region_silo_endpoint -class GitlabWebhookEndpoint(Endpoint, GitlabWebhookMixin): +class GitlabWebhookEndpoint(Endpoint): owner = ApiOwner.INTEGRATIONS publish_status = { "POST": ApiPublishStatus.PRIVATE, @@ -261,7 +262,7 @@ class GitlabWebhookEndpoint(Endpoint, GitlabWebhookMixin): permission_classes = () provider = "gitlab" - _handlers: dict[str, type[Webhook]] = { + _handlers: dict[str, type[GitlabWebhook]] = { "Push Hook": PushEventWebhook, "Merge Request Hook": MergeEventWebhook, } @@ -282,7 +283,7 @@ def post(self, request: HttpRequest) -> HttpResponse: # AppPlatformEvents also hit this API "event-type": request.META.get("HTTP_X_GITLAB_EVENT"), } - result = self._get_external_id(request=request, extra=extra) + result = get_gitlab_external_id(request=request, extra=extra) if isinstance(result, HttpResponse): return result (external_id, secret) = result @@ -351,8 +352,8 @@ def post(self, request: HttpRequest) -> HttpResponse: with IntegrationWebhookEvent( interaction_type=event_handler.event_type, domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT, - provider_key="gitlab", + provider_key=event_handler.provider, ).capture(): - event_handler(integration, organization, event) + event_handler(event, integration=integration, organization=organization) return HttpResponse(status=204) diff --git a/src/sentry/integrations/jira/actions/form.py b/src/sentry/integrations/jira/actions/form.py index 7c5d1f7acfe2b9..22013a1f9872d0 100644 --- a/src/sentry/integrations/jira/actions/form.py +++ b/src/sentry/integrations/jira/actions/form.py @@ -14,6 +14,8 @@ class JiraNotifyServiceForm(IntegrationNotifyServiceForm): def clean(self) -> dict[str, Any] | None: cleaned_data = super().clean() + if cleaned_data is None: + return None integration_id = cleaned_data.get("integration") integration = integration_service.get_integration( diff --git a/src/sentry/integrations/messaging/metrics.py b/src/sentry/integrations/messaging/metrics.py index d2e1da56bb72b0..82baec8eec3653 100644 --- a/src/sentry/integrations/messaging/metrics.py +++ b/src/sentry/integrations/messaging/metrics.py @@ 
-46,6 +46,9 @@ class MessagingInteractionType(StrEnum): SEND_INCIDENT_ALERT_NOTIFICATION = "SEND_INCIDENT_ALERT_NOTIFICATION" SEND_ISSUE_ALERT_NOTIFICATION = "SEND_ISSUE_ALERT_NOTIFICATION" + SEND_ACTIVITY_NOTIFICATION = "SEND_ACTIVITY_NOTIFICATION" + SEND_GENERIC_NOTIFICATION = "SEND_GENERIC_NOTIFICATION" + @dataclass class MessagingInteractionEvent(IntegrationEventLifecycleMetric): @@ -84,7 +87,6 @@ class MessageCommandHaltReason(StrEnum): # Team Linking LINK_FROM_CHANNEL = "link_from_channel" LINK_USER_FIRST = "link_user_first" - CHANNEL_ALREADY_LINKED = "channel_already_linked" TEAM_NOT_LINKED = "team_not_linked" INSUFFICIENT_ROLE = "insufficient_role" diff --git a/src/sentry/integrations/middleware/hybrid_cloud/parser.py b/src/sentry/integrations/middleware/hybrid_cloud/parser.py index b1740f1e250b87..e1226f7c55e020 100644 --- a/src/sentry/integrations/middleware/hybrid_cloud/parser.py +++ b/src/sentry/integrations/middleware/hybrid_cloud/parser.py @@ -12,6 +12,7 @@ from rest_framework import status from sentry.api.base import ONE_DAY +from sentry.constants import ObjectStatus from sentry.hybridcloud.models.webhookpayload import WebhookPayload from sentry.hybridcloud.outbox.category import WebhookProviderIdentifier from sentry.hybridcloud.services.organization_mapping import organization_mapping_service @@ -366,7 +367,8 @@ def get_organizations_from_integration( logger.info("%s.no_integration", self.provider, extra={"path": self.request.path}) raise Integration.DoesNotExist() organization_integrations = OrganizationIntegration.objects.filter( - integration_id=integration.id + integration_id=integration.id, + status=ObjectStatus.ACTIVE, ) if organization_integrations.count() == 0: diff --git a/src/sentry/integrations/msteams/actions/notification.py b/src/sentry/integrations/msteams/actions/notification.py index 7c3624401ae0df..982c3e7cde7801 100644 --- a/src/sentry/integrations/msteams/actions/notification.py +++ b/src/sentry/integrations/msteams/actions/notification.py @@ -2,12 +2,18 @@ from sentry import features from sentry.eventstore.models import GroupEvent +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.msteams.actions.form import MsTeamsNotifyServiceForm from sentry.integrations.msteams.card_builder.issues import MSTeamsIssueMessageBuilder from sentry.integrations.msteams.client import MsTeamsClient +from sentry.integrations.msteams.spec import MsTeamsMessagingSpec from sentry.integrations.msteams.utils import get_channel_id from sentry.integrations.services.integration import RpcIntegration from sentry.rules.actions import IntegrationEventAction +from sentry.shared_integrations.exceptions import ApiError from sentry.utils import metrics @@ -54,7 +60,15 @@ def send_notification(event, futures): ).build_group_card(notification_uuid=notification_uuid) client = MsTeamsClient(integration) - client.send_card(channel, card) + with MessagingInteractionEvent( + interaction_type=MessagingInteractionType.SEND_ISSUE_ALERT_NOTIFICATION, + spec=MsTeamsMessagingSpec(), + ).capture() as lifecycle: + lifecycle.add_extras({"integration_id": integration.id, "channel": channel}) + try: + client.send_card(channel, card) + except ApiError as e: + lifecycle.record_failure(e) rule = rules[0] if rules else None self.record_notification_sent(event, channel, rule, notification_uuid) diff --git a/src/sentry/integrations/pagerduty/actions/notification.py b/src/sentry/integrations/pagerduty/actions/notification.py 
index 3cff720b0e41dd..54e48290894ae2 100644 --- a/src/sentry/integrations/pagerduty/actions/notification.py +++ b/src/sentry/integrations/pagerduty/actions/notification.py @@ -6,7 +6,6 @@ import sentry_sdk -from sentry import features from sentry.integrations.pagerduty.actions import PagerDutyNotifyServiceForm from sentry.integrations.pagerduty.client import PAGERDUTY_DEFAULT_SEVERITY, PagerdutySeverity from sentry.rules.actions import IntegrationEventAction @@ -18,17 +17,13 @@ class PagerDutyNotifyServiceAction(IntegrationEventAction): id = "sentry.integrations.pagerduty.notify_action.PagerDutyNotifyServiceAction" form_cls = PagerDutyNotifyServiceForm - old_label = "Send a notification to PagerDuty account {account} and service {service}" - new_label = "Send a notification to PagerDuty account {account} and service {service} with {severity} severity" + label = "Send a notification to PagerDuty account {account} and service {service} with {severity} severity" prompt = "Send a PagerDuty notification" provider = "pagerduty" integration_key = "account" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.has_feature_flag = features.has( - "organizations:integrations-custom-alert-priorities", self.project.organization - ) self.form_fields = { "account": { "type": "choice", @@ -46,7 +41,6 @@ def __init__(self, *args, **kwargs): ], }, } - self.__class__.label = self.new_label if self.has_feature_flag else self.old_label def _get_service(self): oi = self.get_organization_integration() diff --git a/src/sentry/integrations/pipeline.py b/src/sentry/integrations/pipeline.py index 38d15d5864bb52..734210c82606de 100644 --- a/src/sentry/integrations/pipeline.py +++ b/src/sentry/integrations/pipeline.py @@ -15,6 +15,7 @@ from sentry.integrations.models.organization_integration import OrganizationIntegration from sentry.models.organizationmapping import OrganizationMapping from sentry.organizations.absolute_url import generate_organization_url +from sentry.organizations.services.organization import organization_service from sentry.pipeline import Pipeline, PipelineAnalyticsEntry from sentry.shared_integrations.exceptions import IntegrationError, IntegrationProviderError from sentry.silo.base import SiloMode @@ -94,6 +95,29 @@ def initialize(self) -> None: ) def finish_pipeline(self): + org_context = organization_service.get_organization_by_id( + id=self.organization.id, user_id=self.request.user.id + ) + + if ( + org_context + and org_context.member + and "org:integrations" not in org_context.member.scopes + ): + error_message = ( + "You must be an organization owner, manager or admin to install this integration." 
+ ) + logger.info( + "build-integration.permission_error", + extra={ + "error_message": error_message, + "organization_id": self.organization.id, + "user_id": self.request.user.id, + "provider_key": self.provider.key, + }, + ) + return self.error(error_message) + try: data = self.provider.build_integration(self.state.data) except IntegrationError as e: diff --git a/src/sentry/integrations/project_management/metrics.py b/src/sentry/integrations/project_management/metrics.py index 782bb9c2f4021d..09c113140409c4 100644 --- a/src/sentry/integrations/project_management/metrics.py +++ b/src/sentry/integrations/project_management/metrics.py @@ -15,9 +15,7 @@ class ProjectManagementActionType(StrEnum): OUTBOUND_STATUS_SYNC = "outbound_status_sync" INBOUND_STATUS_SYNC = "inbound_status_sync" LINK_EXTERNAL_ISSUE = "link_external_issue" - - def __str__(self): - return self.value.lower() + CREATE_EXTERNAL_ISSUE_VIA_ISSUE_DETAIL = "create_external_issue_via_issue_detail" class ProjectManagementHaltReason(StrEnum): diff --git a/src/sentry/integrations/services/integration/impl.py b/src/sentry/integrations/services/integration/impl.py index cbf01a3c334649..e4a75ee868cbd3 100644 --- a/src/sentry/integrations/services/integration/impl.py +++ b/src/sentry/integrations/services/integration/impl.py @@ -12,11 +12,16 @@ from sentry.constants import SentryAppInstallationStatus from sentry.hybridcloud.rpc.pagination import RpcPaginationArgs, RpcPaginationResult from sentry.incidents.models.incident import INCIDENT_STATUS, IncidentStatus +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.mixins import NotifyBasicMixin from sentry.integrations.models.integration import Integration from sentry.integrations.models.integration_external_project import IntegrationExternalProject from sentry.integrations.models.organization_integration import OrganizationIntegration from sentry.integrations.msteams import MsTeamsClient +from sentry.integrations.msteams.spec import MsTeamsMessagingSpec from sentry.integrations.services.integration import ( IntegrationService, RpcIntegration, @@ -37,7 +42,6 @@ from sentry.sentry_apps.api.serializers.app_platform_event import AppPlatformEvent from sentry.sentry_apps.models.sentry_app import SentryApp from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation -from sentry.shared_integrations.exceptions import ApiError from sentry.utils import json, metrics from sentry.utils.sentry_apps import send_and_save_webhook_request @@ -447,12 +451,20 @@ def send_msteams_incident_alert_notification( ) -> bool: integration = Integration.objects.get(id=integration_id) client = MsTeamsClient(integration) - try: - client.send_card(channel, attachment) - return True - except ApiError: - logger.info("rule.fail.msteams_post", exc_info=True) - return False + + with MessagingInteractionEvent( + interaction_type=MessagingInteractionType.SEND_INCIDENT_ALERT_NOTIFICATION, + spec=MsTeamsMessagingSpec(), + ).capture() as lifecycle: + try: + client.send_card(channel, attachment) + return True + except Exception as e: + # TODO(iamrajjoshi): Remove the logger after we audit lifecycle + logger.info("rule.fail.msteams_post", exc_info=True) + lifecycle.add_extras({"integration_id": integration_id, "channel": channel}) + lifecycle.record_failure(e) + return False def delete_integration(self, *, integration_id: int) -> None: try: diff --git a/src/sentry/integrations/slack/actions/notification.py 
b/src/sentry/integrations/slack/actions/notification.py index da4ba68e335e39..9854b42a8f56d1 100644 --- a/src/sentry/integrations/slack/actions/notification.py +++ b/src/sentry/integrations/slack/actions/notification.py @@ -68,10 +68,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: "channel": {"type": "string", "placeholder": "e.g., #critical, Jane Schmidt"}, "channel_id": {"type": "string", "placeholder": "e.g., CA2FRA079 or UA1J9RTE1"}, "tags": {"type": "string", "placeholder": "e.g., environment,user,my_tag"}, - } - self.form_fields["notes"] = { - "type": "string", - "placeholder": "e.g. @jane, @on-call-team", + "notes": {"type": "string", "placeholder": "e.g., @jane, @on-call-team"}, } self._repository: IssueAlertNotificationMessageRepository = ( diff --git a/src/sentry/integrations/slack/message_builder/base/base.py b/src/sentry/integrations/slack/message_builder/base/base.py index a91f1c473c8495..bc3f756bf2399c 100644 --- a/src/sentry/integrations/slack/message_builder/base/base.py +++ b/src/sentry/integrations/slack/message_builder/base/base.py @@ -40,10 +40,3 @@ def build_fallback_text(self, obj: Group | Event | GroupEvent, project_slug: str title = obj.occurrence.issue_title return f"[{project_slug}] {title}" - - @property - def escape_text(self) -> bool: - """ - Returns True if we need to escape the text in the message. - """ - return False diff --git a/src/sentry/integrations/slack/message_builder/issues.py b/src/sentry/integrations/slack/message_builder/issues.py index 95eaee8aa32224..c64a37013de558 100644 --- a/src/sentry/integrations/slack/message_builder/issues.py +++ b/src/sentry/integrations/slack/message_builder/issues.py @@ -36,12 +36,7 @@ from sentry.integrations.time_utils import get_approx_start_time, time_since from sentry.integrations.types import ExternalProviders from sentry.issues.endpoints.group_details import get_group_global_count -from sentry.issues.grouptype import ( - GroupCategory, - NotificationContextField, - PerformanceP95EndpointRegressionGroupType, - ProfileFunctionRegressionType, -) +from sentry.issues.grouptype import GroupCategory, NotificationContextField from sentry.models.commit import Commit from sentry.models.group import Group, GroupStatus from sentry.models.project import Project @@ -107,11 +102,6 @@ def get_group_users_count(group: Group, rules: list[Rule] | None = None) -> int: } -REGRESSION_PERFORMANCE_ISSUE_TYPES = [ - PerformanceP95EndpointRegressionGroupType, - ProfileFunctionRegressionType, -] - logger = logging.getLogger(__name__) @@ -456,13 +446,6 @@ def __init__( self.skip_fallback = skip_fallback self.notes = notes - @property - def escape_text(self) -> bool: - """ - Returns True if we need to escape the text in the message. 
- """ - return True - def get_title_block( self, event_or_group: Event | GroupEvent | Group, diff --git a/src/sentry/integrations/slack/metrics.py b/src/sentry/integrations/slack/metrics.py index 18a6364c7877c3..fe5c52da59e973 100644 --- a/src/sentry/integrations/slack/metrics.py +++ b/src/sentry/integrations/slack/metrics.py @@ -1,5 +1,13 @@ # metrics constants +from slack_sdk.errors import SlackApiError + +from sentry.integrations.slack.utils.errors import ( + SLACK_SDK_HALT_ERROR_CATEGORIES, + unpack_slack_api_error, +) +from sentry.integrations.utils.metrics import EventLifecycle + SLACK_ISSUE_ALERT_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.issue_alert.success" SLACK_ISSUE_ALERT_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.issue_alert.failure" SLACK_ACTIVITY_THREAD_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.activity_thread.success" @@ -9,28 +17,6 @@ SLACK_NOTIFY_RECIPIENT_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.notify_recipient.success" SLACK_NOTIFY_RECIPIENT_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.notify_recipient.failure" -# Bot commands -SLACK_BOT_COMMAND_LINK_IDENTITY_SUCCESS_DATADOG_METRIC = ( - "sentry.integrations.slack.link_identity_view.success" -) -SLACK_BOT_COMMAND_LINK_IDENTITY_FAILURE_DATADOG_METRIC = ( - "sentry.integrations.slack.link_identity_view.failure" -) -SLACK_BOT_COMMAND_UNLINK_IDENTITY_SUCCESS_DATADOG_METRIC = ( - "sentry.integrations.slack.unlink_identity_view.success" -) -SLACK_BOT_COMMAND_UNLINK_IDENTITY_FAILURE_DATADOG_METRIC = ( - "sentry.integrations.slack.unlink_identity_view.failure" -) -SLACK_BOT_COMMAND_UNLINK_TEAM_SUCCESS_DATADOG_METRIC = ( - "sentry.integrations.slack.unlink_team.success" -) -SLACK_BOT_COMMAND_UNLINK_TEAM_FAILURE_DATADOG_METRIC = ( - "sentry.integrations.slack.unlink_team.failure" -) -SLACK_BOT_COMMAND_LINK_TEAM_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.link_team.success" -SLACK_BOT_COMMAND_LINK_TEAM_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.link_team.failure" - # Webhooks SLACK_WEBHOOK_DM_ENDPOINT_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.dm_endpoint.success" SLACK_WEBHOOK_DM_ENDPOINT_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.dm_endpoint.failure" @@ -79,3 +65,14 @@ # Middleware Parsers SLACK_MIDDLE_PARSERS_SUCCESS_DATADOG_METRIC = "sentry.middleware.integrations.slack.parsers.success" SLACK_MIDDLE_PARSERS_FAILURE_DATADOG_METRIC = "sentry.middleware.integrations.slack.parsers.failure" + + +def record_lifecycle_termination_level(lifecycle: EventLifecycle, error: SlackApiError) -> None: + if ( + (reason := unpack_slack_api_error(error)) + and reason is not None + and reason in SLACK_SDK_HALT_ERROR_CATEGORIES + ): + lifecycle.record_halt(reason.message) + else: + lifecycle.record_failure(error) diff --git a/src/sentry/integrations/slack/requests/base.py b/src/sentry/integrations/slack/requests/base.py index f546bd9ff2f253..2b7bf34ea9f2b8 100644 --- a/src/sentry/integrations/slack/requests/base.py +++ b/src/sentry/integrations/slack/requests/base.py @@ -73,7 +73,6 @@ def validate(self) -> None: """ Ensure everything is present to properly process this request """ - self.request.body self._log_request() self._get_context() self.authorize() diff --git a/src/sentry/integrations/slack/service.py b/src/sentry/integrations/slack/service.py index 9b2dd314ca53f2..df0511ab1510ab 100644 --- a/src/sentry/integrations/slack/service.py +++ b/src/sentry/integrations/slack/service.py @@ -10,6 +10,10 @@ from slack_sdk.errors import SlackApiError from 
sentry.constants import ISSUE_ALERTS_THREAD_DEFAULT +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.models.integration import Integration from sentry.integrations.notifications import get_context from sentry.integrations.repository import get_default_issue_alert_repository @@ -25,8 +29,10 @@ SLACK_ACTIVITY_THREAD_SUCCESS_DATADOG_METRIC, SLACK_NOTIFY_RECIPIENT_FAILURE_DATADOG_METRIC, SLACK_NOTIFY_RECIPIENT_SUCCESS_DATADOG_METRIC, + record_lifecycle_termination_level, ) from sentry.integrations.slack.sdk_client import SlackSdkClient +from sentry.integrations.slack.spec import SlackMessagingSpec from sentry.integrations.slack.threads.activity_notifications import ( AssignedActivityNotification, ExternalIssueCreatedActivityNotification, @@ -182,12 +188,23 @@ def notify_all_threads_for_activity(self, activity: Activity) -> None: slack_client = SlackSdkClient(integration_id=integration.id) # Get all parent notifications, which will have the message identifier to use to reply in a thread - parent_notifications = ( - self._notification_message_repository.get_all_parent_notification_messages_by_filters( + with MessagingInteractionEvent( + interaction_type=MessagingInteractionType.GET_PARENT_NOTIFICATION, + spec=SlackMessagingSpec(), + ).capture() as lifecycle: + lifecycle.add_extras( + { + "activity_id": activity.id, + "group_id": activity.group.id, + "project_id": activity.project.id, + } + ) + parent_notifications = self._notification_message_repository.get_all_parent_notification_messages_by_filters( group_ids=[activity.group.id], project_ids=[activity.project.id], ) - ) + + # We don't wrap this in a lifecycle because _handle_parent_notification is already wrapped in a lifecycle for parent_notification in parent_notifications: try: self._handle_parent_notification( @@ -196,6 +213,7 @@ def notify_all_threads_for_activity(self, activity: Activity) -> None: client=slack_client, ) except Exception as err: + # TODO(iamrajjoshi): We can probably swallow this error once we audit the lifecycle self._logger.info( "failed to send notification", exc_info=err, @@ -254,25 +272,33 @@ def _handle_parent_notification( "rule_action_uuid": parent_notification.rule_action_uuid, } - try: - client.chat_postMessage( - channel=channel_id, - thread_ts=parent_notification.message_identifier, - text=notification_to_send, - blocks=json_blocks, - ) - metrics.incr(SLACK_ACTIVITY_THREAD_SUCCESS_DATADOG_METRIC, sample_rate=1.0) - except SlackApiError as e: - self._logger.info( - "failed to post message to slack", - extra={"error": str(e), "blocks": json_blocks, **extra}, - ) - metrics.incr( - SLACK_ACTIVITY_THREAD_FAILURE_DATADOG_METRIC, - sample_rate=1.0, - tags={"ok": e.response.get("ok", False), "status": e.response.status_code}, - ) - raise + with MessagingInteractionEvent( + interaction_type=MessagingInteractionType.SEND_ACTIVITY_NOTIFICATION, + spec=SlackMessagingSpec(), + ).capture() as lifecycle: + try: + client.chat_postMessage( + channel=channel_id, + thread_ts=parent_notification.message_identifier, + text=notification_to_send, + blocks=json_blocks, + ) + # TODO(iamrajjoshi): Remove this after we validate lifecycle + metrics.incr(SLACK_ACTIVITY_THREAD_SUCCESS_DATADOG_METRIC, sample_rate=1.0) + except SlackApiError as e: + # TODO(iamrajjoshi): Remove this after we validate lifecycle + self._logger.info( + "failed to post message to slack", + extra={"error": str(e), "blocks": json_blocks, **extra}, + ) + metrics.incr( + 
SLACK_ACTIVITY_THREAD_FAILURE_DATADOG_METRIC, + sample_rate=1.0, + tags={"ok": e.response.get("ok", False), "status": e.response.status_code}, + ) + lifecycle.add_extras({"rule_action_uuid": parent_notification.rule_action_uuid}) + record_lifecycle_termination_level(lifecycle, e) + raise def _get_notification_message_to_send(self, activity: Activity) -> str | None: """ @@ -427,21 +453,32 @@ def send_message_to_slack_channel( """Execution of send_notification_as_slack.""" client = SlackSdkClient(integration_id=integration_id) - try: - client.chat_postMessage( - blocks=str(payload.get("blocks", "")), - text=str(payload.get("text", "")), - channel=str(payload.get("channel", "")), - unfurl_links=False, - unfurl_media=False, - callback_id=str(payload.get("callback_id", "")), - ) - metrics.incr(SLACK_NOTIFY_RECIPIENT_SUCCESS_DATADOG_METRIC, sample_rate=1.0) - except SlackApiError as e: - extra = {"error": str(e), **log_params} - self._logger.info(log_error_message, extra=extra) - metrics.incr( - SLACK_NOTIFY_RECIPIENT_FAILURE_DATADOG_METRIC, - sample_rate=1.0, - tags={"ok": e.response.get("ok", False), "status": e.response.status_code}, - ) + with MessagingInteractionEvent( + interaction_type=MessagingInteractionType.SEND_GENERIC_NOTIFICATION, + spec=SlackMessagingSpec(), + ).capture() as lifecycle: + try: + lifecycle.add_extras({"integration_id": integration_id}) + client.chat_postMessage( + blocks=str(payload.get("blocks", "")), + text=str(payload.get("text", "")), + channel=str(payload.get("channel", "")), + unfurl_links=False, + unfurl_media=False, + callback_id=str(payload.get("callback_id", "")), + ) + # TODO(iamrajjoshi): Remove this after we validate lifecycle + metrics.incr(SLACK_NOTIFY_RECIPIENT_SUCCESS_DATADOG_METRIC, sample_rate=1.0) + except SlackApiError as e: + # TODO(iamrajjoshi): Remove this after we validate lifecycle + extra = {"error": str(e), **log_params} + self._logger.info(log_error_message, extra=extra) + metrics.incr( + SLACK_NOTIFY_RECIPIENT_FAILURE_DATADOG_METRIC, + sample_rate=1.0, + tags={"ok": e.response.get("ok", False), "status": e.response.status_code}, + ) + lifecycle.add_extras( + {k: str(v) for k, v in log_params.items() if isinstance(v, (int, str))} + ) + record_lifecycle_termination_level(lifecycle, e) diff --git a/src/sentry/integrations/slack/utils/notifications.py b/src/sentry/integrations/slack/utils/notifications.py index 32a2edebc34058..3923bd21c19632 100644 --- a/src/sentry/integrations/slack/utils/notifications.py +++ b/src/sentry/integrations/slack/utils/notifications.py @@ -31,13 +31,10 @@ SLACK_LINK_IDENTITY_MSG_SUCCESS_DATADOG_METRIC, SLACK_METRIC_ALERT_FAILURE_DATADOG_METRIC, SLACK_METRIC_ALERT_SUCCESS_DATADOG_METRIC, + record_lifecycle_termination_level, ) from sentry.integrations.slack.sdk_client import SlackSdkClient from sentry.integrations.slack.spec import SlackMessagingSpec -from sentry.integrations.slack.utils.errors import ( - SLACK_SDK_HALT_ERROR_CATEGORIES, - unpack_slack_api_error, -) from sentry.models.options.organization_option import OrganizationOption from sentry.utils import metrics @@ -176,14 +173,7 @@ def send_incident_alert_notification( lifecycle.add_extras(log_params) # If the error is a channel not found or archived, we can halt the flow # This means that the channel was deleted or archived after the alert rule was created - if ( - (reason := unpack_slack_api_error(e)) - and reason is not None - and reason in SLACK_SDK_HALT_ERROR_CATEGORIES - ): - lifecycle.record_halt(reason.message) - else: - 
lifecycle.record_failure(e) + record_lifecycle_termination_level(lifecycle, e) else: success = True diff --git a/src/sentry/integrations/slack/views/__init__.py b/src/sentry/integrations/slack/views/__init__.py index 9e0747e9003b25..bd7c1aa6ea9c5c 100644 --- a/src/sentry/integrations/slack/views/__init__.py +++ b/src/sentry/integrations/slack/views/__init__.py @@ -2,23 +2,15 @@ from django.http import HttpRequest, HttpResponse from django.urls import reverse -from django.views.decorators.cache import never_cache as django_never_cache from rest_framework.request import Request from sentry.utils.http import absolute_uri from sentry.utils.signing import sign -from sentry.web.decorators import EndpointFunc from sentry.web.helpers import render_to_response SALT = "sentry-slack-integration" -def never_cache(view_func: EndpointFunc) -> EndpointFunc: """TODO(mgaeta): Remove cast once Django has a typed version.""" - result: EndpointFunc = django_never_cache(view_func) - return result - - def build_linking_url(endpoint: str, **kwargs: Any) -> str: """TODO(mgaeta): Remove cast once sentry/utils/http.py is typed.""" url: str = absolute_uri(reverse(endpoint, kwargs={"signed_params": sign(salt=SALT, **kwargs)})) diff --git a/src/sentry/integrations/slack/webhooks/action.py b/src/sentry/integrations/slack/webhooks/action.py index a4f74148431940..2e595016bee4c7 100644 --- a/src/sentry/integrations/slack/webhooks/action.py +++ b/src/sentry/integrations/slack/webhooks/action.py @@ -118,15 +118,7 @@ def update_group( status_code=403, body="The user does not have access to the organization." ) - return update_groups( - request=request, - group_ids=[group.id], - projects=[group.project], - organization_id=group.organization.id, - search_fn=None, - user=user, - data=data, - ) + return update_groups(request=request, groups=[group], user=user, data=data) def get_rule(slack_request: SlackActionRequest) -> Rule | None: @@ -414,6 +406,7 @@ def _handle_group_actions( ) view = View(**slack_request.data["view"]) + assert view.private_metadata is not None private_metadata = orjson.loads(view.private_metadata) original_tags_from_request = set(private_metadata.get("tags", {})) diff --git a/src/sentry/integrations/slack/webhooks/base.py b/src/sentry/integrations/slack/webhooks/base.py index fba29ed49d3b62..0699aa9e3aab93 100644 --- a/src/sentry/integrations/slack/webhooks/base.py +++ b/src/sentry/integrations/slack/webhooks/base.py @@ -136,7 +136,6 @@ class SlackCommandDispatcher(MessagingIntegrationCommandDispatcher[Response]): @property def TEAM_HALT_MAPPINGS(self) -> dict[str, MessageCommandHaltReason]: from sentry.integrations.slack.webhooks.command import ( - CHANNEL_ALREADY_LINKED_MESSAGE, INSUFFICIENT_ROLE_MESSAGE, LINK_FROM_CHANNEL_MESSAGE, LINK_USER_FIRST_MESSAGE, @@ -147,7 +146,6 @@ def TEAM_HALT_MAPPINGS(self) -> dict[str, MessageCommandHaltReason]: LINK_FROM_CHANNEL_MESSAGE: MessageCommandHaltReason.LINK_FROM_CHANNEL, LINK_USER_FIRST_MESSAGE: MessageCommandHaltReason.LINK_USER_FIRST, INSUFFICIENT_ROLE_MESSAGE: MessageCommandHaltReason.INSUFFICIENT_ROLE, - CHANNEL_ALREADY_LINKED_MESSAGE: MessageCommandHaltReason.CHANNEL_ALREADY_LINKED, TEAM_NOT_LINKED_MESSAGE: MessageCommandHaltReason.TEAM_NOT_LINKED, } @@ -200,7 +198,7 @@ def link_team_handler(self, input: CommandInput) -> IntegrationResponse[Response for message, reason in self.TEAM_HALT_MAPPINGS.items(): if message in str(response.data): return IntegrationResponse( - interaction_result=EventLifecycleOutcome.SUCCESS, +
interaction_result=EventLifecycleOutcome.HALTED, response=response, outcome_reason=str(reason), ) @@ -215,7 +213,7 @@ def unlink_team_handler(self, input: CommandInput) -> IntegrationResponse[Respon for message, reason in self.TEAM_HALT_MAPPINGS.items(): if message in str(response.data): return IntegrationResponse( - interaction_result=EventLifecycleOutcome.SUCCESS, + interaction_result=EventLifecycleOutcome.HALTED, response=response, outcome_reason=str(reason), ) diff --git a/src/sentry/integrations/slack/webhooks/command.py b/src/sentry/integrations/slack/webhooks/command.py index ea0f111566da4d..575d983f72dd7e 100644 --- a/src/sentry/integrations/slack/webhooks/command.py +++ b/src/sentry/integrations/slack/webhooks/command.py @@ -6,7 +6,6 @@ from rest_framework.request import Request from rest_framework.response import Response -from sentry import features from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint @@ -27,7 +26,7 @@ from sentry.models.organizationmember import OrganizationMember from sentry.utils import metrics -_logger = logging.getLogger(__name__) +_logger = logging.getLogger("sentry.integration.slack.bot-commands") from .base import SlackDMEndpoint @@ -35,7 +34,6 @@ "Link your Sentry team to this Slack channel! <{associate_url}|Link your team now> to receive " "notifications of issues in Sentry in Slack." ) -CHANNEL_ALREADY_LINKED_MESSAGE = "This channel already has a team linked to it." LINK_USER_FIRST_MESSAGE = ( "You must first link your identity to Sentry by typing /sentry link. Be aware that you " "must be an admin or higher in your Sentry organization or a team admin to link your team." @@ -86,9 +84,7 @@ def link_team(self, slack_request: SlackDMRequest) -> Response: if slack_request.channel_name == DIRECT_MESSAGE_CHANNEL_NAME: return self.reply(slack_request, LINK_FROM_CHANNEL_MESSAGE) - logger_params = { - "slack_request": slack_request, - } + logger_params = {} identity_user = slack_request.get_identity_user() if not identity_user: @@ -103,17 +99,11 @@ def link_team(self, slack_request: SlackDMRequest) -> Response: has_valid_role = False for organization_membership in organization_memberships: - if not features.has( - "organizations:slack-multiple-team-single-channel-linking", - organization_membership.organization, - ) and is_team_linked_to_channel(organization_membership.organization, slack_request): - return self.reply(slack_request, CHANNEL_ALREADY_LINKED_MESSAGE) - if is_valid_role(organization_membership) or is_team_admin(organization_membership): has_valid_role = True if not has_valid_role: - _logger.info("insufficient-role", extra=logger_params) + _logger.error("insufficient-role", extra=logger_params) metrics.incr( self._METRICS_FAILURE_KEY + ".link_team.insufficient_role", sample_rate=1.0 ) diff --git a/src/sentry/integrations/source_code_management/repository.py b/src/sentry/integrations/source_code_management/repository.py index bf737cdfda0062..3fbb2f746342d9 100644 --- a/src/sentry/integrations/source_code_management/repository.py +++ b/src/sentry/integrations/source_code_management/repository.py @@ -153,7 +153,17 @@ def get_stacktrace_link( If no file was found return `None`, and re-raise for non-"Not Found" errors, like 403 "Account Suspended". 
""" - with self.record_event(SCMIntegrationInteractionType.GET_STACKTRACE_LINK).capture(): + with self.record_event( + SCMIntegrationInteractionType.GET_STACKTRACE_LINK + ).capture() as lifecycle: + lifecycle.add_extras( + { + "filepath": filepath, + "default": default, + "version": version, + "organization_id": repo.organization_id, + } + ) scope = sentry_sdk.Scope.get_isolation_scope() scope.set_tag("stacktrace_link.tried_version", False) if version: @@ -182,7 +192,15 @@ def get_codeowner_file( * filepath - full path of the file i.e. CODEOWNERS, .github/CODEOWNERS, docs/CODEOWNERS * raw - the decoded raw contents of the codeowner file """ - with self.record_event(SCMIntegrationInteractionType.GET_CODEOWNER_FILE).capture(): + with self.record_event( + SCMIntegrationInteractionType.GET_CODEOWNER_FILE + ).capture() as lifecycle: + lifecycle.add_extras( + { + "ref": ref, + "organization_id": repo.organization_id, + } + ) if self.codeowners_locations is None: raise NotImplementedError("Implement self.codeowners_locations to use this method.") diff --git a/src/sentry/integrations/source_code_management/webhook.py b/src/sentry/integrations/source_code_management/webhook.py new file mode 100644 index 00000000000000..bc0eef1ae39cf9 --- /dev/null +++ b/src/sentry/integrations/source_code_management/webhook.py @@ -0,0 +1,26 @@ +from abc import ABC, abstractmethod +from collections.abc import Mapping +from typing import Any + +from sentry.integrations.utils.metrics import IntegrationWebhookEventType +from sentry.models.repository import Repository + + +class SCMWebhook(ABC): + @property + @abstractmethod + def provider(self) -> str: + raise NotImplementedError + + @property + @abstractmethod + def event_type(self) -> IntegrationWebhookEventType: + raise NotImplementedError + + @abstractmethod + def __call__(self, event: Mapping[str, Any], **kwargs) -> None: + raise NotImplementedError + + @abstractmethod + def update_repo_data(self, repo: Repository, event: Mapping[str, Any]) -> None: + raise NotImplementedError diff --git a/src/sentry/integrations/tasks/sync_status_inbound.py b/src/sentry/integrations/tasks/sync_status_inbound.py index 7337a657ced6b1..d60829e5e4234e 100644 --- a/src/sentry/integrations/tasks/sync_status_inbound.py +++ b/src/sentry/integrations/tasks/sync_status_inbound.py @@ -1,5 +1,6 @@ import logging from collections.abc import Iterable, Mapping +from datetime import timedelta from typing import Any from django.db.models import Q @@ -168,6 +169,20 @@ def get_resolutions_and_activity_data_for_groups( return resolutions_by_group_id, activity_type, activity_data +def group_was_recently_resolved(group: Group) -> bool: + """ + Check if the group was resolved in the last 3 minutes + """ + if group.status != GroupStatus.RESOLVED: + return False + + try: + group_resolution = GroupResolution.objects.get(group=group) + return group_resolution.datetime > django_timezone.now() - timedelta(minutes=3) + except GroupResolution.DoesNotExist: + return False + + @instrumented_task( name="sentry.integrations.tasks.sync_status_inbound", queue="integrations", @@ -189,8 +204,8 @@ def sync_status_inbound( raise Integration.DoesNotExist organizations = Organization.objects.filter(id=organization_id) - affected_groups = Group.objects.get_groups_by_external_issue( - integration, organizations, issue_key + affected_groups = list( + Group.objects.get_groups_by_external_issue(integration, organizations, issue_key) ) if not affected_groups: return @@ -213,6 +228,17 @@ def sync_status_inbound( 
"integration_id": integration_id, } if action == ResolveSyncAction.RESOLVE: + # Check if the group was recently resolved and we should skip the request + # Avoid resolving the group in-app and then re-resolving via the integration webhook + # which would override the in-app resolution + resolvable_groups = [] + for group in affected_groups: + if not group_was_recently_resolved(group): + resolvable_groups.append(group) + + if not resolvable_groups: + return + ( resolutions_by_group_id, activity_type, @@ -221,14 +247,14 @@ def sync_status_inbound( affected_groups, config.get("resolution_strategy"), activity_data, organization_id ) Group.objects.update_group_status( - groups=affected_groups, + groups=resolvable_groups, status=GroupStatus.RESOLVED, substatus=None, activity_type=activity_type, activity_data=activity_data, ) # after we update the group, pdate the resolutions - for group in affected_groups: + for group in resolvable_groups: resolution_params = resolutions_by_group_id.get(group.id) if resolution_params: resolution, created = GroupResolution.objects.get_or_create( diff --git a/src/sentry/integrations/utils/sync.py b/src/sentry/integrations/utils/sync.py index 86bb58330748ae..4c15633a978081 100644 --- a/src/sentry/integrations/utils/sync.py +++ b/src/sentry/integrations/utils/sync.py @@ -102,6 +102,7 @@ def sync_group_assignee_inbound( if not assign: for group in affected_groups: + # XXX: Pass an acting user and make the acting_user mandatory GroupAssignee.objects.deassign( group, assignment_source=AssignmentSource.from_integration(integration), diff --git a/src/sentry/interfaces/security.py b/src/sentry/interfaces/security.py index 2ca01d005cdc80..59ba1d6c4f5aba 100644 --- a/src/sentry/interfaces/security.py +++ b/src/sentry/interfaces/security.py @@ -150,7 +150,7 @@ class Csp(SecurityReport): """ A CSP violation report. 
- See also: http://www.w3.org/TR/CSP/#violation-reports + See also: https://www.w3.org/TR/CSP/#violation-events >>> { >>> "document_uri": "http://example.com/", diff --git a/src/sentry/issues/attributes.py b/src/sentry/issues/attributes.py index 26d730a418ecb9..2c8f1e842c8201 100644 --- a/src/sentry/issues/attributes.py +++ b/src/sentry/issues/attributes.py @@ -1,8 +1,8 @@ import dataclasses import logging +from collections.abc import Iterable from datetime import datetime from enum import Enum -from typing import cast import urllib3 from arroyo import Topic as ArroyoTopic @@ -93,7 +93,7 @@ def bulk_send_snapshot_values( if group_ids is None and groups is None: raise ValueError("cannot send snapshot values when group_ids and groups are None") - group_list: list[Group | GroupValues] = cast(list[Group | GroupValues], groups) or [] + group_list: list[Group | GroupValues] = [*(groups or [])] if group_ids: group_list.extend(_bulk_retrieve_group_values(group_ids)) @@ -128,10 +128,6 @@ def produce_snapshot_to_kafka(snapshot: GroupAttributesSnapshot) -> None: ) -def _retrieve_group_values(group_id: int) -> GroupValues: - return _bulk_retrieve_group_values([group_id])[0] - - def _bulk_retrieve_group_values(group_ids: list[int]) -> list[GroupValues]: group_values_map = { group["id"]: group @@ -167,7 +163,7 @@ def _bulk_retrieve_group_values(group_ids: list[int]) -> list[GroupValues]: def _bulk_retrieve_snapshot_values( - group_values_list: list[Group | GroupValues], group_deleted: bool = False + group_values_list: Iterable[Group | GroupValues], group_deleted: bool = False ) -> list[GroupAttributesSnapshot]: group_assignee_map = { ga["group_id"]: ga diff --git a/src/sentry/issues/endpoints/__init__.py b/src/sentry/issues/endpoints/__init__.py index 36c255daf1830a..a1f812f708f8f0 100644 --- a/src/sentry/issues/endpoints/__init__.py +++ b/src/sentry/issues/endpoints/__init__.py @@ -6,7 +6,6 @@ from .group_hashes import GroupHashesEndpoint from .group_notes import GroupNotesEndpoint from .group_notes_details import GroupNotesDetailsEndpoint -from .group_participants import GroupParticipantsEndpoint from .group_similar_issues import GroupSimilarIssuesEndpoint from .group_similar_issues_embeddings import GroupSimilarIssuesEmbeddingsEndpoint from .group_tombstone import GroupTombstoneEndpoint @@ -39,7 +38,6 @@ "GroupHashesEndpoint", "GroupNotesDetailsEndpoint", "GroupNotesEndpoint", - "GroupParticipantsEndpoint", "GroupSimilarIssuesEmbeddingsEndpoint", "GroupSimilarIssuesEndpoint", "GroupTombstoneDetailsEndpoint", diff --git a/src/sentry/issues/endpoints/actionable_items.py b/src/sentry/issues/endpoints/actionable_items.py index 7e860fc7575b86..30087fb93faef8 100644 --- a/src/sentry/issues/endpoints/actionable_items.py +++ b/src/sentry/issues/endpoints/actionable_items.py @@ -1,5 +1,3 @@ -from typing import TypedDict - from rest_framework.exceptions import NotFound from rest_framework.request import Request from rest_framework.response import Response @@ -19,16 +17,6 @@ from sentry.models.project import Project -class ActionableItemResponse(TypedDict): - type: str - message: str - data: dict | None - - -class SourceMapProcessingResponse(TypedDict): - errors: list[ActionableItemResponse] - - @region_silo_endpoint class ActionableItemsEndpoint(ProjectEndpoint): """ diff --git a/src/sentry/issues/endpoints/group_details.py b/src/sentry/issues/endpoints/group_details.py index 40117285948b6c..a7675aa9323e23 100644 --- a/src/sentry/issues/endpoints/group_details.py +++ 
b/src/sentry/issues/endpoints/group_details.py @@ -18,7 +18,7 @@ delete_group_list, get_first_last_release, prep_search, - update_groups, + update_groups_with_search_fn, ) from sentry.api.serializers import GroupSerializer, GroupSerializerSnuba, serialize from sentry.api.serializers.models.group_stream import get_actions, get_available_issue_plugins @@ -83,14 +83,11 @@ class GroupDetailsEndpoint(GroupEndpoint, EnvironmentMixin): }, } - def _get_activity(self, request: Request, group, num): - return Activity.objects.get_activities_for_group(group, num) - - def _get_seen_by(self, request: Request, group): + def _get_seen_by(self, request: Request, group: Group): seen_by = list(GroupSeen.objects.filter(group=group).order_by("-last_seen")) return [seen for seen in serialize(seen_by, request.user) if seen is not None] - def _get_context_plugins(self, request: Request, group): + def _get_context_plugins(self, request: Request, group: Group): project = group.project return serialize( [ @@ -105,7 +102,9 @@ def _get_context_plugins(self, request: Request, group): ) @staticmethod - def __group_hourly_daily_stats(group: Group, environment_ids: Sequence[int]): + def __group_hourly_daily_stats( + group: Group, environment_ids: Sequence[int] + ) -> tuple[list[list[float]], list[list[float]]]: model = get_issue_tsdb_group_model(group.issue_category) now = timezone.now() hourly_stats = tsdb.backend.rollup( @@ -133,7 +132,7 @@ def __group_hourly_daily_stats(group: Group, environment_ids: Sequence[int]): return hourly_stats, daily_stats - def get(self, request: Request, group) -> Response: + def get(self, request: Request, group: Group) -> Response: """ Retrieve an Issue ````````````````` @@ -164,7 +163,7 @@ def get(self, request: Request, group) -> Response: ) # TODO: these probably should be another endpoint - activity = self._get_activity(request, group, num=100) + activity = Activity.objects.get_activities_for_group(group, 100) seen_by = self._get_seen_by(request, group) if "release" not in collapse: @@ -317,7 +316,7 @@ def get(self, request: Request, group) -> Response: ) raise - def put(self, request: Request, group) -> Response: + def put(self, request: Request, group: Group) -> Response: """ Update an Issue ``````````````` @@ -329,6 +328,11 @@ def put(self, request: Request, group) -> Response: :param string status: the new status for the issue. Valid values are ``"resolved"``, ``resolvedInNextRelease``, ``"unresolved"``, and ``"ignored"``. + :param map statusDetails: additional details about the resolution. + Valid values are ``"inRelease"``, ``"inNextRelease"``, + ``"inCommit"``, ``"ignoreDuration"``, ``"ignoreCount"``, + ``"ignoreWindow"``, ``"ignoreUserCount"``, and + ``"ignoreUserWindow"``. :param string assignedTo: the user or team that should be assigned to this issue. 
Can be of the form ``""``, ``"user:"``, ``""``, @@ -351,7 +355,7 @@ def put(self, request: Request, group) -> Response: discard = request.data.get("discard") project = group.project search_fn = functools.partial(prep_search, self, request, project) - response = update_groups( + response = update_groups_with_search_fn( request, [group.id], [project], project.organization_id, search_fn ) # if action was discard, there isn't a group to serialize anymore diff --git a/src/sentry/issues/endpoints/group_event_details.py b/src/sentry/issues/endpoints/group_event_details.py index fffb47f40ea3b9..686b7b20b69c6a 100644 --- a/src/sentry/issues/endpoints/group_event_details.py +++ b/src/sentry/issues/endpoints/group_event_details.py @@ -4,11 +4,11 @@ from collections.abc import Sequence from django.contrib.auth.models import AnonymousUser -from drf_spectacular.types import OpenApiTypes -from drf_spectacular.utils import OpenApiParameter, extend_schema +from drf_spectacular.utils import extend_schema +from rest_framework.exceptions import ParseError from rest_framework.request import Request from rest_framework.response import Response -from snuba_sdk import Condition, Or +from snuba_sdk import Column, Condition, Op, Or from snuba_sdk.legacy import is_condition, parse_condition from sentry import eventstore @@ -19,6 +19,7 @@ from sentry.api.helpers.group_index import parse_and_convert_issue_search_query from sentry.api.helpers.group_index.validators import ValidationError from sentry.api.serializers import EventSerializer, serialize +from sentry.api.utils import get_date_range_from_params from sentry.apidocs.constants import ( RESPONSE_BAD_REQUEST, RESPONSE_FORBIDDEN, @@ -26,9 +27,10 @@ RESPONSE_UNAUTHORIZED, ) from sentry.apidocs.examples.event_examples import EventExamples -from sentry.apidocs.parameters import GlobalParams, IssueParams +from sentry.apidocs.parameters import EventParams, GlobalParams, IssueParams from sentry.apidocs.utils import inline_sentry_response_serializer from sentry.eventstore.models import Event, GroupEvent +from sentry.exceptions import InvalidParams from sentry.issues.endpoints.project_event_details import ( GroupEventDetailsResponse, wrap_event_response, @@ -49,7 +51,7 @@ def issue_search_query_to_conditions( query: str, group: Group, user: User | AnonymousUser, environments: Sequence[Environment] -) -> Sequence[Condition]: +) -> list[Condition]: from sentry.utils.snuba import resolve_column, resolve_conditions dataset = ( @@ -135,14 +137,7 @@ class GroupEventDetailsEndpoint(GroupEndpoint): IssueParams.ISSUES_OR_GROUPS, IssueParams.ISSUE_ID, GlobalParams.ENVIRONMENT, - OpenApiParameter( - name="event_id", - type=OpenApiTypes.STR, - location=OpenApiParameter.PATH, - description="The ID of the event to retrieve, or 'latest', 'oldest', or 'recommended'.", - required=True, - enum=["latest", "oldest", "recommended"], - ), + EventParams.EVENT_ID_EXTENDED, ], responses={ 200: inline_sentry_response_serializer( @@ -159,56 +154,75 @@ def get(self, request: Request, group: Group, event_id: str) -> Response: """ Retrieves the details of an issue event. 
""" - environments = [e for e in get_environments(request, group.project.organization)] + organization = group.project.organization + environments = [e for e in get_environments(request, organization)] environment_names = [e.name for e in environments] + try: + start, end = get_date_range_from_params(request.GET, optional=True) + except InvalidParams: + raise ParseError(detail="Invalid date range") + + query = request.GET.get("query") + try: + conditions: list[Condition] = ( + issue_search_query_to_conditions(query, group, request.user, environments) + if query + else [] + ) + except ValidationError: + raise ParseError(detail="Invalid event query") + except Exception: + logging.exception( + "group_event_details.parse_query", + extra={"query": query, "group": group.id, "organization": organization.id}, + ) + raise ParseError(detail="Unable to parse query") + + if environments: + conditions.append(Condition(Column("environment"), Op.IN, environment_names)) + + metric = "api.endpoints.group_event_details.get" + error_response = {"detail": "Unable to apply query. Change or remove it and try again."} + + event: Event | GroupEvent | None = None + if event_id == "latest": - with metrics.timer("api.endpoints.group_event_details.get", tags={"type": "latest"}): - event: Event | GroupEvent | None = group.get_latest_event_for_environments( - environment_names - ) + with metrics.timer(metric, tags={"type": "latest", "query": bool(query)}): + try: + event = group.get_latest_event(conditions=conditions, start=start, end=end) + except ValueError: + return Response(error_response, status=400) + elif event_id == "oldest": - with metrics.timer("api.endpoints.group_event_details.get", tags={"type": "oldest"}): - event = group.get_oldest_event_for_environments(environment_names) + with metrics.timer(metric, tags={"type": "oldest", "query": bool(query)}): + try: + event = group.get_oldest_event(conditions=conditions, start=start, end=end) + except ValueError: + return Response(error_response, status=400) + elif event_id == "recommended": - query = request.GET.get("query") - if query: - with metrics.timer( - "api.endpoints.group_event_details.get", - tags={"type": "helpful", "query": True}, - ): - try: - conditions = issue_search_query_to_conditions( - query, group, request.user, environments - ) - event = group.get_recommended_event_for_environments( - environments, conditions - ) - except ValidationError: - return Response(status=400) - except Exception: - logging.exception( - "group_event_details:get_helpful", - ) - return Response(status=500) - else: - with metrics.timer( - "api.endpoints.group_event_details.get", - tags={"type": "helpful", "query": False}, - ): - event = group.get_recommended_event_for_environments(environments) + with metrics.timer(metric, tags={"type": "helpful", "query": bool(query)}): + try: + event = group.get_recommended_event(conditions=conditions, start=start, end=end) + except ValueError: + return Response(error_response, status=400) + else: - with metrics.timer("api.endpoints.group_event_details.get", tags={"type": "event"}): + with metrics.timer(metric, tags={"type": "event"}): event = eventstore.backend.get_event_by_id( - group.project.id, event_id, group_id=group.id + project_id=group.project.id, event_id=event_id, group_id=group.id ) - # TODO: Remove `for_group` check once performance issues are moved to the issue platform - - if event is not None and hasattr(event, "for_group") and event.group: + if isinstance(event, Event) and event.group: event = 
event.for_group(event.group) if event is None: - return Response({"detail": "Event not found"}, status=404) + error_text = ( + "Event not found. The event ID may be incorrect, or its age exceeded the retention period." + if event_id not in {"recommended", "latest", "oldest"} + else "No matching event found. Try changing the environments, date range, or query." + ) + return Response({"detail": error_text}, status=404) collapse = request.GET.getlist("collapse", []) if "stacktraceOnly" in collapse: diff --git a/src/sentry/issues/endpoints/group_events.py b/src/sentry/issues/endpoints/group_events.py index dc3e23c1fe8c3d..aed34a05bab13f 100644 --- a/src/sentry/issues/endpoints/group_events.py +++ b/src/sentry/issues/endpoints/group_events.py @@ -5,8 +5,7 @@ from typing import TYPE_CHECKING, Any from django.utils import timezone -from drf_spectacular.types import OpenApiTypes -from drf_spectacular.utils import OpenApiParameter, extend_schema +from drf_spectacular.utils import extend_schema from rest_framework.exceptions import ParseError from rest_framework.request import Request from rest_framework.response import Response @@ -30,7 +29,7 @@ RESPONSE_UNAUTHORIZED, ) from sentry.apidocs.examples.event_examples import EventExamples -from sentry.apidocs.parameters import GlobalParams, IssueParams +from sentry.apidocs.parameters import EventParams, GlobalParams, IssueParams from sentry.apidocs.utils import inline_sentry_response_serializer from sentry.eventstore.models import Event from sentry.exceptions import InvalidParams, InvalidSearchQuery @@ -68,27 +67,9 @@ class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin): GlobalParams.END, GlobalParams.STATS_PERIOD, GlobalParams.ENVIRONMENT, - OpenApiParameter( - name="full", - type=OpenApiTypes.BOOL, - location=OpenApiParameter.QUERY, - description="Specify true to include the full event body, including the stacktrace, in the event payload.", - required=False, - ), - OpenApiParameter( - name="sample", - type=OpenApiTypes.BOOL, - location=OpenApiParameter.QUERY, - description="Return events in pseudo-random order. 
This is deterministic so an identical query will always return the same events in the same order.", - required=False, - ), - OpenApiParameter( - name="query", - location=OpenApiParameter.QUERY, - type=OpenApiTypes.STR, - description="An optional search query for filtering events.", - required=False, - ), + EventParams.FULL_PAYLOAD, + EventParams.SAMPLE, + EventParams.QUERY, ], responses={ 200: inline_sentry_response_serializer( diff --git a/src/sentry/issues/endpoints/group_hashes.py b/src/sentry/issues/endpoints/group_hashes.py index 73c5104ec47d8d..69a095e4824e90 100644 --- a/src/sentry/issues/endpoints/group_hashes.py +++ b/src/sentry/issues/endpoints/group_hashes.py @@ -9,6 +9,7 @@ from sentry.api.bases import GroupEndpoint from sentry.api.paginator import GenericOffsetPaginator from sentry.api.serializers import EventSerializer, SimpleEventSerializer, serialize +from sentry.models.group import Group from sentry.models.grouphash import GroupHash from sentry.tasks.unmerge import unmerge from sentry.utils import metrics @@ -22,7 +23,7 @@ class GroupHashesEndpoint(GroupEndpoint): "GET": ApiPublishStatus.PRIVATE, } - def get(self, request: Request, group) -> Response: + def get(self, request: Request, group: Group) -> Response: """ List an Issue's Hashes `````````````````````` @@ -59,7 +60,7 @@ def get(self, request: Request, group) -> Response: paginator=GenericOffsetPaginator(data_fn=data_fn), ) - def put(self, request: Request, group) -> Response: + def put(self, request: Request, group: Group) -> Response: """ Perform an unmerge by reassigning events with hash values corresponding to the given grouphash ids from being part of the given group to being part of a new group. diff --git a/src/sentry/issues/endpoints/group_notes.py b/src/sentry/issues/endpoints/group_notes.py index 4a76e5b45fe107..8f0b65ecf5edce 100644 --- a/src/sentry/issues/endpoints/group_notes.py +++ b/src/sentry/issues/endpoints/group_notes.py @@ -11,6 +11,7 @@ from sentry.api.serializers import serialize from sentry.api.serializers.rest_framework.group_notes import NoteSerializer from sentry.models.activity import Activity +from sentry.models.group import Group from sentry.models.groupsubscription import GroupSubscription from sentry.notifications.types import GroupSubscriptionReason from sentry.signals import comment_created @@ -25,7 +26,7 @@ class GroupNotesEndpoint(GroupEndpoint): "POST": ApiPublishStatus.UNKNOWN, } - def get(self, request: AuthenticatedHttpRequest, group) -> Response: + def get(self, request: AuthenticatedHttpRequest, group: Group) -> Response: notes = Activity.objects.filter(group=group, type=ActivityType.NOTE.value) return self.paginate( @@ -36,7 +37,7 @@ def get(self, request: AuthenticatedHttpRequest, group) -> Response: on_results=lambda x: serialize(x, request.user), ) - def post(self, request: AuthenticatedHttpRequest, group) -> Response: + def post(self, request: AuthenticatedHttpRequest, group: Group) -> Response: serializer = NoteSerializer( data=request.data, context={ diff --git a/src/sentry/issues/endpoints/group_notes_details.py b/src/sentry/issues/endpoints/group_notes_details.py index 7097802a0f0f05..74192125336738 100644 --- a/src/sentry/issues/endpoints/group_notes_details.py +++ b/src/sentry/issues/endpoints/group_notes_details.py @@ -10,6 +10,7 @@ from sentry.api.serializers import serialize from sentry.api.serializers.rest_framework.group_notes import NoteSerializer from sentry.models.activity import Activity +from sentry.models.group import Group from 
sentry.models.groupsubscription import GroupSubscription from sentry.notifications.types import GroupSubscriptionReason from sentry.signals import comment_deleted, comment_updated @@ -27,7 +28,7 @@ class GroupNotesDetailsEndpoint(GroupEndpoint): # since an ApiKey is bound to the Organization, not # an individual. Not sure if we'd want to allow an ApiKey # to delete/update other users' comments - def delete(self, request: Request, group, note_id) -> Response: + def delete(self, request: Request, group: Group, note_id: str) -> Response: if not request.user.is_authenticated: raise PermissionDenied(detail="Key doesn't have permission to delete Note") @@ -69,7 +70,7 @@ def delete(self, request: Request, group, note_id) -> Response: return Response(status=204) - def put(self, request: Request, group, note_id) -> Response: + def put(self, request: Request, group: Group, note_id: str) -> Response: if not request.user.is_authenticated: raise PermissionDenied(detail="Key doesn't have permission to edit Note") diff --git a/src/sentry/issues/endpoints/group_participants.py b/src/sentry/issues/endpoints/group_participants.py deleted file mode 100644 index 1662be566b0b9b..00000000000000 --- a/src/sentry/issues/endpoints/group_participants.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import annotations - -from rest_framework.request import Request -from rest_framework.response import Response - -from sentry.api.api_publish_status import ApiPublishStatus -from sentry.api.base import region_silo_endpoint -from sentry.api.bases import GroupEndpoint -from sentry.models.group import Group -from sentry.models.groupsubscription import GroupSubscriptionManager -from sentry.users.services.user.service import user_service - - -@region_silo_endpoint -class GroupParticipantsEndpoint(GroupEndpoint): - publish_status = { - "GET": ApiPublishStatus.UNKNOWN, - } - - def get(self, request: Request, group: Group) -> Response: - participants = GroupSubscriptionManager.get_participating_user_ids(group) - - return Response( - user_service.serialize_many(filter={"user_ids": participants}, as_user=request.user) - ) diff --git a/src/sentry/issues/endpoints/group_similar_issues.py b/src/sentry/issues/endpoints/group_similar_issues.py index cc7d8cedf49367..a5fb4f59a40171 100644 --- a/src/sentry/issues/endpoints/group_similar_issues.py +++ b/src/sentry/issues/endpoints/group_similar_issues.py @@ -13,7 +13,7 @@ logger = logging.getLogger(__name__) -def _fix_label(label): +def _fix_label(label) -> str: if isinstance(label, tuple): return ":".join(label) return label @@ -25,7 +25,7 @@ class GroupSimilarIssuesEndpoint(GroupEndpoint): "GET": ApiPublishStatus.PRIVATE, } - def get(self, request: Request, group) -> Response: + def get(self, request: Request, group: Group) -> Response: features = similarity.features limit_s = request.GET.get("limit", None) @@ -54,13 +54,13 @@ def get(self, request: Request, group) -> Response: # We need to preserve the ordering of the Redis results, as that # ordering is directly shown in the UI for group_id, scores in zip(group_ids, group_scores): - group = serialized_groups.get(group_id) - if group is None: + serialized_group = serialized_groups.get(group_id) + if serialized_group is None: # TODO(tkaemming): This should log when we filter out a group that is # unable to be retrieved from the database. (This will soon be # unexpected behavior, but still possible.) 
continue - results.append((group, {_fix_label(k): v for k, v in scores.items()})) + results.append((serialized_group, {_fix_label(k): v for k, v in scores.items()})) return Response(results) diff --git a/src/sentry/issues/endpoints/group_similar_issues_embeddings.py b/src/sentry/issues/endpoints/group_similar_issues_embeddings.py index b51e57e3f8b760..fb46b54b8381da 100644 --- a/src/sentry/issues/endpoints/group_similar_issues_embeddings.py +++ b/src/sentry/issues/endpoints/group_similar_issues_embeddings.py @@ -12,22 +12,23 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.group import GroupEndpoint from sentry.api.serializers import serialize -from sentry.grouping.grouping_info import get_grouping_info +from sentry.grouping.grouping_info import get_grouping_info_from_variants from sentry.models.group import Group from sentry.models.grouphash import GroupHash from sentry.seer.similarity.similar_issues import get_similarity_data_from_seer from sentry.seer.similarity.types import SeerSimilarIssueData, SimilarIssuesEmbeddingsRequest from sentry.seer.similarity.utils import ( + ReferrerOptions, TooManyOnlySystemFramesException, event_content_has_stacktrace, get_stacktrace_string, + has_too_many_contributing_frames, killswitch_enabled, ) from sentry.users.models.user import User from sentry.utils.safe import get_path logger = logging.getLogger(__name__) -MAX_FRAME_COUNT = 50 class FormattedSimilarIssuesEmbeddingsData(TypedDict): @@ -42,24 +43,26 @@ class GroupSimilarIssuesEmbeddingsEndpoint(GroupEndpoint): "GET": ApiPublishStatus.PRIVATE, } - def get_group_hashes_for_group_id(self, group_id: int) -> set[str]: - hashes = GroupHash.objects.filter(group_id=group_id) - return {hash.hash for hash in hashes} - def get_formatted_results( self, similar_issues_data: Sequence[SeerSimilarIssueData], user: User | AnonymousUser, - group_id: int, + group: Group, ) -> Sequence[tuple[Mapping[str, Any], Mapping[str, Any]] | None]: """ Format the responses using to be used by the frontend by changing the field names and changing the cosine distances into cosine similarities. 
""" - hashes = self.get_group_hashes_for_group_id(group_id) group_data = {} + parent_hashes = [ + similar_issue_data.parent_hash for similar_issue_data in similar_issues_data + ] + group_hashes = GroupHash.objects.filter(project_id=group.project_id, hash__in=parent_hashes) + parent_hashes_group_ids = { + group_hash.hash: group_hash.group_id for group_hash in group_hashes + } for similar_issue_data in similar_issues_data: - if similar_issue_data.parent_hash not in hashes: + if parent_hashes_group_ids[similar_issue_data.parent_hash] != group.id: formatted_response: FormattedSimilarIssuesEmbeddingsData = { "exception": round(1 - similar_issue_data.stacktrace_distance, 4), "shouldBeGrouped": "Yes" if similar_issue_data.should_group else "No", @@ -76,21 +79,27 @@ def get_formatted_results( return [(serialized_groups[group_id], group_data[group_id]) for group_id in group_data] def get(self, request: Request, group: Group) -> Response: - if killswitch_enabled(group.project.id): + if killswitch_enabled(group.project.id, ReferrerOptions.SIMILAR_ISSUES_TAB): return Response([]) latest_event = group.get_latest_event() stacktrace_string = "" + if latest_event and event_content_has_stacktrace(latest_event): - grouping_info = get_grouping_info(None, project=group.project, event=latest_event) - try: - stacktrace_string = get_stacktrace_string( - grouping_info, platform=latest_event.platform - ) - except TooManyOnlySystemFramesException: - pass - except Exception: - logger.exception("Unexpected exception in stacktrace string formatting") + variants = latest_event.get_grouping_variants(normalize_stacktraces=True) + + if not has_too_many_contributing_frames( + latest_event, variants, ReferrerOptions.SIMILAR_ISSUES_TAB + ): + grouping_info = get_grouping_info_from_variants(variants) + try: + stacktrace_string = get_stacktrace_string( + grouping_info, platform=latest_event.platform + ) + except TooManyOnlySystemFramesException: + pass + except Exception: + logger.exception("Unexpected exception in stacktrace string formatting") if not stacktrace_string or not latest_event: return Response([]) # No exception, stacktrace or in-app frames, or event @@ -138,6 +147,6 @@ def get(self, request: Request, group: Group) -> Response: if not results: return Response([]) - formatted_results = self.get_formatted_results(results, request.user, group.id) + formatted_results = self.get_formatted_results(results, request.user, group) return Response(formatted_results) diff --git a/src/sentry/issues/endpoints/group_tombstone.py b/src/sentry/issues/endpoints/group_tombstone.py index ac5c2faee71b1d..cf9ad9e38148d3 100644 --- a/src/sentry/issues/endpoints/group_tombstone.py +++ b/src/sentry/issues/endpoints/group_tombstone.py @@ -8,6 +8,7 @@ from sentry.api.paginator import OffsetPaginator from sentry.api.serializers import serialize from sentry.models.grouptombstone import GroupTombstone +from sentry.models.project import Project @region_silo_endpoint @@ -17,7 +18,7 @@ class GroupTombstoneEndpoint(ProjectEndpoint): "GET": ApiPublishStatus.PRIVATE, } - def get(self, request: Request, project) -> Response: + def get(self, request: Request, project: Project) -> Response: """ Retrieve a Project's GroupTombstones ```````````````````````````````````` diff --git a/src/sentry/issues/endpoints/group_tombstone_details.py b/src/sentry/issues/endpoints/group_tombstone_details.py index addd62e9489545..f2aa03517e6698 100644 --- a/src/sentry/issues/endpoints/group_tombstone_details.py +++ b/src/sentry/issues/endpoints/group_tombstone_details.py 
@@ -8,6 +8,7 @@ from sentry.api.exceptions import ResourceDoesNotExist from sentry.models.grouphash import GroupHash from sentry.models.grouptombstone import GroupTombstone +from sentry.models.project import Project @region_silo_endpoint @@ -17,7 +18,7 @@ class GroupTombstoneDetailsEndpoint(ProjectEndpoint): "DELETE": ApiPublishStatus.PRIVATE, } - def delete(self, request: Request, project, tombstone_id) -> Response: + def delete(self, request: Request, project: Project, tombstone_id: str) -> Response: """ Remove a GroupTombstone ``````````````````````` diff --git a/src/sentry/issues/endpoints/organization_eventid.py b/src/sentry/issues/endpoints/organization_eventid.py index 3fddf7346d45c5..929cbd156b90ba 100644 --- a/src/sentry/issues/endpoints/organization_eventid.py +++ b/src/sentry/issues/endpoints/organization_eventid.py @@ -8,6 +8,7 @@ from sentry.api.bases.organization import OrganizationEndpoint from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.serializers import serialize +from sentry.models.organization import Organization from sentry.models.project import Project from sentry.types.ratelimit import RateLimit, RateLimitCategory from sentry.utils.validators import INVALID_ID_DETAILS, is_event_id @@ -28,7 +29,7 @@ class EventIdLookupEndpoint(OrganizationEndpoint): } } - def get(self, request: Request, organization, event_id) -> Response: + def get(self, request: Request, organization: Organization, event_id: str) -> Response: """ Resolve an Event ID `````````````````` diff --git a/src/sentry/issues/endpoints/organization_group_index.py b/src/sentry/issues/endpoints/organization_group_index.py index 3d53a2bc19f4dd..08040c40b1c9ec 100644 --- a/src/sentry/issues/endpoints/organization_group_index.py +++ b/src/sentry/issues/endpoints/organization_group_index.py @@ -23,7 +23,7 @@ delete_groups, get_by_short_id, track_slo_response, - update_groups, + update_groups_with_search_fn, ) from sentry.api.helpers.group_index.validators import ValidationError from sentry.api.paginator import DateTimePaginator, Paginator @@ -36,6 +36,7 @@ from sentry.models.group import QUERY_STATUS_LOOKUP, Group, GroupStatus from sentry.models.groupenvironment import GroupEnvironment from sentry.models.groupinbox import GroupInbox +from sentry.models.organization import Organization from sentry.models.project import Project from sentry.search.events.constants import EQUALITY_OPERATORS from sentry.search.snuba.backend import assigned_or_suggested_filter @@ -59,7 +60,7 @@ def inbox_search( date_to: datetime | None = None, max_hits: int | None = None, actor: Any | None = None, -) -> CursorResult: +) -> CursorResult[Group]: now: datetime = timezone.now() end: datetime | None = None end_params: list[datetime] = [ @@ -151,8 +152,13 @@ class OrganizationGroupIndexEndpoint(OrganizationEndpoint): enforce_rate_limit = True def _search( - self, request: Request, organization, projects, environments, extra_query_kwargs=None - ): + self, + request: Request, + organization: Organization, + projects: Sequence[Project], + environments: Sequence[Environment], + extra_query_kwargs: None | Mapping[str, Any] = None, + ) -> tuple[CursorResult[Group], Mapping[str, Any]]: with start_span(op="_search"): query_kwargs = build_query_params_from_request( request, organization, projects, environments @@ -201,7 +207,7 @@ def use_group_snuba_dataset() -> bool: return result, query_kwargs @track_slo_response("workflow") - def get(self, request: Request, organization) -> Response: + def get(self, request: Request, 
organization: Organization) -> Response: """ List an Organization's Issues ````````````````````````````` @@ -406,7 +412,7 @@ def get(self, request: Request, organization) -> Response: return response @track_slo_response("workflow") - def put(self, request: Request, organization) -> Response: + def put(self, request: Request, organization: Organization) -> Response: """ Bulk Mutate a List of Issues ```````````````````````````` @@ -490,10 +496,10 @@ def put(self, request: Request, organization) -> Response: ) ids = [int(id) for id in request.GET.getlist("id")] - return update_groups(request, ids, projects, organization.id, search_fn) + return update_groups_with_search_fn(request, ids, projects, organization.id, search_fn) @track_slo_response("workflow") - def delete(self, request: Request, organization) -> Response: + def delete(self, request: Request, organization: Organization) -> Response: """ Bulk Remove a List of Issues ```````````````````````````` diff --git a/src/sentry/issues/endpoints/organization_shortid.py b/src/sentry/issues/endpoints/organization_shortid.py index c7c50db957f054..0f08a7e6d867f8 100644 --- a/src/sentry/issues/endpoints/organization_shortid.py +++ b/src/sentry/issues/endpoints/organization_shortid.py @@ -8,6 +8,7 @@ from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.serializers import serialize from sentry.models.group import Group +from sentry.models.organization import Organization @region_silo_endpoint @@ -17,7 +18,7 @@ class ShortIdLookupEndpoint(OrganizationEndpoint): "GET": ApiPublishStatus.UNKNOWN, } - def get(self, request: Request, organization, short_id) -> Response: + def get(self, request: Request, organization: Organization, short_id: str) -> Response: """ Resolve a Short ID `````````````````` diff --git a/src/sentry/issues/endpoints/project_event_details.py b/src/sentry/issues/endpoints/project_event_details.py index b3c1650db7f8f9..e477e4701b41f6 100644 --- a/src/sentry/issues/endpoints/project_event_details.py +++ b/src/sentry/issues/endpoints/project_event_details.py @@ -1,6 +1,7 @@ from datetime import datetime from typing import Any +import sentry_sdk from rest_framework.request import Request from rest_framework.response import Response @@ -12,6 +13,7 @@ from sentry.api.serializers import IssueEventSerializer, serialize from sentry.api.serializers.models.event import IssueEventSerializerResponse from sentry.eventstore.models import Event, GroupEvent +from sentry.models.project import Project class GroupEventDetailsResponse(IssueEventSerializerResponse): @@ -72,7 +74,7 @@ class ProjectEventDetailsEndpoint(ProjectEndpoint): "GET": ApiPublishStatus.EXPERIMENTAL, } - def get(self, request: Request, project, event_id) -> Response: + def get(self, request: Request, project: Project, event_id: str) -> Response: """ Retrieve an Event for a Project ``````````````````````````````` @@ -120,7 +122,7 @@ class EventJsonEndpoint(ProjectEndpoint): "GET": ApiPublishStatus.EXPERIMENTAL, } - def get(self, request: Request, project, event_id) -> Response: + def get(self, request: Request, project: Project, event_id: str) -> Response: event = eventstore.backend.get_event_by_id(project.id, event_id) if not event: @@ -130,4 +132,23 @@ def get(self, request: Request, project, event_id) -> Response: if isinstance(event_dict["datetime"], datetime): event_dict["datetime"] = event_dict["datetime"].isoformat() + try: + scrub_ip_addresses = project.organization.get_option( + "sentry:require_scrub_ip_address", False + ) or 
project.get_option("sentry:scrub_ip_address", False) + + if scrub_ip_addresses: + if "spans" in event_dict: + for span in event_dict["spans"]: + if "sentry_tags" not in span: + continue + if "user.ip" in span["sentry_tags"]: + del span["sentry_tags"]["user.ip"] + if "user" in span["sentry_tags"] and span["sentry_tags"]["user"].startswith( + "ip:" + ): + span["sentry_tags"]["user"] = "ip:[ip]" + except Exception as e: + sentry_sdk.capture_exception(e) + return Response(event_dict, status=200) diff --git a/src/sentry/issues/endpoints/project_events.py b/src/sentry/issues/endpoints/project_events.py index 63e830ee08fde4..66d35d105d0cef 100644 --- a/src/sentry/issues/endpoints/project_events.py +++ b/src/sentry/issues/endpoints/project_events.py @@ -2,7 +2,7 @@ from functools import partial from django.utils import timezone -from drf_spectacular.utils import OpenApiParameter, extend_schema +from drf_spectacular.utils import extend_schema from rest_framework.request import Request from rest_framework.response import Response @@ -15,7 +15,7 @@ from sentry.api.serializers.models.event import SimpleEventSerializerResponse from sentry.apidocs.constants import RESPONSE_FORBIDDEN, RESPONSE_NOT_FOUND, RESPONSE_UNAUTHORIZED from sentry.apidocs.examples.event_examples import EventExamples -from sentry.apidocs.parameters import CursorQueryParam, GlobalParams +from sentry.apidocs.parameters import CursorQueryParam, EventParams, GlobalParams from sentry.apidocs.utils import inline_sentry_response_serializer from sentry.models.project import Project from sentry.snuba.events import Columns @@ -44,22 +44,8 @@ class ProjectEventsEndpoint(ProjectEndpoint): GlobalParams.ORG_ID_OR_SLUG, GlobalParams.PROJECT_ID_OR_SLUG, CursorQueryParam, - OpenApiParameter( - name="full", - description="If this is set to true, the event payload will include the full event body, including the stacktrace. Set to 1 to enable.", - required=False, - type=bool, - location="query", - default=False, - ), - OpenApiParameter( - name="sample", - description="Return events in pseudo-random order. 
This is deterministic so an identical query will always return the same events in the same order.", - required=False, - type=bool, - location="query", - default=False, - ), + EventParams.FULL_PAYLOAD, + EventParams.SAMPLE, ], responses={ 200: inline_sentry_response_serializer( diff --git a/src/sentry/issues/endpoints/project_group_index.py b/src/sentry/issues/endpoints/project_group_index.py index 06a5eb2b3c5b78..8f5c9d956da839 100644 --- a/src/sentry/issues/endpoints/project_group_index.py +++ b/src/sentry/issues/endpoints/project_group_index.py @@ -13,7 +13,7 @@ get_by_short_id, prep_search, track_slo_response, - update_groups, + update_groups_with_search_fn, ) from sentry.api.helpers.group_index.validators import ValidationError from sentry.api.serializers import serialize @@ -21,6 +21,7 @@ from sentry.models.environment import Environment from sentry.models.group import QUERY_STATUS_LOOKUP, Group, GroupStatus from sentry.models.grouphash import GroupHash +from sentry.models.project import Project from sentry.search.events.constants import EQUALITY_OPERATORS from sentry.signals import advanced_search from sentry.types.ratelimit import RateLimit, RateLimitCategory @@ -50,7 +51,7 @@ class ProjectGroupIndexEndpoint(ProjectEndpoint, EnvironmentMixin): } @track_slo_response("workflow") - def get(self, request: Request, project) -> Response: + def get(self, request: Request, project: Project) -> Response: """ List a Project's Issues ``````````````````````` @@ -208,7 +209,7 @@ def get(self, request: Request, project) -> Response: return response @track_slo_response("workflow") - def put(self, request: Request, project) -> Response: + def put(self, request: Request, project: Project) -> Response: """ Bulk Mutate a List of Issues ```````````````````````````` @@ -269,7 +270,7 @@ def put(self, request: Request, project) -> Response: """ search_fn = functools.partial(prep_search, self, request, project) - return update_groups( + return update_groups_with_search_fn( request, request.GET.getlist("id"), [project], @@ -278,7 +279,7 @@ def put(self, request: Request, project) -> Response: ) @track_slo_response("workflow") - def delete(self, request: Request, project) -> Response: + def delete(self, request: Request, project: Project) -> Response: """ Bulk Remove a List of Issues ```````````````````````````` diff --git a/src/sentry/issues/endpoints/project_group_stats.py b/src/sentry/issues/endpoints/project_group_stats.py index 4a1227daf0f93c..3781a999f4a718 100644 --- a/src/sentry/issues/endpoints/project_group_stats.py +++ b/src/sentry/issues/endpoints/project_group_stats.py @@ -9,6 +9,7 @@ from sentry.api.exceptions import ResourceDoesNotExist from sentry.models.environment import Environment from sentry.models.group import Group +from sentry.models.project import Project from sentry.tsdb.base import TSDBModel from sentry.types.ratelimit import RateLimit, RateLimitCategory @@ -28,7 +29,7 @@ class ProjectGroupStatsEndpoint(ProjectEndpoint, EnvironmentMixin, StatsMixin): } } - def get(self, request: Request, project) -> Response: + def get(self, request: Request, project: Project) -> Response: try: environment_id = self._get_environment_id_from_request(request, project.organization_id) except Environment.DoesNotExist: diff --git a/src/sentry/issues/endpoints/source_map_debug.py b/src/sentry/issues/endpoints/source_map_debug.py index fa07de1c795f40..a20d6008c55c02 100644 --- a/src/sentry/issues/endpoints/source_map_debug.py +++ b/src/sentry/issues/endpoints/source_map_debug.py @@ -78,9 +78,6 @@ def 
get(self, request: Request, project: Project, event_id: str) -> Response: debug_response = source_map_debug(project, event_id, exception_idx, frame_idx) issue, data = debug_response.issue, debug_response.data - return self._create_response(issue, data) - - def _create_response(self, issue=None, data=None) -> Response: errors_list = [] if issue: response = SourceMapProcessingIssue(issue, data=data).get_api_context() diff --git a/src/sentry/issues/grouptype.py b/src/sentry/issues/grouptype.py index 7810e9f66ba712..3819bc06de9ad7 100644 --- a/src/sentry/issues/grouptype.py +++ b/src/sentry/issues/grouptype.py @@ -1,11 +1,12 @@ from __future__ import annotations import importlib +import logging from collections import defaultdict from dataclasses import dataclass, field from datetime import timedelta from enum import Enum, StrEnum -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, ClassVar import sentry_sdk from django.apps import apps @@ -21,11 +22,8 @@ if TYPE_CHECKING: from sentry.models.organization import Organization from sentry.models.project import Project - from sentry.users.models.user import User - from sentry.workflow_engine.handlers.detector import DetectorHandler from sentry.workflow_engine.endpoints.validators import BaseGroupTypeDetectorValidator - -import logging + from sentry.workflow_engine.handlers.detector import DetectorHandler logger = logging.getLogger(__name__) @@ -174,6 +172,10 @@ class GroupType: notification_config: NotificationConfig = NotificationConfig() detector_handler: type[DetectorHandler] | None = None detector_validator: type[BaseGroupTypeDetectorValidator] | None = None + # Controls whether status change (i.e. resolved, regressed) workflow notifications are enabled. + # Defaults to true to maintain the default workflow notification behavior as it exists for error group types. + enable_status_change_workflow_notifications: bool = True + detector_config_schema: ClassVar[dict[str, Any]] = {} def __init_subclass__(cls: type[GroupType], **kwargs: Any) -> None: super().__init_subclass__(**kwargs) @@ -189,13 +191,6 @@ def __post_init__(self) -> None: if self.category not in valid_categories: raise ValueError(f"Category must be one of {valid_categories} from GroupCategory.") - @classmethod - def is_visible(cls, organization: Organization, user: User | None = None) -> bool: - if cls.released: - return True - - return features.has(cls.build_visible_feature_name(), organization, actor=user) - @classmethod def allow_ingest(cls, organization: Organization) -> bool: if cls.released: @@ -635,6 +630,7 @@ class MetricIssuePOC(GroupType): default_priority = PriorityLevel.HIGH enable_auto_resolve = False enable_escalation_detection = False + enable_status_change_workflow_notifications = False def should_create_group( @@ -669,7 +665,7 @@ def should_create_group( return False -def import_grouptype(): +def import_grouptype() -> None: """ Ensures that grouptype.py is imported in any apps that implement it. We do this to make sure that all implemented grouptypes are loaded and registered. 
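Note on the new `enable_status_change_workflow_notifications` flag added to `GroupType` above (and set to `False` on `MetricIssuePOC`): a minimal sketch, under stated assumptions, of how a notification path can consult it, mirroring the guard this same diff adds to `Activity.send_notification` further below. The helper name `should_send_activity_notification` and its standalone form are illustrative only; the flag, `get_group_type_by_type_id`, and `STATUS_CHANGE_ACTIVITY_TYPES` come from this change.

    from sentry.issues.grouptype import get_group_type_by_type_id
    from sentry.types.activity import STATUS_CHANGE_ACTIVITY_TYPES

    def should_send_activity_notification(group, activity_type_value: int) -> bool:
        # Status-change activities (resolved, regressed, ...) are skipped for
        # group types that opt out via the new flag; all other activity types
        # keep the existing workflow notification behavior.
        group_type = get_group_type_by_type_id(group.type)
        is_status_change = activity_type_value in {
            activity.value for activity in STATUS_CHANGE_ACTIVITY_TYPES
        }
        if is_status_change and not group_type.enable_status_change_workflow_notifications:
            return False
        return True

In the patched `Activity.send_notification`, this same check runs before `activity.send_activity_notifications.delay(self.id)`, so error group types (which default the flag to `True`) are unaffected.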
diff --git a/src/sentry/issues/highlights.py b/src/sentry/issues/highlights.py index 2e05a5aabddee1..a572832919d4d6 100644 --- a/src/sentry/issues/highlights.py +++ b/src/sentry/issues/highlights.py @@ -7,7 +7,7 @@ from rest_framework import serializers from sentry.models.project import Project -from sentry.utils.platform_categories import BACKEND, FRONTEND, MOBILE +from sentry.utils.platform_categories import MOBILE @extend_schema_field(field=OpenApiTypes.OBJECT) @@ -35,33 +35,22 @@ class HighlightPreset(TypedDict): context: Mapping[str, list[str]] -SENTRY_TAGS = ["handled", "level", "release", "environment"] +DEFAULT_HIGHLIGHT_TAGS = ["handled", "level"] +DEFAULT_HIGHLIGHT_CTX = {"trace": ["trace_id"]} -BACKEND_HIGHLIGHTS: HighlightPreset = { - "tags": SENTRY_TAGS + ["url", "transaction", "status_code"], - "context": {"trace": ["trace_id"], "runtime": ["name", "version"]}, -} -FRONTEND_HIGHLIGHTS: HighlightPreset = { - "tags": SENTRY_TAGS + ["url", "transaction", "browser", "user"], - "context": {"user": ["email"]}, -} MOBILE_HIGHLIGHTS: HighlightPreset = { - "tags": SENTRY_TAGS + ["mobile", "main_thread"], - "context": {"profile": ["profile_id"], "app": ["name"], "device": ["family"]}, + "tags": DEFAULT_HIGHLIGHT_TAGS + ["mobile", "main_thread"], + "context": {**DEFAULT_HIGHLIGHT_CTX, "profile": ["profile_id"], "app": ["name"]}, } FALLBACK_HIGHLIGHTS: HighlightPreset = { - "tags": SENTRY_TAGS, - "context": {"user": ["email"], "trace": ["trace_id"]}, + "tags": DEFAULT_HIGHLIGHT_TAGS + ["url"], + "context": {**DEFAULT_HIGHLIGHT_CTX}, } def get_highlight_preset_for_project(project: Project) -> HighlightPreset: if not project.platform or project.platform == "other": return FALLBACK_HIGHLIGHTS - elif project.platform in FRONTEND: - return FRONTEND_HIGHLIGHTS - elif project.platform in BACKEND: - return BACKEND_HIGHLIGHTS elif project.platform in MOBILE: return MOBILE_HIGHLIGHTS return FALLBACK_HIGHLIGHTS diff --git a/src/sentry/issues/priority.py b/src/sentry/issues/priority.py index c42aef02b43656..99195a307406da 100644 --- a/src/sentry/issues/priority.py +++ b/src/sentry/issues/priority.py @@ -30,10 +30,6 @@ class PriorityChangeReason(Enum): PriorityLevel.LOW: GroupHistoryStatus.PRIORITY_LOW, } -GROUP_HISTORY_STATUS_TO_PRIORITY = { - value: key for key, value in PRIORITY_TO_GROUP_HISTORY_STATUS.items() -} - def update_priority( group: Group, diff --git a/src/sentry/lang/java/utils.py b/src/sentry/lang/java/utils.py index 8fb98d52d6ff08..2d8dd6a0de0aa4 100644 --- a/src/sentry/lang/java/utils.py +++ b/src/sentry/lang/java/utils.py @@ -12,7 +12,6 @@ from sentry.models.debugfile import ProjectDebugFile from sentry.models.project import Project from sentry.stacktraces.processing import StacktraceInfo -from sentry.utils import metrics from sentry.utils.cache import cache_key_for_event from sentry.utils.safe import get_path @@ -133,8 +132,8 @@ def is_jvm_event(data: Any, stacktraces: list[StacktraceInfo]) -> bool: return True # check if there are any JVM or Proguard images - # TODO: Can this actually happen if the event platform - # is not "java"? + # we *do* hit this code path, likely for events that don't have platform + # `"java"` but contain Java view hierarchies. 
images = get_path( data, "debug_meta", @@ -144,8 +143,6 @@ def is_jvm_event(data: Any, stacktraces: list[StacktraceInfo]) -> bool: ) if images: - metrics.incr("process.java.symbolicate.missing_platform", tags={platform: platform}) - return True return False diff --git a/src/sentry/lang/native/processing.py b/src/sentry/lang/native/processing.py index 022b30d4198780..b1cef31a389698 100644 --- a/src/sentry/lang/native/processing.py +++ b/src/sentry/lang/native/processing.py @@ -489,6 +489,8 @@ def emit_apple_symbol_stats(apple_symbol_stats, data): data, "contexts", "os", "raw_description" ) os_version = get_path(data, "contexts", "os", "version") + # See https://develop.sentry.dev/sdk/data-model/event-payloads/contexts/ + is_simulator = get_path(data, "contexts", "device", "simulator", default=False) if os_version: os_version = os_version.split(".", 1)[0] @@ -497,19 +499,25 @@ def emit_apple_symbol_stats(apple_symbol_stats, data): metrics.incr( "apple_symbol_availability_v2", amount=neither, - tags={"availability": "neither", "os_name": os_name, "os_version": os_version}, + tags={ + "availability": "neither", + "os_name": os_name, + "os_version": os_version, + "is_simulator": is_simulator, + }, sample_rate=1.0, ) - # TODO: This seems to just be wrong - # We want mutual exclusion here, since we don't want to double count. E.g., an event has both symbols, so we - # count it both in `both` and `old` or `symx` which makes it impossible for us to know the percentage of events - # that matched both. if both := apple_symbol_stats.get("both"): metrics.incr( "apple_symbol_availability_v2", amount=both, - tags={"availability": "both", "os_name": os_name, "os_version": os_version}, + tags={ + "availability": "both", + "os_name": os_name, + "os_version": os_version, + "is_simulator": is_simulator, + }, sample_rate=1.0, ) @@ -517,7 +525,12 @@ def emit_apple_symbol_stats(apple_symbol_stats, data): metrics.incr( "apple_symbol_availability_v2", amount=old, - tags={"availability": "old", "os_name": os_name, "os_version": os_version}, + tags={ + "availability": "old", + "os_name": os_name, + "os_version": os_version, + "is_simulator": is_simulator, + }, sample_rate=1.0, ) @@ -525,7 +538,12 @@ def emit_apple_symbol_stats(apple_symbol_stats, data): metrics.incr( "apple_symbol_availability_v2", amount=symx, - tags={"availability": "symx", "os_name": os_name, "os_version": os_version}, + tags={ + "availability": "symx", + "os_name": os_name, + "os_version": os_version, + "is_simulator": is_simulator, + }, sample_rate=1.0, ) diff --git a/src/sentry/logging/handlers.py b/src/sentry/logging/handlers.py index 2a05424f00b331..4113bbeb59be15 100644 --- a/src/sentry/logging/handlers.py +++ b/src/sentry/logging/handlers.py @@ -1,5 +1,8 @@ +from __future__ import annotations + import logging import re +from typing import Any from django.utils.timezone import now from structlog import get_logger @@ -79,7 +82,7 @@ def __call__(self, logger, name, event_dict): class StructLogHandler(logging.StreamHandler): - def get_log_kwargs(self, record, logger): + def get_log_kwargs(self, record: logging.LogRecord) -> dict[str, Any]: kwargs = {k: v for k, v in vars(record).items() if k not in throwaways and v is not None} kwargs.update({"level": record.levelno, "event": record.msg}) @@ -96,7 +99,7 @@ def get_log_kwargs(self, record, logger): return kwargs - def emit(self, record, logger=None): + def emit(self, record: logging.LogRecord, logger: logging.Logger | None = None) -> None: # If anyone wants to use the 'extra' kwarg to provide 
context within # structlog, we have to strip all of the default attributes from # a record because the RootLogger will take the 'extra' dictionary @@ -104,12 +107,24 @@ def emit(self, record, logger=None): try: if logger is None: logger = get_logger() - logger.log(**self.get_log_kwargs(record=record, logger=logger)) + logger.log(**self.get_log_kwargs(record=record)) except Exception: if logging.raiseExceptions: raise +class GKEStructLogHandler(StructLogHandler): + def get_log_kwargs(self, record: logging.LogRecord) -> dict[str, Any]: + kwargs = super().get_log_kwargs(record) + kwargs.update( + { + "logging.googleapis.com/labels": {"name": kwargs.get("name", "root")}, + "severity": record.levelname, + } + ) + return kwargs + + class MessageContainsFilter(logging.Filter): """ A logging filter that allows log records where the message diff --git a/src/sentry/middleware/integrations/parsers/gitlab.py b/src/sentry/middleware/integrations/parsers/gitlab.py index 9f48aba2f27ef8..aad4e9a4c48b71 100644 --- a/src/sentry/middleware/integrations/parsers/gitlab.py +++ b/src/sentry/middleware/integrations/parsers/gitlab.py @@ -8,7 +8,7 @@ from django.http.response import HttpResponseBase from sentry.hybridcloud.outbox.category import WebhookProviderIdentifier -from sentry.integrations.gitlab.webhooks import GitlabWebhookEndpoint, GitlabWebhookMixin +from sentry.integrations.gitlab.webhooks import GitlabWebhookEndpoint, get_gitlab_external_id from sentry.integrations.middleware.hybrid_cloud.parser import BaseRequestParser from sentry.integrations.models.integration import Integration from sentry.integrations.models.organization_integration import OrganizationIntegration @@ -20,7 +20,7 @@ logger = logging.getLogger(__name__) -class GitlabRequestParser(BaseRequestParser, GitlabWebhookMixin): +class GitlabRequestParser(BaseRequestParser): provider = EXTERNAL_PROVIDERS[ExternalProviders.GITLAB] webhook_identifier = WebhookProviderIdentifier.GITLAB _integration: Integration | None = None @@ -35,7 +35,7 @@ def _resolve_external_id(self) -> tuple[str, str] | HttpResponseBase: # AppPlatformEvents also hit this API "event-type": self.request.META.get("HTTP_X_GITLAB_EVENT"), } - return super()._get_external_id(request=self.request, extra=extra) + return get_gitlab_external_id(request=self.request, extra=extra) @control_silo_function def get_integration_from_request(self) -> Integration | None: diff --git a/src/sentry/migrations/0803_delete_unused_metricskeyindexer_pt1.py b/src/sentry/migrations/0803_delete_unused_metricskeyindexer_pt1.py new file mode 100644 index 00000000000000..e468f091384ec2 --- /dev/null +++ b/src/sentry/migrations/0803_delete_unused_metricskeyindexer_pt1.py @@ -0,0 +1,29 @@ +# Generated by Django 5.1.4 on 2024-12-19 20:24 + +from sentry.new_migrations.migrations import CheckedMigration +from sentry.new_migrations.monkey.models import SafeDeleteModel +from sentry.new_migrations.monkey.state import DeletionAction + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. 
+ # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0802_remove_grouping_auto_update_option"), + ] + + operations = [ + SafeDeleteModel(name="MetricsKeyIndexer", deletion_action=DeletionAction.MOVE_TO_PENDING), + ] diff --git a/src/sentry/migrations/0804_delete_metrics_key_indexer_pt2.py b/src/sentry/migrations/0804_delete_metrics_key_indexer_pt2.py new file mode 100644 index 00000000000000..c863a8ca7f503e --- /dev/null +++ b/src/sentry/migrations/0804_delete_metrics_key_indexer_pt2.py @@ -0,0 +1,27 @@ +# Generated by Django 5.1.4 on 2024-12-26 14:38 + +from sentry.new_migrations.migrations import CheckedMigration +from sentry.new_migrations.monkey.models import SafeDeleteModel +from sentry.new_migrations.monkey.state import DeletionAction + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0803_delete_unused_metricskeyindexer_pt1"), + ] + + operations = [SafeDeleteModel(name="MetricsKeyIndexer", deletion_action=DeletionAction.DELETE)] diff --git a/src/sentry/models/activity.py b/src/sentry/models/activity.py index 827d244880cf3c..363c6cd7dd7f1a 100644 --- a/src/sentry/models/activity.py +++ b/src/sentry/models/activity.py @@ -23,8 +23,9 @@ ) from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey from sentry.db.models.manager.base import BaseManager +from sentry.issues.grouptype import get_group_type_by_type_id from sentry.tasks import activity -from sentry.types.activity import CHOICES, ActivityType +from sentry.types.activity import CHOICES, STATUS_CHANGE_ACTIVITY_TYPES, ActivityType from sentry.types.group import PriorityLevel if TYPE_CHECKING: @@ -127,10 +128,10 @@ class Meta: __repr__ = sane_repr("project_id", "group_id", "event_id", "user_id", "type", "ident") @staticmethod - def get_version_ident(version): + def get_version_ident(version: str | None) -> str: return (version or "")[:64] - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) from sentry.models.release import Release @@ -142,7 +143,7 @@ def __init__(self, *args, **kwargs): if self.type == ActivityType.ASSIGNED.value: self.data["assignee"] = str(self.data["assignee"]) - def save(self, *args, **kwargs): + def save(self, *args: Any, **kwargs: Any) -> None: created = bool(not self.id) super().save(*args, **kwargs) @@ -177,8 +178,8 @@ def save(self, *args, **kwargs): sender=Group, instance=self.group, created=True, update_fields=["num_comments"] ) - def delete(self, *args, **kwargs): - super().delete(*args, **kwargs) + def delete(self, *args: Any, **kwargs: Any) -> tuple[int, dict[str, int]]: + result = super().delete(*args, **kwargs) # HACK: support Group.num_comments if self.type == ActivityType.NOTE.value and self.group is not None: @@ -190,7 +191,21 @@ def delete(self, *args, **kwargs): sender=Group, instance=self.group, created=True, update_fields=["num_comments"] ) - def send_notification(self): + return result + + def send_notification(self) -> None: + if self.group: + group_type = get_group_type_by_type_id(self.group.type) + has_status_change_notifications = group_type.enable_status_change_workflow_notifications + is_status_change = self.type in { + activity.value for activity in STATUS_CHANGE_ACTIVITY_TYPES + } + + # Skip sending the activity notification if the group type does not + # support status change workflow notifications + if is_status_change and not has_status_change_notifications: + return + activity.send_activity_notifications.delay(self.id) diff --git a/src/sentry/models/apiapplication.py b/src/sentry/models/apiapplication.py index bd44f5d8bf019c..a1912fffbc75e8 100644 --- a/src/sentry/models/apiapplication.py +++ b/src/sentry/models/apiapplication.py @@ -111,16 +111,20 @@ def is_active(self): def is_allowed_response_type(self, value): return value in ("code", "token") - def is_valid_redirect_uri(self, value): + def normalize_url(self, value): parts = urlparse(value) normalized_path = os.path.normpath(parts.path) - if value.endswith("/"): + if normalized_path == ".": + normalized_path = "/" + elif value.endswith("/") and not normalized_path.endswith("/"): normalized_path += "/" - value = 
urlunparse(parts._replace(path=normalized_path)) + return urlunparse(parts._replace(path=normalized_path)) + + def is_valid_redirect_uri(self, value): + value = self.normalize_url(value) - for ruri in self.redirect_uris.split("\n"): - if parts.netloc != urlparse(ruri).netloc: - continue + for redirect_uri in self.redirect_uris.split("\n"): + ruri = self.normalize_url(redirect_uri) if value == ruri: return True if value.startswith(ruri): diff --git a/src/sentry/models/apigrant.py b/src/sentry/models/apigrant.py index f949712c2080f0..d511a611aee4fb 100644 --- a/src/sentry/models/apigrant.py +++ b/src/sentry/models/apigrant.py @@ -78,6 +78,11 @@ class Meta: app_label = "sentry" db_table = "sentry_apigrant" + def __str__(self): + return ( + f"api_grant_id={self.id}, user_id={self.user.id}, application_id={self.application.id}" + ) + def get_scopes(self): if self.scope_list: return self.scope_list diff --git a/src/sentry/models/apikey.py b/src/sentry/models/apikey.py index 612d0ca606822c..a93d6190f28818 100644 --- a/src/sentry/models/apikey.py +++ b/src/sentry/models/apikey.py @@ -56,7 +56,7 @@ def handle_async_replication(self, region_name: str, shard_identifier: int) -> N ) def __str__(self): - return str(self.key) + return f"api_key_id={self.id}, status={self.status}" @classmethod def generate_api_key(cls): diff --git a/src/sentry/models/apitoken.py b/src/sentry/models/apitoken.py index e7c80482ac0335..92522b78368a85 100644 --- a/src/sentry/models/apitoken.py +++ b/src/sentry/models/apitoken.py @@ -137,7 +137,7 @@ class Meta: __repr__ = sane_repr("user_id", "token", "application_id") def __str__(self): - return force_str(self.token) + return f"token_id={force_str(self.id)}" def _set_plaintext_token(self, token: str) -> None: """Set the plaintext token for one-time reading diff --git a/src/sentry/models/group.py b/src/sentry/models/group.py index c5243200ec00ab..ac8348e2f827a3 100644 --- a/src/sentry/models/group.py +++ b/src/sentry/models/group.py @@ -229,24 +229,33 @@ class EventOrdering(Enum): ] -def get_oldest_or_latest_event_for_environments( - ordering: EventOrdering, environments: Sequence[str], group: Group +def get_oldest_or_latest_event( + group: Group, + ordering: EventOrdering, + conditions: Sequence[Condition] | None = None, + start: datetime | None = None, + end: datetime | None = None, ) -> GroupEvent | None: - conditions = [] - - if len(environments) > 0: - conditions.append(["environment", "IN", environments]) if group.issue_category == GroupCategory.ERROR: dataset = Dataset.Events else: dataset = Dataset.IssuePlatform - _filter = eventstore.Filter( - conditions=conditions, project_ids=[group.project_id], group_ids=[group.id] - ) - events = eventstore.backend.get_events( - filter=_filter, + all_conditions = [ + Condition(Column("project_id"), Op.IN, [group.project.id]), + Condition(Column("group_id"), Op.IN, [group.id]), + ] + + if conditions: + all_conditions.extend(conditions) + + events = eventstore.backend.get_events_snql( + organization_id=group.project.organization_id, + group_id=group.id, + start=start, + end=end, + conditions=all_conditions, limit=1, orderby=ordering.value, referrer="Group.get_latest", @@ -260,32 +269,32 @@ def get_oldest_or_latest_event_for_environments( return None -def get_recommended_event_for_environments( - environments: Sequence[Environment], +def get_recommended_event( group: Group, conditions: Sequence[Condition] | None = None, + start: datetime | None = None, + end: datetime | None = None, ) -> GroupEvent | None: if group.issue_category == 
GroupCategory.ERROR: dataset = Dataset.Events else: dataset = Dataset.IssuePlatform - all_conditions = [] - if len(environments) > 0: - all_conditions.append( - Condition(Column("environment"), Op.IN, [e.name for e in environments]) - ) - all_conditions.append(Condition(Column("project_id"), Op.IN, [group.project.id])) - all_conditions.append(Condition(Column("group_id"), Op.IN, [group.id])) + all_conditions = [ + Condition(Column("project_id"), Op.IN, [group.project.id]), + Condition(Column("group_id"), Op.IN, [group.id]), + ] if conditions: all_conditions.extend(conditions) - end = group.last_seen + timedelta(minutes=1) - start = end - timedelta(days=7) + default_end = group.last_seen + timedelta(minutes=1) + default_start = default_end - timedelta(days=7) expired, _ = outside_retention_with_modified_start( - start, end, Organization(group.project.organization_id) + start=start if start else default_start, + end=end if end else default_end, + organization=Organization(group.project.organization_id), ) if expired: @@ -294,8 +303,8 @@ def get_recommended_event_for_environments( events = eventstore.backend.get_events_snql( organization_id=group.project.organization_id, group_id=group.id, - start=start, - end=end, + start=start if start else default_start, + end=end if end else default_end, conditions=all_conditions, limit=1, orderby=EventOrdering.RECOMMENDED.value, @@ -764,46 +773,105 @@ def get_share_id(self): # Otherwise it has not been shared yet. return None - def get_latest_event(self) -> GroupEvent | None: - if not hasattr(self, "_latest_event"): - self._latest_event = self.get_latest_event_for_environments() - - return self._latest_event + def get_latest_event( + self, + conditions: Sequence[Condition] | None = None, + start: datetime | None = None, + end: datetime | None = None, + ) -> GroupEvent | None: + """ + Returns the latest/newest event given the conditions and time range. + If no event is found, returns None. + """ + return get_oldest_or_latest_event( + group=self, + ordering=EventOrdering.LATEST, + conditions=conditions, + start=start, + end=end, + ) def get_latest_event_for_environments( self, environments: Sequence[str] = () ) -> GroupEvent | None: - return get_oldest_or_latest_event_for_environments( - EventOrdering.LATEST, - environments, - self, + """ + Legacy special case of `self.get_latest_event` for environments and no date range. + Kept for compatability, but it's advised to use `self.get_latest_event` directly. + """ + conditions = ( + [Condition(Column("environment"), Op.IN, environments)] if len(environments) > 0 else [] + ) + return self.get_latest_event(conditions=conditions) + + def get_oldest_event( + self, + conditions: Sequence[Condition] | None = None, + start: datetime | None = None, + end: datetime | None = None, + ) -> GroupEvent | None: + """ + Returns the oldest event given the conditions and time range. + If no event is found, returns None. + """ + return get_oldest_or_latest_event( + group=self, + ordering=EventOrdering.OLDEST, + conditions=conditions, + start=start, + end=end, ) def get_oldest_event_for_environments( self, environments: Sequence[str] = () ) -> GroupEvent | None: - return get_oldest_or_latest_event_for_environments( - EventOrdering.OLDEST, - environments, - self, + """ + Legacy special case of `self.get_oldest_event` for environments and no date range. + Kept for compatability, but it's advised to use `self.get_oldest_event` directly. 
+ """ + conditions = ( + [Condition(Column("environment"), Op.IN, environments)] if len(environments) > 0 else [] ) + return self.get_oldest_event(conditions=conditions) - def get_recommended_event_for_environments( + def get_recommended_event( self, - environments: Sequence[Environment] = (), conditions: Sequence[Condition] | None = None, + start: datetime | None = None, + end: datetime | None = None, ) -> GroupEvent | None: - maybe_event = get_recommended_event_for_environments( - environments, - self, - conditions, + """ + Returns a recommended event given the conditions and time range. + If a helpful recommendation is not found, it will fallback to the latest event. + If neither are found, returns None. + """ + maybe_event = get_recommended_event( + group=self, + conditions=conditions, + start=start, + end=end, ) return ( maybe_event if maybe_event - else self.get_latest_event_for_environments([env.name for env in environments]) + else self.get_latest_event(conditions=conditions, start=start, end=end) ) + def get_recommended_event_for_environments( + self, + environments: Sequence[Environment] = (), + conditions: Sequence[Condition] | None = None, + ) -> GroupEvent | None: + """ + Legacy special case of `self.get_recommended_event` for environments and no date range. + Kept for compatability, but it's advised to use `self.get_recommended_event` directly. + """ + all_conditions: list[Condition] = list(conditions) if conditions else [] + if len(environments) > 0: + all_conditions.append( + Condition(Column("environment"), Op.IN, [e.name for e in environments]) + ) + return self.get_recommended_event(conditions=all_conditions) + def get_suspect_commit(self) -> Commit | None: from sentry.models.groupowner import GroupOwner, GroupOwnerType diff --git a/src/sentry/models/groupassignee.py b/src/sentry/models/groupassignee.py index b4479012ecb2a9..ce6dd33739d4df 100644 --- a/src/sentry/models/groupassignee.py +++ b/src/sentry/models/groupassignee.py @@ -134,12 +134,12 @@ def assign( self, group: Group, assigned_to: Team | RpcUser | User, - acting_user: User | None = None, + acting_user: RpcUser | User | None = None, create_only: bool = False, extra: dict[str, str] | None = None, force_autoassign: bool = False, assignment_source: AssignmentSource | None = None, - ): + ) -> dict[str, bool]: from sentry.integrations.utils.sync import sync_group_assignee_outbound from sentry.models.activity import Activity from sentry.models.groupsubscription import GroupSubscription @@ -204,9 +204,8 @@ def assign( def deassign( self, group: Group, + # XXX: Some callers do not pass an acting user but we should make it mandatory acting_user: User | RpcUser | None = None, - assigned_to: Team | RpcUser | None = None, - extra: dict[str, str] | None = None, assignment_source: AssignmentSource | None = None, ) -> None: from sentry.integrations.utils.sync import sync_group_assignee_outbound @@ -272,7 +271,7 @@ class Meta: __repr__ = sane_repr("group_id", "user_id", "team_id") - def save(self, *args, **kwargs): + def save(self, *args: Any, **kwargs: Any) -> None: assert not (self.user_id is not None and self.team_id is not None) and not ( self.user_id is None and self.team_id is None ), "Must have Team or User, not both" diff --git a/src/sentry/models/grouphistory.py b/src/sentry/models/grouphistory.py index 96e04922a032ac..06ddb9b3e972da 100644 --- a/src/sentry/models/grouphistory.py +++ b/src/sentry/models/grouphistory.py @@ -1,8 +1,12 @@ -from typing import TYPE_CHECKING, ClassVar, Optional, Union +from __future__ 
import annotations + +import datetime +from collections.abc import Sequence +from typing import TYPE_CHECKING, ClassVar from django.conf import settings from django.db import models -from django.db.models import Q +from django.db.models import Q, QuerySet from django.utils import timezone from django.utils.translation import gettext_lazy as _ @@ -149,7 +153,7 @@ class GroupHistoryStatus: class GroupHistoryManager(BaseManager["GroupHistory"]): - def filter_to_team(self, team: "Team"): + def filter_to_team(self, team: Team) -> QuerySet[GroupHistory]: from sentry.models.groupassignee import GroupAssignee from sentry.models.project import Project @@ -241,13 +245,13 @@ def owner(self, actor: Actor | None) -> None: self.team_id = actor.id -def get_prev_history(group, status): +def get_prev_history(group: Group, status: int) -> GroupHistory | None: """ Finds the most recent row that is the inverse of this history row, if one exists. """ previous_statuses = PREVIOUS_STATUSES.get(status) if not previous_statuses: - return + return None prev_histories = GroupHistory.objects.filter( group=group, status__in=previous_statuses @@ -256,11 +260,11 @@ def get_prev_history(group, status): def record_group_history_from_activity_type( - group: "Group", + group: Group, activity_type: int, - actor: Union["User", "Team"] | None = None, - release: Optional["Release"] = None, -): + actor: RpcUser | User | Team | None = None, + release: Release | None = None, +) -> GroupHistory | None: """ Writes a `GroupHistory` row for an activity type if there's a relevant `GroupHistoryStatus` that maps to it @@ -275,14 +279,15 @@ def record_group_history_from_activity_type( if status is not None: return record_group_history(group, status, actor, release) + return None def record_group_history( - group: "Group", + group: Group, status: int, - actor: Union["User", "RpcUser", "Team"] | None = None, - release: Optional["Release"] = None, -): + actor: User | RpcUser | Team | None = None, + release: Release | None = None, +) -> GroupHistory: from sentry.models.team import Team from sentry.users.models.user import User from sentry.users.services.user import RpcUser @@ -312,16 +317,16 @@ def record_group_history( def bulk_record_group_history( - groups: list["Group"], + groups: Sequence[Group], status: int, - actor: Union["User", "RpcUser", "Team"] | None = None, - release: Optional["Release"] = None, -): + actor: User | RpcUser | Team | None = None, + release: Release | None = None, +) -> list[GroupHistory]: from sentry.models.team import Team from sentry.users.models.user import User from sentry.users.services.user import RpcUser - def get_prev_history_date(group, status): + def get_prev_history_date(group: Group, status: int) -> datetime.datetime | None: prev_history = get_prev_history(group, status) return prev_history.date_added if prev_history else None diff --git a/src/sentry/models/groupinbox.py b/src/sentry/models/groupinbox.py index a4272cb1ad2cdf..efd5332df2e8bf 100644 --- a/src/sentry/models/groupinbox.py +++ b/src/sentry/models/groupinbox.py @@ -1,5 +1,10 @@ +from __future__ import annotations + import logging +from collections.abc import Iterable +from datetime import datetime from enum import Enum +from typing import TypedDict import jsonschema import sentry_sdk @@ -135,10 +140,24 @@ def bulk_remove_groups_from_inbox(groups, action=None, user=None, referrer=None) pass -def get_inbox_details(group_list): +class InboxReasonDetails(TypedDict): + until: str | None + count: int | None + window: int | None + user_count: int | 
None + user_window: int | None + + +class InboxDetails(TypedDict): + reason: int + reason_details: InboxReasonDetails | None + date_added: datetime + + +def get_inbox_details(group_list: Iterable[Group]) -> dict[int, InboxDetails]: group_ids = [g.id for g in group_list] group_inboxes = GroupInbox.objects.filter(group__in=group_ids) - inbox_stats = { + return { gi.group_id: { "reason": gi.reason, "reason_details": gi.reason_details, @@ -146,5 +165,3 @@ def get_inbox_details(group_list): } for gi in group_inboxes } - - return inbox_stats diff --git a/src/sentry/models/organizationonboardingtask.py b/src/sentry/models/organizationonboardingtask.py index d21977ffb5eb2b..5d1cefecd94f87 100644 --- a/src/sentry/models/organizationonboardingtask.py +++ b/src/sentry/models/organizationonboardingtask.py @@ -179,11 +179,6 @@ class OrganizationOnboardingTask(AbstractOnboardingTask): OnboardingTask.INVITE_MEMBER, OnboardingTask.SECOND_PLATFORM, OnboardingTask.RELEASE_TRACKING, - # TODO(Telemetry Experience): This task is shown conditionally - # according to the platform. - # Check if we can do the same here and mark onboarding as - # complete if platform does not support sourcemaps - OnboardingTask.SOURCEMAPS, OnboardingTask.ALERT_RULE, OnboardingTask.FIRST_TRANSACTION, OnboardingTask.SESSION_REPLAY, @@ -192,6 +187,13 @@ class OrganizationOnboardingTask(AbstractOnboardingTask): ] ) + NEW_REQUIRED_ONBOARDING_TASKS_WITH_SOURCE_MAPS = frozenset( + [ + *NEW_REQUIRED_ONBOARDING_TASKS, + OnboardingTask.SOURCEMAPS, + ] + ) + SKIPPABLE_TASKS = frozenset( [ OnboardingTask.INVITE_MEMBER, diff --git a/src/sentry/models/projectteam.py b/src/sentry/models/projectteam.py index 183ab0c573b145..ad493b2d189ee7 100644 --- a/src/sentry/models/projectteam.py +++ b/src/sentry/models/projectteam.py @@ -16,23 +16,12 @@ class ProjectTeamManager(BaseManager["ProjectTeam"]): def get_for_teams_with_org_cache(self, teams: Sequence["Team"]) -> QuerySet["ProjectTeam"]: - project_teams = ( + return ( self.filter(team__in=teams, project__status=ObjectStatus.ACTIVE) .order_by("project__name", "project__slug") - .select_related("project") + .select_related("project", "project__organization") ) - # TODO(dcramer): we should query in bulk for ones we're missing here - orgs = {i.organization_id: i.organization for i in teams} - - for project_team in project_teams: - if project_team.project.organization_id in orgs: - project_team.project.set_cached_field_value( - "organization", orgs[project_team.project.organization_id] - ) - - return project_teams - @region_silo_model class ProjectTeam(Model): diff --git a/src/sentry/monitors/endpoints/organization_monitor_index.py b/src/sentry/monitors/endpoints/organization_monitor_index.py index 585a37cf26d5c1..62aebb05d17611 100644 --- a/src/sentry/monitors/endpoints/organization_monitor_index.py +++ b/src/sentry/monitors/endpoints/organization_monitor_index.py @@ -11,6 +11,8 @@ When, ) from drf_spectacular.utils import extend_schema +from rest_framework.request import Request +from rest_framework.response import Response from sentry import audit_log, quotas from sentry.api.api_owners import ApiOwner @@ -65,10 +67,6 @@ def map_value_to_constant(constant, value): return getattr(constant, value) -from rest_framework.request import Request -from rest_framework.response import Response - - def flip_sort_direction(sort_field: str) -> str: if sort_field[0] == "-": sort_field = sort_field[1:] @@ -365,8 +363,11 @@ def put(self, request: Request, organization) -> Response: result = 
dict(validator.validated_data) + projects = self.get_projects(request, organization, include_all_accessible=True) + project_ids = [project.id for project in projects] + monitor_guids = result.pop("ids", []) - monitors = Monitor.objects.filter(guid__in=monitor_guids) + monitors = Monitor.objects.filter(guid__in=monitor_guids, project_id__in=project_ids) status = result.get("status") # If enabling monitors, ensure we can assign all before moving forward diff --git a/src/sentry/monitors/endpoints/organization_monitor_index_stats.py b/src/sentry/monitors/endpoints/organization_monitor_index_stats.py index ba08695ccca103..55e9171049cff5 100644 --- a/src/sentry/monitors/endpoints/organization_monitor_index_stats.py +++ b/src/sentry/monitors/endpoints/organization_monitor_index_stats.py @@ -57,6 +57,9 @@ def get(self, request: Request, organization) -> Response: monitor_guids: list[str] = request.GET.getlist("monitor") + projects = self.get_projects(request, organization, include_all_accessible=True) + project_ids = [project.id for project in projects] + # Pre-fetch the monitor-ids and their guid. This is an # optimization to eliminate a join against the monitor table which # significantly inflates the size of the aggregation states. @@ -68,9 +71,11 @@ def get(self, request: Request, organization) -> Response: monitor_map = { id: str(guid) for id, guid in Monitor.objects.filter( - organization_id=organization.id, guid__in=monitor_guids + organization_id=organization.id, project_id__in=project_ids, guid__in=monitor_guids ).values_list("id", "guid") } + # Filter monitors, keeping only ones that the user has access to. + monitor_guids = [guid for guid in monitor_guids if guid in monitor_map.values()] # We only care about the name but we don't want to join to get it. 
So we're maintaining # this map until the very end where we'll map from monitor_environment to environment to diff --git a/src/sentry/notifications/utils/__init__.py b/src/sentry/notifications/utils/__init__.py index 370a6d5259305c..c4de152a58b219 100644 --- a/src/sentry/notifications/utils/__init__.py +++ b/src/sentry/notifications/utils/__init__.py @@ -28,7 +28,6 @@ from sentry.models.commit import Commit from sentry.models.deploy import Deploy from sentry.models.environment import Environment -from sentry.models.eventerror import EventError from sentry.models.group import Group from sentry.models.grouplink import GroupLink from sentry.models.organization import Organization @@ -119,24 +118,6 @@ def get_environment_for_deploy(deploy: Deploy | None) -> str: return "Default Environment" -def summarize_issues( - issues: Iterable[Mapping[str, Mapping[str, Any]]] -) -> Iterable[Mapping[str, str]]: - rv = [] - for issue in issues: - extra_info = None - msg_d = dict(issue["data"]) - msg_d["type"] = issue["type"] - - if "image_path" in issue["data"]: - extra_info = issue["data"]["image_path"].rsplit("/", 1)[-1] - if "image_arch" in issue["data"]: - extra_info = "{} ({})".format(extra_info, issue["data"]["image_arch"]) - - rv.append({"message": EventError(msg_d).message, "extra_info": extra_info}) - return rv - - def get_email_link_extra_params( referrer: str = "alert_email", environment: str | None = None, @@ -275,10 +256,7 @@ def has_integrations(organization: Organization, project: Project) -> bool: def is_alert_rule_integration(provider: IntegrationProvider) -> bool: - return any( - feature == (IntegrationFeatures.ALERT_RULE or IntegrationFeatures.ENTERPRISE_ALERT_RULE) - for feature in provider.features - ) + return IntegrationFeatures.ALERT_RULE in provider.features def has_alert_integration(project: Project) -> bool: diff --git a/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py b/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py index c7510abb4c66c1..5ceac260f92b26 100644 --- a/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py +++ b/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py @@ -10,10 +10,12 @@ OnboardingTaskStatus, OrganizationOnboardingTask, ) +from sentry.models.project import Project from sentry.onboarding_tasks.base import OnboardingTaskBackend from sentry.users.models.user import User from sentry.users.services.user.model import RpcUser from sentry.utils import json +from sentry.utils.platform_categories import SOURCE_MAPS class OrganizationOnboardingTaskBackend(OnboardingTaskBackend[OrganizationOnboardingTask]): @@ -47,7 +49,21 @@ def try_mark_onboarding_complete( organization = Organization.objects.get_from_cache(id=organization_id) if features.has("organizations:quick-start-updates", organization, actor=user): - required_tasks = OrganizationOnboardingTask.NEW_REQUIRED_ONBOARDING_TASKS + + projects = Project.objects.filter(organization=organization) + project_with_source_maps = next( + (p for p in projects if p.platform in SOURCE_MAPS), None + ) + + # If a project supports source maps, we require them to complete the quick start. + # It's possible that the first project doesn't have source maps, + # but the second project (which users are guided to create in the "Add Sentry to other parts of the app" step) may have source maps. 
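A note on the `is_alert_rule_integration()` change in src/sentry/notifications/utils above: in the removed version the parenthesized expression `IntegrationFeatures.ALERT_RULE or IntegrationFeatures.ENTERPRISE_ALERT_RULE` always evaluates to `ALERT_RULE`, because Python's `or` returns its first truthy operand, so the `any(...)` loop was never more than a membership test for `ALERT_RULE`; the replacement makes that explicit. A minimal sketch of the semantics, using a stand-in enum rather than the real `IntegrationFeatures`:

from enum import Enum

class Feature(Enum):
    # Stand-in for IntegrationFeatures; these member values are purely illustrative.
    ALERT_RULE = "alert-rule"
    ENTERPRISE_ALERT_RULE = "enterprise-alert-rule"

# `x or y` returns x when x is truthy, so the old comparison target was always ALERT_RULE.
assert (Feature.ALERT_RULE or Feature.ENTERPRISE_ALERT_RULE) is Feature.ALERT_RULE

provider_features = {Feature.ALERT_RULE}
old_style = any(f == (Feature.ALERT_RULE or Feature.ENTERPRISE_ALERT_RULE) for f in provider_features)
new_style = Feature.ALERT_RULE in provider_features
assert old_style == new_style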
+ required_tasks = ( + OrganizationOnboardingTask.NEW_REQUIRED_ONBOARDING_TASKS_WITH_SOURCE_MAPS + if project_with_source_maps + else OrganizationOnboardingTask.NEW_REQUIRED_ONBOARDING_TASKS + ) + else: required_tasks = OrganizationOnboardingTask.REQUIRED_ONBOARDING_TASKS diff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py index c8cc62186a41ab..e8eeb9bfd10c81 100644 --- a/src/sentry/options/defaults.py +++ b/src/sentry/options/defaults.py @@ -907,13 +907,13 @@ register( "seer.similarity.global-rate-limit", type=Dict, - default={"limit": 20, "window": 1}, + default={"limit": 20, "window": 1}, # window is in seconds flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, ) register( "seer.similarity.per-project-rate-limit", type=Dict, - default={"limit": 5, "window": 1}, + default={"limit": 5, "window": 1}, # window is in seconds flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, ) @@ -1122,11 +1122,6 @@ # Controls the rollout rate in percent (`0.0` to `1.0`) for metric stats. register("relay.metric-stats.rollout-rate", default=0.0, flags=FLAG_AUTOMATOR_MODIFIABLE) -# Controls the sample rate of metrics summaries computation in Relay. -register( - "relay.compute-metrics-summaries.sample-rate", default=0.0, flags=FLAG_AUTOMATOR_MODIFIABLE -) - # Controls whether generic inbound filters are sent to Relay. register("relay.emit-generic-inbound-filters", default=False, flags=FLAG_AUTOMATOR_MODIFIABLE) @@ -1227,6 +1222,12 @@ default=0, flags=FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, ) +register( + "project-abuse-quota.attachment-item-limit", + type=Int, + default=0, + flags=FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, +) register( "project-abuse-quota.session-limit", type=Int, @@ -2115,6 +2116,20 @@ flags=FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, ) +register( + "statistical_detectors.throughput.threshold.transactions", + default=50, + type=Int, + flags=FLAG_AUTOMATOR_MODIFIABLE, +) + +register( + "statistical_detectors.throughput.threshold.functions", + default=25, + type=Int, + flags=FLAG_AUTOMATOR_MODIFIABLE, +) + register( "options_automator_slack_webhook_enabled", default=True, @@ -2293,12 +2308,6 @@ flags=FLAG_AUTOMATOR_MODIFIABLE, ) -register( - "releases_v2.single-tenant", - default=False, - flags=FLAG_AUTOMATOR_MODIFIABLE, -) - # The flag disables the file io on main thread detector register( "performance_issues.file_io_main_thread.disabled", @@ -2504,43 +2513,6 @@ flags=FLAG_AUTOMATOR_MODIFIABLE, ) -# killswitch for profiling ddm functions metrics. 
-# Enable/Disable the ingestion of function metrics -# in the generic metrics platform -register( - "profiling.generic_metrics.functions_ingestion.enabled", - default=False, - type=Bool, - flags=FLAG_AUTOMATOR_MODIFIABLE, -) - -# list of org IDs for which we'll write the function -# metrics to the generic metrics platform -register( - "profiling.generic_metrics.functions_ingestion.allowed_org_ids", - type=Sequence, - default=[], - flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, -) - -# list of project IDs we want to deny ingesting profiles -# function metrics into the generic metrics platform -register( - "profiling.generic_metrics.functions_ingestion.denied_proj_ids", - type=Sequence, - default=[], - flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, -) - -# rollout rate: % of profiles for which we ingest the extracted profile -# functions metrics into the generic metrics platform -register( - "profiling.generic_metrics.functions_ingestion.rollout_rate", - type=Float, - default=0.0, - flags=FLAG_AUTOMATOR_MODIFIABLE, -) - # temporary option for logging canonical key fallback stacktraces register( "canonical-fallback.send-error-to-sentry", @@ -2833,12 +2805,6 @@ flags=FLAG_AUTOMATOR_MODIFIABLE, ) -register( - "celery_split_queue_legacy_mode", - default=["post_process_transactions"], - flags=FLAG_AUTOMATOR_MODIFIABLE, -) - register( "celery_split_queue_rollout", default={"post_process_transactions": 1.0}, @@ -2917,15 +2883,6 @@ flags=FLAG_AUTOMATOR_MODIFIABLE, ) -# list of project IDs for which we'll apply -# stack trace rules to the profiles in case -# there are any rules defined -register( - "profiling.stack_trace_rules.allowed_project_ids", - type=Sequence, - default=[], - flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, -) register( "performance.event-tracker.sample-rate.transactions", default=0.0, diff --git a/src/sentry/plugins/base/configuration.py b/src/sentry/plugins/base/configuration.py deleted file mode 100644 index d41eacbd4777c4..00000000000000 --- a/src/sentry/plugins/base/configuration.py +++ /dev/null @@ -1,136 +0,0 @@ -from django.contrib import messages -from django.http import Http404, HttpResponseRedirect -from django.urls import reverse -from django.utils.safestring import mark_safe -from django.utils.translation import gettext as _ - -from sentry import options -from sentry.api import client -from sentry.api.serializers import serialize -from sentry.models.options.project_option import ProjectOption -from sentry.utils import json -from sentry.web.helpers import render_to_string - - -def react_plugin_config(plugin, project, request): - response = client.get( - f"/projects/{project.organization.slug}/{project.slug}/plugins/{plugin.slug}/", - request=request, - ) - nonce = "" - if hasattr(request, "csp_nonce"): - nonce = f' nonce="{request.csp_nonce}"' - - # Pretty sure this is not in use, and if it is, it has been broken since - # https://github.com/getsentry/sentry/pull/13578/files#diff-d17d91cc629f5f2e4582adb6e52d426f654452b751da97bafa25160b78566438L206 - return mark_safe( - """ -
- - window.__onSentryInit = window.__onSentryInit || []; - window.__onSentryInit.push({ - name: 'renderReact', - component: 'PluginConfig', - container: '#ref-plugin-config', - props: { - project: %s, - organization: %s, - data: %s - }, - }); - - """ - % ( - nonce, - json.dumps_htmlsafe(serialize(project, request.user)), - json.dumps_htmlsafe(serialize(project.organization, request.user)), - json.dumps_htmlsafe(response.data), - ) - ) - - -def default_plugin_config(plugin, project, request): - if plugin.can_enable_for_projects() and not plugin.can_configure_for_project(project): - raise Http404() - - plugin_key = plugin.get_conf_key() - form_class = plugin.get_conf_form(project) - template = plugin.get_conf_template(project) - - if form_class is None: - return HttpResponseRedirect( - reverse("sentry-manage-project", args=[project.organization.slug, project.slug]) - ) - - test_results = None - - form = form_class( - request.POST if request.POST.get("plugin") == plugin.slug else None, - initial=plugin.get_conf_options(project), - prefix=plugin_key, - ) - if form.is_valid(): - if "action_test" in request.POST and plugin.is_testable(): - test_results = plugin.test_configuration_and_get_test_results(project) - else: - for field, value in form.cleaned_data.items(): - key = f"{plugin_key}:{field}" - if project: - ProjectOption.objects.set_value(project, key, value) - else: - options.set(key, value, channel=options.UpdateChannel.APPLICATION) - - messages.add_message( - request, messages.SUCCESS, _("Your settings were saved successfully.") - ) - return HttpResponseRedirect(request.path) - - # TODO(mattrobenolt): Reliably determine if a plugin is configured - # if hasattr(plugin, 'is_configured'): - # is_configured = plugin.is_configured(project) - # else: - # is_configured = True - is_configured = True - - return mark_safe( - render_to_string( - template=template, - context={ - "form": form, - "plugin": plugin, - "plugin_description": plugin.get_description() or "", - "plugin_test_results": test_results, - "plugin_is_configured": is_configured, - }, - request=request, - ) - ) - - -def default_issue_plugin_config(plugin, project, form_data): - plugin_key = plugin.get_conf_key() - for field, value in form_data.items(): - key = f"{plugin_key}:{field}" - if project: - ProjectOption.objects.set_value(project, key, value) - else: - options.set(key, value, channel=options.UpdateChannel.APPLICATION) - - -def default_plugin_options(plugin, project): - form_class = plugin.get_conf_form(project) - if form_class is None: - return {} - - NOTSET = object() - plugin_key = plugin.get_conf_key() - initials = plugin.get_form_initial(project) - for field in form_class.base_fields: - key = f"{plugin_key}:{field}" - if project is not None: - value = ProjectOption.objects.get_value(project, key, NOTSET) - else: - value = options.get(key) - if value is not NOTSET: - initials[field] = value - return initials diff --git a/src/sentry/plugins/base/v1.py b/src/sentry/plugins/base/v1.py index 8b186f2b5a7bc3..86f0167c17d27e 100644 --- a/src/sentry/plugins/base/v1.py +++ b/src/sentry/plugins/base/v1.py @@ -1,7 +1,5 @@ from __future__ import annotations -__all__ = ("Plugin",) - import logging from collections.abc import Sequence from threading import local @@ -13,17 +11,17 @@ from sentry.auth import access from sentry.models.project import Project from sentry.plugins import HIDDEN_PLUGINS -from sentry.plugins.base.configuration import default_plugin_config, default_plugin_options from sentry.plugins.base.response import 
DeferredResponse from sentry.plugins.base.view import PluggableViewMixin from sentry.plugins.config import PluginConfigMixin from sentry.plugins.status import PluginStatusMixin from sentry.projects.services.project import RpcProject -from sentry.utils.hashlib import md5_text if TYPE_CHECKING: from django.utils.functional import _StrPromise +__all__ = ("Plugin",) + class PluginMount(type): def __new__(cls, name, bases, attrs): @@ -206,25 +204,6 @@ def get_conf_template(self, project=None): return self.project_conf_template return self.site_conf_template - def get_conf_options(self, project=None): - """ - Returns a dict of all of the configured options for a project. - - >>> plugin.get_conf_options(project) - """ - return default_plugin_options(self, project) - - def get_conf_version(self, project): - """ - Returns a version string that represents the current configuration state. - - If any option changes or new options added, the version will change. - - >>> plugin.get_conf_version(project) - """ - options = self.get_conf_options(project) - return md5_text("&".join(sorted("%s=%s" % o for o in options.items()))).hexdigest()[:3] - def get_conf_title(self): """ Returns a string representing the title to be shown on the configuration page. @@ -498,10 +477,6 @@ def is_hidden(self): """ return self.slug in HIDDEN_PLUGINS - def configure(self, request, project=None): - """Configures the plugin.""" - return default_plugin_config(self, project, request) - def get_url_module(self): """Allows a plugin to return the import path to a URL module.""" diff --git a/src/sentry/plugins/base/v2.py b/src/sentry/plugins/base/v2.py index f1c0d9fdf3e16b..75909115896414 100644 --- a/src/sentry/plugins/base/v2.py +++ b/src/sentry/plugins/base/v2.py @@ -8,12 +8,10 @@ from django.http import HttpResponseRedirect from sentry.plugins import HIDDEN_PLUGINS -from sentry.plugins.base.configuration import default_plugin_config, default_plugin_options from sentry.plugins.base.response import DeferredResponse from sentry.plugins.config import PluginConfigMixin from sentry.plugins.interfaces.releasehook import ReleaseHook from sentry.plugins.status import PluginStatusMixin -from sentry.utils.hashlib import md5_text if TYPE_CHECKING: from django.utils.functional import _StrPromise @@ -197,25 +195,6 @@ def get_conf_template(self, project=None): return self.project_conf_template return self.site_conf_template - def get_conf_options(self, project=None): - """ - Returns a dict of all of the configured options for a project. - - >>> plugin.get_conf_options(project) - """ - return default_plugin_options(self, project) - - def get_conf_version(self, project): - """ - Returns a version string that represents the current configuration state. - - If any option changes or new options added, the version will change. - - >>> plugin.get_conf_version(project) - """ - options = self.get_conf_options(project) - return md5_text("&".join(sorted("%s=%s" % o for o in options.items()))).hexdigest()[:3] - def get_conf_title(self): """ Returns a string representing the title to be shown on the configuration page. 
@@ -438,10 +417,6 @@ def get_custom_contexts(self): return [MyContextType] """ - def configure(self, project, request): - """Configures the plugin.""" - return default_plugin_config(self, project, request) - def get_url_module(self): """Allows a plugin to return the import path to a URL module.""" diff --git a/src/sentry/plugins/bases/data_forwarding.py b/src/sentry/plugins/bases/data_forwarding.py index 44e0e1cf53606a..1a970044a417e0 100644 --- a/src/sentry/plugins/bases/data_forwarding.py +++ b/src/sentry/plugins/bases/data_forwarding.py @@ -6,16 +6,12 @@ from sentry.api.serializers import serialize from sentry.eventstore.models import Event from sentry.plugins.base import Plugin -from sentry.plugins.base.configuration import react_plugin_config from sentry.tsdb.base import TSDBModel logger = logging.getLogger(__name__) class DataForwardingPlugin(Plugin): - def configure(self, project, request): - return react_plugin_config(self, project, request) - def has_project_conf(self): return True diff --git a/src/sentry/plugins/bases/issue2.py b/src/sentry/plugins/bases/issue2.py index 689808affdee65..d52e3b7b6613ea 100644 --- a/src/sentry/plugins/bases/issue2.py +++ b/src/sentry/plugins/bases/issue2.py @@ -13,7 +13,6 @@ from sentry.exceptions import PluginError # NOQA from sentry.models.activity import Activity from sentry.models.groupmeta import GroupMeta -from sentry.plugins.base.configuration import react_plugin_config from sentry.plugins.base.v1 import Plugin from sentry.plugins.endpoints import PluginGroupEndpoint from sentry.signals import issue_tracker_used @@ -51,9 +50,6 @@ class IssueTrackingPlugin2(Plugin): issue_fields: frozenset[str] | None = None # issue_fields = frozenset(['id', 'title', 'url']) - def configure(self, project, request): - return react_plugin_config(self, project, request) - def get_plugin_type(self): return "issue-tracking" diff --git a/src/sentry/plugins/bases/notify.py b/src/sentry/plugins/bases/notify.py index 420aa39e15922b..7016bb830a93e0 100644 --- a/src/sentry/plugins/bases/notify.py +++ b/src/sentry/plugins/bases/notify.py @@ -11,7 +11,6 @@ from sentry.notifications.services.service import notifications_service from sentry.notifications.types import NotificationSettingEnum from sentry.plugins.base import Plugin -from sentry.plugins.base.configuration import react_plugin_config from sentry.plugins.base.structs import Notification from sentry.shared_integrations.exceptions import ApiError from sentry.types.actor import Actor, ActorType @@ -46,9 +45,6 @@ class NotificationPlugin(Plugin): # site_conf_form = NotificationConfigurationForm project_conf_form: type[forms.Form] = NotificationConfigurationForm - def configure(self, project, request): - return react_plugin_config(self, project, request) - def get_plugin_type(self): return "notification" diff --git a/src/sentry/processing/backpressure/memory.py b/src/sentry/processing/backpressure/memory.py index 3a336377043ce2..7b2af742fe0597 100644 --- a/src/sentry/processing/backpressure/memory.py +++ b/src/sentry/processing/backpressure/memory.py @@ -13,6 +13,8 @@ class ServiceMemory: used: int available: int percentage: float + host: str | None = None + port: int | None = None def __init__(self, name: str, used: int, available: int): self.name = name @@ -21,6 +23,12 @@ def __init__(self, name: str, used: int, available: int): self.percentage = used / available +@dataclass +class NodeInfo: + host: str | None + port: int | None + + def query_rabbitmq_memory_usage(host: str) -> ServiceMemory: """Returns the currently 
used memory and the memory limit of a RabbitMQ host. @@ -51,6 +59,23 @@ def get_memory_usage(node_id: str, info: Mapping[str, Any]) -> ServiceMemory: return ServiceMemory(node_id, memory_used, memory_available) +def get_host_port_info(node_id: str, cluster: Cluster) -> NodeInfo: + """ + Extract the host and port of the redis node in the cluster. + """ + try: + if isinstance(cluster, RedisCluster): + # RedisCluster node mapping + node = cluster.connection_pool.nodes.nodes.get(node_id) + return NodeInfo(node["host"], node["port"]) + else: + # rb.Cluster node mapping + node = cluster.hosts[node_id] + return NodeInfo(node.host, node.port) + except Exception: + return NodeInfo(None, None) + + def iter_cluster_memory_usage(cluster: Cluster) -> Generator[ServiceMemory, None, None]: """ A generator that yields redis `INFO` results for each of the nodes in the `cluster`. @@ -65,4 +90,8 @@ def iter_cluster_memory_usage(cluster: Cluster) -> Generator[ServiceMemory, None cluster_info = promise.value for node_id, info in cluster_info.items(): - yield get_memory_usage(node_id, info) + node_info = get_host_port_info(node_id, cluster) + memory_usage = get_memory_usage(node_id, info) + memory_usage.host = node_info.host + memory_usage.port = node_info.port + yield memory_usage diff --git a/src/sentry/processing/backpressure/monitor.py b/src/sentry/processing/backpressure/monitor.py index f9c233d6dd409a..bcd856c6b00d0d 100644 --- a/src/sentry/processing/backpressure/monitor.py +++ b/src/sentry/processing/backpressure/monitor.py @@ -85,10 +85,12 @@ def check_service_health(services: Mapping[str, Service]) -> MutableMapping[str, reasons = [] logger.info("Checking service `%s` (configured high watermark: %s):", name, high_watermark) + memory = None try: for memory in check_service_memory(service): if memory.percentage >= high_watermark: reasons.append(memory) + logger.info("Checking node: %s:%s", memory.host, memory.port) logger.info( " name: %s, used: %s, available: %s, percentage: %s", memory.name, @@ -101,6 +103,14 @@ def check_service_health(services: Mapping[str, Service]) -> MutableMapping[str, scope.set_tag("service", name) sentry_sdk.capture_exception(e) unhealthy_services[name] = e + host = memory.host if memory else "unknown" + port = memory.port if memory else "unknown" + logger.exception( + "Error while processing node %s:%s for service %s", + host, + port, + service, + ) else: unhealthy_services[name] = reasons diff --git a/src/sentry/profiles/device.py b/src/sentry/profiles/device.py deleted file mode 100644 index b7f4defa94500d..00000000000000 --- a/src/sentry/profiles/device.py +++ /dev/null @@ -1,538 +0,0 @@ -from enum import Enum - -GIB = 1024 * 1024 * 1024 -UNKNOWN_DEVICE = "Unknown Device" - - -class DeviceClass(Enum): - UNCLASSIFIED = 0 - LOW_END = 1 - MID_END = 2 - HIGH_END = 3 - - def __str__(self) -> str: - return {0: "unclassified", 1: "low", 2: "mid", 3: "high"}[self.value] - - -class Platform(Enum): - UNKNOWN = 0 - IOS_DEVICE = 1 - IOS_SIMULATOR = 2 - ANDROID_DEVICE = 3 - ANDROID_EMULATOR = 4 - - -# classify_device classifies a device as being low, mid, or high end -def classify_device( - model: str, - os_name: str, - is_emulator: bool, - cpu_frequencies: tuple[int] | None = None, - physical_memory_bytes: int | None = None, -) -> DeviceClass: - platform = get_platform(os_name, is_emulator) - if platform in (Platform.IOS_SIMULATOR, Platform.ANDROID_EMULATOR): - """ - We exclude simulators/emulators from performance statistics for - low/mid/high end because these run on arbitrary PC 
hardware and - will make our data noisy. - """ - return DeviceClass.UNCLASSIFIED - - if platform == Platform.IOS_DEVICE: - frequencies = ios_cpu_core_max_frequencies_mhz(model) - if core_frequency(frequencies) < 2000: - return DeviceClass.LOW_END # less than 2GHz clock speed - if core_frequency(frequencies) < 3000: - return DeviceClass.MID_END # less than 3Ghz clock speed - return DeviceClass.HIGH_END - - if platform == Platform.ANDROID_DEVICE and cpu_frequencies and physical_memory_bytes: - if number_of_cores(cpu_frequencies) < 8 or physical_memory_bytes < (4 * GIB): - return DeviceClass.LOW_END # less than 8 cores or less than 4GiB of RAM - if core_frequency(cpu_frequencies) < 2500: - return DeviceClass.MID_END # less than 2.5GHz clock speed - return DeviceClass.HIGH_END - - return DeviceClass.UNCLASSIFIED - - -def number_of_cores(frequencies: tuple[int, ...] | None) -> int: - return len(frequencies) if frequencies is not None else 0 - - -def core_frequency(frequencies: tuple[int, ...] | None) -> int: - return max(frequencies) if frequencies is not None else 0 - - -def get_platform(device_os_name: str, is_emulator: bool) -> Platform: - if device_os_name == "android": - if is_emulator: - return Platform.ANDROID_EMULATOR - return Platform.ANDROID_DEVICE - if device_os_name in ("iPhone OS", "iOS", "iPadOS", "watchOS", "tvOS"): - if is_emulator: - return Platform.IOS_SIMULATOR - return Platform.IOS_DEVICE - return Platform.UNKNOWN - - -IPHONE4 = "iPhone 4" -IPHONE5 = "iPhone 5" -IPHONE5C = "iPhone 5c" -IPHONE5S = "iPhone 5s" -IPHONE7 = "iPhone 7" -IPHONE7PLUS = "iPhone 7 Plus" -IPHONE8 = "iPhone 8" -IPHONE8PLUS = "iPhone 8 Plus" -IPHONEX = "iPhone X" -IPHONEXSMAX = "iPhone XS Max" - -IPAD2 = "iPad 2" -IPADGEN3 = "iPad (3rd gen)" -IPADGEN4 = "iPad (4th gen)" -IPADGEN5 = "iPad (5th gen)" -IPADGEN6 = "iPad (6th gen)" -IPADGEN7 = "iPad (7th gen)" -IPADGEN8 = "iPad (8th gen)" -IPADGEN9 = "iPad (9th gen)" -IPADGEN10 = "iPad (10th gen)" - -IPADAIRGEN1 = "iPad Air (1st gen)" -IPADAIR2 = "iPad Air 2" -IPADAIRGEN3 = "iPad Air (3rd gen)" -IPADAIRGEN4 = "iPad Air (4th gen)" -IPADAIRGEN5 = "iPad Air (5th gen)" - -IPADPRO12GEN1 = "iPad Pro (12.9-inch, 1st gen)" -IPADPRO9GEN1 = "iPad Pro (9.7-inch, 1st gen)" -IPADPRO12GEN2 = "iPad Pro (12.9-inch, 2nd gen)" -IPADPRO10 = "iPad Pro (10.5-inch)" -IPADPRO11GEN1 = "iPad Pro (11-inch, 1st gen)" -IPADPRO12GEN3 = "iPad Pro (12.9-inch, 3rd gen)" -IPADPRO11GEN2 = "iPad Pro (11-inch, 2nd gen)" -IPADPRO12GEN4 = "iPad Pro (12.9-inch, 4th gen)" -IPADPRO11GEN3 = "iPad Pro (11-inch, 3rd gen)" -IPADPRO11GEN4 = "iPad Pro (11-inch, 4th gen)" -IPADPRO12GEN5 = "iPad Pro (12.9-inch, 5th gen)" -IPADPRO12GEN6 = "iPad Pro (12.9-inch 6th gen)" - -IPADMINIGEN1 = "iPad mini (1st gen)" -IPADMINI2 = "iPad mini 2" -IPADMINI3 = "iPad mini 3" -IPADMINI4 = "iPad mini 4" -IPADMINIGEN5 = "iPad mini (5th gen)" -IPADMINIGEN6 = "iPad mini (6th gen)" - -APPLEWATCHGEN1 = "Apple Watch (1st gen)" -APPLEWATCHSERIES1 = "Apple Watch Series 1" -APPLEWATCHSERIES2 = "Apple Watch Series 2" -APPLEWATCHSERIES3 = "Apple Watch Series 3" -APPLEWATCHSERIES4 = "Apple Watch Series 4" -APPLEWATCHSERIES5 = "Apple Watch Series 5" -APPLEWATCHSE = "Apple Watch SE" -APPLEWATCHSERIES6 = "Apple Watch Series 6" - -APPLETVGEN1 = "Apple TV (1st gen)" -APPLETVGEN2 = "Apple TV (2nd gen)" -APPLETVGEN3 = "Apple TV (3rd gen)" - -# https:#www.theiphonewiki.com/wiki/Models -IOS_MODELS: dict[str, str] = { - "iPhone1,1": "iPhone (1st gen)", - "iPhone1,2": "iPhone 3G", - "iPhone2,1": "iPhone 3GS", - "iPhone3,1": IPHONE4, - 
"iPhone3,2": IPHONE4, - "iPhone3,3": IPHONE4, - "iPhone4,1": "iPhone 4S", - "iPhone5,1": IPHONE5, - "iPhone5,2": IPHONE5, - "iPhone5,3": IPHONE5C, - "iPhone5,4": IPHONE5C, - "iPhone6,1": IPHONE5S, - "iPhone6,2": IPHONE5S, - "iPhone7,2": "iPhone 6", - "iPhone7,1": "iPhone 6 Plus", - "iPhone8,1": "iPhone 6s", - "iPhone8,2": "iPhone 6s Plus", - "iPhone8,4": "iPhone SE (1st gen)", - "iPhone9,1": IPHONE7, - "iPhone9,3": IPHONE7, - "iPhone9,2": IPHONE7PLUS, - "iPhone9,4": IPHONE7PLUS, - "iPhone10,1": IPHONE8, - "iPhone10,4": IPHONE8, - "iPhone10,2": IPHONE8PLUS, - "iPhone10,5": IPHONE8PLUS, - "iPhone10,3": IPHONEX, - "iPhone10,6": IPHONEX, - "iPhone11,8": "iPhone XR", - "iPhone11,2": "iPhone XS", - "iPhone11,4": IPHONEXSMAX, - "iPhone11,6": IPHONEXSMAX, - "iPhone12,1": "iPhone 11", - "iPhone12,3": "iPhone 11 Pro", - "iPhone12,5": "iPhone 11 Pro Max", - "iPhone12,8": "iPhone SE (2nd gen)", - "iPhone13,1": "iPhone 12 mini", - "iPhone13,2": "iPhone 12", - "iPhone13,3": "iPhone 12 Pro", - "iPhone13,4": "iPhone 12 Pro Max", - "iPhone14,4": "iPhone 13 mini", - "iPhone14,5": "iPhone 13", - "iPhone14,2": "iPhone 13 Pro", - "iPhone14,3": "iPhone 13 Pro Max", - "iPhone14,6": "iPhone SE (3rd gen)", - "iPhone14,7": "iPhone 14", - "iPhone14,8": "iPhone 14 Plus", - "iPhone15,2": "iPhone 14 Pro", - "iPhone15,3": "iPhone 14 Pro Max", - "iPhone15,4": "iPhone 15", - "iPhone15,5": "iPhone 15 Plus", - "iPhone16,1": "iPhone 15 Pro", - "iPhone16,2": "iPhone 15 Pro Max", - "iPod1,1": "iPod touch (1st gen)", - "iPod2,1": "iPod touch (2nd gen)", - "iPod3,1": "iPod touch (3rd gen)", - "iPod4,1": "iPod touch (4th gen)", - "iPod5,1": "iPod touch (5th gen)", - "iPod7,1": "iPod touch (6th gen)", - "iPod9,1": "iPod touch (7th gen)", - "iPad1,1": "iPad (1st gen)", - "iPad2,1": IPAD2, - "iPad2,2": IPAD2, - "iPad2,3": IPAD2, - "iPad2,4": IPAD2, - "iPad3,1": IPADGEN3, - "iPad3,2": IPADGEN3, - "iPad3,3": IPADGEN3, - "iPad3,4": IPADGEN4, - "iPad3,5": IPADGEN4, - "iPad3,6": IPADGEN4, - "iPad6,11": IPADGEN5, - "iPad6,12": IPADGEN5, - "iPad7,5": IPADGEN6, - "iPad7,6": IPADGEN6, - "iPad7,11": IPADGEN7, - "iPad7,12": IPADGEN7, - "iPad11,6": IPADGEN8, - "iPad11,7": IPADGEN8, - "iPad12,1": IPADGEN9, - "iPad12,2": IPADGEN9, - "iPad4,1": IPADAIRGEN1, - "iPad4,2": IPADAIRGEN1, - "iPad4,3": IPADAIRGEN1, - "iPad5,3": IPADAIR2, - "iPad5,4": IPADAIR2, - "iPad11,3": IPADAIRGEN3, - "iPad11,4": IPADAIRGEN3, - "iPad13,1": IPADAIRGEN4, - "iPad13,2": IPADAIRGEN4, - "iPad13,16": IPADAIRGEN5, - "iPad13,17": IPADAIRGEN5, - "iPad6,7": IPADPRO12GEN1, - "iPad6,8": IPADPRO12GEN1, - "iPad6,3": IPADPRO9GEN1, - "iPad6,4": IPADPRO9GEN1, - "iPad7,1": IPADPRO12GEN2, - "iPad7,2": IPADPRO12GEN2, - "iPad7,3": IPADPRO10, - "iPad7,4": IPADPRO10, - "iPad8,1": IPADPRO11GEN1, - "iPad8,2": IPADPRO11GEN1, - "iPad8,3": IPADPRO11GEN1, - "iPad8,4": IPADPRO11GEN1, - "iPad8,5": IPADPRO12GEN3, - "iPad8,6": IPADPRO12GEN3, - "iPad8,7": IPADPRO12GEN3, - "iPad8,8": IPADPRO12GEN3, - "iPad8,9": IPADPRO11GEN2, - "iPad8,10": IPADPRO11GEN2, - "iPad8,11": IPADPRO12GEN4, - "iPad8,12": IPADPRO12GEN4, - "iPad13,4": IPADPRO11GEN3, - "iPad13,5": IPADPRO11GEN3, - "iPad13,6": IPADPRO11GEN3, - "iPad13,7": IPADPRO11GEN3, - "iPad13,8": IPADPRO12GEN5, - "iPad13,9": IPADPRO12GEN5, - "iPad13,10": IPADPRO12GEN5, - "iPad13,11": IPADPRO12GEN5, - "iPad2,5": IPADMINIGEN1, - "iPad2,6": IPADMINIGEN1, - "iPad2,7": IPADMINIGEN1, - "iPad4,4": IPADMINI2, - "iPad4,5": IPADMINI2, - "iPad4,6": IPADMINI2, - "iPad4,7": IPADMINI3, - "iPad4,8": IPADMINI3, - "iPad4,9": IPADMINI3, - "iPad5,1": IPADMINI4, - "iPad5,2": 
IPADMINI4, - "iPad11,1": IPADMINIGEN5, - "iPad11,2": IPADMINIGEN5, - "iPad13,18": IPADGEN10, - "iPad13,19": IPADGEN10, - "iPad14,1": IPADMINIGEN6, - "iPad14,2": IPADMINIGEN6, - "iPad14,3": IPADPRO11GEN4, - "iPad14,4": IPADPRO11GEN4, - "iPad14,5": IPADPRO12GEN6, - "iPad14,6": IPADPRO12GEN6, - "Watch1,1": APPLEWATCHGEN1, - "Watch1,2": APPLEWATCHGEN1, - "Watch2,6": APPLEWATCHSERIES1, - "Watch2,7": APPLEWATCHSERIES1, - "Watch2,3": APPLEWATCHSERIES2, - "Watch2,4": APPLEWATCHSERIES2, - "Watch3,1": APPLEWATCHSERIES3, - "Watch3,2": APPLEWATCHSERIES3, - "Watch3,3": APPLEWATCHSERIES3, - "Watch3,4": APPLEWATCHSERIES3, - "Watch4,1": APPLEWATCHSERIES4, - "Watch4,2": APPLEWATCHSERIES4, - "Watch4,3": APPLEWATCHSERIES4, - "Watch4,4": APPLEWATCHSERIES4, - "Watch5,1": APPLEWATCHSERIES5, - "Watch5,2": APPLEWATCHSERIES5, - "Watch5,3": APPLEWATCHSERIES5, - "Watch5,4": APPLEWATCHSERIES5, - "Watch5,9": APPLEWATCHSE, - "Watch5,10": APPLEWATCHSE, - "Watch5,11": APPLEWATCHSE, - "Watch5,12": APPLEWATCHSE, - "Watch6,3": APPLEWATCHSERIES6, - "Watch6,4": APPLEWATCHSERIES6, - "AppleTV1,1": "Apple TV (1st gen)", - "AppleTV2,1": "Apple TV (2nd gen)", - "AppleTV3,1": APPLETVGEN3, - "AppleTV3,2": APPLETVGEN3, - "AppleTV5,3": "Apple TV (4th gen)", - "AppleTV6,2": "Apple TV 4K", - "AppleTV11,1": "Apple TV 4K (2nd gen)", - "i386": "iOS Simulator (i386)", - "x86_64": "iOS Simulator (x86_64)", -} - - -def ios_human_readable_model_name(model: str) -> str: - if model in IOS_MODELS: - return IOS_MODELS[model] - if model.startswith("iPhone"): - return "Unknown iPhone" - if model.startswith("iPad"): - return "Unknown iPad" - if model.startswith("AppleTV"): - return "Unknown Apple TV" - if model.startswith("Watch"): - return "Unknown Apple Watch" - return "Unknown iOS Device" - - -CPU1 = (520, 520) -CPU2 = (1000, 1000) -CPU3 = (1300, 1300) -CPU4 = (1400, 1400) -CPU5 = (1500, 1500) -CPU6 = (1800, 1800) -CPU7 = (1850, 1850) -CPU8 = (2160, 2160) -CPU9 = (2260, 2260) -CPU10 = (2320, 2320) -CPU11 = (2340, 2340) -CPU12 = (1500, 1500, 1500) -CPU13 = (2380, 2380, 2380, 1300, 1300, 1300) -CPU14 = (2390, 2390, 1420, 1420, 1420, 1420) -CPU15 = (2490, 2490, 1587, 1587, 1587, 1587) -CPU16 = (2650, 2650, 1600, 1600, 1600, 1600) -CPU17 = (2490, 2490, 2490, 2490, 1587, 1587, 1587, 1587) -CPU18 = (3100, 3100, 1800, 1800, 1800, 1800) -CPU19 = (3230, 3230, 1800, 1800, 1800, 1800) -CPU20 = (2900, 2900, 1800, 1800, 1800, 1800) -CPU21 = (3200, 3200, 3200, 3200, 2060, 2060, 2060, 2060) -CPU22 = (3230, 3230, 2020, 2020, 2020, 2020) -CPU23 = (3460, 3460, 2020, 2020, 2020, 2020) - - -IOS_CPU_FREQUENCIES: dict[str, tuple[int, ...]] = { - "iPhone1,1": (412,), - "iPhone1,2": (412,), - "iPod1,1": (412,), - "Watch1,1": (520,), - "Watch1,2": (520,), - "iPod1,2": (533,), - "iPhone2,1": (600,), - "iPod3,1": (600,), - "iPhone3,1": (800,), - "iPhone3,2": (800,), - "iPhone3,3": (800,), - "iPod4,1": (800,), - "iPhone4,1": (800,), - "iPad1,1": (1000,), - "AppleTV1,1": (1000,), - "AppleTV2,1": (1000,), - "AppleTV3,1": (1000,), - "AppleTV3,2": (1000,), - "Watch2,6": CPU1, - "Watch2,7": CPU1, - "Watch2,3": CPU1, - "Watch2,4": CPU1, - # The clock speeds for the Watch3,4,5 have not been published, we only - # know that they are dual core 64-bit chips. Here we will assume that - # they use the confirmed clock frequency from the Watch2, but in reality - # they are likely higher. 
- "Watch3,1": CPU1, - "Watch3,2": CPU1, - "Watch3,3": CPU1, - "Watch3,4": CPU1, - "Watch4,1": CPU1, - "Watch4,2": CPU1, - "Watch4,3": CPU1, - "Watch4,4": CPU1, - "Watch5,1": CPU1, - "Watch5,2": CPU1, - "Watch5,3": CPU1, - "Watch5,4": CPU1, - "Watch5,9": CPU2, - "Watch5,10": CPU2, - "Watch5,11": CPU2, - "Watch5,12": CPU2, - "Watch6,3": CPU2, - "Watch6,4": CPU2, - "iPod5,1": (800, 800), - "iPad2,1": CPU2, - "iPad2,2": CPU2, - "iPad2,3": CPU2, - "iPad2,4": CPU2, - "iPad2,5": CPU2, - "iPad2,6": CPU2, - "iPad2,7": CPU2, - "iPad3,1": CPU2, - "iPad3,2": CPU2, - "iPad3,3": CPU2, - "iPod7,1": (1100, 1100), - "iPhone5,1": CPU3, - "iPhone5,2": CPU3, - "iPhone5,3": CPU3, - "iPhone5,4": CPU3, - "iPhone6,1": CPU3, - "iPhone6,2": CPU3, - "iPad4,4": CPU3, - "iPad4,5": CPU3, - "iPad4,6": CPU3, - "iPad4,7": CPU3, - "iPad4,8": CPU3, - "iPad4,9": CPU3, - "iPhone7,1": CPU4, - "iPhone7,2": CPU4, - "iPad3,4": CPU4, - "iPad3,5": CPU4, - "iPad3,6": CPU4, - "iPad4,1": CPU4, - "iPad4,2": CPU4, - "iPad4,3": CPU4, - "iPad5,1": CPU5, - "iPad5,2": CPU5, - "AppleTV5,3": CPU5, - "iPod9,1": (1630, 1630), - "iPad6,11": CPU6, - "iPad6,12": CPU6, - "iPhone8,1": CPU7, - "iPhone8,2": CPU7, - "iPhone8,4": CPU7, - "iPad6,3": CPU8, - "iPad6,4": CPU8, - "iPad6,7": CPU9, - "iPad6,8": CPU9, - "iPad7,11": CPU10, - "iPad7,12": CPU10, - "iPad7,5": CPU11, - "iPad7,6": CPU11, - "iPhone9,1": CPU11, - "iPhone9,2": CPU11, - "iPhone9,3": CPU11, - "iPhone9,4": CPU11, - "iPad5,3": CPU12, - "iPad5,4": CPU12, - "AppleTV6,2": (2380, 2380, 2380), - "iPad7,1": CPU13, - "iPad7,2": CPU13, - "iPad7,3": CPU13, - "iPad7,4": CPU13, - "iPhone10,1": CPU14, - "iPhone10,2": CPU14, - "iPhone10,3": CPU14, - "iPhone10,4": CPU14, - "iPhone10,5": CPU14, - "iPhone10,6": CPU14, - "iPad11,1": CPU15, - "iPad11,2": CPU15, - "iPad11,3": CPU15, - "iPad11,4": CPU15, - "iPad11,6": CPU15, - "iPad11,7": CPU15, - "iPhone11,2": CPU15, - "iPhone11,4": CPU15, - "iPhone11,6": CPU15, - "iPhone11,8": CPU15, - "AppleTV11,1": CPU15, - "iPhone12,1": CPU16, - "iPhone12,3": CPU16, - "iPhone12,5": CPU16, - "iPhone12,8": CPU16, - "iPad12,1": CPU16, - "iPad12,2": CPU16, - "iPad8,1": CPU17, - "iPad8,2": CPU17, - "iPad8,3": CPU17, - "iPad8,4": CPU17, - "iPad8,5": CPU17, - "iPad8,6": CPU17, - "iPad8,7": CPU17, - "iPad8,8": CPU17, - "iPad8,9": CPU17, - "iPad8,10": CPU17, - "iPad8,11": CPU17, - "iPad8,12": CPU17, - "iPhone13,1": CPU18, - "iPhone13,2": CPU18, - "iPhone13,3": CPU18, - "iPhone13,4": CPU18, - "iPad13,1": CPU18, - "iPad13,2": CPU18, - "iPhone14,2": CPU19, - "iPhone14,3": CPU19, - "iPhone14,4": CPU19, - "iPhone14,5": CPU19, - "iPad14,1": CPU20, - "iPad14,2": CPU20, - "iPad13,4": CPU21, - "iPad13,5": CPU21, - "iPad13,6": CPU21, - "iPad13,7": CPU21, - "iPad13,8": CPU21, - "iPad13,9": CPU21, - "iPad13,10": CPU21, - "iPad13,11": CPU21, - "iPhone14,6": CPU19, - "iPhone14,7": CPU22, - "iPhone14,8": CPU22, - "iPhone15,2": CPU23, - "iPhone15,3": CPU23, -} - - -def ios_cpu_core_max_frequencies_mhz(model: str) -> tuple[int, ...] 
| None: - if model in IOS_CPU_FREQUENCIES: - return IOS_CPU_FREQUENCIES[model] - # New unreleased device, assume device is best of class */ - if model.startswith("iPhone"): - return CPU19 - if model.startswith("iPad"): - return CPU21 - if model.startswith("AppleTV"): - return CPU15 - if model.startswith("Watch"): - return CPU6 - return None # unknown device diff --git a/src/sentry/profiles/task.py b/src/sentry/profiles/task.py index 71cc3c98db3bf2..60debead1d3095 100644 --- a/src/sentry/profiles/task.py +++ b/src/sentry/profiles/task.py @@ -1,9 +1,7 @@ from __future__ import annotations -import random from copy import deepcopy from datetime import datetime, timezone -from functools import lru_cache from time import time from typing import Any, TypedDict from uuid import UUID @@ -21,8 +19,6 @@ from sentry.models.eventerror import EventError from sentry.models.organization import Organization from sentry.models.project import Project -from sentry.models.projectkey import ProjectKey, UseCase -from sentry.profiles.device import classify_device from sentry.profiles.java import ( convert_android_methods_to_jvm_frames, deobfuscate_signature, @@ -34,6 +30,7 @@ apply_stack_trace_rules_to_profile, get_from_profiling_service, ) +from sentry.search.utils import DEVICE_CLASS from sentry.signals import first_profile_received from sentry.silo.base import SiloMode from sentry.tasks.base import instrumented_task @@ -41,20 +38,16 @@ from sentry.utils.outcomes import Outcome, track_outcome from sentry.utils.sdk import set_measurement - -class VroomTimeout(Exception): - pass +REVERSE_DEVICE_CLASS = {next(iter(tags)): label for label, tags in DEVICE_CLASS.items()} @instrumented_task( name="sentry.profiles.task.process_profile", - queue="profiles.process", - autoretry_for=(VroomTimeout,), # Retry when vroom returns a GCS timeout retry_backoff=True, - retry_backoff_max=60, # up to 1 min + retry_backoff_max=20, retry_jitter=True, default_retry_delay=5, # retries after 5s - max_retries=5, + max_retries=2, acks_late=True, task_time_limit=60, task_acks_on_failure_or_timeout=False, @@ -158,27 +151,6 @@ def process_profile_task( set_measurement("profile.stacks.processed", len(profile["profile"]["stacks"])) set_measurement("profile.frames.processed", len(profile["profile"]["frames"])) - if ( - profile.get("version") in ["1", "2"] - and options.get("profiling.generic_metrics.functions_ingestion.enabled") - and ( - organization.id - in options.get("profiling.generic_metrics.functions_ingestion.allowed_org_ids") - or random.random() - < options.get("profiling.generic_metrics.functions_ingestion.rollout_rate") - ) - and project.id - not in options.get("profiling.generic_metrics.functions_ingestion.denied_proj_ids") - ): - try: - with metrics.timer("process_profile.get_metrics_dsn"): - dsn = get_metrics_dsn(project.id) - profile["options"] = { - "dsn": dsn, - } - except Exception as e: - sentry_sdk.capture_exception(e) - if options.get("profiling.stack_trace_rules.enabled"): try: with metrics.timer("process_profile.apply_stack_trace_rules"): @@ -373,34 +345,12 @@ def _normalize(profile: Profile, organization: Organization) -> None: if platform not in {"cocoa", "android"} or version == "2": return - classification_options = dict() - - if platform == "android": - classification_options.update( - { - "cpu_frequencies": profile["device_cpu_frequencies"], - "physical_memory_bytes": profile["device_physical_memory_bytes"], - } - ) + classification = profile.get("transaction_tags", {}).get("device.class", None) - if version == 
"1": - classification_options.update( - { - "model": profile["device"]["model"], - "os_name": profile["os"]["name"], - "is_emulator": profile["device"]["is_emulator"], - } - ) - elif version is None: - classification_options.update( - { - "model": profile["device_model"], - "os_name": profile["device_os_name"], - "is_emulator": profile["device_is_emulator"], - } - ) + if not classification: + return - classification = str(classify_device(**classification_options)) + classification = REVERSE_DEVICE_CLASS.get(classification, "unknown") if version == "1": profile["device"]["classification"] = classification @@ -515,7 +465,10 @@ def symbolicate( classes=[], ) return symbolicator.process_payload( - platform=platform, stacktraces=stacktraces, modules=modules, apply_source_context=False + platform=platform, + stacktraces=stacktraces, + modules=modules, + apply_source_context=False, ) @@ -956,29 +909,27 @@ def _insert_vroom_profile(profile: Profile) -> bool: path = "/chunk" if "profiler_id" in profile else "/profile" response = get_from_profiling_service(method="POST", path=path, json_data=profile) + sentry_sdk.set_tag("vroom.response.status_code", str(response.status)) + + reason = "bad status" + if response.status == 204: return True elif response.status == 429: - raise VroomTimeout + reason = "gcs timeout" elif response.status == 412: - metrics.incr( - "process_profile.insert_vroom_profile.error", - tags={ - "platform": profile["platform"], - "reason": "duplicate profile", - }, - sample_rate=1.0, - ) - return False - else: - metrics.incr( - "process_profile.insert_vroom_profile.error", - tags={"platform": profile["platform"], "reason": "bad status"}, - sample_rate=1.0, - ) - return False - except VroomTimeout: - raise + reason = "duplicate profile" + + metrics.incr( + "process_profile.insert_vroom_profile.error", + tags={ + "platform": profile["platform"], + "reason": reason, + "status_code": response.status, + }, + sample_rate=1.0, + ) + return False except Exception as e: sentry_sdk.capture_exception(e) metrics.incr( @@ -1022,22 +973,6 @@ class _ProjectKeyKwargs(TypedDict): use_case: str -@lru_cache(maxsize=100) -def get_metrics_dsn(project_id: int) -> str: - kwargs: _ProjectKeyKwargs = { - "project_id": project_id, - "use_case": UseCase.PROFILING.value, - } - try: - project_key, _ = ProjectKey.objects.get_or_create(**kwargs) - except ProjectKey.MultipleObjectsReturned: - # See https://docs.djangoproject.com/en/5.0/ref/models/querysets/#get-or-create - project_key_first = ProjectKey.objects.filter(**kwargs).order_by("pk").first() - assert project_key_first is not None - project_key = project_key_first - return project_key.get_dsn(public=True) - - @metrics.wraps("process_profile.track_outcome") def _track_duration_outcome( profile: Profile, diff --git a/src/sentry/queue/routers.py b/src/sentry/queue/routers.py index b1ad6081ec9f5a..00b091b8586dc5 100644 --- a/src/sentry/queue/routers.py +++ b/src/sentry/queue/routers.py @@ -127,14 +127,8 @@ def route_for_queue(self, queue: str) -> str: if random.random() >= rollout_rate: return queue - if queue in set(options.get("celery_split_queue_legacy_mode")): - # Use legacy route - # This router required to define the routing logic inside the - # settings file. 
- return settings.SENTRY_POST_PROCESS_QUEUE_SPLIT_ROUTER.get(queue, lambda: queue)() + router = self.__queue_routers.get(queue) + if router is not None: + return next(router) else: - router = self.__queue_routers.get(queue) - if router is not None: - return next(router) - else: - return queue + return queue diff --git a/src/sentry/quotas/base.py b/src/sentry/quotas/base.py index 7e1f7a4d15ea9a..e236126a719c36 100644 --- a/src/sentry/quotas/base.py +++ b/src/sentry/quotas/base.py @@ -457,6 +457,12 @@ def get_abuse_quotas(self, org): categories=[DataCategory.ATTACHMENT], scope=QuotaScope.PROJECT, ), + AbuseQuota( + id="paai", + option="project-abuse-quota.attachment-item-limit", + categories=[DataCategory.ATTACHMENT_ITEM], + scope=QuotaScope.PROJECT, + ), AbuseQuota( id="pas", option="project-abuse-quota.session-limit", diff --git a/src/sentry/ratelimits/base.py b/src/sentry/ratelimits/base.py index fb5fe63893a64b..b43b296d7a3625 100644 --- a/src/sentry/ratelimits/base.py +++ b/src/sentry/ratelimits/base.py @@ -31,3 +31,6 @@ def is_limited_with_value( def validate(self) -> None: raise NotImplementedError + + def reset(self, key: str, project: Project | None = None, window: int | None = None) -> None: + return diff --git a/src/sentry/ratelimits/redis.py b/src/sentry/ratelimits/redis.py index d96c02c8fe0efd..2d43855ae93be3 100644 --- a/src/sentry/ratelimits/redis.py +++ b/src/sentry/ratelimits/redis.py @@ -120,3 +120,7 @@ def is_limited_with_value( return False, 0, reset_time return result > limit, result, reset_time + + def reset(self, key: str, project: Project | None = None, window: int | None = None) -> None: + redis_key = self._construct_redis_key(key, project=project, window=window) + self.client.delete(redis_key) diff --git a/src/sentry/receivers/onboarding.py b/src/sentry/receivers/onboarding.py index 97d8976ebcba98..c363a085be319a 100644 --- a/src/sentry/receivers/onboarding.py +++ b/src/sentry/receivers/onboarding.py @@ -69,9 +69,8 @@ def record_new_project(project, user=None, user_id=None, **kwargs): else: user_id = None try: - default_user_id = ( - Organization.objects.get(id=project.organization_id).get_default_owner().id - ) + default_user = Organization.objects.get(id=project.organization_id).get_default_owner() + default_user_id = default_user.id except IndexError: logger.warning( "Cannot initiate onboarding for organization (%s) due to missing owners", @@ -113,6 +112,13 @@ def record_new_project(project, user=None, user_id=None, **kwargs): ), project_id=project.id, ) + analytics.record( + "second_platform.added", + user_id=default_user_id, + organization_id=project.organization_id, + project_id=project.id, + ) + try_mark_onboarding_complete(project.organization_id, user) @first_event_received.connect(weak=False) @@ -641,8 +647,6 @@ def record_plugin_enabled(plugin, project, user, **kwargs): @alert_rule_created.connect(weak=False) def record_alert_rule_created(user, project: Project, rule_type: str, **kwargs): - # NOTE: This intentionally does not fire for the default issue alert rule - # that gets created on new project creation. 
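Stepping back to the rate limiter change above (src/sentry/ratelimits/): the base backend now exposes `reset()` as a no-op and the Redis backend implements it by deleting the computed key, so callers can clear a quota window explicitly. A hedged usage sketch; the key name, limit, and the exact `is_limited_with_value` parameter spelling are assumptions for illustration:

from sentry.ratelimits.redis import RedisRateLimiter

limiter = RedisRateLimiter()

# Count an attempt against a 60-second window for this key.
limited, current_value, reset_time = limiter.is_limited_with_value(
    "org:123:invite", limit=5, window=60
)

if not limited:
    ...  # perform the rate-limited action

# Later (for example in a test, or after an operator override), drop the counter
# for that window so the next attempt starts from zero.
limiter.reset("org:123:invite", window=60)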
task = OnboardingTask.METRIC_ALERT if rule_type == "metric" else OnboardingTask.ALERT_RULE rows_affected, created = OrganizationOnboardingTask.objects.create_or_update( organization_id=project.organization_id, @@ -735,6 +739,7 @@ def record_integration_added( task=task_mapping[integration_type], status=OnboardingTaskStatus.COMPLETE, ) + try_mark_onboarding_complete(organization_id, user) else: task = OrganizationOnboardingTask.objects.filter( organization_id=organization_id, diff --git a/src/sentry/receivers/rules.py b/src/sentry/receivers/rules.py index e024ff4fab2410..c07b27c62fe0e1 100644 --- a/src/sentry/receivers/rules.py +++ b/src/sentry/receivers/rules.py @@ -1,7 +1,13 @@ +import logging + +from sentry import features from sentry.models.project import Project from sentry.models.rule import Rule from sentry.notifications.types import FallthroughChoiceType -from sentry.signals import project_created +from sentry.signals import alert_rule_created, project_created +from sentry.users.services.user.model import RpcUser + +logger = logging.getLogger("sentry") DEFAULT_RULE_LABEL = "Send a notification for high priority issues" DEFAULT_RULE_ACTIONS = [ @@ -31,7 +37,29 @@ def create_default_rules(project: Project, default_rules=True, RuleModel=Rule, * return rule_data = DEFAULT_RULE_DATA - RuleModel.objects.create(project=project, label=DEFAULT_RULE_LABEL, data=rule_data) + rule = RuleModel.objects.create(project=project, label=DEFAULT_RULE_LABEL, data=rule_data) + + try: + user: RpcUser = project.organization.get_default_owner() + except IndexError: + logger.warning( + "Cannot record default rule created for organization (%s) due to missing owners", + project.organization_id, + ) + return + + if features.has("organizations:quick-start-updates", project.organization, actor=user): + # When a user creates a new project and opts to set up an issue alert within it, + # the corresponding task in the quick start sidebar is automatically marked as complete. 
+ alert_rule_created.send( + user=user, + project=project, + rule_id=rule.id, + # The default rule created within a new project is always of type 'issue' + rule_type="issue", + sender=type(project), + is_api_token=False, + ) project_created.connect(create_default_rules, dispatch_uid="create_default_rules", weak=False) diff --git a/src/sentry/relay/globalconfig.py b/src/sentry/relay/globalconfig.py index 6e58750ec3bdfa..b992fb8fb6802b 100644 --- a/src/sentry/relay/globalconfig.py +++ b/src/sentry/relay/globalconfig.py @@ -15,7 +15,6 @@ "profiling.profile_metrics.unsampled_profiles.platforms", "profiling.profile_metrics.unsampled_profiles.sample_rate", "profiling.profile_metrics.unsampled_profiles.enabled", - "profiling.generic_metrics.functions_ingestion.enabled", "relay.span-usage-metric", "relay.cardinality-limiter.mode", "relay.cardinality-limiter.error-sample-rate", @@ -23,7 +22,6 @@ "relay.metric-bucket-distribution-encodings", "relay.metric-stats.rollout-rate", "relay.span-extraction.sample-rate", - "relay.compute-metrics-summaries.sample-rate", "relay.span-normalization.allowed_hosts", ] diff --git a/src/sentry/release_health/base.py b/src/sentry/release_health/base.py index d9fd7530fb122b..9e747ce4ea608e 100644 --- a/src/sentry/release_health/base.py +++ b/src/sentry/release_health/base.py @@ -243,14 +243,10 @@ class ReleaseHealthBackend(Service): "get_project_releases_count", "get_project_release_stats", "get_project_sessions_count", - "is_metrics_based", "get_num_sessions_per_project", "get_project_releases_by_stability", ) - def is_metrics_based(self) -> bool: - return False - def get_current_and_previous_crash_free_rates( self, project_ids: Sequence[ProjectId], diff --git a/src/sentry/release_health/metrics.py b/src/sentry/release_health/metrics.py index 9be08a6101b290..2b0229516e4963 100644 --- a/src/sentry/release_health/metrics.py +++ b/src/sentry/release_health/metrics.py @@ -173,9 +173,6 @@ def _get_crash_free_rate_data( result_groups=result_groups ) - def is_metrics_based(self) -> bool: - return True - def get_current_and_previous_crash_free_rates( self, project_ids: Sequence[int], diff --git a/src/sentry/remote_subscriptions/consumers/result_consumer.py b/src/sentry/remote_subscriptions/consumers/result_consumer.py index 6e7ddcdf99b6c7..51958ee6f4a969 100644 --- a/src/sentry/remote_subscriptions/consumers/result_consumer.py +++ b/src/sentry/remote_subscriptions/consumers/result_consumer.py @@ -2,17 +2,23 @@ import abc import logging +from collections import defaultdict from collections.abc import Mapping -from typing import Generic, TypeVar +from concurrent.futures import ThreadPoolExecutor, wait +from typing import Generic, Literal, TypeVar +import sentry_sdk from arroyo.backends.kafka.consumer import KafkaPayload +from arroyo.processing.strategies import BatchStep from arroyo.processing.strategies.abstract import ProcessingStrategy, ProcessingStrategyFactory +from arroyo.processing.strategies.batching import ValuesBatch from arroyo.processing.strategies.commit import CommitOffsets from arroyo.processing.strategies.run_task import RunTask -from arroyo.types import Commit, FilteredPayload, Message, Partition +from arroyo.types import BrokerValue, Commit, FilteredPayload, Message, Partition from sentry.conf.types.kafka_definition import Topic, get_topic_codec from sentry.remote_subscriptions.models import BaseRemoteSubscription +from sentry.utils import metrics logger = logging.getLogger(__name__) @@ -54,7 +60,40 @@ def handle_result(self, subscription: U | None, result: 
T): class ResultsStrategyFactory(ProcessingStrategyFactory[KafkaPayload], Generic[T, U]): - def __init__(self) -> None: + parallel_executor: ThreadPoolExecutor | None = None + + parallel = False + """ + Does the consumer process unrelated messages in parallel? + """ + + max_batch_size = 500 + """ + How many messages will be batched at once when in parallel mode. + """ + + max_batch_time = 10 + """ + The maximum time in seconds to accumulate a batch of check-ins. + """ + + def __init__( + self, + mode: Literal["parallel", "serial"] = "serial", + max_batch_size: int | None = None, + max_batch_time: int | None = None, + max_workers: int | None = None, + ) -> None: + self.mode = mode + if mode == "parallel": + self.parallel = True + self.parallel_executor = ThreadPoolExecutor(max_workers=max_workers) + + if max_batch_size is not None: + self.max_batch_size = max_batch_size + if max_batch_time is not None: + self.max_batch_time = max_batch_time + self.result_processor = self.result_processor_cls() self.codec = get_topic_codec(self.topic_for_codec) @@ -68,6 +107,26 @@ def topic_for_codec(self) -> Topic: def result_processor_cls(self) -> type[ResultProcessor[T, U]]: pass + @abc.abstractmethod + def build_payload_grouping_key(self, result: T) -> str: + """ + Used in parallel processing mode. This method should return a string used to + group related results together for serial processing. + """ + pass + + @property + @abc.abstractmethod + def identifier(self) -> str: + """ + A unique identifier for this consumer - used to differentiate it in stats + """ + pass + + def shutdown(self) -> None: + if self.parallel_executor: + self.parallel_executor.shutdown() + def decode_payload(self, payload: KafkaPayload | FilteredPayload) -> T | None: assert not isinstance(payload, FilteredPayload) try: @@ -79,20 +138,95 @@ def decode_payload(self, payload: KafkaPayload | FilteredPayload) -> T | None: ) return None + def create_with_partitions( + self, + commit: Commit, + partitions: Mapping[Partition, int], + ) -> ProcessingStrategy[KafkaPayload]: + if self.parallel: + return self.create_thread_parallel_worker(commit) + else: + return self.create_serial_worker(commit) + + def create_serial_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: + return RunTask( + function=self.process_single, + next_step=CommitOffsets(commit), + ) + def process_single(self, message: Message[KafkaPayload | FilteredPayload]): result = self.decode_payload(message.payload) if result is not None: self.result_processor(result) - def create_serial_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: - return RunTask( - function=self.process_single, + def create_thread_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: + assert self.parallel_executor is not None + batch_processor = RunTask( + function=self.process_batch, next_step=CommitOffsets(commit), ) + return BatchStep( + max_batch_size=self.max_batch_size, + max_batch_time=self.max_batch_time, + next_step=batch_processor, + ) - def create_with_partitions( - self, - commit: Commit, - partitions: Mapping[Partition, int], - ) -> ProcessingStrategy[KafkaPayload]: - return self.create_serial_worker(commit) + def partition_message_batch(self, message: Message[ValuesBatch[KafkaPayload]]) -> list[list[T]]: + """ + Takes a batch of messages and partitions them based on the `build_payload_grouping_key` method. + Returns a list of the partitioned lists of messages.
+ """ + batch = message.payload + + batch_mapping: Mapping[str, list[T]] = defaultdict(list) + for item in batch: + assert isinstance(item, BrokerValue) + + result = self.decode_payload(item.payload) + if result is None: + continue + + key = self.build_payload_grouping_key(result) + batch_mapping[key].append(result) + + # Number of messages that are being processed in this batch + metrics.gauge( + "remote_subscriptions.result_consumer.parallel_batch_count", + len(batch), + tags={"identifier": self.identifier, "mode": self.mode}, + ) + # Number of groups we've collected to be processed in parallel + metrics.gauge( + "remote_subscriptions.result_consumer.parallel_batch_groups", + len(batch_mapping), + tags={"identifier": self.identifier, "mode": self.mode}, + ) + + return list(batch_mapping.values()) + + def process_batch(self, message: Message[ValuesBatch[KafkaPayload]]): + """ + Receives batches of messages. This function will take the batch and group them together + using `build_payload_grouping_key`, which ensures order is preserved. Each group is then + executed using a ThreadPoolWorker. + + By batching we're able to process messages in parallel while guaranteeing that no messages + are processed out of order. + """ + assert self.parallel_executor is not None + partitioned_values = self.partition_message_batch(message) + + # Submit groups for processing + with sentry_sdk.start_transaction(op="process_batch", name="monitors.monitor_consumer"): + futures = [ + self.parallel_executor.submit(self.process_group, group) + for group in partitioned_values + ] + wait(futures) + + def process_group(self, items: list[T]): + """ + Process a group of related messages serially. + """ + for item in items: + self.result_processor(item) diff --git a/src/sentry/replays/lib/new_query/parsers.py b/src/sentry/replays/lib/new_query/parsers.py index 4229fa491f1b79..092f37862b6479 100644 --- a/src/sentry/replays/lib/new_query/parsers.py +++ b/src/sentry/replays/lib/new_query/parsers.py @@ -19,7 +19,10 @@ def parse_float(value: str) -> float: def parse_int(value: str) -> int: """Coerce to int or fail.""" - return int(parse_float(value)) + try: + return int(parse_float(value)) + except (ValueError, CouldNotParseValue): + raise CouldNotParseValue("Failed to parse int.") def parse_duration(value: str) -> int: @@ -30,7 +33,6 @@ def parse_duration(value: str) -> int: milliseconds = parse_int(value) if milliseconds % 1000: # TODO: remove once we support milliseconds. - # TODO: this error isn't actually returned to the frontend, it's caught and then we raise a ParseError raise CouldNotParseValue( f"Replays only supports second-resolution timestamps at this time. Try '{milliseconds // 1000}s' instead." ) diff --git a/src/sentry/replays/usecases/query/__init__.py b/src/sentry/replays/usecases/query/__init__.py index 986c594d59a742..96ccd3ec4d0958 100644 --- a/src/sentry/replays/usecases/query/__init__.py +++ b/src/sentry/replays/usecases/query/__init__.py @@ -103,14 +103,19 @@ def handle_search_filters( # are top level filters they are implicitly AND'ed in the WHERE/HAVING clause. Otherwise # explicit operators are used. 
if isinstance(search_filter, SearchFilter): + try: condition = search_filter_to_condition(search_config, search_filter) if condition is None: raise ParseError(f"Unsupported search field: {search_filter.key.name}") except OperatorNotSupported: raise ParseError(f"Invalid operator specified for `{search_filter.key.name}`") - except CouldNotParseValue: - raise ParseError(f"Could not parse value for `{search_filter.key.name}`") + except CouldNotParseValue as e: + err_msg = f"Could not parse value for `{search_filter.key.name}`." + if e.args and e.args[0]: + # avoid using str(e) as it may expose stack trace info + err_msg += f" Detail: {e.args[0]}" + raise ParseError(err_msg) if look_back == "AND": look_back = None diff --git a/src/sentry/rules/match.py b/src/sentry/rules/match.py index a21a249228cf44..9fca4e945c50ca 100644 --- a/src/sentry/rules/match.py +++ b/src/sentry/rules/match.py @@ -1,8 +1,9 @@ from collections.abc import Iterable +from enum import StrEnum from typing import Any -class MatchType: +class MatchType(StrEnum): CONTAINS = "co" ENDS_WITH = "ew" EQUAL = "eq" diff --git a/src/sentry/runner/commands/devserver.py b/src/sentry/runner/commands/devserver.py index 5a2b9a947a8f4a..219a9f7dc8be8d 100644 --- a/src/sentry/runner/commands/devserver.py +++ b/src/sentry/runner/commands/devserver.py @@ -30,6 +30,7 @@ "worker": ["sentry", "run", "worker", "-c", "1", "--autoreload"], "celery-beat": ["sentry", "run", "cron", "--autoreload"], "server": ["sentry", "run", "web"], + "taskworker": ["sentry", "run", "taskworker"], } _SUBSCRIPTION_RESULTS_CONSUMERS = [ @@ -138,6 +139,11 @@ def _get_daemon(name: str) -> tuple[str, list[str]]: type=click.Choice(["control", "region"]), help="The silo mode to run this devserver instance in. Choices are control, region, none", ) +@click.option( + "--taskworker/--no-taskworker", + default=False, + help="Run kafka-based task workers", +) @click.argument( "bind", default=None, @@ -164,6 +170,7 @@ def devserver( client_hostname: str, ngrok: str | None, silo: str | None, + taskworker: bool, ) -> NoReturn: "Starts a lightweight web server for development." if bind is None: @@ -286,6 +293,9 @@ def devserver( click.echo("--ingest was provided, implicitly enabling --workers") workers = True + if taskworker: + daemons.append(_get_daemon("taskworker")) + if workers and not celery_beat: click.secho( "If you want to run periodic tasks from celery (celerybeat), you need to also pass --celery-beat.", @@ -354,10 +364,19 @@ def devserver( # Create all topics if the Kafka eventstream is selected if kafka_consumers: - kafka_container_name = ( - "kafka-kafka-1" if os.environ.get("USE_NEW_DEVSERVICES") == "1" else "sentry_kafka" + use_new_devservices = os.environ.get("USE_NEW_DEVSERVICES") == "1" + valid_kafka_container_names = ["kafka-kafka-1", "sentry_kafka"] + kafka_container_name = "kafka-kafka-1" if use_new_devservices else "sentry_kafka" + kafka_container_warning_message = ( + f""" +Devserver is configured to work with the revamped devservices. Looks like the `{kafka_container_name}` container is not running. +Please run `devservices up` to start it. If you would like to use devserver with `sentry devservices`, set `USE_NEW_DEVSERVICES=0` in your environment.""" + if use_new_devservices + else f""" +Devserver is configured to work with `sentry devservices`. Looks like the `{kafka_container_name}` container is not running. +Please run `sentry devservices up kafka` to start it. 
If you would like to use devserver with the revamped devservices, set `USE_NEW_DEVSERVICES=1` in your environment.""" ) - if kafka_container_name not in containers: + if not any(name in containers for name in valid_kafka_container_names): raise click.ClickException( f""" Devserver is configured to start some kafka consumers, but Kafka @@ -373,7 +392,7 @@ def devserver( SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream" -and run `sentry devservices up kafka`. +{kafka_container_warning_message} Alternatively, run without --workers. """ diff --git a/src/sentry/runner/commands/devservices.py b/src/sentry/runner/commands/devservices.py index 5b0b91f5a7782c..d4d2d7dfca3f62 100644 --- a/src/sentry/runner/commands/devservices.py +++ b/src/sentry/runner/commands/devservices.py @@ -303,6 +303,21 @@ def up( """ from sentry.runner import configure + if os.environ.get("USE_NEW_DEVSERVICES", "0") != "1": + click.secho( + """ +WARNING: We're transitioning from `sentry devservices` to the new and improved `devservices` in January 2025. +To give the new devservices a try, set the `USE_NEW_DEVSERVICES` environment variable to `1`. For a full list of commands, see +https://github.com/getsentry/devservices?tab=readme-ov-file#commands + +Instead of running `sentry devservices up`, consider using `devservices up`. +For Sentry employees - if you hit any bumps or have feedback, we'd love to hear from you in #discuss-dev-infra. +Thanks for helping the Dev Infra team improve this experience! + + """, + fg="yellow", + ) + configure() containers = _prepare_containers( @@ -520,6 +535,21 @@ def down(project: str, service: list[str]) -> None: an explicit list of services to bring down. """ + if os.environ.get("USE_NEW_DEVSERVICES", "0") != "1": + click.secho( + """ +WARNING: We're transitioning from `sentry devservices` to the new and improved `devservices` in January 2025. +To give the new devservices a try, set the `USE_NEW_DEVSERVICES` environment variable to `1`. For a full list of commands, see +https://github.com/getsentry/devservices?tab=readme-ov-file#commands + +Instead of running `sentry devservices down`, consider using `devservices down`. +For Sentry employees - if you hit any bumps or have feedback, we'd love to hear from you in #discuss-dev-infra. +Thanks for helping the Dev Infra team improve this experience! 
+ + """, + fg="yellow", + ) + def _down(container: docker.models.containers.Container) -> None: click.secho(f"> Stopping '{container.name}' container", fg="red") container.stop() diff --git a/src/sentry/runner/commands/run.py b/src/sentry/runner/commands/run.py index 73814b5e4e77d6..75679d41fc50d2 100644 --- a/src/sentry/runner/commands/run.py +++ b/src/sentry/runner/commands/run.py @@ -7,6 +7,7 @@ from typing import Any import click +from django.utils import autoreload from sentry.bgtasks.api import managed_bgtasks from sentry.runner.decorators import configuration, log_options @@ -230,8 +231,6 @@ def worker(ignore_unknown_queues: bool, **options: Any) -> None: raise click.ClickException(message) if options["autoreload"]: - from django.utils import autoreload - autoreload.run_with_reloader(run_worker, **options) else: run_worker(**options) @@ -243,13 +242,33 @@ def worker(ignore_unknown_queues: bool, **options: Any) -> None: @click.option( "--max-task-count", help="Number of tasks this worker should run before exiting", default=10000 ) +@click.option( + "--namespace", help="The dedicated task namespace that taskworker operates on", default=None +) @log_options() @configuration -def taskworker(rpc_host: str, max_task_count: int, **options: Any) -> None: +def taskworker(**options: Any) -> None: + """ + Run a taskworker worker + """ + if options["autoreload"]: + autoreload.run_with_reloader(run_taskworker, **options) + else: + run_taskworker(**options) + + +def run_taskworker( + rpc_host: str, max_task_count: int, namespace: str | None, **options: Any +) -> None: + """ + taskworker factory that can be reloaded + """ from sentry.taskworker.worker import TaskWorker with managed_bgtasks(role="taskworker"): - worker = TaskWorker(rpc_host=rpc_host, max_task_count=max_task_count, **options) + worker = TaskWorker( + rpc_host=rpc_host, max_task_count=max_task_count, namespace=namespace, **options + ) exitcode = worker.start() raise SystemExit(exitcode) @@ -397,6 +416,11 @@ def cron(**options: Any) -> None: is_flag=True, default=True, ) +@click.option( + "--stale-threshold-sec", + type=click.IntRange(min=300), + help="Routes stale messages to stale topic if provided. 
This feature is currently being tested, do not pass in production yet.", +) @click.option( "--log-level", type=click.Choice(["debug", "info", "warning", "error", "critical"], case_sensitive=False), @@ -481,6 +505,7 @@ def dev_consumer(consumer_names: tuple[str, ...]) -> None: synchronize_commit_group=None, synchronize_commit_log_topic=None, enable_dlq=False, + stale_threshold_sec=None, healthcheck_file_path=None, enforce_schema=True, ) diff --git a/src/sentry/search/eap/columns.py b/src/sentry/search/eap/columns.py index 58245dc36b872e..13b3f7467a8095 100644 --- a/src/sentry/search/eap/columns.py +++ b/src/sentry/search/eap/columns.py @@ -14,6 +14,7 @@ from sentry.exceptions import InvalidSearchQuery from sentry.search.eap import constants +from sentry.search.events.constants import SPAN_MODULE_CATEGORY_VALUES from sentry.search.events.types import SnubaParams from sentry.search.utils import DEVICE_CLASS from sentry.utils.validators import is_event_id, is_span_id @@ -246,6 +247,11 @@ def datetime_processor(datetime_string: str) -> str: internal_name="sentry.status", search_type="string", ), + ResolvedColumn( + public_alias="span.status_code", + internal_name="sentry.status_code", + search_type="string", + ), ResolvedColumn( public_alias="trace", internal_name="sentry.trace_id", @@ -321,7 +327,6 @@ def datetime_processor(datetime_string: str) -> str: simple_sentry_field("release"), simple_sentry_field("sdk.name"), simple_sentry_field("sdk.version"), - simple_sentry_field("span.status_code"), simple_sentry_field("span_id"), simple_sentry_field("trace.status"), simple_sentry_field("transaction.method"), @@ -430,11 +435,21 @@ def device_class_context_constructor(params: SnubaParams) -> VirtualColumnContex ) +def module_context_constructor(params: SnubaParams) -> VirtualColumnContext: + value_map = {key: key for key in SPAN_MODULE_CATEGORY_VALUES} + return VirtualColumnContext( + from_column_name="sentry.category", + to_column_name="span.module", + value_map=value_map, + ) + + VIRTUAL_CONTEXTS = { "project": project_context_constructor("project"), "project.slug": project_context_constructor("project.slug"), "project.name": project_context_constructor("project.name"), "device.class": device_class_context_constructor, + "span.module": module_context_constructor, } diff --git a/src/sentry/search/eap/constants.py b/src/sentry/search/eap/constants.py index de195272b869ff..a802707fec2401 100644 --- a/src/sentry/search/eap/constants.py +++ b/src/sentry/search/eap/constants.py @@ -44,8 +44,8 @@ } # https://github.com/getsentry/snuba/blob/master/snuba/web/rpc/v1/endpoint_time_series.py -# The RPC limits us to 1000 points per timeseries -MAX_ROLLUP_POINTS = 1000 +# The RPC limits us to 2016 points per timeseries +MAX_ROLLUP_POINTS = 2016 # Copied from snuba, a number of total seconds VALID_GRANULARITIES = frozenset( { @@ -55,6 +55,7 @@ 2 * 60, 5 * 60, 10 * 60, + 15 * 60, 30 * 60, # minutes 1 * 3600, 3 * 3600, diff --git a/src/sentry/search/eap/spans.py b/src/sentry/search/eap/spans.py index e1b477bfc528fb..601cf4b465b843 100644 --- a/src/sentry/search/eap/spans.py +++ b/src/sentry/search/eap/spans.py @@ -4,6 +4,7 @@ from re import Match from typing import cast +import sentry_sdk from parsimonious.exceptions import ParseError from sentry_protos.snuba.v1.request_common_pb2 import RequestMeta from sentry_protos.snuba.v1.trace_item_attribute_pb2 import ( @@ -54,9 +55,13 @@ class SearchResolver: field(default_factory=dict) ) + @sentry_sdk.trace def resolve_meta(self, referrer: str) -> RequestMeta: if 
self.params.organization_id is None: raise Exception("An organization is required to resolve queries") + span = sentry_sdk.get_current_span() + if span: + span.set_tag("SearchResolver.params", self.params) return RequestMeta( organization_id=self.params.organization_id, referrer=referrer, @@ -65,10 +70,18 @@ def resolve_meta(self, referrer: str) -> RequestMeta: end_timestamp=self.params.rpc_end_date, ) - def resolve_query(self, querystring: str | None) -> TraceItemFilter | None: + @sentry_sdk.trace + def resolve_query( + self, querystring: str | None + ) -> tuple[TraceItemFilter | None, list[VirtualColumnContext | None]]: """Given a query string in the public search syntax eg. `span.description:foo` construct the TraceItemFilter""" environment_query = self.__resolve_environment_query() - query = self.__resolve_query(querystring) + query, contexts = self.__resolve_query(querystring) + span = sentry_sdk.get_current_span() + if span: + span.set_tag("SearchResolver.query_string", querystring) + span.set_tag("SearchResolver.resolved_query", query) + span.set_tag("SearchResolver.environment_query", environment_query) # The RPC request meta does not contain the environment. # So we have to inject it as a query condition. @@ -78,18 +91,21 @@ def resolve_query(self, querystring: str | None) -> TraceItemFilter | None: # But if both are defined, we AND them together. if not environment_query: - return query + return query, contexts if not query: - return environment_query - - return TraceItemFilter( - and_filter=AndFilter( - filters=[ - environment_query, - query, - ] - ) + return environment_query, [] + + return ( + TraceItemFilter( + and_filter=AndFilter( + filters=[ + environment_query, + query, + ] + ) + ), + contexts, ) def __resolve_environment_query(self) -> TraceItemFilter | None: @@ -115,9 +131,11 @@ def __resolve_environment_query(self) -> TraceItemFilter | None: return TraceItemFilter(and_filter=AndFilter(filters=filters)) - def __resolve_query(self, querystring: str | None) -> TraceItemFilter | None: + def __resolve_query( + self, querystring: str | None + ) -> tuple[TraceItemFilter | None, list[VirtualColumnContext | None]]: if querystring is None: - return None + return None, [] try: parsed_terms = event_search.parse_search_query( querystring, @@ -142,8 +160,10 @@ def __resolve_query(self, querystring: str | None) -> TraceItemFilter | None: def _resolve_boolean_conditions( self, terms: event_filter.ParsedTerms - ) -> TraceItemFilter | None: - if len(terms) == 1: + ) -> tuple[TraceItemFilter | None, list[VirtualColumnContext | None]]: + if len(terms) == 0: + return None, [] + elif len(terms) == 1: if isinstance(terms[0], event_search.ParenExpression): return self._resolve_boolean_conditions(terms[0].children) elif isinstance(terms[0], event_search.SearchFilter): @@ -196,38 +216,54 @@ def _resolve_boolean_conditions( lhs, rhs = terms[:1], terms[1:] operator = AndFilter - resolved_lhs = self._resolve_boolean_conditions(lhs) if lhs else None - resolved_rhs = self._resolve_boolean_conditions(rhs) if rhs else None + resolved_lhs, contexts_lhs = self._resolve_boolean_conditions(lhs) + resolved_rhs, contexts_rhs = self._resolve_boolean_conditions(rhs) + contexts = contexts_lhs + contexts_rhs if resolved_lhs is not None and resolved_rhs is not None: if operator == AndFilter: - return TraceItemFilter(and_filter=AndFilter(filters=[resolved_lhs, resolved_rhs])) + return ( + TraceItemFilter(and_filter=AndFilter(filters=[resolved_lhs, resolved_rhs])), + contexts, + ) else: - return 
TraceItemFilter(or_filter=OrFilter(filters=[resolved_lhs, resolved_rhs])) + return ( + TraceItemFilter(or_filter=OrFilter(filters=[resolved_lhs, resolved_rhs])), + contexts, + ) elif resolved_lhs is None and resolved_rhs is not None: - return resolved_rhs + return resolved_rhs, contexts elif resolved_lhs is not None and resolved_rhs is None: - return resolved_lhs + return resolved_lhs, contexts else: - return None + return None, contexts - def _resolve_terms(self, terms: event_filter.ParsedTerms) -> TraceItemFilter | None: + def _resolve_terms( + self, terms: event_filter.ParsedTerms + ) -> tuple[TraceItemFilter | None, list[VirtualColumnContext | None]]: parsed_terms = [] + resolved_contexts = [] for item in terms: if isinstance(item, event_search.SearchFilter): - parsed_terms.append(self.resolve_term(cast(event_search.SearchFilter, item))) + resolved_term, resolved_context = self.resolve_term( + cast(event_search.SearchFilter, item) + ) + parsed_terms.append(resolved_term) + resolved_contexts.append(resolved_context) else: if self.config.use_aggregate_conditions: raise NotImplementedError("Can't filter on aggregates yet") if len(parsed_terms) > 1: - return TraceItemFilter(and_filter=AndFilter(filters=parsed_terms)) + return TraceItemFilter(and_filter=AndFilter(filters=parsed_terms)), resolved_contexts elif len(parsed_terms) == 1: - return parsed_terms[0] + return parsed_terms[0], resolved_contexts else: - return None + return None, [] - def resolve_term(self, term: event_search.SearchFilter) -> TraceItemFilter: + def resolve_term( + self, term: event_search.SearchFilter + ) -> tuple[TraceItemFilter, VirtualColumnContext | None]: resolved_column, context = self.resolve_column(term.key.name) raw_value = term.value.raw_value if term.value.is_wildcard(): @@ -251,12 +287,15 @@ def resolve_term(self, term: event_search.SearchFilter) -> TraceItemFilter: else: raise InvalidSearchQuery(f"Unknown operator: {term.operator}") if isinstance(resolved_column.proto_definition, AttributeKey): - return TraceItemFilter( - comparison_filter=ComparisonFilter( - key=resolved_column.proto_definition, - op=operator, - value=self._resolve_search_value(resolved_column, term.operator, raw_value), - ) + return ( + TraceItemFilter( + comparison_filter=ComparisonFilter( + key=resolved_column.proto_definition, + op=operator, + value=self._resolve_search_value(resolved_column, term.operator, raw_value), + ) + ), + context, ) else: raise NotImplementedError("Can't filter on aggregates yet") @@ -325,16 +364,20 @@ def clean_contexts( final_contexts.append(context) return final_contexts + @sentry_sdk.trace def resolve_columns( self, selected_columns: list[str] - ) -> tuple[list[ResolvedColumn | ResolvedFunction], list[VirtualColumnContext]]: + ) -> tuple[list[ResolvedColumn | ResolvedFunction], list[VirtualColumnContext | None]]: """Given a list of columns resolve them and get their context if applicable This function will also dedupe the virtual column contexts if necessary """ + span = sentry_sdk.get_current_span() resolved_columns = [] resolved_contexts = [] stripped_columns = [column.strip() for column in selected_columns] + if span: + span.set_tag("SearchResolver.selected_columns", stripped_columns) has_aggregates = False for column in stripped_columns: match = fields.is_function(column) @@ -355,7 +398,7 @@ def resolve_columns( resolved_columns.append(project_column) resolved_contexts.append(project_context) - return resolved_columns, self.clean_contexts(resolved_contexts) + return resolved_columns, resolved_contexts 
def resolve_column( self, column: str, match: Match | None = None @@ -372,6 +415,7 @@ def get_field_type(self, column: str) -> str: resolved_column, _ = self.resolve_column(column) return resolved_column.search_type + @sentry_sdk.trace def resolve_attributes( self, columns: list[str] ) -> tuple[list[ResolvedColumn], list[VirtualColumnContext | None]]: @@ -420,6 +464,9 @@ def resolve_attribute(self, column: str) -> tuple[ResolvedColumn, VirtualColumnC if field_type not in constants.TYPE_MAP: raise InvalidSearchQuery(f"Unsupported type {field_type} in {column}") + if column.startswith("sentry_tags"): + field = f"sentry.{field}" + search_type = cast(constants.SearchType, field_type) column_definition = ResolvedColumn( public_alias=column, internal_name=field, search_type=search_type @@ -432,6 +479,7 @@ def resolve_attribute(self, column: str) -> tuple[ResolvedColumn, VirtualColumnC else: raise InvalidSearchQuery(f"Could not parse {column}") + @sentry_sdk.trace def resolve_aggregates( self, columns: list[str] ) -> tuple[list[ResolvedFunction], list[VirtualColumnContext | None]]: diff --git a/src/sentry/search/eap/utils.py b/src/sentry/search/eap/utils.py new file mode 100644 index 00000000000000..28619c2929f47c --- /dev/null +++ b/src/sentry/search/eap/utils.py @@ -0,0 +1,17 @@ +from datetime import datetime + +from google.protobuf.timestamp_pb2 import Timestamp +from sentry_protos.snuba.v1.endpoint_time_series_pb2 import TimeSeriesRequest + + +def add_start_end_conditions( + in_msg: TimeSeriesRequest, start: datetime, end: datetime +) -> TimeSeriesRequest: + start_time_proto = Timestamp() + start_time_proto.FromDatetime(start) + end_time_proto = Timestamp() + end_time_proto.FromDatetime(end) + in_msg.meta.start_timestamp.CopyFrom(start_time_proto) + in_msg.meta.end_timestamp.CopyFrom(end_time_proto) + + return in_msg diff --git a/src/sentry/search/events/builder/metrics.py b/src/sentry/search/events/builder/metrics.py index d57893ae33b027..46c84384d8bcb8 100644 --- a/src/sentry/search/events/builder/metrics.py +++ b/src/sentry/search/events/builder/metrics.py @@ -1953,10 +1953,6 @@ def __init__( [column for column in self.columns if column not in self.aggregates] ) - @cached_property - def non_aggregate_columns(self) -> list[str]: - return list(set(self.original_selected_columns) - set(self.timeseries_columns)) - @property def translated_groupby(self) -> list[str]: """Get the names of the groupby columns to create the series names""" diff --git a/src/sentry/search/events/builder/spans_indexed.py b/src/sentry/search/events/builder/spans_indexed.py index 79a82d81f46a1e..3e603f55cf5802 100644 --- a/src/sentry/search/events/builder/spans_indexed.py +++ b/src/sentry/search/events/builder/spans_indexed.py @@ -71,6 +71,13 @@ class SpansEAPQueryBuilder(BaseQueryBuilder): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + def get_field_type(self, field: str) -> str | None: + tag_match = constants.TYPED_TAG_KEY_RE.search(field) + field_type = tag_match.group("type") if tag_match else None + if field_type == "number": + return "number" + return super().get_field_type(field) + def resolve_field(self, raw_field: str, alias: bool = False) -> Column: # try the typed regex first if len(raw_field) > constants.MAX_TAG_KEY_LENGTH: diff --git a/src/sentry/search/events/builder/spans_metrics.py b/src/sentry/search/events/builder/spans_metrics.py index fd9688c4528089..5ab6ea363afb26 100644 --- a/src/sentry/search/events/builder/spans_metrics.py +++ 
b/src/sentry/search/events/builder/spans_metrics.py @@ -8,6 +8,13 @@ ) from sentry.search.events.datasets.spans_metrics import SpansMetricsDatasetConfig from sentry.search.events.types import SelectType +from sentry.snuba.metrics.naming_layer.mri import parse_mri + +SIZE_FIELDS = { + "http.decoded_response_content_length": "byte", + "http.response_content_length": "byte", + "http.response_transfer_size": "byte", +} class SpansMetricsQueryBuilder(MetricsQueryBuilder): @@ -15,6 +22,7 @@ class SpansMetricsQueryBuilder(MetricsQueryBuilder): spans_metrics_builder = True has_transaction = False config_class = SpansMetricsDatasetConfig + size_fields = SIZE_FIELDS column_remapping = { # We want to remap `message` to `span.description` for the free @@ -33,6 +41,15 @@ def get_field_type(self, field: str) -> str | None: if field in ["span.duration", "span.self_time"]: return "duration" + if unit := self.size_fields.get(field): + return unit + + mri = constants.SPAN_METRICS_MAP.get(field) + if mri is not None: + parsed_mri = parse_mri(mri) + if parsed_mri is not None and parsed_mri.unit in constants.RESULT_TYPES: + return parsed_mri.unit + return None def resolve_select( diff --git a/src/sentry/search/events/datasets/discover.py b/src/sentry/search/events/datasets/discover.py index 9178ae61de05cd..d909e4ca6723d3 100644 --- a/src/sentry/search/events/datasets/discover.py +++ b/src/sentry/search/events/datasets/discover.py @@ -993,14 +993,6 @@ def function_converter(self) -> Mapping[str, SnQLFunction]: snql_aggregate=self._resolve_web_vital_score_function, default_result_type="number", ), - SnQLFunction( - "weighted_performance_score", - required_args=[ - NumericColumn("column"), - ], - snql_aggregate=self._resolve_weighted_web_vital_score_function, - default_result_type="number", - ), SnQLFunction( "opportunity_score", required_args=[ @@ -1121,98 +1113,97 @@ def _resolve_http_status_code(self, _: str) -> SelectType: @cached_property def _resolve_project_threshold_config(self) -> SelectType: - org_id = ( - self.builder.params.organization.id - if self.builder.params.organization is not None - else None - ) + project_thresholds = {} + project_threshold_config_keys = [] + project_threshold_config_values = [] + + project_threshold_override_config_keys = [] + project_threshold_override_config_values = [] + + org_id = self.builder.params.organization_id project_ids = self.builder.params.project_ids - project_threshold_configs = ( - ProjectTransactionThreshold.objects.filter( - organization_id=org_id, - project_id__in=project_ids, + if org_id is not None: + project_threshold_configs = ( + ProjectTransactionThreshold.objects.filter( + organization_id=org_id, + project_id__in=project_ids, + ) + .order_by("project_id") + .values_list("project_id", "threshold", "metric") ) - .order_by("project_id") - .values_list("project_id", "threshold", "metric") - ) - transaction_threshold_configs = ( - ProjectTransactionThresholdOverride.objects.filter( - organization_id=org_id, - project_id__in=project_ids, + transaction_threshold_configs = ( + ProjectTransactionThresholdOverride.objects.filter( + organization_id=org_id, + project_id__in=project_ids, + ) + .order_by("project_id") + .values_list("transaction", "project_id", "threshold", "metric") ) - .order_by("project_id") - .values_list("transaction", "project_id", "threshold", "metric") - ) - num_project_thresholds = project_threshold_configs.count() - sentry_sdk.set_tag("project_threshold.count", num_project_thresholds) - sentry_sdk.set_tag( - 
"project_threshold.count.grouped", - format_grouped_length(num_project_thresholds, [10, 100, 250, 500]), - ) - - num_transaction_thresholds = transaction_threshold_configs.count() - sentry_sdk.set_tag("txn_threshold.count", num_transaction_thresholds) - sentry_sdk.set_tag( - "txn_threshold.count.grouped", - format_grouped_length(num_transaction_thresholds, [10, 100, 250, 500]), - ) - - if ( - num_project_thresholds + num_transaction_thresholds - > MAX_QUERYABLE_TRANSACTION_THRESHOLDS - ): - raise InvalidSearchQuery( - f"Exceeded {MAX_QUERYABLE_TRANSACTION_THRESHOLDS} configured transaction thresholds limit, try with fewer Projects." + num_project_thresholds = project_threshold_configs.count() + sentry_sdk.set_tag("project_threshold.count", num_project_thresholds) + sentry_sdk.set_tag( + "project_threshold.count.grouped", + format_grouped_length(num_project_thresholds, [10, 100, 250, 500]), ) - # Arrays need to have toUint64 casting because clickhouse will define the type as the narrowest possible type - # that can store listed argument types, which means the comparison will fail because of mismatched types - project_thresholds = {} - project_threshold_config_keys = [] - project_threshold_config_values = [] - for project_id, threshold, metric in project_threshold_configs: - metric = TRANSACTION_METRICS[metric] - if ( - threshold == DEFAULT_PROJECT_THRESHOLD - and metric == DEFAULT_PROJECT_THRESHOLD_METRIC - ): - # small optimization, if the configuration is equal to the default, - # we can skip it in the final query - continue - - project_thresholds[project_id] = (metric, threshold) - project_threshold_config_keys.append(Function("toUInt64", [project_id])) - project_threshold_config_values.append((metric, threshold)) + num_transaction_thresholds = transaction_threshold_configs.count() + sentry_sdk.set_tag("txn_threshold.count", num_transaction_thresholds) + sentry_sdk.set_tag( + "txn_threshold.count.grouped", + format_grouped_length(num_transaction_thresholds, [10, 100, 250, 500]), + ) - project_threshold_override_config_keys = [] - project_threshold_override_config_values = [] - for transaction, project_id, threshold, metric in transaction_threshold_configs: - metric = TRANSACTION_METRICS[metric] if ( - project_id in project_thresholds - and threshold == project_thresholds[project_id][1] - and metric == project_thresholds[project_id][0] + num_project_thresholds + num_transaction_thresholds + > MAX_QUERYABLE_TRANSACTION_THRESHOLDS ): - # small optimization, if the configuration is equal to the project - # configs, we can skip it in the final query - continue - - elif ( - project_id not in project_thresholds - and threshold == DEFAULT_PROJECT_THRESHOLD - and metric == DEFAULT_PROJECT_THRESHOLD_METRIC - ): - # small optimization, if the configuration is equal to the default - # and no project configs were set, we can skip it in the final query - continue + raise InvalidSearchQuery( + f"Exceeded {MAX_QUERYABLE_TRANSACTION_THRESHOLDS} configured transaction thresholds limit, try with fewer Projects." 
+ ) - project_threshold_override_config_keys.append( - (Function("toUInt64", [project_id]), transaction) - ) - project_threshold_override_config_values.append((metric, threshold)) + # Arrays need to have toUint64 casting because clickhouse will define the type as the narrowest possible type + # that can store listed argument types, which means the comparison will fail because of mismatched types + for project_id, threshold, metric in project_threshold_configs: + metric_name = TRANSACTION_METRICS[metric] + if ( + threshold == DEFAULT_PROJECT_THRESHOLD + and metric_name == DEFAULT_PROJECT_THRESHOLD_METRIC + ): + # small optimization, if the configuration is equal to the default, + # we can skip it in the final query + continue + + project_thresholds[project_id] = (metric_name, threshold) + project_threshold_config_keys.append(Function("toUInt64", [project_id])) + project_threshold_config_values.append((metric_name, threshold)) + + for transaction, project_id, threshold, metric in transaction_threshold_configs: + metric_name = TRANSACTION_METRICS[metric] + if ( + project_id in project_thresholds + and threshold == project_thresholds[project_id][1] + and metric_name == project_thresholds[project_id][0] + ): + # small optimization, if the configuration is equal to the project + # configs, we can skip it in the final query + continue + + elif ( + project_id not in project_thresholds + and threshold == DEFAULT_PROJECT_THRESHOLD + and metric_name == DEFAULT_PROJECT_THRESHOLD_METRIC + ): + # small optimization, if the configuration is equal to the default + # and no project configs were set, we can skip it in the final query + continue + + project_threshold_override_config_keys.append( + (Function("toUInt64", [project_id]), transaction) + ) + project_threshold_override_config_values.append((metric_name, threshold)) project_threshold_config_index: SelectType = Function( "indexOf", @@ -1710,58 +1701,6 @@ def _resolve_web_vital_score_function( alias, ) - def _resolve_weighted_web_vital_score_function( - self, - args: Mapping[str, Column], - alias: str, - ) -> SelectType: - column = args["column"] - if column.key not in [ - "score.lcp", - "score.fcp", - "score.fid", - "score.cls", - "score.ttfb", - ]: - raise InvalidSearchQuery( - "weighted_performance_score only supports performance score measurements" - ) - total_score_column = self.builder.column("measurements.score.total") - return Function( - "greatest", - [ - Function( - "least", - [ - Function( - "divide", - [ - Function( - "sum", - [column], - ), - Function( - "countIf", - [ - Function( - "greaterOrEquals", - [ - total_score_column, - 0, - ], - ) - ], - ), - ], - ), - 1.0, - ], - ), - 0.0, - ], - alias, - ) - def _resolve_web_vital_opportunity_score_function( self, args: Mapping[str, Column], @@ -1777,7 +1716,7 @@ def _resolve_web_vital_opportunity_score_function( "score.total", ]: raise InvalidSearchQuery( - "weighted_performance_score only supports performance score measurements" + "opportunity_score only supports performance score measurements" ) weight_column = ( @@ -1867,7 +1806,7 @@ def _issue_filter_converter(self, search_filter: SearchFilter) -> WhereType | No value = to_list(search_filter.value.value) # `unknown` is a special value for when there is no issue associated with the event group_short_ids = [v for v in value if v and v != "unknown"] - general_group_filter_values = ["" for v in value if not v or v == "unknown"] + general_group_filter_values = [0 for v in value if not v or v == "unknown"] if group_short_ids and 
self.builder.params.organization is not None: try: diff --git a/src/sentry/search/events/datasets/function_aliases.py b/src/sentry/search/events/datasets/function_aliases.py index 071cccd476b759..70480101f3b9f6 100644 --- a/src/sentry/search/events/datasets/function_aliases.py +++ b/src/sentry/search/events/datasets/function_aliases.py @@ -12,19 +12,16 @@ from sentry.search.events import constants from sentry.search.events.builder.base import BaseQueryBuilder from sentry.search.events.types import SelectType -from sentry.sentry_metrics.configuration import UseCaseKey -from sentry.sentry_metrics.use_case_id_registry import UseCaseID from sentry.utils.hashlib import fnv1a_32 def resolve_project_threshold_config( # See resolve_tag_value signature - tag_value_resolver: Callable[[UseCaseID | UseCaseKey, int, str], int | str | None], + tag_value_resolver: Callable[[int, str], int | str | None], # See resolve_tag_key signature - column_name_resolver: Callable[[UseCaseID | UseCaseKey, int, str], str], + column_name_resolver: Callable[[int, str], str], project_ids: Sequence[int], org_id: int, - use_case_id: UseCaseID | None = None, ) -> SelectType: """ Shared function that resolves the project threshold configuration used by both snuba/metrics @@ -89,7 +86,7 @@ def resolve_project_threshold_config( # and no project configs were set, we can skip it in the final query continue - transaction_id = tag_value_resolver(use_case_id, org_id, transaction) + transaction_id = tag_value_resolver(org_id, transaction) # Don't add to the config if we can't resolve it if transaction_id is None: continue @@ -116,7 +113,7 @@ def resolve_project_threshold_config( project_threshold_override_config_keys, ( Column(name="project_id"), - Column(name=column_name_resolver(use_case_id, org_id, "transaction")), + Column(name=column_name_resolver(org_id, "transaction")), ), ], constants.PROJECT_THRESHOLD_OVERRIDE_CONFIG_INDEX_ALIAS, @@ -183,7 +180,7 @@ def resolve_metrics_percentile( fixed_percentile: float | None = None, extra_conditions: list[Function] | None = None, ) -> SelectType: - if fixed_percentile is None: + if fixed_percentile is None and isinstance(args["percentile"], float): fixed_percentile = args["percentile"] if fixed_percentile not in constants.METRIC_PERCENTILES: raise IncompatibleMetricsQuery("Custom quantile incompatible with metrics") @@ -242,6 +239,10 @@ def resolve_avg_compare_if( alias: str | None, ) -> SelectType: """Helper function for avg compare""" + if not isinstance(args["comparison_column"], str): + raise InvalidSearchQuery( + f"Invalid column type: expected a string, got {args['comparison_column']}" + ) return Function( "avgIf", [ @@ -280,10 +281,12 @@ def resolve_metrics_layer_percentile( fixed_percentile: float | None = None, ) -> SelectType: # TODO: rename to just resolve_metrics_percentile once the non layer code can be retired - if fixed_percentile is None: + if fixed_percentile is None and isinstance(args["percentile"], float): fixed_percentile = args["percentile"] if fixed_percentile not in constants.METRIC_PERCENTILES: raise IncompatibleMetricsQuery("Custom quantile incompatible with metrics") + if not isinstance(args["column"], str): + raise InvalidSearchQuery(f"Invalid column type: expected a string, got {args['column']}") column = resolve_mri(args["column"]) return ( Function( @@ -305,7 +308,7 @@ def resolve_metrics_layer_percentile( def resolve_division( - dividend: SelectType, divisor: SelectType, alias: str, fallback: Any | None = None + dividend: SelectType, divisor: SelectType, alias: str | None,
fallback: Any | None = None ) -> SelectType: return Function( "if", diff --git a/src/sentry/search/events/datasets/metrics.py b/src/sentry/search/events/datasets/metrics.py index 54e02d63f41e86..faf147d5a023a9 100644 --- a/src/sentry/search/events/datasets/metrics.py +++ b/src/sentry/search/events/datasets/metrics.py @@ -1,10 +1,11 @@ from __future__ import annotations -from collections.abc import Callable, Mapping +from collections.abc import Callable, Mapping, MutableMapping from django.utils.functional import cached_property from snuba_sdk import Column, Condition, Function, Op, OrderBy +from sentry import features from sentry.api.event_search import SearchFilter from sentry.exceptions import IncompatibleMetricsQuery, InvalidSearchQuery from sentry.search.events import constants, fields @@ -80,11 +81,6 @@ def resolve_metric(self, value: str) -> int: self.builder.metric_ids.add(metric_id) return metric_id - def resolve_value(self, value: str) -> int: - value_id = self.builder.resolve_tag_value(value) - - return value_id - @property def should_skip_interval_calculation(self): return self.builder.builder_config.skip_time_conditions and ( @@ -648,26 +644,6 @@ def function_converter(self) -> Mapping[str, fields.MetricsFunction]: snql_distribution=self._resolve_web_vital_score_function, default_result_type="number", ), - fields.MetricsFunction( - "weighted_performance_score", - required_args=[ - fields.MetricArg( - "column", - allowed_columns=[ - "measurements.score.fcp", - "measurements.score.lcp", - "measurements.score.fid", - "measurements.score.inp", - "measurements.score.cls", - "measurements.score.ttfb", - ], - allow_custom_measurements=False, - ) - ], - calculated_args=[resolve_metric_id], - snql_distribution=self._resolve_weighted_web_vital_score_function, - default_result_type="number", - ), fields.MetricsFunction( "opportunity_score", required_args=[ @@ -1005,16 +981,13 @@ def _resolve_transaction_alias_on_demand(self, _: str) -> SelectType: @cached_property def _resolve_project_threshold_config(self) -> SelectType: + org_id = self.builder.params.organization_id + if org_id is None: + raise InvalidSearchQuery("Missing organization") return function_aliases.resolve_project_threshold_config( - tag_value_resolver=lambda _use_case_id, _org_id, value: self.builder.resolve_tag_value( - value - ), - column_name_resolver=lambda _use_case_id, _org_id, value: self.builder.resolve_column_name( - value - ), - org_id=( - self.builder.params.organization.id if self.builder.params.organization else None - ), + tag_value_resolver=lambda _org_id, value: self.builder.resolve_tag_value(value), + column_name_resolver=lambda _org_id, value: self.builder.resolve_column_name(value), + org_id=org_id, project_ids=self.builder.params.project_ids, ) @@ -1073,17 +1046,18 @@ def _transaction_filter_converter(self, search_filter: SearchFilter) -> WhereTyp return None if isinstance(value, list): - resolved_value = [] + resolved_values = [] for item in value: resolved_item = self.builder.resolve_tag_value(item) if resolved_item is None: raise IncompatibleMetricsQuery(f"Transaction value {item} in filter not found") - resolved_value.append(resolved_item) + resolved_values.append(resolved_item) + value = resolved_values else: resolved_value = self.builder.resolve_tag_value(value) if resolved_value is None: raise IncompatibleMetricsQuery(f"Transaction value {value} in filter not found") - value = resolved_value + value = resolved_value if search_filter.value.is_wildcard(): return Condition( @@ -1260,8 +1234,9 @@ def 
_resolve_histogram_function( buckets""" zoom_params = getattr(self.builder, "zoom_params", None) num_buckets = getattr(self.builder, "num_buckets", 250) + histogram_aliases = getattr(self.builder, "histogram_aliases", []) + histogram_aliases.append(alias) metric_condition = Function("equals", [Column("metric_id"), args["metric_id"]]) - self.builder.histogram_aliases.append(alias) return Function( f"histogramIf({num_buckets})", [ @@ -1326,6 +1301,8 @@ def _resolve_user_misery_function( args: Mapping[str, str | Column | SelectType | int | float], alias: str | None = None, ) -> SelectType: + if not isinstance(args["alpha"], float) or not isinstance(args["beta"], float): + raise InvalidSearchQuery("Cannot query user_misery with non floating point alpha/beta") if args["satisfaction"] is not None: raise IncompatibleMetricsQuery( "Cannot query user_misery with a threshold parameter on the metrics dataset" @@ -1493,9 +1470,13 @@ def _resolve_web_vital_function( ) -> SelectType: column = args["column"] metric_id = args["metric_id"] - quality = args["quality"].lower() + quality = args["quality"] - if column not in [ + if not isinstance(quality, str): + raise InvalidSearchQuery(f"Invalid argument quality: {quality}") + quality = quality.lower() + + if not isinstance(column, str) or column not in [ "measurements.lcp", "measurements.fcp", "measurements.fp", @@ -1549,12 +1530,18 @@ def _resolve_web_vital_function( def _resolve_web_vital_score_function( self, args: Mapping[str, str | Column | SelectType | int | float], - alias: str, + alias: str | None, ) -> SelectType: + """Returns the normalized score (0.0-1.0) for a given web vital. + This function exists because we don't store a metric for the normalized score. + The normalized score is calculated by dividing the sum of measurements.score.* by the sum of measurements.score.weight.* + + To calculate the total performance score, see _resolve_total_performance_score_function.
+ """ column = args["column"] metric_id = args["metric_id"] - if column not in [ + if not isinstance(column, str) or column not in [ "measurements.score.lcp", "measurements.score.fcp", "measurements.score.fid", @@ -1630,115 +1617,6 @@ def _resolve_web_vital_score_function( alias, ) - def _resolve_weighted_web_vital_score_function( - self, - args: Mapping[str, str | Column | SelectType | int | float], - alias: str, - ) -> SelectType: - column = args["column"] - metric_id = args["metric_id"] - - if column not in [ - "measurements.score.lcp", - "measurements.score.fcp", - "measurements.score.fid", - "measurements.score.inp", - "measurements.score.cls", - "measurements.score.ttfb", - ]: - raise InvalidSearchQuery("performance_score only supports measurements") - - return Function( - "greatest", - [ - Function( - "least", - [ - Function( - "if", - [ - Function( - "and", - [ - Function( - "greater", - [ - Function( - "sumIf", - [ - Column("value"), - Function( - "equals", - [Column("metric_id"), metric_id], - ), - ], - ), - 0, - ], - ), - Function( - "greater", - [ - Function( - "countIf", - [ - Column("value"), - Function( - "equals", - [ - Column("metric_id"), - self.resolve_metric( - "measurements.score.total" - ), - ], - ), - ], - ), - 0, - ], - ), - ], - ), - Function( - "divide", - [ - Function( - "sumIf", - [ - Column("value"), - Function( - "equals", [Column("metric_id"), metric_id] - ), - ], - ), - Function( - "countIf", - [ - Column("value"), - Function( - "equals", - [ - Column("metric_id"), - self.resolve_metric( - "measurements.score.total" - ), - ], - ), - ], - ), - ], - ), - 0.0, - ], - ), - 1.0, - ], - ), - 0.0, - ], - alias, - ) - def _resolve_web_vital_opportunity_score_function( self, args: Mapping[str, str | Column | SelectType | int | float], @@ -1747,7 +1625,7 @@ def _resolve_web_vital_opportunity_score_function( column = args["column"] metric_id = args["metric_id"] - if column not in [ + if not isinstance(column, str) or column not in [ "measurements.score.lcp", "measurements.score.fcp", "measurements.score.fid", @@ -1903,7 +1781,7 @@ def _resolve_total_web_vital_opportunity_score_with_fixed_weights_function( alias, ) - def _resolve_total_score_weights_function(self, column: str, alias: str) -> SelectType: + def _resolve_total_score_weights_function(self, column: str, alias: str | None) -> SelectType: """Calculates the total sum score weights for a given web vital. This must be cached since it runs another query.""" @@ -1962,8 +1840,16 @@ def _resolve_count_scores_function( def _resolve_total_performance_score_function( self, _: Mapping[str, str | Column | SelectType | int | float], - alias: str, + alias: str | None, ) -> SelectType: + """Returns the total performance score based on a page/site's web vitals. 
+ This function is calculated by: + the summation of (normalized_vital_score * weight) for each vital, divided by the sum of all weights + - normalized_vital_score is the 0.0-1.0 score for each individual vital + - weight is the 0.0-1.0 weight for each individual vital (this is a constant value stored in constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS) + - if all webvitals have data, then the sum of all weights is 1 + - normalized_vital_score is obtained through _resolve_web_vital_score_function (see docstring on that function for more details) + """ vitals = ["lcp", "fcp", "cls", "ttfb", "inp"] scores = { vital: Function( @@ -1982,9 +1868,38 @@ def _resolve_total_performance_score_function( for vital in vitals } + weights = { + vital: Function( + "if", + [ + Function( + "isZeroOrNull", + [ + Function( + "countIf", + [ + Column("value"), + Function( + "equals", + [ + Column("metric_id"), + self.resolve_metric(f"measurements.score.{vital}"), + ], + ), + ], + ), + ], + ), + 0, + constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS[vital], + ], + ) + for vital in vitals + } + # TODO: Is there a way to sum more than 2 values at once? return Function( - "plus", + "divide", [ Function( "plus", @@ -1995,17 +1910,54 @@ def _resolve_total_performance_score_function( Function( "plus", [ - scores["lcp"], - scores["fcp"], + Function( + "plus", + [ + scores["lcp"], + scores["fcp"], + ], + ), + scores["cls"], ], ), - scores["cls"], + scores["ttfb"], ], ), - scores["ttfb"], + scores["inp"], ], ), - scores["inp"], + ( + Function( + "plus", + [ + Function( + "plus", + [ + Function( + "plus", + [ + Function( + "plus", + [ + weights["lcp"], + weights["fcp"], + ], + ), + weights["cls"], + ], + ), + weights["ttfb"], + ], + ), + weights["inp"], + ], + ) + if features.has( + "organizations:performance-vitals-handle-missing-webvitals", + self.builder.params.organization, + ) + else 1 + ), ], alias, ) @@ -2044,6 +1996,8 @@ def _resolve_total_transaction_duration(self, alias: str, scope: str) -> SelectT def _resolve_time_spent_percentage( self, args: Mapping[str, str | Column | SelectType | int | float], alias: str ) -> SelectType: + if not isinstance(args["scope"], str): + raise InvalidSearchQuery(f"Invalid scope: {args['scope']}") total_time = self._resolve_total_transaction_duration( constants.TOTAL_TRANSACTION_DURATION_ALIAS, args["scope"] ) @@ -2066,7 +2020,7 @@ def _resolve_time_spent_percentage( def _resolve_epm( self, - args: Mapping[str, str | Column | SelectType | int | float], + args: MutableMapping[str, str | Column | SelectType | int | float], alias: str | None = None, extra_condition: Function | None = None, ) -> SelectType: @@ -2076,7 +2030,7 @@ def _resolve_epm( def _resolve_spm( self, - args: Mapping[str, str | Column | SelectType | int | float], + args: MutableMapping[str, str | Column | SelectType | int | float], alias: str | None = None, extra_condition: Function | None = None, ) -> SelectType: @@ -2086,7 +2040,7 @@ def _resolve_spm( def _resolve_eps( self, - args: Mapping[str, str | Column | SelectType | int | float], + args: MutableMapping[str, str | Column | SelectType | int | float], alias: str | None = None, extra_condition: Function | None = None, ) -> SelectType: @@ -2100,7 +2054,7 @@ def _resolve_rate( args: Mapping[str, str | Column | SelectType | int | float], alias: str | None = None, extra_condition: Function | None = None, - metric: str | None = "transaction.duration", + metric: str = "transaction.duration", ) -> SelectType: base_condition = Function( "equals", diff --git 
a/src/sentry/search/events/datasets/metrics_summaries.py b/src/sentry/search/events/datasets/metrics_summaries.py deleted file mode 100644 index dd1076fa8ff8e4..00000000000000 --- a/src/sentry/search/events/datasets/metrics_summaries.py +++ /dev/null @@ -1,120 +0,0 @@ -from __future__ import annotations - -from collections.abc import Callable, Mapping - -from snuba_sdk import And, Column, Condition, Direction, Function, Op, OrderBy - -from sentry.api.event_search import SearchFilter -from sentry.search.events import constants -from sentry.search.events.builder.base import BaseQueryBuilder -from sentry.search.events.datasets import field_aliases, filter_aliases, function_aliases -from sentry.search.events.datasets.base import DatasetConfig -from sentry.search.events.fields import IntervalDefault, NumberRange, SnQLFunction, with_default -from sentry.search.events.types import SelectType, WhereType - - -class MetricsSummariesDatasetConfig(DatasetConfig): - def __init__(self, builder: BaseQueryBuilder): - self.builder = builder - - @property - def search_filter_converter( - self, - ) -> Mapping[str, Callable[[SearchFilter], WhereType | None]]: - return { - constants.PROJECT_ALIAS: self._project_slug_filter_converter, - constants.PROJECT_NAME_ALIAS: self._project_slug_filter_converter, - "metric": self._metric_filter_converter, - } - - @property - def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]: - return { - constants.PROJECT_ALIAS: self._resolve_project_slug_alias, - constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias, - "avg_metric": self._resolve_avg_alias, - } - - @property - def function_converter(self) -> Mapping[str, SnQLFunction]: - return { - function.name: function - for function in [ - SnQLFunction( - "examples", - snql_aggregate=self._resolve_random_samples, - optional_args=[with_default(1, NumberRange("count", 1, None))], - private=True, - ), - SnQLFunction( - "rounded_timestamp", - required_args=[IntervalDefault("interval", 1, None)], - snql_column=lambda args, alias: function_aliases.resolve_rounded_timestamp( - args["interval"], alias, timestamp_column="end_timestamp" - ), - private=True, - ), - ] - } - - @property - def orderby_converter(self) -> Mapping[str, Callable[[Direction], OrderBy]]: - return {} - - def _project_slug_filter_converter(self, search_filter: SearchFilter) -> WhereType | None: - return filter_aliases.project_slug_converter(self.builder, search_filter) - - def _metric_filter_converter(self, search_filter: SearchFilter) -> WhereType | None: - column = search_filter.key.name - value = search_filter.value.value - return And( - [ - Condition(self.builder.column(column), Op.EQ, value), - # The metrics summaries table orders by the cityHash64 of the metric name. - # In order to take full advantage of the order by of the table, add an - # additional condition on the cityHash64 of the metric name. 
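[Aside, not part of the diff] The `_resolve_total_performance_score_function` docstring in the hunk above describes a weighted average that re-normalizes when some web vitals have no data: sum(normalized_vital_score * weight) divided by the sum of the weights of the vitals that actually reported data. A minimal, self-contained sketch of that arithmetic follows; the weights below are illustrative placeholders for `constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS`, not the real values, and the helper name is invented.

from __future__ import annotations

# Illustrative stand-ins for constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS.
WEIGHTS = {"lcp": 0.30, "fcp": 0.15, "inp": 0.30, "cls": 0.15, "ttfb": 0.10}

def total_performance_score(scores: dict[str, float | None]) -> float:
    """scores maps vital -> normalized 0.0-1.0 score, or None when the vital has no data."""
    present = {vital: score for vital, score in scores.items() if score is not None}
    weighted_sum = sum(WEIGHTS[vital] * score for vital, score in present.items())
    weight_sum = sum(WEIGHTS[vital] for vital in present)  # 1.0 when every vital has data
    return weighted_sum / weight_sum if weight_sum else 0.0

# All vitals present: the divisor is 1.0.
print(total_performance_score({"lcp": 0.9, "fcp": 0.8, "inp": 0.7, "cls": 1.0, "ttfb": 0.95}))
# Missing INP: the remaining weights (0.70) re-normalize the score instead of dragging it down.
print(total_performance_score({"lcp": 0.9, "fcp": 0.8, "inp": None, "cls": 1.0, "ttfb": 0.95}))

With every vital present the divisor is 1.0, which matches the `else 1` branch in the diff that is taken when the `organizations:performance-vitals-handle-missing-webvitals` feature is off.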
- Condition( - Function("cityHash64", [self.builder.column(column)]), - Op.EQ, - Function("cityHash64", [value]), - ), - ] - ) - - def _resolve_project_slug_alias(self, alias: str) -> SelectType: - return field_aliases.resolve_project_slug_alias(self.builder, alias) - - def _resolve_avg_alias(self, alias: str) -> SelectType: - return Function( - "divide", - [self.builder.column("sum_metric"), self.builder.column("count_metric")], - alias, - ) - - def _resolve_random_samples( - self, - args: Mapping[str, str | Column | SelectType | int | float], - alias: str, - ) -> SelectType: - offset = 0 if self.builder.offset is None else self.builder.offset.offset - limit = 0 if self.builder.limit is None else self.builder.limit.limit - return function_aliases.resolve_random_samples( - [ - # DO NOT change the order of these columns as it - # changes the order of the tuple in the response - # which WILL cause errors where it assumes this - # order - self.builder.resolve_column("span.group"), - self.builder.resolve_column("timestamp"), - self.builder.resolve_column("id"), - self.builder.resolve_column("min_metric"), - self.builder.resolve_column("max_metric"), - self.builder.resolve_column("sum_metric"), - self.builder.resolve_column("count_metric"), - self.builder.resolve_column("avg_metric"), - ], - alias, - offset, - limit, - size=int(args["count"]), - ) diff --git a/src/sentry/search/events/datasets/spans_metrics.py b/src/sentry/search/events/datasets/spans_metrics.py index 358dab0107a887..5713605e3b93d0 100644 --- a/src/sentry/search/events/datasets/spans_metrics.py +++ b/src/sentry/search/events/datasets/spans_metrics.py @@ -5,7 +5,7 @@ from typing import TypedDict import sentry_sdk -from snuba_sdk import AliasedExpression, Column, Condition, Function, Identifier, Op, OrderBy +from snuba_sdk import Column, Condition, Function, Identifier, Op, OrderBy from sentry.api.event_search import SearchFilter from sentry.exceptions import IncompatibleMetricsQuery, InvalidSearchQuery @@ -16,7 +16,6 @@ from sentry.search.events.fields import SnQLStringArg, get_function_alias from sentry.search.events.types import SelectType, WhereType from sentry.search.utils import DEVICE_CLASS -from sentry.snuba.metrics.naming_layer.mri import SpanMRI from sentry.snuba.referrer import Referrer @@ -1362,278 +1361,3 @@ def _resolve_trace_error_count( @property def orderby_converter(self) -> Mapping[str, OrderBy]: return {} - - -class SpansMetricsLayerDatasetConfig(DatasetConfig): - missing_function_error = IncompatibleMetricsQuery - - def __init__(self, builder: spans_metrics.SpansMetricsQueryBuilder): - self.builder = builder - self.total_span_duration: float | None = None - - def resolve_mri(self, value: str) -> Column: - """Given the public facing column name resolve it to the MRI and return a Column""" - # If the query builder has not detected a transaction use the light self time metric to get a performance boost - if value == "span.self_time" and not self.builder.has_transaction: - return Column(constants.SELF_TIME_LIGHT) - else: - return Column(constants.SPAN_METRICS_MAP[value]) - - @property - def search_filter_converter( - self, - ) -> Mapping[str, Callable[[SearchFilter], WhereType | None]]: - return {} - - @property - def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]: - return { - constants.SPAN_MODULE_ALIAS: lambda alias: field_aliases.resolve_span_module( - self.builder, alias - ) - } - - @property - def function_converter(self) -> Mapping[str, fields.MetricsFunction]: - """Make sure to 
update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since - the Metric Layer will actually handle which dataset each function goes to - """ - - function_converter = { - function.name: function - for function in [ - fields.MetricsFunction( - "count_unique", - required_args=[ - fields.MetricArg( - "column", - allowed_columns=["user"], - allow_custom_measurements=False, - ) - ], - snql_metric_layer=lambda args, alias: Function( - "count_unique", - [self.resolve_mri("user")], - alias, - ), - default_result_type="integer", - ), - fields.MetricsFunction( - "epm", - snql_metric_layer=lambda args, alias: Function( - "rate", - [ - self.resolve_mri("span.self_time"), - args["interval"], - 60, - ], - alias, - ), - optional_args=[fields.IntervalDefault("interval", 1, None)], - default_result_type="rate", - ), - fields.MetricsFunction( - "eps", - snql_metric_layer=lambda args, alias: Function( - "rate", - [ - self.resolve_mri("span.self_time"), - args["interval"], - 1, - ], - alias, - ), - optional_args=[fields.IntervalDefault("interval", 1, None)], - default_result_type="rate", - ), - fields.MetricsFunction( - "count", - snql_metric_layer=lambda args, alias: Function( - "count", - [ - self.resolve_mri("span.self_time"), - ], - alias, - ), - default_result_type="integer", - ), - fields.MetricsFunction( - "sum", - optional_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", - allowed_columns=constants.SPAN_METRIC_SUMMABLE_COLUMNS, - allow_custom_measurements=False, - ), - ), - ], - snql_metric_layer=lambda args, alias: Function( - "sum", - [self.resolve_mri(args["column"])], - alias, - ), - default_result_type="duration", - ), - fields.MetricsFunction( - "avg", - optional_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", - allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS.union( - constants.SPAN_METRIC_BYTES_COLUMNS - ), - ), - ), - ], - snql_metric_layer=lambda args, alias: Function( - "avg", - [self.resolve_mri(args["column"])], - alias, - ), - result_type_fn=self.reflective_result_type(), - default_result_type="duration", - ), - fields.MetricsFunction( - "percentile", - required_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS - ), - ), - fields.NumberRange("percentile", 0, 1), - ], - snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile( - args, - alias, - self.resolve_mri, - ), - result_type_fn=self.reflective_result_type(), - default_result_type="duration", - ), - fields.MetricsFunction( - "p50", - optional_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", - allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS, - allow_custom_measurements=False, - ), - ), - ], - snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile( - args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.50 - ), - default_result_type="duration", - ), - fields.MetricsFunction( - "p75", - optional_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", - allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS, - allow_custom_measurements=False, - ), - ), - ], - snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile( - args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.75 - ), - default_result_type="duration", - ), - fields.MetricsFunction( - "p95", 
- optional_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", - allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS, - allow_custom_measurements=False, - ), - ), - ], - snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile( - args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.95 - ), - default_result_type="duration", - ), - fields.MetricsFunction( - "p99", - optional_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", - allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS, - allow_custom_measurements=False, - ), - ), - ], - snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile( - args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.99 - ), - default_result_type="duration", - ), - fields.MetricsFunction( - "p100", - optional_args=[ - fields.with_default( - "span.self_time", - fields.MetricArg( - "column", - allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS, - allow_custom_measurements=False, - ), - ), - ], - snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile( - args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=1.0 - ), - default_result_type="duration", - ), - fields.MetricsFunction( - "http_error_count", - snql_metric_layer=lambda args, alias: AliasedExpression( - Column( - SpanMRI.HTTP_ERROR_COUNT_LIGHT.value - if not self.builder.has_transaction - else SpanMRI.HTTP_ERROR_COUNT.value - ), - alias, - ), - default_result_type="integer", - ), - fields.MetricsFunction( - "http_error_rate", - snql_metric_layer=lambda args, alias: AliasedExpression( - Column( - SpanMRI.HTTP_ERROR_RATE_LIGHT.value - if not self.builder.has_transaction - else SpanMRI.HTTP_ERROR_RATE.value - ), - alias, - ), - default_result_type="percentage", - ), - ] - } - - for alias, name in constants.SPAN_FUNCTION_ALIASES.items(): - if name in function_converter: - function_converter[alias] = function_converter[name].alias_as(alias) - - return function_converter - - @property - def orderby_converter(self) -> Mapping[str, OrderBy]: - return {} diff --git a/src/sentry/search/events/filter.py b/src/sentry/search/events/filter.py index d0c8d4fc2bc73a..9f5b3b1289430d 100644 --- a/src/sentry/search/events/filter.py +++ b/src/sentry/search/events/filter.py @@ -73,7 +73,7 @@ def translate_transaction_status(val: str) -> str: return SPAN_STATUS_NAME_TO_CODE[val] -def to_list(value: list[str] | str) -> list[str]: +def to_list[T](value: list[T] | T) -> list[T]: if isinstance(value, list): return value return [value] diff --git a/src/sentry/search/events/types.py b/src/sentry/search/events/types.py index f4b8e4e4672311..abba86f4a621a5 100644 --- a/src/sentry/search/events/types.py +++ b/src/sentry/search/events/types.py @@ -65,9 +65,12 @@ class QueryFramework: class EventsMeta(TypedDict): + datasetReason: NotRequired[str] fields: dict[str, str] tips: NotRequired[dict[str, str | None]] isMetricsData: NotRequired[bool] + isMetricsExtractedData: NotRequired[bool] + discoverSplitDecision: NotRequired[str] class EventsResponse(TypedDict): @@ -100,6 +103,9 @@ def __post_init__(self) -> None: # Only used in the trend query builder self.aliases: dict[str, Alias] | None = {} + def __repr__(self) -> str: + return f"" + def parse_stats_period(self) -> None: if self.stats_period is not None: self.end = django_timezone.now() diff --git a/src/sentry/seer/similarity/utils.py b/src/sentry/seer/similarity/utils.py index 
a7b8f088029581..49c5b0c417d610 100644 --- a/src/sentry/seer/similarity/utils.py +++ b/src/sentry/seer/similarity/utils.py @@ -5,6 +5,8 @@ from sentry import options from sentry.eventstore.models import Event, GroupEvent +from sentry.grouping.api import get_contributing_variant_and_component +from sentry.grouping.variants import BaseVariant, ComponentVariant from sentry.killswitches import killswitch_matches_context from sentry.models.project import Project from sentry.utils import metrics @@ -15,11 +17,18 @@ MAX_FRAME_COUNT = 30 MAX_EXCEPTION_COUNT = 30 FULLY_MINIFIED_STACKTRACE_MAX_FRAME_COUNT = 20 -SEER_ELIGIBLE_PLATFORMS_EVENTS = frozenset( +# Events' `platform` values are tested against this list before events are sent to Seer. Checking +# this separately from backfill status allows us to backfill projects which have events from +# multiple platforms, some supported and some not, and not worry about events from the unsupported +# platforms getting sent to Seer during ingest. +SEER_INELIGIBLE_EVENT_PLATFORMS = frozenset(["other"]) # We don't know what's in the event +# Event platforms corresponding to project platforms which were backfilled before we started +# blocking events with more than `MAX_FRAME_COUNT` frames from being sent to Seer (which we do to +# prevent possible over-grouping). Ultimately we want a more unified solution, but for now, we're +# just not going to apply the filter to events from these platforms. +EVENT_PLATFORMS_BYPASSING_FRAME_COUNT_CHECK = frozenset( [ - "csharp", "go", - "java", "javascript", "node", "php", @@ -27,136 +36,14 @@ "ruby", ] ) -# An original set of platforms were backfilled allowing more than 30 system contributing frames -# being set to seer. Unfortunately, this can cause over grouping. We will need to reduce -# these set of platforms but for now we will blacklist them. -SYSTEM_FRAME_CHECK_BLACKLIST_PLATFORMS = frozenset( +# Existing projects with these platforms shouldn't be backfilled and new projects with these +# platforms shouldn't have Seer enabled. 
+SEER_INELIGIBLE_PROJECT_PLATFORMS = frozenset( [ - "bun", - "cordova", - "deno", - "django", - "go", - "go-echo", - "go-fasthttp", - "go-fiber", - "go-gin", - "go-http", - "go-iris", - "go-martini", - "go-negroni", - "ionic", - "javascript", - "javascript-angular", - "javascript-angularjs", - "javascript-astro", - "javascript-backbone", - "javascript-browser", - "javascript-electron", - "javascript-ember", - "javascript-gatsby", - "javascript-nextjs", - "javascript-performance-onboarding-1-install", - "javascript-performance-onboarding-2-configure", - "javascript-performance-onboarding-3-verify", - "javascript-react", - "javascript-react-performance-onboarding-1-install", - "javascript-react-performance-onboarding-2-configure", - "javascript-react-performance-onboarding-3-verify", - "javascript-react-with-error-monitoring", - "javascript-react-with-error-monitoring-performance-and-replay", - "javascript-remix", - "javascript-replay-onboarding-1-install", - "javascript-replay-onboarding-2-configure", - "javascript-solid", - "javascript-svelte", - "javascript-sveltekit", - "javascript-vue", - "javascript-vue-with-error-monitoring", - "node", - "node-awslambda", - "node-azurefunctions", - "node-connect", - "node-express", - "node-fastify", - "node-gcpfunctions", - "node-hapi", - "node-koa", - "node-nestjs", - "node-nodeawslambda", - "node-nodegcpfunctions", - "node-profiling-onboarding-0-alert", - "node-profiling-onboarding-1-install", - "node-profiling-onboarding-2-configure-performance", - "node-profiling-onboarding-3-configure-profiling", - "node-serverlesscloud", - "PHP", - "php", - "php-laravel", - "php-monolog", - "php-symfony", - "php-symfony2", - "python", - "python-aiohttp", - "python-asgi", - "python-awslambda", - "python-azurefunctions", - "python-bottle", - "python-celery", - "python-chalice", - "python-django", - "python-falcon", - "python-fastapi", - "python-flask", - "python-gcpfunctions", - "python-profiling-onboarding-0-alert", - "python-profiling-onboarding-1-install", - "python-profiling-onboarding-3-configure-profiling", - "python-pylons", - "python-pymongo", - "python-pyramid", - "python-pythonawslambda", - "python-pythonazurefunctions", - "python-pythongcpfunctions", - "python-pythonserverless", - "python-quart", - "python-rq", - "python-sanic", - "python-serverless", - "python-starlette", - "python-tornado", - "python-tryton", - "python-wsgi", - "react", - "react-native", - "react-native-tracing", - "ruby", - "ruby-rack", - "ruby-rails", - ] -) -SEER_ELIGIBLE_PLATFORMS = SYSTEM_FRAME_CHECK_BLACKLIST_PLATFORMS | frozenset( - [ - "android", - "android-profiling-onboarding-1-install", - "android-profiling-onboarding-3-configure-profiling", - "android-profiling-onboarding-4-upload", - "csharp", - "csharp-aspnetcore", - "dart", - "dotnet", - "flutter", - "groovy", - "java", - "java-android", - "java-appengine", - "java-log4j", - "java-log4j2", - "java-logging", - "java-logback", - "java-spring", - "java-spring-boot", - "perl", + # We have no clue what's in these projects + "other", + "", + None, ] ) BASE64_ENCODED_PREFIXES = [ @@ -170,6 +57,8 @@ class ReferrerOptions(StrEnum): INGEST = "ingest" BACKFILL = "backfill" + DELETION = "deletion" + SIMILAR_ISSUES_TAB = "similar_issues_tab" class TooManyOnlySystemFramesException(Exception): @@ -236,9 +125,8 @@ def get_stacktrace_string(data: dict[str, Any], platform: str | None = None) -> exception, frame_metrics ) if ( - platform not in SYSTEM_FRAME_CHECK_BLACKLIST_PLATFORMS + platform not in 
EVENT_PLATFORMS_BYPASSING_FRAME_COUNT_CHECK and frame_metrics["is_frames_truncated"] - and not app_hash ): raise TooManyOnlySystemFramesException @@ -282,15 +170,12 @@ def generate_stacktrace_string( }, ) - # Metric for errors with no header, only one frame and no filename - # TODO: Determine how often this occurs and if we should send to seer, then remove metric + # Return empty stacktrace for events with no header, only one frame and no filename + # since this is too little info to group on if frame_metrics["has_no_filename"] and len(result_parts) == 1: header, frames = result_parts[0][0], result_parts[0][1] if header == "" and len(frames) == 1: - metrics.incr( - "seer.grouping.no_header_one_frame_no_filename", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - ) + stacktrace_str = "" return stacktrace_str.strip() @@ -397,7 +282,6 @@ def get_stacktrace_string_with_metrics( data: dict[str, Any], platform: str | None, referrer: ReferrerOptions ) -> str | None: stacktrace_string = None - key = "grouping.similarity.did_call_seer" sample_rate = options.get("seer.similarity.metrics_sample_rate") try: stacktrace_string = get_stacktrace_string(data, platform) @@ -409,11 +293,7 @@ def get_stacktrace_string_with_metrics( tags={"platform": platform, "referrer": referrer}, ) if referrer == ReferrerOptions.INGEST: - metrics.incr( - key, - sample_rate=sample_rate, - tags={"call_made": False, "blocker": "over-threshold-only-system-frames"}, - ) + record_did_call_seer_metric(call_made=False, blocker="over-threshold-frames") except Exception: logger.exception("Unexpected exception in stacktrace string formatting") @@ -430,78 +310,114 @@ def event_content_has_stacktrace(event: GroupEvent | Event) -> bool: return exception_stacktrace or threads_stacktrace or only_stacktrace -def event_content_is_seer_eligible(event: GroupEvent | Event) -> bool: - """ - Determine if an event's contents makes it fit for using with Seer's similar issues model. 
- """ - # TODO: Determine if we want to filter out non-sourcemapped events - if not event_content_has_stacktrace(event): +def record_did_call_seer_metric(*, call_made: bool, blocker: str) -> None: + metrics.incr( + "grouping.similarity.did_call_seer", + sample_rate=options.get("seer.similarity.metrics_sample_rate"), + tags={"call_made": call_made, "blocker": blocker}, + ) + + +def has_too_many_contributing_frames( + event: Event | GroupEvent, + variants: dict[str, BaseVariant], + referrer: ReferrerOptions, +) -> bool: + platform = event.platform + shared_tags = {"referrer": referrer.value, "platform": platform} + + contributing_variant, contributing_component = get_contributing_variant_and_component(variants) + + # Ideally we're calling this function after we already know the event both has a stacktrace and + # is using it for grouping (in which case none of the below conditions should apply), but still + # worth checking that we have enough information to answer the question just in case + if ( + # Fingerprint, checksum, fallback variants + not isinstance(contributing_variant, ComponentVariant) + # Security violations, log-message-based grouping + or contributing_variant.variant_name == "default" + # Any ComponentVariant will have this, but this reassures mypy + or not contributing_component + # Exception-message-based grouping + or not hasattr(contributing_component, "frame_counts") + ): + # We don't bother to collect a metric on this outcome, because we shouldn't have called the + # function in the first place + return False + + # Certain platforms were backfilled before we added this filter, so to keep new events matching + # with the existing data, we turn off the filter for them (instead their stacktraces will be + # truncated) + if platform in EVENT_PLATFORMS_BYPASSING_FRAME_COUNT_CHECK: metrics.incr( - "grouping.similarity.event_content_seer_eligible", + "grouping.similarity.frame_count_filter", sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"eligible": False, "blocker": "no-stacktrace"}, + tags={**shared_tags, "outcome": "bypass"}, ) return False - if event.platform not in SEER_ELIGIBLE_PLATFORMS_EVENTS: + stacktrace_type = "in_app" if contributing_variant.variant_name == "app" else "system" + key = f"{stacktrace_type}_contributing_frames" + shared_tags["stacktrace_type"] = stacktrace_type + + if contributing_component.frame_counts[key] > MAX_FRAME_COUNT: metrics.incr( - "grouping.similarity.event_content_seer_eligible", + "grouping.similarity.frame_count_filter", sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"eligible": False, "blocker": "unsupported-platform"}, + tags={**shared_tags, "outcome": "block"}, ) - return False + return True metrics.incr( - "grouping.similarity.event_content_seer_eligible", + "grouping.similarity.frame_count_filter", sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"eligible": True, "blocker": "none"}, + tags={**shared_tags, "outcome": "pass"}, ) - return True + return False -def killswitch_enabled(project_id: int, event: GroupEvent | Event | None = None) -> bool: +def killswitch_enabled( + project_id: int | None, + referrer: ReferrerOptions, + event: GroupEvent | Event | None = None, +) -> bool: """ Check both the global and similarity-specific Seer killswitches. 
""" - + is_ingest = referrer == ReferrerOptions.INGEST + logger_prefix = f"grouping.similarity.{referrer.value}" logger_extra = {"event_id": event.event_id if event else None, "project_id": project_id} if options.get("seer.global-killswitch.enabled"): logger.warning( - "should_call_seer_for_grouping.seer_global_killswitch_enabled", + f"{logger_prefix}.seer_global_killswitch_enabled", # noqa extra=logger_extra, ) - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": "global-killswitch"}, - ) + if is_ingest: + record_did_call_seer_metric(call_made=False, blocker="global-killswitch") + return True if options.get("seer.similarity-killswitch.enabled"): logger.warning( - "should_call_seer_for_grouping.seer_similarity_killswitch_enabled", + f"{logger_prefix}.seer_similarity_killswitch_enabled", # noqa extra=logger_extra, ) - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": "similarity-killswitch"}, - ) + if is_ingest: + record_did_call_seer_metric(call_made=False, blocker="similarity-killswitch") + return True if killswitch_matches_context( "seer.similarity.grouping_killswitch_projects", {"project_id": project_id} ): logger.warning( - "should_call_seer_for_grouping.seer_similarity_project_killswitch_enabled", + f"{logger_prefix}.seer_similarity_project_killswitch_enabled", # noqa extra=logger_extra, ) - metrics.incr( - "grouping.similarity.did_call_seer", - sample_rate=options.get("seer.similarity.metrics_sample_rate"), - tags={"call_made": False, "blocker": "project-killswitch"}, - ) + if is_ingest: + record_did_call_seer_metric(call_made=False, blocker="project-killswitch") + return True return False @@ -543,7 +459,7 @@ def project_is_seer_eligible(project: Project) -> bool: the feature is enabled in the region. """ is_backfill_completed = project.get_option("sentry:similarity_backfill_completed") - is_seer_eligible_platform = project.platform in SEER_ELIGIBLE_PLATFORMS + is_seer_eligible_platform = project.platform not in SEER_INELIGIBLE_PROJECT_PLATFORMS is_region_enabled = options.get("similarity.new_project_seer_grouping.enabled") return not is_backfill_completed and is_seer_eligible_platform and is_region_enabled diff --git a/src/sentry/sentry_apps/api/bases/sentryapps.py b/src/sentry/sentry_apps/api/bases/sentryapps.py index 1ce641f943707d..f2046c61081b9f 100644 --- a/src/sentry/sentry_apps/api/bases/sentryapps.py +++ b/src/sentry/sentry_apps/api/bases/sentryapps.py @@ -120,7 +120,7 @@ class SentryAppsBaseEndpoint(IntegrationPlatformEndpoint): permission_classes: tuple[type[BasePermission], ...] = (SentryAppsAndStaffPermission,) def _get_organization_slug(self, request: Request): - organization_slug = request.json_body.get("organization") + organization_slug = request.data.get("organization") if not organization_slug or not isinstance(organization_slug, str): error_message = "Please provide a valid value for the 'organization' field." raise ValidationError({"organization": to_single_line_str(error_message)}) @@ -179,7 +179,7 @@ def convert_args(self, request: Request, *args, **kwargs): objects from URI params, we're applying the same logic for a param in the request body. 
""" - if not request.json_body: + if not request.data: return (args, kwargs) context = self._get_org_context(request) diff --git a/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py b/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py index 8f999d7eb1fc95..05b1aad13758d1 100644 --- a/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py +++ b/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py @@ -46,7 +46,7 @@ def post(self, request: Request, installation) -> Response: scope.set_tag("sentry_app_slug", installation.sentry_app.slug) try: - if request.json_body.get("grant_type") == GrantTypes.AUTHORIZATION: + if request.data.get("grant_type") == GrantTypes.AUTHORIZATION: auth_serializer: SentryAppAuthorizationSerializer = ( SentryAppAuthorizationSerializer(data=request.data) ) @@ -60,7 +60,7 @@ def post(self, request: Request, installation) -> Response: client_id=auth_serializer.validated_data.get("client_id"), user=promote_request_api_user(request), ).run() - elif request.json_body.get("grant_type") == GrantTypes.REFRESH: + elif request.data.get("grant_type") == GrantTypes.REFRESH: refresh_serializer = SentryAppRefreshAuthorizationSerializer(data=request.data) if not refresh_serializer.is_valid(): @@ -87,7 +87,7 @@ def post(self, request: Request, installation) -> Response: ) return Response({"error": e.msg or "Unauthorized"}, status=403) - attrs = {"state": request.json_body.get("state"), "application": None} + attrs = {"state": request.data.get("state"), "application": None} body = ApiTokenSerializer().serialize(token, attrs, promote_request_api_user(request)) diff --git a/src/sentry/sentry_apps/api/endpoints/sentry_app_details.py b/src/sentry/sentry_apps/api/endpoints/sentry_app_details.py index 09aae78ab8fbb1..f972ee75d74283 100644 --- a/src/sentry/sentry_apps/api/endpoints/sentry_app_details.py +++ b/src/sentry/sentry_apps/api/endpoints/sentry_app_details.py @@ -192,7 +192,7 @@ def delete(self, request: Request, sentry_app) -> Response: return Response({"detail": ["Published apps cannot be removed."]}, status=403) def _has_hook_events(self, request: Request): - if not request.json_body.get("events"): + if not request.data.get("events"): return False - return "error" in request.json_body["events"] + return "error" in request.data["events"] diff --git a/src/sentry/sentry_apps/api/endpoints/sentry_apps.py b/src/sentry/sentry_apps/api/endpoints/sentry_apps.py index 1ebe56f95d6374..eecbb92c7f8c80 100644 --- a/src/sentry/sentry_apps/api/endpoints/sentry_apps.py +++ b/src/sentry/sentry_apps/api/endpoints/sentry_apps.py @@ -71,22 +71,22 @@ def get(self, request: Request) -> Response: def post(self, request: Request, organization) -> Response: data = { - "name": request.json_body.get("name"), + "name": request.data.get("name"), "user": request.user, - "author": request.json_body.get("author"), + "author": request.data.get("author"), "organization": organization, - "webhookUrl": request.json_body.get("webhookUrl"), - "redirectUrl": request.json_body.get("redirectUrl"), - "isAlertable": request.json_body.get("isAlertable"), - "isInternal": request.json_body.get("isInternal"), - "verifyInstall": request.json_body.get("verifyInstall"), - "scopes": request.json_body.get("scopes", []), - "events": request.json_body.get("events", []), - "schema": request.json_body.get("schema", {}), - "overview": request.json_body.get("overview"), - "allowedOrigins": request.json_body.get("allowedOrigins", []), + "webhookUrl": request.data.get("webhookUrl"), + 
"redirectUrl": request.data.get("redirectUrl"), + "isAlertable": request.data.get("isAlertable"), + "isInternal": request.data.get("isInternal"), + "verifyInstall": request.data.get("verifyInstall"), + "scopes": request.data.get("scopes", []), + "events": request.data.get("events", []), + "schema": request.data.get("schema", {}), + "overview": request.data.get("overview"), + "allowedOrigins": request.data.get("allowedOrigins", []), "popularity": ( - request.json_body.get("popularity") if is_active_superuser(request) else None + request.data.get("popularity") if is_active_superuser(request) else None ), } @@ -166,7 +166,7 @@ def _filter_queryset_for_user(self, queryset: BaseQuerySet[SentryApp, SentryApp] return queryset.filter(owner_id__in=owner_ids) def _has_hook_events(self, request: Request): - if not request.json_body.get("events"): + if not request.data.get("events"): return False - return "error" in request.json_body["events"] + return "error" in request.data["events"] diff --git a/src/sentry/sentry_apps/services/app/model.py b/src/sentry/sentry_apps/services/app/model.py index 20609618b50841..b9ac92bccc638b 100644 --- a/src/sentry/sentry_apps/services/app/model.py +++ b/src/sentry/sentry_apps/services/app/model.py @@ -18,8 +18,8 @@ class RpcApiApplication(RpcModel): id: int = -1 - client_id: str = "" - client_secret: str = "" + client_id: str = Field(repr=False, default="") + client_secret: str = Field(repr=False, default="") class RpcSentryAppService(RpcModel): diff --git a/src/sentry/sentry_apps/utils/errors.py b/src/sentry/sentry_apps/utils/errors.py new file mode 100644 index 00000000000000..af43d04c00b01d --- /dev/null +++ b/src/sentry/sentry_apps/utils/errors.py @@ -0,0 +1,35 @@ +from enum import Enum + + +class SentryAppErrorType(Enum): + CLIENT = "client" + INTEGRATOR = "integrator" + SENTRY = "sentry" + + +# Represents a user/client error that occured during a Sentry App process +class SentryAppError(Exception): + error_type = SentryAppErrorType.CLIENT + status_code = 400 + + def __init__( + self, + error: Exception | None = None, + status_code: int | None = None, + ) -> None: + if status_code: + self.status_code = status_code + + +# Represents an error caused by a 3p integrator during a Sentry App process +class SentryAppIntegratorError(Exception): + error_type = SentryAppErrorType.INTEGRATOR + status_code = 400 + + def __init__( + self, + error: Exception | None = None, + status_code: int | None = None, + ) -> None: + if status_code: + self.status_code = status_code diff --git a/src/sentry/sentry_metrics/configuration.py b/src/sentry/sentry_metrics/configuration.py index 0f812f29362341..8c8b63752f26f5 100644 --- a/src/sentry/sentry_metrics/configuration.py +++ b/src/sentry/sentry_metrics/configuration.py @@ -27,8 +27,6 @@ class UseCaseKey(Enum): # backwards compatibility RELEASE_HEALTH_PG_NAMESPACE = "releasehealth" PERFORMANCE_PG_NAMESPACE = "performance" -RELEASE_HEALTH_CS_NAMESPACE = "releasehealth.cs" -PERFORMANCE_CS_NAMESPACE = "performance.cs" RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME = ( "sentry-metrics.indexer.release-health.schema-validation-rules" @@ -172,23 +170,3 @@ def initialize_main_process_state(config: MetricsIngestConfiguration) -> None: global_tag_map = {"pipeline": config.internal_metrics_tag or ""} add_global_tags(_all_threads=True, **global_tag_map) - - -HARD_CODED_UNITS = {"span.duration": "millisecond"} -ALLOWED_TYPES = {"c", "d", "s", "g"} - -# METRICS_AGGREGATES specifies the aggregates that are available for a metric type - 
AGGREGATES_TO_METRICS reverses this, -# and provides a map from the aggregate to the metric type in the form {'count': 'c', 'avg':'g', ...}. This is needed -# when the UI lets the user select the aggregate, and the backend infers the metric_type from it. It is programmatic -# and not hard-coded, so that in case of a change, the two mappings are aligned. -METRIC_TYPE_TO_AGGREGATE = { - "c": ["count"], - "g": ["avg", "min", "max", "sum"], - "d": ["p50", "p75", "p90", "p95", "p99"], - "s": ["count_unique"], -} -AGGREGATE_TO_METRIC_TYPE = { - aggregate: metric_type - for metric_type, aggregate_list in METRIC_TYPE_TO_AGGREGATE.items() - for aggregate in aggregate_list -} diff --git a/src/sentry/sentry_metrics/consumers/indexer/batch.py b/src/sentry/sentry_metrics/consumers/indexer/batch.py index ef52a8ffe1b44b..707a0a643ab291 100644 --- a/src/sentry/sentry_metrics/consumers/indexer/batch.py +++ b/src/sentry/sentry_metrics/consumers/indexer/batch.py @@ -1,7 +1,7 @@ import logging import random from collections import defaultdict -from collections.abc import Callable, Iterable, Mapping, MutableMapping, MutableSequence, Sequence +from collections.abc import Callable, Iterable, Mapping, MutableMapping, MutableSequence from dataclasses import dataclass from typing import Any, cast @@ -248,27 +248,6 @@ def _validate_message(self, parsed_payload: ParsedMessage) -> None: ) raise ValueError(f"Invalid metric tags: {tags}") - @metrics.wraps("process_messages.filter_messages") - def filter_messages(self, keys_to_remove: Sequence[BrokerMeta]) -> None: - # XXX: it is useful to be able to get a sample of organization ids that are affected by rate limits, but this is really slow. - for broker_meta in keys_to_remove: - if _should_sample_debug_log(): - sentry_sdk.set_tag( - "sentry_metrics.organization_id", - self.parsed_payloads_by_meta[broker_meta]["org_id"], - ) - sentry_sdk.set_tag( - "sentry_metrics.metric_name", self.parsed_payloads_by_meta[broker_meta]["name"] - ) - logger.error( - "process_messages.dropped_message", - extra={ - "reason": "cardinality_limit", - }, - ) - - self.filtered_msg_meta.update(keys_to_remove) - @metrics.wraps("process_messages.extract_strings") def extract_strings(self) -> Mapping[UseCaseID, Mapping[OrgId, set[str]]]: strings: Mapping[UseCaseID, Mapping[OrgId, set[str]]] = defaultdict( diff --git a/src/sentry/sentry_metrics/consumers/indexer/common.py b/src/sentry/sentry_metrics/consumers/indexer/common.py index 9b0a18c8281fb9..54fbfdf066520d 100644 --- a/src/sentry/sentry_metrics/consumers/indexer/common.py +++ b/src/sentry/sentry_metrics/consumers/indexer/common.py @@ -26,9 +26,6 @@ class BrokerMeta(NamedTuple): logger = logging.getLogger(__name__) -DEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 50000 -DEFAULT_QUEUED_MIN_MESSAGES = 100000 - @dataclass(frozen=True) class IndexerOutputMessageBatch: diff --git a/src/sentry/sentry_metrics/extraction_rules.py b/src/sentry/sentry_metrics/extraction_rules.py deleted file mode 100644 index 38246de5e50dc4..00000000000000 --- a/src/sentry/sentry_metrics/extraction_rules.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import annotations - -from collections.abc import Mapping -from dataclasses import dataclass -from typing import Any - -from sentry.sentry_metrics.configuration import ( - AGGREGATE_TO_METRIC_TYPE, - ALLOWED_TYPES, - HARD_CODED_UNITS, -) -from sentry.sentry_metrics.use_case_utils import string_to_use_case_id - -METRICS_EXTRACTION_RULES_OPTION_KEY = "sentry:metrics_extraction_rules" -SPAN_ATTRIBUTE_PREFIX = "span_attribute_" - - 
-class MetricsExtractionRuleValidationError(ValueError): - pass - - -@dataclass -class MetricsExtractionRule: - def __init__( - self, - span_attribute: str, - type: str, - unit: str, - tags: set[str], - condition: str, - id: int, - ): - self.span_attribute = self.validate_span_attribute(span_attribute) - self.type = self.validate_type(type) - self.unit = HARD_CODED_UNITS.get(span_attribute, unit) - self.tags = set(tags) - self.condition = condition - self.id = id - - def validate_span_attribute(self, span_attribute: str) -> str: - if not isinstance(span_attribute, str): - raise ValueError("The span attribute must be of type string.") - return span_attribute - - def validate_type(self, type_value: str) -> str: - if not isinstance(type_value, str): - raise ValueError("The type must be of type string.") - - if type_value not in ALLOWED_TYPES: - raise ValueError( - "Type can only have the following values: 'c' for counter, 'd' for distribution, 'g' for gauge, or 's' for set." - ) - return type_value - - @classmethod - def infer_types(self, aggregates: set[str]) -> set[str]: - types: set[str] = set() - for aggregate in aggregates: - if new_type := AGGREGATE_TO_METRIC_TYPE.get(aggregate): - types.add(new_type) - - return types - - def to_dict(self) -> Mapping[str, Any]: - return { - "spanAttribute": self.span_attribute, - "type": self.type, - "unit": self.unit, - "tags": self.tags, - "condition": self.condition, - "id": self.id, - } - - def generate_mri(self, use_case: str = "custom"): - """Generate the Metric Resource Identifier (MRI) associated with the extraction rule.""" - use_case_id = string_to_use_case_id(use_case) - return f"{self.type}:{use_case_id.value}/{SPAN_ATTRIBUTE_PREFIX}{self.id}@none" - - def __hash__(self): - return hash(self.generate_mri()) diff --git a/src/sentry/sentry_metrics/indexer/id_generator.py b/src/sentry/sentry_metrics/indexer/id_generator.py deleted file mode 100644 index db0514468d14d9..00000000000000 --- a/src/sentry/sentry_metrics/indexer/id_generator.py +++ /dev/null @@ -1,54 +0,0 @@ -import random -import time - -_VERSION_BITS = 4 -_TS_BITS = 32 -_RANDOM_BITS = 28 -_TOTAL_BITS = _VERSION_BITS + _TS_BITS + _RANDOM_BITS -assert _TOTAL_BITS == 64 - -_VERSION = 2 - -# Warning! The version must be an even number as this is already -# written to a BigInt field in Postgres -assert _VERSION % 2 == 0 - -# 1st January 2022 -_INDEXER_EPOCH_START = 1641024000 - - -def reverse_bits(number: int, bit_size: int) -> int: - return int(bin(number)[2:].zfill(bit_size)[::-1], 2) - - -# we will have room b/n version and time since for a while -# so let's reverse the version bits to grow to the right -# instead of left should we need more than 3 bits for version - -_VERSION_PREFIX = reverse_bits(_VERSION, _VERSION_BITS) - - -def get_id() -> int: - """ - Generates IDs for use by indexer storages that do not have autoincrement sequences. - - This function does not provide any guarantee of uniqueness, just a low probability of collisions. - It relies on the database to be strongly consistent and reject writes with duplicate IDs. These should - be retried with a newly generated ID. - - The ID generated is in roughly incrementing order. - - Metric IDs are 64 bit but this function only generates IDs that fit in 63 bits. The leading bit is always zero. - This is because they were stored in Postgres as BigInt (signed 64 bit) and we do not want to change that now. - In ClickHouse it is an unsigned 64 bit integer. 
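[Aside, not part of the diff] The removed `get_id()` docstring above describes a fixed bit layout: 4 reversed version bits, 32 timestamp bits counted from 2022-01-01 UTC, and 28 random bits. A hypothetical decoder (this helper does not exist in the codebase) makes that layout concrete:

from datetime import datetime, timezone

_VERSION_BITS, _TS_BITS, _RANDOM_BITS = 4, 32, 28
_INDEXER_EPOCH_START = 1641024000  # 2022-01-01 UTC

def decode_indexer_id(id_: int) -> tuple[int, datetime, int]:
    rand = id_ & ((1 << _RANDOM_BITS) - 1)
    ts = (id_ >> _RANDOM_BITS) & ((1 << _TS_BITS) - 1)
    version_prefix = id_ >> (_TS_BITS + _RANDOM_BITS)
    # Undo the bit reversal applied to the version prefix by the removed reverse_bits()
    version = int(bin(version_prefix)[2:].zfill(_VERSION_BITS)[::-1], 2)
    return version, datetime.fromtimestamp(_INDEXER_EPOCH_START + ts, tz=timezone.utc), rand

# Example: a prefix of 4 decodes back to version 2; the timestamp is seconds past the 2022 epoch.
print(decode_indexer_id((4 << 60) | (100_000 << 28) | 12345))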
- """ - - now = int(time.time()) - time_since_epoch = now - _INDEXER_EPOCH_START - rand = random.getrandbits(_RANDOM_BITS) - - id = _VERSION_PREFIX << (_TOTAL_BITS - _VERSION_BITS) - id |= time_since_epoch << (_TOTAL_BITS - _VERSION_BITS - _TS_BITS) - id |= rand - - return id diff --git a/src/sentry/sentry_metrics/indexer/postgres/models.py b/src/sentry/sentry_metrics/indexer/postgres/models.py index 7575951f74c3a5..142af00288e919 100644 --- a/src/sentry/sentry_metrics/indexer/postgres/models.py +++ b/src/sentry/sentry_metrics/indexer/postgres/models.py @@ -1,8 +1,8 @@ import logging -from typing import Any, ClassVar, Self +from typing import ClassVar, Self from django.conf import settings -from django.db import connections, models, router +from django.db import models from django.utils import timezone from sentry.backup.scopes import RelocationScope @@ -16,35 +16,6 @@ from collections.abc import Mapping -@region_silo_model -class MetricsKeyIndexer(Model): - __relocation_scope__ = RelocationScope.Excluded - - string = models.CharField(max_length=200) - date_added = models.DateTimeField(default=timezone.now) - - objects: ClassVar[BaseManager[Self]] = BaseManager( - cache_fields=("pk", "string"), cache_ttl=settings.SENTRY_METRICS_INDEXER_CACHE_TTL - ) - - class Meta: - db_table = "sentry_metricskeyindexer" - app_label = "sentry" - constraints = [ - models.UniqueConstraint(fields=["string"], name="unique_string"), - ] - - @classmethod - def get_next_values(cls, num: int) -> Any: - using = router.db_for_write(cls) - connection = connections[using].cursor() - - connection.execute( - "SELECT nextval('sentry_metricskeyindexer_id_seq') from generate_series(1,%s)", [num] - ) - return connection.fetchall() - - class BaseIndexer(Model): string = models.CharField(max_length=MAX_INDEXED_COLUMN_LENGTH) organization_id = BoundedBigIntegerField() diff --git a/src/sentry/sentry_metrics/indexer/postgres/postgres_v2.py b/src/sentry/sentry_metrics/indexer/postgres/postgres_v2.py index 2eaac04915ff6c..8155a49ef506b0 100644 --- a/src/sentry/sentry_metrics/indexer/postgres/postgres_v2.py +++ b/src/sentry/sentry_metrics/indexer/postgres/postgres_v2.py @@ -31,7 +31,6 @@ __all__ = ["PostgresIndexer"] -_INDEXER_CACHE_METRIC = "sentry_metrics.indexer.memcache" _INDEXER_DB_METRIC = "sentry_metrics.indexer.postgres" _PARTITION_KEY = "pg" diff --git a/src/sentry/sentry_metrics/querying/constants.py b/src/sentry/sentry_metrics/querying/constants.py index 64ed5b42be3f9a..508202c3262d84 100644 --- a/src/sentry/sentry_metrics/querying/constants.py +++ b/src/sentry/sentry_metrics/querying/constants.py @@ -2,17 +2,6 @@ # Snuba can return at most 10.000 rows. SNUBA_QUERY_LIMIT = 10000 -# Intervals in seconds which are used by the product to query data. -DEFAULT_QUERY_INTERVALS = [ - 60 * 60 * 24, # 1 day - 60 * 60 * 12, # 12 hours - 60 * 60 * 4, # 4 hours - 60 * 60 * 2, # 2 hours - 60 * 60, # 1 hour - 60 * 30, # 30 min - 60 * 5, # 5 min - 60, # 1 min -] # Operators in formulas that use coefficients. 
COEFFICIENT_OPERATORS = { ArithmeticOperator.DIVIDE.value, diff --git a/src/sentry/sentry_metrics/querying/data/mapping/base.py b/src/sentry/sentry_metrics/querying/data/mapping/base.py index e241dd0229a571..b67384654bf093 100644 --- a/src/sentry/sentry_metrics/querying/data/mapping/base.py +++ b/src/sentry/sentry_metrics/querying/data/mapping/base.py @@ -1,6 +1,6 @@ import abc from collections.abc import Sequence -from typing import Any, TypeVar +from typing import Any from sentry.models.project import Project @@ -26,9 +26,6 @@ def backward(self, projects: Sequence[Project], value: Any) -> Any: return value -TMapper = TypeVar("TMapper", bound=Mapper) - - class MapperConfig: def __init__(self): self.mappers: set[type[Mapper]] = set() diff --git a/src/sentry/sentry_metrics/querying/data/transformation/stats.py b/src/sentry/sentry_metrics/querying/data/transformation/stats.py deleted file mode 100644 index 45cc44c37f4147..00000000000000 --- a/src/sentry/sentry_metrics/querying/data/transformation/stats.py +++ /dev/null @@ -1,48 +0,0 @@ -from collections.abc import Mapping, Sequence -from dataclasses import dataclass -from typing import Any - -from sentry.sentry_metrics.querying.data.execution import QueryResult -from sentry.sentry_metrics.querying.data.transformation.base import QueryResultsTransformer -from sentry.utils.outcomes import Outcome - - -@dataclass(frozen=True) -class MetricsOutcomesResult: - series: Sequence[Mapping[str, Any]] - totals: Sequence[Mapping[str, Any]] - - -class MetricsOutcomesTransformer(QueryResultsTransformer[Mapping[str, Any]]): - def transform_result(self, result: Sequence[Mapping[str, Any]]) -> Sequence[Mapping[str, Any]]: - ret_val = [] - - for item in result: - ret_val_item = {} - for key in item: - if key == "outcome.id": - outcome = int(item[key]) - ret_val_item["outcome"] = Outcome(outcome).api_name() - elif key in "aggregate_value": - ret_val_item["quantity"] = item[key] - else: - ret_val_item[key] = item[key] - - ret_val.append(ret_val_item) - - return ret_val - - def transform(self, query_results: Sequence[QueryResult]) -> Mapping[str, Any]: - """ - Transforms the query results into the format returned by outcomes queries. 
- Performs necessary mappings to match that format such as outcome.id -> outcome - - """ - - if not query_results or len(query_results) == 0: - return {"series": [], "totals": []} - - series = self.transform_result(query_results[0].series) - totals = self.transform_result(query_results[0].totals) - - return {"series": series, "totals": totals} diff --git a/src/sentry/sentry_metrics/querying/errors.py b/src/sentry/sentry_metrics/querying/errors.py index 56ef0a2d916afd..365e2f628e7e84 100644 --- a/src/sentry/sentry_metrics/querying/errors.py +++ b/src/sentry/sentry_metrics/querying/errors.py @@ -8,7 +8,3 @@ class MetricsQueryExecutionError(Exception): class LatestReleaseNotFoundError(Exception): pass - - -class CorrelationsQueryExecutionError(Exception): - pass diff --git a/src/sentry/sentry_metrics/querying/metadata/metrics.py b/src/sentry/sentry_metrics/querying/metadata/metrics.py index e09e828ca2572a..09ba8d840968ca 100644 --- a/src/sentry/sentry_metrics/querying/metadata/metrics.py +++ b/src/sentry/sentry_metrics/querying/metadata/metrics.py @@ -18,13 +18,7 @@ from sentry.snuba.metrics import parse_mri from sentry.snuba.metrics.datasource import get_metrics_blocking_state_of_projects from sentry.snuba.metrics.naming_layer.mri import ParsedMRI, get_available_operations -from sentry.snuba.metrics.utils import ( - BlockedMetric, - MetricMeta, - MetricOperationType, - MetricType, - MetricUnit, -) +from sentry.snuba.metrics.utils import BlockedMetric, MetricMeta, MetricType, MetricUnit from sentry.snuba.metrics_layer.query import fetch_metric_mris @@ -152,7 +146,7 @@ def _build_metric_meta( name=parsed_mri.name, unit=cast(MetricUnit, parsed_mri.unit), mri=parsed_mri.mri_string, - operations=cast(Sequence[MetricOperationType], available_operations), + operations=available_operations, projectIds=project_ids, blockingStatus=blocking_status, ) diff --git a/src/sentry/sentry_metrics/querying/metadata/utils.py b/src/sentry/sentry_metrics/querying/metadata/utils.py index 88d00b58c5623e..2a3e5520a374e4 100644 --- a/src/sentry/sentry_metrics/querying/metadata/utils.py +++ b/src/sentry/sentry_metrics/querying/metadata/utils.py @@ -1,7 +1,10 @@ +from __future__ import annotations + from sentry.snuba.metrics import get_mri from sentry.snuba.metrics.naming_layer.mri import is_mri +from sentry.snuba.metrics.utils import MetricOperationType -METRICS_API_HIDDEN_OPERATIONS = { +METRICS_API_HIDDEN_OPERATIONS: dict[str, list[MetricOperationType]] = { "sentry:metrics_activate_percentiles": [ "p50", "p75", @@ -12,20 +15,21 @@ "sentry:metrics_activate_last_for_gauges": ["last"], } -NON_QUERYABLE_METRIC_OPERATIONS = ["histogram", "min_timestamp", "max_timestamp"] +NON_QUERYABLE_METRIC_OPERATIONS: list[MetricOperationType] = [ + "histogram", + "min_timestamp", + "max_timestamp", +] class OperationsConfiguration: def __init__(self): self.hidden_operations = set() - def hide_operation(self, operation: str) -> None: - self.hidden_operations.add(operation) - - def hide_operations(self, operations: list[str]) -> None: + def hide_operations(self, operations: list[MetricOperationType]) -> None: self.hidden_operations.update(operations) - def get_hidden_operations(self): + def get_hidden_operations(self) -> list[MetricOperationType]: return list(self.hidden_operations) diff --git a/src/sentry/sentry_metrics/querying/utils.py b/src/sentry/sentry_metrics/querying/utils.py index 527cf26721bef8..e5304d44513634 100644 --- a/src/sentry/sentry_metrics/querying/utils.py +++ b/src/sentry/sentry_metrics/querying/utils.py @@ -1,5 
+1,3 @@ -import re - from django.conf import settings from rediscluster import RedisCluster @@ -12,11 +10,3 @@ def get_redis_client_for_metrics_meta() -> RedisCluster: """ cluster_key = settings.SENTRY_METRIC_META_REDIS_CLUSTER return redis.redis_clusters.get(cluster_key) # type: ignore[return-value] - - -def remove_if_match(pattern, string: str) -> str: - """ - Removes a pattern from a string. - """ - # Use the re.sub function to replace the matched characters with an empty string - return re.sub(pattern, "", string) diff --git a/src/sentry/snuba/entity_subscription.py b/src/sentry/snuba/entity_subscription.py index 08c7319179d86f..96bb7b5fcb1b46 100644 --- a/src/sentry/snuba/entity_subscription.py +++ b/src/sentry/snuba/entity_subscription.py @@ -161,6 +161,7 @@ def build_rpc_request( environment: Environment | None, params: ParamsType | None = None, skip_field_validation_for_entity_subscription_deletion: bool = False, + referrer: str = Referrer.API_ALERTS_ALERT_RULE_CHART.value, ) -> TimeSeriesRequest: raise NotImplementedError @@ -293,6 +294,7 @@ def build_rpc_request( environment: Environment | None, params: ParamsType | None = None, skip_field_validation_for_entity_subscription_deletion: bool = False, + referrer: str = Referrer.API_ALERTS_ALERT_RULE_CHART.value, ) -> TimeSeriesRequest: if params is None: params = {} @@ -317,7 +319,7 @@ def build_rpc_request( query_string=query, y_axes=[self.aggregate], groupby=[], - referrer=Referrer.API_ALERTS_ALERT_RULE_CHART.value, + referrer=referrer, config=SearchResolverConfig(), granularity_secs=self.time_window, ) diff --git a/src/sentry/snuba/errors.py b/src/sentry/snuba/errors.py index 8466a1f40c2c78..96563bd52e6883 100644 --- a/src/sentry/snuba/errors.py +++ b/src/sentry/snuba/errors.py @@ -4,6 +4,7 @@ from typing import cast import sentry_sdk +from snuba_sdk import Column, Condition from sentry.discover.arithmetic import categorize_columns from sentry.exceptions import InvalidSearchQuery @@ -30,29 +31,31 @@ def query( - selected_columns, - query, - snuba_params, - equations=None, - orderby=None, - offset=None, - limit=50, - referrer=None, - auto_fields=False, - auto_aggregations=False, - include_equation_fields=False, - allow_metric_aggregates=False, - use_aggregate_conditions=False, - conditions=None, - functions_acl=None, - transform_alias_to_input_format=False, - sample=None, - has_metrics=False, - use_metrics_layer=False, - skip_tag_resolution=False, - on_demand_metrics_enabled=False, + selected_columns: list[str], + query: str, + snuba_params: SnubaParams, + equations: list[str] | None = None, + orderby: list[str] | None = None, + offset: int | None = None, + limit: int = 50, + referrer: str | None = None, + auto_fields: bool = False, + auto_aggregations: bool = False, + include_equation_fields: bool = False, + allow_metric_aggregates: bool = False, + use_aggregate_conditions: bool = False, + conditions: list[Condition] | None = None, + functions_acl: list[str] | None = None, + transform_alias_to_input_format: bool = False, + sample: float | None = None, + has_metrics: bool = False, + use_metrics_layer: bool = False, + skip_tag_resolution: bool = False, + extra_columns: list[Column] | None = None, + on_demand_metrics_enabled: bool = False, on_demand_metrics_type: MetricSpecType | None = None, - fallback_to_transactions=False, + dataset: Dataset = Dataset.Events, + fallback_to_transactions: bool = False, query_source: QuerySource | None = None, ) -> EventsResponse: if not selected_columns: diff --git 
a/src/sentry/snuba/metrics/datasource.py b/src/sentry/snuba/metrics/datasource.py index ea290073a10fb2..78729018fe3b8b 100644 --- a/src/sentry/snuba/metrics/datasource.py +++ b/src/sentry/snuba/metrics/datasource.py @@ -1,10 +1,3 @@ -from __future__ import annotations - -from functools import lru_cache - -import sentry_sdk -from rest_framework.exceptions import NotFound - """ Module that gets both metadata and time series from Snuba. For metadata, it fetch metrics metadata (metric names, tag names, tag values, ...) from snuba. @@ -13,12 +6,7 @@ efficient, we only look at the past 24 hours. """ -__all__ = ( - "get_all_tags", - "get_tag_values", - "get_series", - "get_single_metric_info", -) +from __future__ import annotations import logging from collections import defaultdict, deque @@ -29,14 +17,14 @@ from operator import itemgetter from typing import Any -from snuba_sdk import And, Column, Condition, Function, Op, Or, Query, Request +import sentry_sdk +from rest_framework.exceptions import NotFound +from snuba_sdk import Column, Condition, Function, Op, Query, Request from snuba_sdk.conditions import ConditionGroup from sentry.exceptions import InvalidParams from sentry.models.project import Project from sentry.sentry_metrics import indexer -from sentry.sentry_metrics.indexer.strings import PREFIX as SHARED_STRINGS_PREFIX -from sentry.sentry_metrics.indexer.strings import SHARED_STRINGS from sentry.sentry_metrics.use_case_id_registry import UseCaseID from sentry.sentry_metrics.utils import ( MetricIndexNotFound, @@ -49,7 +37,6 @@ from sentry.snuba.metrics.fields import run_metrics_query from sentry.snuba.metrics.fields.base import ( SnubaDataType, - build_metrics_query, get_derived_metrics, org_id_from_projects, ) @@ -80,7 +67,15 @@ get_intervals, to_intervals, ) -from sentry.utils.snuba import bulk_snuba_queries, raw_snql_query +from sentry.utils.snuba import raw_snql_query + +__all__ = ( + "get_all_tags", + "get_tag_values", + "get_series", + "get_single_metric_info", +) + logger = logging.getLogger(__name__) @@ -107,133 +102,6 @@ def _get_metrics_for_entity( ) -def _get_metrics_by_project_for_entity_query( - entity_key: EntityKey, - project_ids: Sequence[int], - org_id: int, - use_case_id: UseCaseID, - start: datetime | None = None, - end: datetime | None = None, -) -> Request: - where = [Condition(Column("use_case_id"), Op.EQ, use_case_id.value)] - where.extend(_get_mri_constraints_for_use_case(entity_key, use_case_id)) - - return build_metrics_query( - entity_key=entity_key, - select=[Column("project_id"), Column("metric_id")], - groupby=[Column("project_id"), Column("metric_id")], - where=where, - project_ids=project_ids, - org_id=org_id, - use_case_id=use_case_id, - start=start, - end=end, - ) - - -@lru_cache(maxsize=len(EntityKey) * len(UseCaseID)) -def _get_mri_constraints_for_use_case(entity_key: EntityKey, use_case_id: UseCaseID): - # Sessions exist on a different infrastructure that works differently, - # thus this optimization does not apply. - if use_case_id == UseCaseID.SESSIONS: - return [] - - conditions = [] - - # Look for the min/max of the metric id range for the given use case id to - # constrain the search ClickHouse must do otherwise, it'll attempt a full scan. - # - # This assumes that metric ids are divided into non-overlapping ranges by the - # use case id, so we can focus on a particular range for better performance. 
- min_metric_id = SHARED_STRINGS_PREFIX << 1 # larger than possible metric ids - max_metric_id = 0 - - for mri, id in SHARED_STRINGS.items(): - parsed_mri = parse_mri(mri) - if parsed_mri is not None and parsed_mri.namespace == use_case_id.value: - min_metric_id = min(id, min_metric_id) - max_metric_id = max(id, max_metric_id) - - # It's possible that there's a metric id within the use case that is not - # hard coded so we should always check the range of custom metric ids. - condition = Condition(Column("metric_id"), Op.LT, SHARED_STRINGS_PREFIX) - - # If we find a valid range, we extend the condition to check it as well. - if min_metric_id <= max_metric_id: - condition = Or( - [ - condition, - # Expand the search to include the range of the hard coded - # metric ids if a valid range was found. - And( - [ - Condition(Column("metric_id"), Op.GTE, min_metric_id), - Condition(Column("metric_id"), Op.LTE, max_metric_id), - ] - ), - ] - ) - - conditions.append(condition) - - # This is added to every use case id because the MRI is the primary ORDER BY - # on the table, and without it, these granules will be scanned no matter what - # the use case id is. - excluded_mris = [] - - if use_case_id == UseCaseID.TRANSACTIONS: - # This on_demand MRI takes up the majority of dataset and makes the query slow - # because ClickHouse ends up scanning the whole table. - # - # These are used for on demand metrics extraction and end users should not - # need to know about these metrics. - # - # As an optimization, we explicitly exclude these MRIs in the query to allow - # Clickhouse to skip the granules containing strictly these MRIs. - if entity_key == EntityKey.GenericMetricsCounters: - excluded_mris.append("c:transactions/on_demand@none") - elif entity_key == EntityKey.GenericMetricsDistributions: - excluded_mris.append("d:transactions/on_demand@none") - elif entity_key == EntityKey.GenericMetricsSets: - excluded_mris.append("s:transactions/on_demand@none") - elif entity_key == EntityKey.GenericMetricsGauges: - excluded_mris.append("g:transactions/on_demand@none") - - if excluded_mris: - conditions.append( - Condition( - Column("metric_id"), - Op.NOT_IN, - # these are shared strings, so just using org id 0 as a placeholder - [indexer.resolve(use_case_id, 0, mri) for mri in excluded_mris], - ) - ) - - return conditions - - -def _get_metrics_by_project_for_entity( - entity_key: EntityKey, - project_ids: Sequence[int], - org_id: int, - use_case_id: UseCaseID, - start: datetime | None = None, - end: datetime | None = None, -) -> list[SnubaDataType]: - return run_metrics_query( - entity_key=entity_key, - select=[Column("project_id"), Column("metric_id")], - groupby=[Column("project_id"), Column("metric_id")], - where=[Condition(Column("use_case_id"), Op.EQ, use_case_id.value)], - referrer="snuba.metrics.get_metrics_names_for_entity", - project_ids=project_ids, - org_id=org_id, - use_case_id=use_case_id, - start=start, - end=end, - ) - - def get_available_derived_metrics( projects: Sequence[Project], supported_metric_ids_in_entities: dict[MetricType, Sequence[int]], @@ -300,68 +168,6 @@ def get_metrics_blocking_state_of_projects( return metrics_blocking_state_by_mri -def get_stored_metrics_of_projects( - projects: Sequence[Project], - use_case_ids: Sequence[UseCaseID], - start: datetime | None = None, - end: datetime | None = None, -) -> Mapping[str, Sequence[int]]: - org_id = projects[0].organization_id - project_ids = [project.id for project in projects] - - # We compute a list of all the queries that we want 
to run in parallel across entities and use cases. - requests = [] - use_case_id_to_index = defaultdict(list) - for use_case_id in use_case_ids: - entity_keys = get_entity_keys_of_use_case_id(use_case_id=use_case_id) - for entity_key in entity_keys or (): - requests.append( - _get_metrics_by_project_for_entity_query( - entity_key=entity_key, - project_ids=project_ids, - org_id=org_id, - use_case_id=use_case_id, - start=start, - end=end, - ) - ) - use_case_id_to_index[use_case_id].append(len(requests) - 1) - - # We run the queries all in parallel. - results = bulk_snuba_queries( - requests=requests, - referrer="snuba.metrics.datasource.get_stored_metrics_of_projects", - use_cache=True, - ) - - # We reverse resolve all the metric ids by bulking together all the resolutions of the same use case id to maximize - # the parallelism. - resolved_metric_ids = defaultdict(dict) - for use_case_id, results_indexes in use_case_id_to_index.items(): - metrics_ids = [] - for result_index in results_indexes: - data = results[result_index]["data"] - for row in data or (): - metrics_ids.append(row["metric_id"]) - - # We have to partition the resolved metric ids per use case id, since the indexer values might clash across - # use cases. - resolved_metric_ids[use_case_id].update( - bulk_reverse_resolve(use_case_id, org_id, [metric_id for metric_id in metrics_ids]) - ) - - # We iterate over each result and compute a map of `metric_id -> project_id`. - grouped_stored_metrics = defaultdict(list) - for use_case_id, results_indexes in use_case_id_to_index.items(): - for result_index in results_indexes: - data = results[result_index]["data"] - for row in data or (): - resolved_metric_id = resolved_metric_ids[use_case_id][row["metric_id"]] - grouped_stored_metrics[resolved_metric_id].append(row["project_id"]) - - return grouped_stored_metrics - - def get_custom_measurements( project_ids: Sequence[int], organization_id: int, diff --git a/src/sentry/snuba/metrics/extraction.py b/src/sentry/snuba/metrics/extraction.py index 99b0fc625cf705..a838ca7cc8a6a9 100644 --- a/src/sentry/snuba/metrics/extraction.py +++ b/src/sentry/snuba/metrics/extraction.py @@ -313,8 +313,6 @@ def get_default_spec_version(cls: Any) -> SpecVersion: "event.type=transaction", ] -Variables = dict[str, Any] - query_builder = UnresolvedQuery( dataset=Dataset.Transactions, params={} ) # Workaround to get all updated discover functions instead of using the deprecated events fields. 
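For context on the range constraint that the removed _get_mri_constraints_for_use_case helper built (see the deleted hunk above), here is a rough, standalone sketch of the same narrowing idea. SHARED_STRINGS, SHARED_STRINGS_PREFIX, and the ad-hoc MRI parsing below are illustrative stand-ins, not the real indexer tables or parser.

# Standalone sketch of the metric-id range narrowing performed by the removed
# _get_mri_constraints_for_use_case helper. The mapping and prefix value are
# illustrative stand-ins for the real hard-coded indexer strings.
SHARED_STRINGS_PREFIX = 1 << 63  # assumption: hard-coded ids live at/above this
SHARED_STRINGS = {
    "d:transactions/duration@millisecond": SHARED_STRINGS_PREFIX + 100,
    "s:transactions/user@none": SHARED_STRINGS_PREFIX + 101,
    "c:sessions/session@none": SHARED_STRINGS_PREFIX + 200,
}

def metric_id_range_for_namespace(namespace: str) -> tuple[int, int] | None:
    """Return the (min, max) hard-coded metric id for a namespace, if any."""
    min_id = SHARED_STRINGS_PREFIX << 1  # larger than any possible metric id
    max_id = 0
    for mri, metric_id in SHARED_STRINGS.items():
        # The real code parses the MRI properly; splitting is enough for a sketch.
        mri_namespace = mri.split(":", 1)[1].split("/", 1)[0]
        if mri_namespace == namespace:
            min_id = min(min_id, metric_id)
            max_id = max(max_id, metric_id)
    return (min_id, max_id) if min_id <= max_id else None

# Custom (non hard-coded) metric ids sit below the prefix, so the final filter
# is either "metric_id < PREFIX" alone, or that OR'd with the range found here.
print(metric_id_range_for_namespace("transactions"))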
@@ -1326,11 +1324,6 @@ def condition(self) -> RuleCondition | None: is extracted.""" return self._process_query() - def is_project_dependent(self) -> bool: - """Returns whether the spec is unique to a project, which is required for some forms of caching""" - tags_specs_generator = _ONDEMAND_OP_TO_PROJECT_SPEC_GENERATOR.get(self.op) - return tags_specs_generator is not None - def tags_conditions(self, project: Project) -> list[TagSpec]: """Returns a list of tag conditions that will specify how tags are injected into metrics by Relay, and a bool if those specs may be project specific.""" tags_specs_generator = _ONDEMAND_OP_TO_SPEC_GENERATOR.get(self.op) diff --git a/src/sentry/snuba/metrics/fields/snql.py b/src/sentry/snuba/metrics/fields/snql.py index c73af996b7a1ac..85925024144292 100644 --- a/src/sentry/snuba/metrics/fields/snql.py +++ b/src/sentry/snuba/metrics/fields/snql.py @@ -225,14 +225,6 @@ def _snql_on_tx_satisfaction_factory( return _snql_on_tx_satisfaction_factory -def _dist_count_aggregation_on_tx_satisfaction_factory( - org_id: int, satisfaction: str, metric_ids: Sequence[int], alias: str | None = None -) -> Function: - return _aggregation_on_tx_satisfaction_func_factory("countIf")( - org_id, satisfaction, metric_ids, alias - ) - - def _set_count_aggregation_on_tx_satisfaction_factory( org_id: int, satisfaction: str, metric_ids: Sequence[int], alias: str | None = None ) -> Function: @@ -783,16 +775,12 @@ def team_key_transaction_snql( def _resolve_project_threshold_config(project_ids: Sequence[int], org_id: int) -> SelectType: + use_case_id = UseCaseID.TRANSACTIONS return resolve_project_threshold_config( - tag_value_resolver=lambda use_case_id, org_id, value: resolve_tag_value( - use_case_id, org_id, value - ), - column_name_resolver=lambda use_case_id, org_id, value: resolve_tag_key( - use_case_id, org_id, value - ), + tag_value_resolver=lambda org_id, value: resolve_tag_value(use_case_id, org_id, value), + column_name_resolver=lambda org_id, value: resolve_tag_key(use_case_id, org_id, value), project_ids=project_ids, org_id=org_id, - use_case_id=UseCaseID.TRANSACTIONS, ) diff --git a/src/sentry/snuba/metrics/naming_layer/mapping.py b/src/sentry/snuba/metrics/naming_layer/mapping.py index ae3cb23455d651..5e3cbf86cd0155 100644 --- a/src/sentry/snuba/metrics/naming_layer/mapping.py +++ b/src/sentry/snuba/metrics/naming_layer/mapping.py @@ -92,12 +92,6 @@ def get_public_name_from_mri(internal_name: TransactionMRI | SessionMRI | str) - return internal_name -def is_private_mri(internal_name: TransactionMRI | SessionMRI | str) -> bool: - public_name = get_public_name_from_mri(internal_name) - # If the public name is the same as internal name it means that the internal is "private". 
- return public_name == internal_name - - def _extract_name_from_custom_metric_mri(mri: str) -> str | None: parsed_mri = parse_mri(mri) if parsed_mri is None: diff --git a/src/sentry/snuba/metrics/naming_layer/mri.py b/src/sentry/snuba/metrics/naming_layer/mri.py index 49d7070d882361..284a1e34d3daa3 100644 --- a/src/sentry/snuba/metrics/naming_layer/mri.py +++ b/src/sentry/snuba/metrics/naming_layer/mri.py @@ -33,7 +33,6 @@ ) import re -from collections.abc import Sequence from dataclasses import dataclass from enum import Enum from typing import cast @@ -42,12 +41,12 @@ from sentry.exceptions import InvalidParams from sentry.sentry_metrics.use_case_id_registry import UseCaseID -from sentry.snuba.dataset import EntityKey from sentry.snuba.metrics.units import format_value_using_unit_and_op from sentry.snuba.metrics.utils import ( AVAILABLE_GENERIC_OPERATIONS, AVAILABLE_OPERATIONS, OP_REGEX, + MetricEntity, MetricOperationType, MetricUnit, ) @@ -335,28 +334,27 @@ def is_custom_measurement(parsed_mri: ParsedMRI) -> bool: ) -def get_entity_key_from_entity_type(entity_type: str, generic_metrics: bool) -> EntityKey: - entity_name_suffixes = { - "c": "counters", - "s": "sets", - "d": "distributions", - "g": "gauges", - } - - if generic_metrics: - return EntityKey(f"generic_metrics_{entity_name_suffixes[entity_type]}") - else: - return EntityKey(f"metrics_{entity_name_suffixes[entity_type]}") +_ENTITY_KEY_MAPPING_GENERIC: dict[str, MetricEntity] = { + "c": "generic_metrics_counters", + "s": "generic_metrics_sets", + "d": "generic_metrics_distributions", + "g": "generic_metrics_gauges", +} +_ENTITY_KEY_MAPPING_NON_GENERIC: dict[str, MetricEntity] = { + "c": "metrics_counters", + "s": "metrics_sets", + "d": "metrics_distributions", +} -def get_available_operations(parsed_mri: ParsedMRI) -> Sequence[str]: +def get_available_operations(parsed_mri: ParsedMRI) -> list[MetricOperationType]: if parsed_mri.entity == "e": return [] elif parsed_mri.namespace == "sessions": - entity_key = get_entity_key_from_entity_type(parsed_mri.entity, False).value + entity_key = _ENTITY_KEY_MAPPING_NON_GENERIC[parsed_mri.entity] return AVAILABLE_OPERATIONS[entity_key] else: - entity_key = get_entity_key_from_entity_type(parsed_mri.entity, True).value + entity_key = _ENTITY_KEY_MAPPING_GENERIC[parsed_mri.entity] return AVAILABLE_GENERIC_OPERATIONS[entity_key] diff --git a/src/sentry/snuba/metrics/query_builder.py b/src/sentry/snuba/metrics/query_builder.py index eac1874aa33497..b36ab5e349e5e4 100644 --- a/src/sentry/snuba/metrics/query_builder.py +++ b/src/sentry/snuba/metrics/query_builder.py @@ -45,7 +45,6 @@ resolve_tag_key, resolve_tag_value, resolve_weak, - reverse_resolve, reverse_resolve_tag_value, ) from sentry.snuba.dataset import Dataset @@ -615,11 +614,6 @@ def get_date_range(params: Mapping) -> tuple[datetime, datetime, int]: return start, end, interval -def parse_tag(use_case_id: UseCaseID, org_id: int, tag_string: str) -> str: - tag_key = int(tag_string.replace("tags_raw[", "").replace("tags[", "").replace("]", "")) - return reverse_resolve(use_case_id, org_id, tag_key) - - def get_metric_object_from_metric_field( metric_field: MetricField, ) -> MetricExpressionBase: diff --git a/src/sentry/snuba/metrics/utils.py b/src/sentry/snuba/metrics/utils.py index cef92af6e666c6..df12ad96eaf7cb 100644 --- a/src/sentry/snuba/metrics/utils.py +++ b/src/sentry/snuba/metrics/utils.py @@ -4,7 +4,7 @@ from abc import ABC from collections.abc import Collection, Generator, Mapping, Sequence from datetime import datetime, 
timedelta, timezone -from typing import Literal, TypedDict, overload +from typing import Literal, NotRequired, TypedDict, overload from sentry.sentry_metrics.use_case_id_registry import UseCaseID from sentry.snuba.dataset import EntityKey @@ -141,7 +141,7 @@ "generic_metrics_gauges", ] -OP_TO_SNUBA_FUNCTION = { +OP_TO_SNUBA_FUNCTION: dict[MetricEntity, dict[MetricOperationType, str]] = { "metrics_counters": { "sum": "sumIf", "min_timestamp": "minIf", @@ -169,7 +169,7 @@ "max_timestamp": "maxIf", }, } -GENERIC_OP_TO_SNUBA_FUNCTION = { +GENERIC_OP_TO_SNUBA_FUNCTION: dict[MetricEntity, dict[MetricOperationType, str]] = { "generic_metrics_counters": OP_TO_SNUBA_FUNCTION["metrics_counters"], "generic_metrics_distributions": OP_TO_SNUBA_FUNCTION["metrics_distributions"], "generic_metrics_sets": OP_TO_SNUBA_FUNCTION["metrics_sets"], @@ -275,9 +275,6 @@ def generate_operation_regex(): OPERATIONS_TO_ENTITY = { op: entity for entity, operations in AVAILABLE_OPERATIONS.items() for op in operations } -GENERIC_OPERATIONS_TO_ENTITY = { - op: entity for entity, operations in AVAILABLE_GENERIC_OPERATIONS.items() for op in operations -} METRIC_TYPE_TO_ENTITY: Mapping[MetricType, EntityKey] = { "counter": EntityKey.MetricsCounters, @@ -342,6 +339,7 @@ class MetricMeta(TypedDict): type: MetricType operations: Collection[MetricOperationType] unit: MetricUnit | None + metric_id: NotRequired[int] mri: str projectIds: Sequence[int] blockingStatus: Sequence[BlockedMetric] | None diff --git a/src/sentry/snuba/metrics_enhanced_performance.py b/src/sentry/snuba/metrics_enhanced_performance.py index 2273781467e894..0fcdd654a2f19c 100644 --- a/src/sentry/snuba/metrics_enhanced_performance.py +++ b/src/sentry/snuba/metrics_enhanced_performance.py @@ -19,6 +19,7 @@ from sentry.snuba.metrics_performance import timeseries_query as metrics_timeseries_query from sentry.snuba.metrics_performance import top_events_timeseries as metrics_top_events_timeseries from sentry.snuba.query_sources import QuerySource +from sentry.snuba.types import DatasetQuery from sentry.utils.snuba import SnubaTSResult @@ -48,7 +49,7 @@ def query( on_demand_metrics_type: MetricSpecType | None = None, fallback_to_transactions: bool = False, query_source: QuerySource | None = None, -): +) -> EventsResponse: metrics_compatible = not equations dataset_reason = discover.DEFAULT_DATASET_REASON @@ -57,22 +58,22 @@ def query( result = metrics_query( selected_columns, query, - snuba_params, - equations, - orderby, - offset, - limit, - referrer, - auto_fields, - auto_aggregations, - use_aggregate_conditions, - allow_metric_aggregates, - conditions, - functions_acl, - transform_alias_to_input_format, - has_metrics, - use_metrics_layer, - on_demand_metrics_enabled, + snuba_params=snuba_params, + equations=equations, + orderby=orderby, + offset=offset, + limit=limit, + referrer=referrer, + auto_fields=auto_fields, + auto_aggregations=auto_aggregations, + use_aggregate_conditions=use_aggregate_conditions, + allow_metric_aggregates=allow_metric_aggregates, + conditions=conditions, + functions_acl=functions_acl, + transform_alias_to_input_format=transform_alias_to_input_format, + has_metrics=has_metrics, + use_metrics_layer=use_metrics_layer, + on_demand_metrics_enabled=on_demand_metrics_enabled, on_demand_metrics_type=on_demand_metrics_type, query_source=query_source, ) @@ -90,13 +91,13 @@ def query( # Either metrics failed, or this isn't a query we can enhance with metrics if not metrics_compatible: - dataset: types.ModuleType = discover + dataset_query: 
DatasetQuery = discover.query if fallback_to_transactions: - dataset = transactions + dataset_query = transactions.query sentry_sdk.set_tag("performance.dataset", "transactions") else: sentry_sdk.set_tag("performance.dataset", "discover") - results = dataset.query( + results = dataset_query( selected_columns, query, snuba_params=snuba_params, @@ -120,7 +121,10 @@ def query( return results - return {} + return { + "data": [], + "meta": {"fields": {}}, + } def timeseries_query( diff --git a/src/sentry/snuba/metrics_performance.py b/src/sentry/snuba/metrics_performance.py index 68e00c52bfccf8..2acd67232bc7bd 100644 --- a/src/sentry/snuba/metrics_performance.py +++ b/src/sentry/snuba/metrics_performance.py @@ -6,7 +6,7 @@ from typing import Any, Literal, overload import sentry_sdk -from snuba_sdk import Column +from snuba_sdk import Column, Condition from sentry.discover.arithmetic import categorize_columns from sentry.exceptions import IncompatibleMetricsQuery @@ -30,29 +30,32 @@ def query( - selected_columns, - query, - snuba_params=None, - equations=None, - orderby=None, - offset=None, - limit=50, - referrer=None, - auto_fields=False, - auto_aggregations=False, - use_aggregate_conditions=False, - allow_metric_aggregates=True, - conditions=None, - functions_acl=None, - transform_alias_to_input_format=False, + selected_columns: list[str], + query: str, + snuba_params: SnubaParams, + equations: list[str] | None = None, + orderby: list[str] | None = None, + offset: int | None = None, + limit: int = 50, + referrer: str | None = None, + auto_fields: bool = False, + auto_aggregations: bool = False, + include_equation_fields: bool = False, + allow_metric_aggregates: bool = True, + use_aggregate_conditions: bool = False, + conditions: list[Condition] | None = None, + functions_acl: list[str] | None = None, + transform_alias_to_input_format: bool = False, + sample: float | None = None, has_metrics: bool = True, use_metrics_layer: bool = False, + skip_tag_resolution: bool = False, on_demand_metrics_enabled: bool = False, on_demand_metrics_type: MetricSpecType | None = None, granularity: int | None = None, fallback_to_transactions=False, query_source: QuerySource | None = None, -): +) -> EventsResponse: with sentry_sdk.start_span(op="mep", name="MetricQueryBuilder"): metrics_query = MetricsQueryBuilder( dataset=Dataset.PerformanceMetrics, @@ -78,6 +81,8 @@ def query( on_demand_metrics_type=on_demand_metrics_type, ), ) + if referrer is None: + referrer = "" metrics_referrer = referrer + ".metrics-enhanced" results = metrics_query.run_query(referrer=metrics_referrer, query_source=query_source) with sentry_sdk.start_span(op="mep", name="query.transform_results"): diff --git a/src/sentry/snuba/models.py b/src/sentry/snuba/models.py index 75c84f93c39375..6fb83b3478d771 100644 --- a/src/sentry/snuba/models.py +++ b/src/sentry/snuba/models.py @@ -12,6 +12,7 @@ from sentry.backup.scopes import ImportScope, RelocationScope from sentry.db.models import FlexibleForeignKey, Model, region_silo_model from sentry.db.models.manager.base import BaseManager +from sentry.incidents.utils.types import DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION from sentry.models.team import Team from sentry.users.models.user import User from sentry.workflow_engine.registry import data_source_type_registry @@ -153,7 +154,7 @@ def write_relocation_import( return (subscription.pk, ImportKind.Inserted) -@data_source_type_registry.register("snuba_query_subscription") +@data_source_type_registry.register(DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION) class 
QuerySubscriptionDataSourceHandler(DataSourceTypeHandler[QuerySubscription]): @staticmethod def bulk_get_query_object( diff --git a/src/sentry/snuba/referrer.py b/src/sentry/snuba/referrer.py index 667d39232b72dc..3a740223d27aae 100644 --- a/src/sentry/snuba/referrer.py +++ b/src/sentry/snuba/referrer.py @@ -1,7 +1,7 @@ from __future__ import annotations import logging -from enum import Enum, unique +from enum import StrEnum, unique from sentry.utils import metrics @@ -9,7 +9,7 @@ @unique -class Referrer(Enum): +class Referrer(StrEnum): ALERTRULESERIALIZER_TEST_QUERY_PRIMARY = "alertruleserializer.test_query.primary" ALERTRULESERIALIZER_TEST_QUERY = "alertruleserializer.test_query" ANOMALY_DETECTION_HISTORICAL_DATA_QUERY = "anomaly_detection_historical_data_query" @@ -107,6 +107,7 @@ class Referrer(Enum): ) API_GROUP_HASHES_LEVELS_GET_LEVELS_OVERVIEW = "api.group_hashes_levels.get_levels_overview" API_GROUP_HASHES = "api.group-hashes" + API_INSIGHTS_USER_GEO_SUBREGION_SELECTOR = "api.insights.user-geo-subregion-selector" API_ISSUES_ISSUE_EVENTS = "api.issues.issue_events" API_ISSUES_RELATED_ISSUES = "api.issues.related_issues" API_METRICS_TOTALS = "api.metrics.totals" @@ -529,32 +530,32 @@ class Referrer(Enum): # Performance Cache Module API_PERFORMANCE_CACHE_LANDING_CACHE_THROUGHPUT_CHART = ( - "api.performance.cache.landing-cache-throughput-chart", + "api.performance.cache.landing-cache-throughput-chart" ) API_PERFORMANCE_CACHE_LANDING_CACHE_TRANSACTION_LIST = ( - "api.performance.cache.landing-cache-transaction-list", + "api.performance.cache.landing-cache-transaction-list" ) API_PERFORMANCE_CACHE_LANDING_CACHE_TRANSACTION_DURATION = ( - "api.performance.cache.landing-cache-transaction-duration", + "api.performance.cache.landing-cache-transaction-duration" ) API_PERFORMANCE_CACHE_SAMPLES_CACHE_METRICS_RIBBON = ( - "api.performance.cache.samples-cache-metrics-ribbon", + "api.performance.cache.samples-cache-metrics-ribbon" ) API_PERFORMANCE_CACHE_SAMPLES_CACHE_TRANSACTION_DURATION_CHART = ( - "api.performance.cache.samples-cache-transaction-duration-chart", + "api.performance.cache.samples-cache-transaction-duration-chart" ) API_PERFORMANCE_CACHE_SAMPLES_CACHE_TRANSACTION_DURATION = ( - "api.performance.cache.samples-cache-transaction-duration", + "api.performance.cache.samples-cache-transaction-duration" ) API_PERFORMANCE_CACHE_SAMPLES_CACHE_SPAN_SAMPLES = ( - "api.performance.cache.samples-cache-span-samples", + "api.performance.cache.samples-cache-span-samples" ) API_PERFORMANCE_CACHE_SAMPLES_CACHE_SPAN_SAMPLES_TRANSACTION_DURATION = ( - "api.performance.cache.samples-cache-span-samples-transaction-duration", + "api.performance.cache.samples-cache-span-samples-transaction-duration" ) API_PERFORMANCE_CACHE_SAMPLES_CACHE_HIT_MISS_CHART = ( - "api.performance.cache.samples-cache-hit-miss-chart", + "api.performance.cache.samples-cache-hit-miss-chart" ) # Performance Queues Module @@ -716,6 +717,8 @@ class Referrer(Enum): INCIDENTS_GET_INCIDENT_AGGREGATES_PRIMARY = "incidents.get_incident_aggregates.primary" INCIDENTS_GET_INCIDENT_AGGREGATES = "incidents.get_incident_aggregates" IS_ESCALATING_GROUP = "sentry.issues.escalating.is_escalating" + ISSUE_DETAILS_STREAMLINE_GRAPH = "issue_details.streamline_graph" + ISSUE_DETAILS_STREAMLINE_LIST = "issue_details.streamline_list" METRIC_EXTRACTION_CARDINALITY_CHECK = "metric_extraction.cardinality_check" OUTCOMES_TIMESERIES = "outcomes.timeseries" OUTCOMES_TOTALS = "outcomes.totals" diff --git a/src/sentry/snuba/sessions_v2.py 
b/src/sentry/snuba/sessions_v2.py index 184b3a479e96de..3d83dfa2a615f8 100644 --- a/src/sentry/snuba/sessions_v2.py +++ b/src/sentry/snuba/sessions_v2.py @@ -4,7 +4,7 @@ import logging import math from datetime import datetime, timedelta, timezone -from typing import Any +from typing import Any, NotRequired, Protocol, TypedDict from snuba_sdk import BooleanCondition, Column, Condition, Function, Limit, Op @@ -101,6 +101,11 @@ """ +class _Field(Protocol): + def extract_from_row(self, row, group) -> float | None: ... + def get_snuba_columns(self, raw_groupby) -> list[str]: ... + + class SessionsField: def get_snuba_columns(self, raw_groupby): if "session.status" in raw_groupby: @@ -188,7 +193,7 @@ def extract_from_row(self, row, group): return None -COLUMN_MAP = { +COLUMN_MAP: dict[str, _Field] = { "sum(session)": SessionsField(), "count_unique(user)": UsersField(), "avg(session.duration)": DurationAverageField(), @@ -201,6 +206,12 @@ def extract_from_row(self, row, group): } +class _GroupBy(Protocol): + def get_snuba_columns(self) -> list[str]: ... + def get_snuba_groupby(self) -> list[str]: ... + def get_keys_for_row(self, row) -> list[tuple[str, str]]: ... + + class SimpleGroupBy: def __init__(self, row_name: str, name: str | None = None): self.row_name = row_name @@ -229,7 +240,7 @@ def get_keys_for_row(self, row): # NOTE: in the future we might add new `user_agent` and `os` fields -GROUPBY_MAP = { +GROUPBY_MAP: dict[str, _GroupBy] = { "project": SimpleGroupBy("project_id", "project"), "environment": SimpleGroupBy("environment"), "release": SimpleGroupBy("release"), @@ -423,8 +434,8 @@ def get_constrained_date_range( max_points=MAX_POINTS, restrict_date_range=True, ) -> tuple[datetime, datetime, int]: - interval = parse_stats_period(params.get("interval", "1h")) - interval = int(3600 if interval is None else interval.total_seconds()) + interval_td = parse_stats_period(params.get("interval", "1h")) + interval = int(3600 if interval_td is None else interval_td.total_seconds()) smallest_interval, interval_str = allowed_resolution.value if interval % smallest_interval != 0 or interval < smallest_interval: @@ -569,7 +580,8 @@ def make_timeseries(rows, group): row[ts_col] = row[ts_col][:19] + "Z" rows.sort(key=lambda row: row[ts_col]) - fields = [(name, field, list()) for name, field in query.fields.items()] + fields: list[tuple[str, _Field, list[float | None]]] + fields = [(name, field, []) for name, field in query.fields.items()] group_index = 0 while group_index < len(rows): @@ -618,9 +630,16 @@ def make_totals(totals, group): } +class _CategoryStats(TypedDict): + category: str + outcomes: dict[str, int] + totals: dict[str, int] + reason: NotRequired[str] + + def massage_sessions_result_summary( query, result_totals, outcome_query=None -) -> dict[str, list[Any]]: +) -> tuple[dict[int, dict[str, dict[str, _CategoryStats]]], dict[str, list[Any]]]: """ Post-processes the query result. 
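The sessions_v2 changes above type COLUMN_MAP and GROUPBY_MAP against small structural Protocols. The following self-contained sketch shows that pattern in isolation; the two field classes are simplified stand-ins for the real SessionsField and DurationAverageField, and only the two methods required by the _Field Protocol are modeled.

# Minimal sketch of the structural typing pattern used for COLUMN_MAP above.
# _Field and the two classes are simplified stand-ins, not the real helpers.
from typing import Protocol


class _Field(Protocol):
    def extract_from_row(self, row, group) -> float | None: ...
    def get_snuba_columns(self, raw_groupby) -> list[str]: ...


class SessionsField:
    def get_snuba_columns(self, raw_groupby) -> list[str]:
        return ["sessions"]

    def extract_from_row(self, row, group) -> float | None:
        return None if row is None else row.get("sessions")


class DurationAverageField:
    def get_snuba_columns(self, raw_groupby) -> list[str]:
        return ["duration_avg"]

    def extract_from_row(self, row, group) -> float | None:
        return None if row is None else row.get("duration_avg")


# Both classes satisfy _Field structurally, so the map can be typed without a
# shared base class, which is what the annotation on COLUMN_MAP buys.
COLUMN_MAP: dict[str, _Field] = {
    "sum(session)": SessionsField(),
    "avg(session.duration)": DurationAverageField(),
}

row = {"sessions": 12, "duration_avg": 83.0}
print(COLUMN_MAP["sum(session)"].extract_from_row(row, group=None))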
@@ -667,8 +686,8 @@ def make_totals(totals, group): } def get_category_stats( - reason, totals, outcome, category, category_stats: dict[str, int] | None = None - ): + reason, totals, outcome, category, category_stats: _CategoryStats | None = None + ) -> _CategoryStats: if not category_stats: category_stats = { "category": category, @@ -697,7 +716,7 @@ def get_category_stats( return category_stats keys = set(total_groups.keys()) - projects = {} + projects: dict[int, dict[str, dict[str, _CategoryStats]]] = {} for key in keys: by = dict(key) @@ -708,8 +727,7 @@ def get_category_stats( totals = make_totals(total_groups.get(key, [None]), by) - if project_id not in projects: - projects[project_id] = {"categories": {}} + projects.setdefault(project_id, {"categories": {}}) if category in projects[project_id]["categories"]: # update stats dict for category @@ -763,18 +781,16 @@ def get_timestamps(query): def _split_rows_groupby(rows, groupby): - groups = {} + groups: dict[frozenset[str], list[object]] = {} if rows is None: return groups for row in rows: key_parts = (group.get_keys_for_row(row) for group in groupby) keys = itertools.product(*key_parts) - for key in keys: - key = frozenset(key) + for key_tup in keys: + key = frozenset(key_tup) - if key not in groups: - groups[key] = [] - groups[key].append(row) + groups.setdefault(key, []).append(row) return groups diff --git a/src/sentry/snuba/spans_eap.py b/src/sentry/snuba/spans_eap.py index 5356f2fd29f64e..35ebede9dc6d7e 100644 --- a/src/sentry/snuba/spans_eap.py +++ b/src/sentry/snuba/spans_eap.py @@ -1,6 +1,7 @@ import logging from collections.abc import Mapping, Sequence from datetime import timedelta +from typing import Any, TypedDict import sentry_sdk from snuba_sdk import Column, Condition @@ -49,8 +50,7 @@ def query( dataset: Dataset = Dataset.Discover, fallback_to_transactions: bool = False, query_source: QuerySource | None = None, - enable_rpc: bool | None = False, -): +) -> EventsResponse: builder = SpansEAPQueryBuilder( Dataset.EventsAnalyticsPlatform, {}, @@ -244,14 +244,16 @@ def top_events_timeseries( snuba_params.end_date, rollup, ) + with sentry_sdk.start_span(op="spans_indexed", name="top_events.transform_results") as span: span.set_data("result_count", len(result.get("data", []))) result = top_events_builder.process_results(result) + other_result = top_events_builder.process_results(other_result) issues: Mapping[int, str | None] = {} translated_groupby = top_events_builder.translated_groupby - results = ( + results: dict[str, TimeseriesResult] = ( {discover.OTHER_KEY: {"order": limit, "data": other_result["data"]}} if len(other_result.get("data", [])) else {} @@ -292,3 +294,8 @@ def top_events_timeseries( ) return top_events_results + + +class TimeseriesResult(TypedDict): + order: int + data: list[dict[str, Any]] diff --git a/src/sentry/snuba/spans_indexed.py b/src/sentry/snuba/spans_indexed.py index 602e0e5c256b83..2ef001caecbd67 100644 --- a/src/sentry/snuba/spans_indexed.py +++ b/src/sentry/snuba/spans_indexed.py @@ -101,7 +101,7 @@ def timeseries_query( equations, columns = categorize_columns(selected_columns) with sentry_sdk.start_span(op="spans_indexed", name="TimeseriesSpanIndexedQueryBuilder"): - query = TimeseriesSpanIndexedQueryBuilder( + query_obj = TimeseriesSpanIndexedQueryBuilder( Dataset.SpansIndexed, {}, rollup, @@ -113,16 +113,16 @@ def timeseries_query( transform_alias_to_input_format=transform_alias_to_input_format, ), ) - result = query.run_query(referrer, query_source=query_source) + result = 
query_obj.run_query(referrer, query_source=query_source) with sentry_sdk.start_span(op="spans_indexed", name="query.transform_results"): - result = query.process_results(result) + result = query_obj.process_results(result) result["data"] = ( discover.zerofill( result["data"], snuba_params.start_date, snuba_params.end_date, rollup, - "time", + ["time"], ) if zerofill_results else result["data"] @@ -229,7 +229,7 @@ def top_events_timeseries( { "data": ( discover.zerofill( - [], snuba_params.start_date, snuba_params.end_date, rollup, "time" + [], snuba_params.start_date, snuba_params.end_date, rollup, ["time"] ) if zerofill_results else [] @@ -243,7 +243,7 @@ def top_events_timeseries( span.set_data("result_count", len(result.get("data", []))) result = top_events_builder.process_results(result) - issues = {} + issues: dict[int, str | None] = {} translated_groupby = top_events_builder.translated_groupby results = ( @@ -264,8 +264,9 @@ def top_events_timeseries( "spans_indexed.top-events.timeseries.key-mismatch", extra={"result_key": result_key, "top_event_keys": list(results.keys())}, ) - for key, item in results.items(): - results[key] = SnubaTSResult( + + return { + key: SnubaTSResult( { "data": ( discover.zerofill( @@ -273,7 +274,7 @@ def top_events_timeseries( snuba_params.start_date, snuba_params.end_date, rollup, - "time", + ["time"], ) if zerofill_results else item["data"] @@ -284,5 +285,5 @@ def top_events_timeseries( snuba_params.end_date, rollup, ) - - return results + for key, item in results.items() + } diff --git a/src/sentry/snuba/spans_rpc.py b/src/sentry/snuba/spans_rpc.py index 930aee26eda83c..9b56b742b8c78e 100644 --- a/src/sentry/snuba/spans_rpc.py +++ b/src/sentry/snuba/spans_rpc.py @@ -1,7 +1,9 @@ import logging +from collections import defaultdict from datetime import timedelta from typing import Any +import sentry_sdk from sentry_protos.snuba.v1.endpoint_time_series_pb2 import TimeSeries, TimeSeriesRequest from sentry_protos.snuba.v1.endpoint_trace_item_table_pb2 import Column, TraceItemTableRequest from sentry_protos.snuba.v1.trace_item_attribute_pb2 import AttributeAggregation, AttributeKey @@ -29,6 +31,7 @@ def categorize_column(column: ResolvedColumn | ResolvedFunction) -> Column: return Column(key=column.proto_definition, label=column.public_alias) +@sentry_sdk.trace def run_table_query( params: SnubaParams, query_string: str, @@ -45,8 +48,9 @@ def run_table_query( SearchResolver(params=params, config=config) if search_resolver is None else search_resolver ) meta = resolver.resolve_meta(referrer=referrer) - query = resolver.resolve_query(query_string) - columns, contexts = resolver.resolve_columns(selected_columns) + query, query_contexts = resolver.resolve_query(query_string) + columns, column_contexts = resolver.resolve_columns(selected_columns) + contexts = resolver.clean_contexts(query_contexts + column_contexts) # We allow orderby function_aliases if they're a selected_column # eg. 
can orderby sum_span_self_time, assuming sum(span.self_time) is selected orderby_aliases = { @@ -153,7 +157,7 @@ def get_timeseries_query( ) -> TimeSeriesRequest: resolver = SearchResolver(params=params, config=config) meta = resolver.resolve_meta(referrer=referrer) - query = resolver.resolve_query(query_string) + query, query_contexts = resolver.resolve_query(query_string) (aggregations, _) = resolver.resolve_aggregates(y_axes) (groupbys, _) = resolver.resolve_columns(groupby) if extra_conditions is not None: @@ -176,6 +180,8 @@ def get_timeseries_query( if isinstance(groupby.proto_definition, AttributeKey) ], granularity_secs=granularity_secs, + # TODO: need to add this once the RPC supports it + # virtual_column_contexts=[context for context in resolver.clean_contexts(query_contexts) if context is not None], ) @@ -195,6 +201,7 @@ def validate_granularity( ) +@sentry_sdk.trace def run_timeseries_query( params: SnubaParams, query_string: str, @@ -217,7 +224,7 @@ def run_timeseries_query( result: SnubaData = [] confidences: SnubaData = [] for timeseries in rpc_response.result_timeseries: - processed, confidence = _process_timeseries(timeseries, params, granularity_secs) + processed, confidence = _process_all_timeseries([timeseries], params, granularity_secs) if len(result) == 0: result = processed confidences = confidence @@ -253,7 +260,7 @@ def run_timeseries_query( if comp_rpc_response.result_timeseries: timeseries = comp_rpc_response.result_timeseries[0] - processed, _ = _process_timeseries(timeseries, params, granularity_secs) + processed, _ = _process_all_timeseries([timeseries], params, granularity_secs) label = get_function_alias(timeseries.label) for existing, new in zip(result, processed): existing["comparisonCount"] = new[label] @@ -266,6 +273,7 @@ def run_timeseries_query( ) +@sentry_sdk.trace def build_top_event_conditions( resolver: SearchResolver, top_events: EAPResponse, groupby_columns: list[str] ) -> Any: @@ -281,7 +289,7 @@ def build_top_event_conditions( ] else: value = event[key] - resolved_term = resolver.resolve_term( + resolved_term, context = resolver.resolve_term( SearchFilter( key=SearchKey(name=key), operator="=", @@ -290,7 +298,7 @@ def build_top_event_conditions( ) if resolved_term is not None: row_conditions.append(resolved_term) - other_term = resolver.resolve_term( + other_term, context = resolver.resolve_term( SearchFilter( key=SearchKey(name=key), operator="!=", @@ -373,7 +381,7 @@ def run_top_events_timeseries_query( other_response = snuba_rpc.timeseries_rpc(other_request) """Process the results""" - map_result_key_to_timeseries = {} + map_result_key_to_timeseries = defaultdict(list) for timeseries in rpc_response.result_timeseries: groupby_attributes = timeseries.group_by_attributes remapped_groupby = {} @@ -388,12 +396,12 @@ def run_top_events_timeseries_query( resolved_groupby, _ = search_resolver.resolve_attribute(col) remapped_groupby[col] = groupby_attributes[resolved_groupby.internal_name] result_key = create_result_key(remapped_groupby, groupby_columns, {}) - map_result_key_to_timeseries[result_key] = timeseries + map_result_key_to_timeseries[result_key].append(timeseries) final_result = {} # Top Events actually has the order, so we need to iterate through it, regenerate the result keys for index, row in enumerate(top_events["data"]): result_key = create_result_key(row, groupby_columns, {}) - result_data, result_confidence = _process_timeseries( + result_data, result_confidence = _process_all_timeseries( map_result_key_to_timeseries[result_key], 
params, granularity_secs, @@ -409,8 +417,8 @@ def run_top_events_timeseries_query( granularity_secs, ) if other_response.result_timeseries: - result_data, result_confidence = _process_timeseries( - other_response.result_timeseries[0], + result_data, result_confidence = _process_all_timeseries( + [timeseries for timeseries in other_response.result_timeseries], params, granularity_secs, ) @@ -427,19 +435,29 @@ def run_top_events_timeseries_query( return final_result -def _process_timeseries( - timeseries: TimeSeries, params: SnubaParams, granularity_secs: int, order: int | None = None +def _process_all_timeseries( + all_timeseries: list[TimeSeries], + params: SnubaParams, + granularity_secs: int, + order: int | None = None, ) -> tuple[SnubaData, SnubaData]: result: SnubaData = [] confidence: SnubaData = [] - # Timeseries serialization expects the function alias (eg. `count` not `count()`) - label = get_function_alias(timeseries.label) - if len(result) < len(timeseries.buckets): - for bucket in timeseries.buckets: - result.append({"time": bucket.seconds}) - confidence.append({"time": bucket.seconds}) - for index, data_point in enumerate(timeseries.data_points): - result[index][label] = process_value(data_point.data) - confidence[index][label] = CONFIDENCES.get(data_point.reliability, None) + + for timeseries in all_timeseries: + # Timeseries serialization expects the function alias (eg. `count` not `count()`) + label = get_function_alias(timeseries.label) + if result: + for index, bucket in enumerate(timeseries.buckets): + assert result[index]["time"] == bucket.seconds + assert confidence[index]["time"] == bucket.seconds + else: + for bucket in timeseries.buckets: + result.append({"time": bucket.seconds}) + confidence.append({"time": bucket.seconds}) + + for index, data_point in enumerate(timeseries.data_points): + result[index][label] = process_value(data_point.data) + confidence[index][label] = CONFIDENCES.get(data_point.reliability, None) return result, confidence diff --git a/src/sentry/snuba/types.py b/src/sentry/snuba/types.py new file mode 100644 index 00000000000000..ae6c599819ad0c --- /dev/null +++ b/src/sentry/snuba/types.py @@ -0,0 +1,40 @@ +from typing import Protocol + +from snuba_sdk import Column, Condition + +from sentry.search.events.types import EventsResponse, SnubaParams +from sentry.snuba.dataset import Dataset +from sentry.snuba.metrics.extraction import MetricSpecType +from sentry.snuba.query_sources import QuerySource + + +class DatasetQuery(Protocol): + def __call__( + self, + selected_columns: list[str], + query: str, + snuba_params: SnubaParams, + equations: list[str] | None = None, + orderby: list[str] | None = None, + offset: int | None = None, + limit: int = 50, + referrer: str | None = None, + auto_fields: bool = False, + auto_aggregations: bool = False, + include_equation_fields: bool = False, + allow_metric_aggregates: bool = False, + use_aggregate_conditions: bool = False, + conditions: list[Condition] | None = None, + functions_acl: list[str] | None = None, + transform_alias_to_input_format: bool = False, + sample: float | None = None, + has_metrics: bool = False, + use_metrics_layer: bool = False, + skip_tag_resolution: bool = False, + extra_columns: list[Column] | None = None, + on_demand_metrics_enabled: bool = False, + on_demand_metrics_type: MetricSpecType | None = None, + dataset: Dataset = Dataset.Discover, + fallback_to_transactions: bool = False, + query_source: QuerySource | None = None, + ) -> EventsResponse: ... 
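The new DatasetQuery Protocol above exists so callers such as metrics_enhanced_performance.query can hold either discover.query or transactions.query in a single typed variable. Below is a minimal sketch of that callback-Protocol pattern; the two query functions and the two-parameter Protocol are deliberately simplified stand-ins for the real signatures.

# Standalone sketch of the DatasetQuery callback pattern introduced above.
from typing import Protocol


class DatasetQuery(Protocol):
    def __call__(self, selected_columns: list[str], query: str) -> dict: ...


def discover_query(selected_columns: list[str], query: str) -> dict:
    # Stand-in for discover.query.
    return {"dataset": "discover", "data": []}


def transactions_query(selected_columns: list[str], query: str) -> dict:
    # Stand-in for transactions.query.
    return {"dataset": "transactions", "data": []}


def run(fallback_to_transactions: bool) -> dict:
    # Mirrors the branch in metrics_enhanced_performance.query: pick the
    # callable up front, then invoke it once.
    dataset_query: DatasetQuery = (
        transactions_query if fallback_to_transactions else discover_query
    )
    return dataset_query(["transaction", "count()"], query="event.type:transaction")


print(run(fallback_to_transactions=True)["dataset"])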
diff --git a/src/sentry/statistical_detectors/detector.py b/src/sentry/statistical_detectors/detector.py index 0b9540ddc5a293..c633d46c7beca0 100644 --- a/src/sentry/statistical_detectors/detector.py +++ b/src/sentry/statistical_detectors/detector.py @@ -62,6 +62,10 @@ class RegressionDetector(ABC): resolution_rel_threshold: float escalation_rel_threshold: float + @classmethod + @abstractmethod + def min_throughput_threshold(cls) -> int: ... + @classmethod def configure_tags(cls): sentry_sdk.set_tag("regression.source", cls.source) @@ -105,20 +109,28 @@ def detect_trends( unique_project_ids: set[int] = set() total_count = 0 + skipped_count = 0 regressed_count = 0 improved_count = 0 algorithm = cls.detector_algorithm_factory() store = cls.detector_store_factory() - for payloads in chunked(cls.all_payloads(projects, start), batch_size): - total_count += len(payloads) + for raw_payloads in chunked(cls.all_payloads(projects, start), batch_size): + total_count += len(raw_payloads) - raw_states = store.bulk_read_states(payloads) + raw_states = store.bulk_read_states(raw_payloads) + payloads = [] states = [] - for raw_state, payload in zip(raw_states, payloads): + for raw_state, payload in zip(raw_states, raw_payloads): + # If the number of events is too low, then we skip updating + # to minimize false positives + if payload.count <= cls.min_throughput_threshold(): + skipped_count += 1 + continue + metrics.distribution( "statistical_detectors.objects.throughput", value=payload.count, @@ -133,6 +145,7 @@ def detect_trends( elif trend_type == TrendType.Improved: improved_count += 1 + payloads.append(payload) states.append(None if new_state is None else new_state.to_redis_dict()) yield TrendBundle( @@ -142,7 +155,8 @@ def detect_trends( state=new_state, ) - store.bulk_write_states(payloads, states) + if payloads and states: + store.bulk_write_states(payloads, states) metrics.incr( "statistical_detectors.projects.active", @@ -158,6 +172,13 @@ def detect_trends( sample_rate=1.0, ) + metrics.incr( + "statistical_detectors.objects.skipped", + amount=skipped_count, + tags={"source": cls.source, "kind": cls.kind}, + sample_rate=1.0, + ) + metrics.incr( "statistical_detectors.objects.regressed", amount=regressed_count, diff --git a/src/sentry/tagstore/types.py b/src/sentry/tagstore/types.py index 2ca77f628792ef..4c31ca71f5f0f0 100644 --- a/src/sentry/tagstore/types.py +++ b/src/sentry/tagstore/types.py @@ -137,7 +137,7 @@ class TagValueSerializerResponse(TagValueSerializerResponseOptional): @register(GroupTagValue) @register(TagValue) class TagValueSerializer(Serializer): - def serialize(self, obj, attrs, user, **kwargs): + def serialize(self, obj, attrs, user, **kwargs) -> TagValueSerializerResponse: from sentry import tagstore key = tagstore.get_standardized_key(obj.key) diff --git a/src/sentry/tasks/base.py b/src/sentry/tasks/base.py index 3b703d27fd9fb5..3a2c404b4a7150 100644 --- a/src/sentry/tasks/base.py +++ b/src/sentry/tasks/base.py @@ -1,9 +1,7 @@ from __future__ import annotations import logging -import resource from collections.abc import Callable, Iterable -from contextlib import contextmanager from datetime import datetime from functools import wraps from typing import Any, TypeVar @@ -15,6 +13,7 @@ from sentry.celery import app from sentry.silo.base import SiloLimit, SiloMode from sentry.utils import metrics +from sentry.utils.memory import track_memory_usage from sentry.utils.sdk import Scope, capture_exception ModelT = TypeVar("ModelT", bound=Model) @@ -59,19 +58,6 @@ def __call__(self, 
decorated_task: Any) -> Any: return limited_func -def get_rss_usage(): - return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss - - -@contextmanager -def track_memory_usage(metric, **kwargs): - before = get_rss_usage() - try: - yield - finally: - metrics.distribution(metric, get_rss_usage() - before, unit="byte", **kwargs) - - def load_model_from_db( tp: type[ModelT], instance_or_id: ModelT | int, allow_cache: bool = True ) -> ModelT: diff --git a/src/sentry/tasks/commit_context.py b/src/sentry/tasks/commit_context.py index f017618e8199f5..75a8dc0de2084b 100644 --- a/src/sentry/tasks/commit_context.py +++ b/src/sentry/tasks/commit_context.py @@ -231,6 +231,9 @@ def process_commit_context( project_id=project.id, group_id=group_id, new_assignment=created, + user_id=group_owner.user_id, + group_owner_type=group_owner.type, + method="scm_integration", ) except UnableToAcquireLock: pass diff --git a/src/sentry/tasks/delete_seer_grouping_records.py b/src/sentry/tasks/delete_seer_grouping_records.py index 4deaec4d3525a5..acbe9a4dd4e068 100644 --- a/src/sentry/tasks/delete_seer_grouping_records.py +++ b/src/sentry/tasks/delete_seer_grouping_records.py @@ -8,7 +8,7 @@ delete_grouping_records_by_hash, delete_project_grouping_records, ) -from sentry.seer.similarity.utils import killswitch_enabled +from sentry.seer.similarity.utils import ReferrerOptions, killswitch_enabled from sentry.silo.base import SiloMode from sentry.tasks.base import instrumented_task @@ -34,7 +34,7 @@ def delete_seer_grouping_records_by_hash( Task to delete seer grouping records by hash list. Calls the seer delete by hash endpoint with batches of hashes of size `BATCH_SIZE`. """ - if killswitch_enabled(project_id) or options.get( + if killswitch_enabled(project_id, ReferrerOptions.DELETION) or options.get( "seer.similarity-embeddings-delete-by-hash-killswitch.enabled" ): return @@ -57,7 +57,7 @@ def call_delete_seer_grouping_records_by_hash( if ( project and project.get_option("sentry:similarity_backfill_completed") - and not killswitch_enabled(project.id) + and not killswitch_enabled(project.id, ReferrerOptions.DELETION) and not options.get("seer.similarity-embeddings-delete-by-hash-killswitch.enabled") ): # TODO (jangjodi): once we store seer grouping info in GroupHash, we should filter by that here @@ -86,7 +86,7 @@ def call_seer_delete_project_grouping_records( *args: Any, **kwargs: Any, ) -> None: - if killswitch_enabled(project_id) or options.get( + if killswitch_enabled(project_id, ReferrerOptions.DELETION) or options.get( "seer.similarity-embeddings-delete-by-hash-killswitch.enabled" ): return diff --git a/src/sentry/tasks/embeddings_grouping/backfill_seer_grouping_records_for_project.py b/src/sentry/tasks/embeddings_grouping/backfill_seer_grouping_records_for_project.py index f22311cb035081..ea090ab018d507 100644 --- a/src/sentry/tasks/embeddings_grouping/backfill_seer_grouping_records_for_project.py +++ b/src/sentry/tasks/embeddings_grouping/backfill_seer_grouping_records_for_project.py @@ -9,11 +9,15 @@ from sentry.grouping.api import GroupingConfigNotFound from sentry.grouping.enhancer.exceptions import InvalidEnhancerConfig from sentry.models.project import Project -from sentry.seer.similarity.utils import killswitch_enabled, project_is_seer_eligible +from sentry.seer.similarity.utils import ( + ReferrerOptions, + killswitch_enabled, + project_is_seer_eligible, +) from sentry.silo.base import SiloMode from sentry.tasks.base import instrumented_task from sentry.tasks.embeddings_grouping.utils import ( - 
FeatureError, + NODESTORE_RETRY_EXCEPTIONS, GroupStacktraceData, create_project_cohort, delete_seer_grouping_records, @@ -80,7 +84,7 @@ def backfill_seer_grouping_records_for_project( assert current_project_id is not None if options.get("seer.similarity-backfill-killswitch.enabled") or killswitch_enabled( - current_project_id + current_project_id, ReferrerOptions.BACKFILL ): logger.info("backfill_seer_grouping_records.killswitch_enabled") return @@ -109,13 +113,6 @@ def backfill_seer_grouping_records_for_project( last_processed_group_id_input, last_processed_project_index_input, ) - except FeatureError: - logger.info( - "backfill_seer_grouping_records.no_feature", - extra={"current_project_id": current_project_id}, - ) - # TODO: let's just delete this branch since feature is on - return except Project.DoesNotExist: logger.info( "backfill_seer_grouping_records.project_does_not_exist", @@ -226,6 +223,16 @@ def backfill_seer_grouping_records_for_project( except EVENT_INFO_EXCEPTIONS: metrics.incr("sentry.tasks.backfill_seer_grouping_records.grouping_config_error") nodestore_results, group_hashes_dict = GroupStacktraceData(data=[], stacktrace_list=[]), {} + except NODESTORE_RETRY_EXCEPTIONS as e: + extra = { + "organization_id": project.organization.id, + "project_id": project.id, + "error": e.message, + } + logger.exception( + "tasks.backfill_seer_grouping_records.bulk_event_lookup_exception", extra=extra + ) + group_hashes_dict = {} if not group_hashes_dict: call_next_backfill( @@ -305,7 +312,7 @@ def call_next_backfill( only_delete: bool = False, last_processed_group_id: int | None = None, last_processed_project_id: int | None = None, -): +) -> None: if last_processed_group_id is not None: backfill_seer_grouping_records_for_project.apply_async( args=[ diff --git a/src/sentry/tasks/embeddings_grouping/utils.py b/src/sentry/tasks/embeddings_grouping/utils.py index b457ce50322c7e..84f121ff440ec2 100644 --- a/src/sentry/tasks/embeddings_grouping/utils.py +++ b/src/sentry/tasks/embeddings_grouping/utils.py @@ -10,12 +10,11 @@ from google.api_core.exceptions import DeadlineExceeded, ServiceUnavailable from snuba_sdk import Column, Condition, Entity, Limit, Op, Query, Request -from sentry import features, nodestore, options +from sentry import nodestore, options from sentry.conf.server import SEER_SIMILARITY_MODEL_VERSION from sentry.eventstore.models import Event -from sentry.grouping.grouping_info import get_grouping_info +from sentry.grouping.grouping_info import get_grouping_info_from_variants from sentry.issues.grouptype import ErrorGroupType -from sentry.issues.occurrence_consumer import EventLookupError from sentry.models.group import Group, GroupStatus from sentry.models.project import Project from sentry.seer.similarity.grouping_records import ( @@ -36,6 +35,7 @@ event_content_has_stacktrace, filter_null_from_string, get_stacktrace_string_with_metrics, + has_too_many_contributing_frames, ) from sentry.snuba.dataset import Dataset from sentry.snuba.referrer import Referrer @@ -57,10 +57,6 @@ logger = logging.getLogger(__name__) -class FeatureError(Exception): - pass - - class GroupEventRow(TypedDict): event_id: str group_id: int @@ -142,8 +138,6 @@ def initialize_backfill( }, ) project = Project.objects.get_from_cache(id=project_id) - if not features.has("projects:similarity-embeddings-backfill", project): - raise FeatureError("Project does not have feature") last_processed_project_index_ret = ( last_processed_project_index if last_processed_project_index else 0 @@ -363,11 +357,17 @@ def 
get_events_from_nodestore( bulk_event_ids = set() for group_id, event in nodestore_events.items(): event._project_cache = project - if event and event.data and event_content_has_stacktrace(event): - grouping_info = get_grouping_info(None, project=project, event=event) - stacktrace_string = get_stacktrace_string_with_metrics( - grouping_info, event.platform, ReferrerOptions.BACKFILL - ) + stacktrace_string = None + + if event and event_content_has_stacktrace(event): + variants = event.get_grouping_variants(normalize_stacktraces=True) + + if not has_too_many_contributing_frames(event, variants, ReferrerOptions.BACKFILL): + grouping_info = get_grouping_info_from_variants(variants) + stacktrace_string = get_stacktrace_string_with_metrics( + grouping_info, event.platform, ReferrerOptions.BACKFILL + ) + if not stacktrace_string: invalid_event_group_ids.append(group_id) continue @@ -571,26 +571,13 @@ def update_groups(project, seer_response, group_id_batch_filtered, group_hashes_ def _make_nodestore_call(project, node_keys): - try: - bulk_data = _retry_operation( - nodestore.backend.get_multi, - node_keys, - retries=3, - delay=2, - exceptions=NODESTORE_RETRY_EXCEPTIONS, - ) - except NODESTORE_RETRY_EXCEPTIONS as e: - extra = { - "organization_id": project.organization.id, - "project_id": project.id, - "node_keys": json.dumps(node_keys), - "error": e.message, - } - logger.exception( - "tasks.backfill_seer_grouping_records.bulk_event_lookup_exception", - extra=extra, - ) - raise + bulk_data = _retry_operation( + nodestore.backend.get_multi, + node_keys, + retries=3, + delay=2, + exceptions=NODESTORE_RETRY_EXCEPTIONS, + ) return bulk_data @@ -701,16 +688,6 @@ def _retry_operation(operation, *args, retries, delay, exceptions, **kwargs): raise -# TODO: delete this and its tests -def lookup_event(project_id: int, event_id: str, group_id: int) -> Event: - data = nodestore.backend.get(Event.generate_node_id(project_id, event_id)) - if data is None: - raise EventLookupError(f"Failed to lookup event({event_id}) for project_id({project_id})") - event = Event(event_id=event_id, project_id=project_id, group_id=group_id) - event.data = data - return event - - def delete_seer_grouping_records( project_id: int, ): diff --git a/src/sentry/tasks/groupowner.py b/src/sentry/tasks/groupowner.py index e5371142994479..81c7a1b0816f99 100644 --- a/src/sentry/tasks/groupowner.py +++ b/src/sentry/tasks/groupowner.py @@ -4,6 +4,7 @@ from django.utils import timezone +from sentry import analytics from sentry.locks import locks from sentry.models.commit import Commit from sentry.models.groupowner import GroupOwner, GroupOwnerType @@ -103,6 +104,17 @@ def _process_suspect_commits( "project": project_id, }, ) + analytics.record( + "groupowner.assignment", + organization_id=project.organization_id, + project_id=project.id, + group_id=group_id, + new_assignment=created, + user_id=go.user_id, + group_owner_type=go.type, + method="release_commit", + ) + except GroupOwner.MultipleObjectsReturned: GroupOwner.objects.filter( group_id=group_id, diff --git a/src/sentry/tasks/on_demand_metrics.py b/src/sentry/tasks/on_demand_metrics.py index 2b38c0956ff1cb..25e095405880c1 100644 --- a/src/sentry/tasks/on_demand_metrics.py +++ b/src/sentry/tasks/on_demand_metrics.py @@ -61,10 +61,6 @@ def _set_currently_processing_batch(current_batch: int) -> None: cache.set(_get_widget_processing_batch_key(), current_batch, timeout=3600) -def _set_cardinality_cache(cache_key: str, is_low_cardinality: bool) -> None: - cache.set(cache_key, 
is_low_cardinality, timeout=_WIDGET_QUERY_CARDINALITY_TTL) - - def _get_previous_processing_batch() -> int: return cache.get(_get_widget_processing_batch_key(), 0) diff --git a/src/sentry/tasks/post_process.py b/src/sentry/tasks/post_process.py index 35bbc5e8a21fb8..9cda6d27f367a1 100644 --- a/src/sentry/tasks/post_process.py +++ b/src/sentry/tasks/post_process.py @@ -40,6 +40,7 @@ from sentry.utils.sdk import bind_organization_context, set_current_event_project from sentry.utils.sdk_crashes.sdk_crash_detection_config import build_sdk_crash_detection_configs from sentry.utils.services import build_instance_from_options_of_type +from sentry.workflow_engine.types import WorkflowJob if TYPE_CHECKING: from sentry.eventstore.models import Event, GroupEvent @@ -679,35 +680,30 @@ def get_event_raise_exception() -> Event: ) metric_tags["occurrence_type"] = group_event.group.issue_type.slug - if not is_reprocessed and event.data.get("received"): - duration = time() - event.data["received"] - metrics.timing( - "events.time-to-post-process", - duration, - instance=event.data["platform"], - tags=metric_tags, - ) - - # We see occasional metrics being recorded with very old data, - # temporarily log some information about these groups to help - # investigate. - if duration and duration > 432_000: # 5 days (5*24*60*60) - logger.warning( - "tasks.post_process.old_time_to_post_process", - extra={ - "group_id": group_id, - "project_id": project_id, - "duration": duration, - "received": event.data["received"], - "platform": event.data["platform"], - "reprocessing": json.dumps( - get_path(event.data, "contexts", "reprocessing") - ), - "original_issue_id": json.dumps( - get_path(event.data, "contexts", "reprocessing", "original_issue_id") - ), - }, + if not is_reprocessed: + received_at = event.data.get("received") + saved_at = event.data.get("nodestore_insert") + post_processed_at = time() + + if saved_at: + metrics.timing( + "events.saved_to_post_processed", + post_processed_at - saved_at, + instance=event.data["platform"], + tags=metric_tags, ) + else: + metrics.incr("events.missing_nodestore_insert", tags=metric_tags) + + if received_at: + metrics.timing( + "events.time-to-post-process", + post_processed_at - received_at, + instance=event.data["platform"], + tags=metric_tags, + ) + else: + metrics.incr("events.missing_received", tags=metric_tags) def run_post_process_job(job: PostProcessJob) -> None: @@ -720,12 +716,12 @@ def run_post_process_job(job: PostProcessJob) -> None: ): return - if issue_category not in GROUP_CATEGORY_POST_PROCESS_PIPELINE: - # pipeline for generic issues - pipeline = GENERIC_POST_PROCESS_PIPELINE - else: + if issue_category in GROUP_CATEGORY_POST_PROCESS_PIPELINE: # specific pipelines for issue types pipeline = GROUP_CATEGORY_POST_PROCESS_PIPELINE[issue_category] + else: + # pipeline for generic issues + pipeline = GENERIC_POST_PROCESS_PIPELINE for pipeline_step in pipeline: try: @@ -994,6 +990,29 @@ def _get_replay_id(event): ) +def process_workflow_engine(job: PostProcessJob) -> None: + if job["is_reprocessed"]: + return + + # TODO - Add a rollout flag check here, if it's not enabled, call process_rules + # If the flag is enabled, use the code below + from sentry.workflow_engine.processors.workflow import process_workflows + + # PostProcessJob event is optional, WorkflowJob event is required + if "event" not in job: + logger.error("Missing event to create WorkflowJob", extra={"job": job}) + return + + try: + workflow_job = WorkflowJob({**job}) # type: ignore[typeddict-item] + 
except Exception: + logger.exception("Could not create WorkflowJob", extra={"job": job}) + return + + with sentry_sdk.start_span(op="tasks.post_process_group.workflow_engine.process_workflow"): + process_workflows(workflow_job) + + def process_rules(job: PostProcessJob) -> None: if job["is_reprocessed"]: return @@ -1558,6 +1577,9 @@ def detect_base_urls_for_uptime(job: PostProcessJob): feedback_filter_decorator(process_inbox_adds), feedback_filter_decorator(process_rules), ], + GroupCategory.METRIC_ALERT: [ + process_workflow_engine, + ], } GENERIC_POST_PROCESS_PIPELINE = [ diff --git a/src/sentry/tasks/statistical_detectors.py b/src/sentry/tasks/statistical_detectors.py index acc63ab4a7a6d6..8a1f1259ab94e4 100644 --- a/src/sentry/tasks/statistical_detectors.py +++ b/src/sentry/tasks/statistical_detectors.py @@ -236,6 +236,10 @@ class EndpointRegressionDetector(RegressionDetector): resolution_rel_threshold = 0.1 escalation_rel_threshold = 0.75 + @classmethod + def min_throughput_threshold(cls) -> int: + return options.get("statistical_detectors.throughput.threshold.transactions") + @classmethod def detector_algorithm_factory(cls) -> DetectorAlgorithm: return MovingAverageRelativeChangeDetector( @@ -278,6 +282,10 @@ class FunctionRegressionDetector(RegressionDetector): resolution_rel_threshold = 0.1 escalation_rel_threshold = 0.75 + @classmethod + def min_throughput_threshold(cls) -> int: + return options.get("statistical_detectors.throughput.threshold.functions") + @classmethod def detector_algorithm_factory(cls) -> DetectorAlgorithm: return MovingAverageRelativeChangeDetector( diff --git a/src/sentry/taskworker/client.py b/src/sentry/taskworker/client.py index bb1549d4d3aa4a..6beb6886b30e51 100644 --- a/src/sentry/taskworker/client.py +++ b/src/sentry/taskworker/client.py @@ -2,6 +2,7 @@ import grpc from sentry_protos.sentry.v1.taskworker_pb2 import ( + FetchNextTask, GetTaskRequest, SetTaskStatusRequest, TaskActivation, @@ -24,13 +25,14 @@ def __init__(self, host: str) -> None: self._channel = grpc.insecure_channel(self._host) self._stub = ConsumerServiceStub(self._channel) - def get_task(self) -> TaskActivation | None: + def get_task(self, namespace: str | None = None) -> TaskActivation | None: """ - Fetch a pending task + Fetch a pending task. - Will return None when there are no tasks to fetch + If a namespace is provided, only tasks for that namespace will be fetched. + This will return None if there are no tasks to fetch. """ - request = GetTaskRequest() + request = GetTaskRequest(namespace=namespace) try: response = self._stub.GetTask(request) except grpc.RpcError as err: @@ -42,7 +44,7 @@ def get_task(self) -> TaskActivation | None: return None def update_task( - self, task_id: str, status: TaskActivationStatus.ValueType, fetch_next: bool = True + self, task_id: str, status: TaskActivationStatus.ValueType, fetch_next_task: FetchNextTask ) -> TaskActivation | None: """ Update the status for a given task activation. 
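Taken together, the client changes above let a worker scope its polling to a single task namespace and request the next activation as part of each status update. A minimal sketch of how a caller might drive the updated TaskworkerClient API (the broker address, the polling loop, and the inline handler are illustrative assumptions; only the method signatures and protobuf types come from this change):

from sentry_protos.sentry.v1.taskworker_pb2 import (
    TASK_ACTIVATION_STATUS_COMPLETE,
    FetchNextTask,
    TaskActivation,
)

from sentry.taskworker.client import TaskworkerClient


def handle(activation: TaskActivation) -> None:
    # Illustrative stand-in for real task execution; the actual worker resolves
    # the task through the registry and calls it.
    print(f"processing {activation.namespace}:{activation.taskname}")


client = TaskworkerClient("127.0.0.1:50051")  # assumed broker address

# Only fetch activations registered under the "examples" namespace.
activation = client.get_task(namespace="examples")
while activation is not None:
    handle(activation)
    # Report completion and fetch the next activation in the same round trip.
    activation = client.update_task(
        task_id=activation.id,
        status=TASK_ACTIVATION_STATUS_COMPLETE,
        fetch_next_task=FetchNextTask(namespace="examples"),
    )

The worker changes later in this diff follow the same pattern, passing the configured namespace to both get_task and FetchNextTask.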
@@ -52,7 +54,7 @@ def update_task( request = SetTaskStatusRequest( id=task_id, status=status, - fetch_next=fetch_next, + fetch_next_task=fetch_next_task, ) try: response = self._stub.SetTaskStatus(request) diff --git a/src/sentry/taskworker/registry.py b/src/sentry/taskworker/registry.py index 33423dbf2f3150..90ab9d4e8b55e4 100644 --- a/src/sentry/taskworker/registry.py +++ b/src/sentry/taskworker/registry.py @@ -57,11 +57,19 @@ def producer(self) -> KafkaProducer: return self._producer def get(self, name: str) -> Task[Any, Any]: + """ + Get a registered task by name + + Raises KeyError when an unknown task is provided. + """ if name not in self._registered_tasks: raise KeyError(f"No task registered with the name {name}. Check your imports") return self._registered_tasks[name] def contains(self, name: str) -> bool: + """ + Check if a task name has been registered + """ return name in self._registered_tasks def register( @@ -80,6 +88,7 @@ def register( asynchronously via taskworkers. Parameters + ---------- name: str The name of the task. This is serialized and must be stable across deploys. diff --git a/src/sentry/taskworker/task.py b/src/sentry/taskworker/task.py index 9b7525b0205046..920bcd727fc148 100644 --- a/src/sentry/taskworker/task.py +++ b/src/sentry/taskworker/task.py @@ -7,6 +7,7 @@ from uuid import uuid4 import orjson +import sentry_sdk from django.conf import settings from django.utils import timezone from google.protobuf.timestamp_pb2 import Timestamp @@ -58,15 +59,31 @@ def retry(self) -> Retry | None: return self._retry def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R: + """ + Call the task function immediately. + """ return self._func(*args, **kwargs) def delay(self, *args: P.args, **kwargs: P.kwargs) -> None: + """ + Schedule a task to run later with a set of arguments. + + The provided parameters will be JSON encoded and stored within + a `TaskActivation` protobuf that is appended to kafka + """ self.apply_async(*args, **kwargs) def apply_async(self, *args: P.args, **kwargs: P.kwargs) -> None: + """ + Schedule a task to run later with a set of arguments. 
+ + The provided parameters will be JSON encoded and stored within + a `TaskActivation` protobuf that is appended to kafka + """ if settings.TASK_WORKER_ALWAYS_EAGER: self._func(*args, **kwargs) else: + # TODO(taskworker) promote parameters to headers self._namespace.send_task(self.create_activation(*args, **kwargs)) def create_activation(self, *args: P.args, **kwargs: P.kwargs) -> TaskActivation: @@ -81,10 +98,16 @@ def create_activation(self, *args: P.args, **kwargs: P.kwargs) -> TaskActivation if isinstance(expires, datetime.timedelta): expires = int(expires.total_seconds()) + headers = { + "sentry-trace": sentry_sdk.get_traceparent() or "", + "baggage": sentry_sdk.get_baggage() or "", + } + return TaskActivation( id=uuid4().hex, namespace=self._namespace.name, taskname=self.name, + headers=headers, parameters=orjson.dumps({"args": args, "kwargs": kwargs}).decode("utf8"), retry_state=self._create_retry_state(), received_at=received_at, diff --git a/src/sentry/taskworker/tasks/examples.py b/src/sentry/taskworker/tasks/examples.py index 2e077d70e9b88b..ad623fbe1b706a 100644 --- a/src/sentry/taskworker/tasks/examples.py +++ b/src/sentry/taskworker/tasks/examples.py @@ -3,11 +3,54 @@ import logging from sentry.taskworker.registry import taskregistry +from sentry.taskworker.retry import LastAction, Retry, RetryError logger = logging.getLogger(__name__) + exampletasks = taskregistry.create_namespace(name="examples") @exampletasks.register(name="examples.say_hello") def say_hello(name: str) -> None: - print(f"Hello {name}") # noqa + logger.info("Hello %s", name) + + +@exampletasks.register( + name="examples.retry_deadletter", retry=Retry(times=2, times_exceeded=LastAction.Deadletter) +) +def retry_deadletter() -> None: + raise RetryError + + +@exampletasks.register( + name="examples.will_retry", + retry=Retry(times=3, on=(RuntimeError,), times_exceeded=LastAction.Discard), +) +def will_retry(failure: str) -> None: + if failure == "retry": + logger.info("going to retry with explicit retry error") + raise RetryError + if failure == "raise": + logger.info("raising runtimeerror") + raise RuntimeError("oh no") + logger.info("got %s", failure) + + +@exampletasks.register(name="examples.simple_task") +def simple_task() -> None: + logger.info("simple_task complete") + + +@exampletasks.register(name="examples.retry_task", retry=Retry(times=2)) +def retry_task() -> None: + raise RetryError + + +@exampletasks.register(name="examples.fail_task") +def fail_task() -> None: + raise ValueError("nope") + + +@exampletasks.register(name="examples.at_most_once", at_most_once=True) +def at_most_once_task() -> None: + pass diff --git a/src/sentry/taskworker/worker.py b/src/sentry/taskworker/worker.py index bef131a61298de..5153dafe6de5b2 100644 --- a/src/sentry/taskworker/worker.py +++ b/src/sentry/taskworker/worker.py @@ -10,12 +10,14 @@ import grpc import orjson +import sentry_sdk from django.conf import settings from django.core.cache import cache from sentry_protos.sentry.v1.taskworker_pb2 import ( TASK_ACTIVATION_STATUS_COMPLETE, TASK_ACTIVATION_STATUS_FAILURE, TASK_ACTIVATION_STATUS_RETRY, + FetchNextTask, TaskActivation, ) @@ -23,18 +25,36 @@ from sentry.taskworker.registry import taskregistry from sentry.taskworker.task import Task from sentry.utils import metrics +from sentry.utils.memory import track_memory_usage logger = logging.getLogger("sentry.taskworker.worker") -# Use forking processes so that django is initialized -mp_context = multiprocessing.get_context("fork") +mp_context = 
multiprocessing.get_context("spawn") -def _process_activation( - namespace: str, task_name: str, args: list[Any], kwargs: dict[str, Any] -) -> None: +def _init_pool_process() -> None: + """initialize pool workers by loading all task modules""" + for module in settings.TASKWORKER_IMPORTS: + __import__(module) + + +def _process_activation(activation: TaskActivation) -> None: """multiprocess worker method""" - taskregistry.get(namespace).get(task_name)(*args, **kwargs) + parameters = orjson.loads(activation.parameters) + args = parameters.get("args", []) + kwargs = parameters.get("kwargs", {}) + headers = {k: v for k, v in activation.headers.items()} + + transaction = sentry_sdk.continue_trace( + environ_or_headers=headers, + op="task.taskworker", + name=f"{activation.namespace}:{activation.taskname}", + ) + with ( + track_memory_usage("taskworker.worker.memory_change"), + sentry_sdk.start_transaction(transaction), + ): + taskregistry.get(activation.namespace).get(activation.taskname)(*args, **kwargs) AT_MOST_ONCE_TIMEOUT = 60 * 60 * 24 # 1 day @@ -57,12 +77,17 @@ class TaskWorker: """ def __init__( - self, rpc_host: str, max_task_count: int | None = None, **options: dict[str, Any] + self, + rpc_host: str, + max_task_count: int | None = None, + namespace: str | None = None, + **options: dict[str, Any], ) -> None: self.options = options self._execution_count = 0 self._worker_id = uuid4().hex self._max_task_count = max_task_count + self._namespace = namespace self.client = TaskworkerClient(rpc_host) self._pool: Pool | None = None self._build_pool() @@ -74,7 +99,7 @@ def __del__(self) -> None: def _build_pool(self) -> None: if self._pool: self._pool.terminate() - self._pool = mp_context.Pool(processes=1) + self._pool = mp_context.Pool(processes=1, initializer=_init_pool_process) def do_imports(self) -> None: for module in settings.TASKWORKER_IMPORTS: @@ -124,7 +149,7 @@ def start(self) -> int: def fetch_task(self) -> TaskActivation | None: try: - activation = self.client.get_task() + activation = self.client.get_task(self._namespace) except grpc.RpcError: metrics.incr("taskworker.worker.get_task.failed") logger.info("get_task failed. 
Retrying in 1 second") @@ -132,7 +157,7 @@ def fetch_task(self) -> TaskActivation | None: if not activation: metrics.incr("taskworker.worker.get_task.not_found") - logger.info("No task fetched") + logger.debug("No task fetched") return None metrics.incr("taskworker.worker.get_task.success") @@ -167,6 +192,7 @@ def process_task(self, activation: TaskActivation) -> TaskActivation | None: return self.client.update_task( task_id=activation.id, status=TASK_ACTIVATION_STATUS_FAILURE, + fetch_next_task=FetchNextTask(namespace=self._namespace), ) if task.at_most_once: @@ -187,17 +213,11 @@ def process_task(self, activation: TaskActivation) -> TaskActivation | None: result = None execution_start_time = 0.0 try: - task_data_parameters = orjson.loads(activation.parameters) execution_start_time = time.time() result = self._pool.apply_async( func=_process_activation, - args=( - activation.namespace, - activation.taskname, - task_data_parameters["args"], - task_data_parameters["kwargs"], - ), + args=(activation,), ) # Will trigger a TimeoutError if the task execution runs long result.get(timeout=processing_timeout) @@ -260,4 +280,5 @@ def process_task(self, activation: TaskActivation) -> TaskActivation | None: return self.client.update_task( task_id=activation.id, status=next_state, + fetch_next_task=FetchNextTask(namespace=self._namespace), ) diff --git a/static/app/views/performance/settings.ts b/src/sentry/tempest/__init__.py similarity index 100% rename from static/app/views/performance/settings.ts rename to src/sentry/tempest/__init__.py diff --git a/src/sentry/tempest/apps.py b/src/sentry/tempest/apps.py new file mode 100644 index 00000000000000..9dbe005be9753f --- /dev/null +++ b/src/sentry/tempest/apps.py @@ -0,0 +1,5 @@ +from django.apps import AppConfig + + +class TempestConfig(AppConfig): + name = "sentry.tempest" diff --git a/src/sentry/tempest/endpoints/tempest_credentials.py b/src/sentry/tempest/endpoints/tempest_credentials.py new file mode 100644 index 00000000000000..34c05de20e5665 --- /dev/null +++ b/src/sentry/tempest/endpoints/tempest_credentials.py @@ -0,0 +1,58 @@ +from django.db import IntegrityError +from rest_framework.exceptions import NotFound +from rest_framework.request import Request +from rest_framework.response import Response + +from sentry import features +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.base import region_silo_endpoint +from sentry.api.bases import ProjectEndpoint +from sentry.api.paginator import OffsetPaginator +from sentry.api.serializers.base import serialize +from sentry.models.project import Project +from sentry.tempest.models import TempestCredentials +from sentry.tempest.permissions import TempestCredentialsPermission +from sentry.tempest.serializers import DRFTempestCredentialsSerializer, TempestCredentialsSerializer + + +@region_silo_endpoint +class TempestCredentialsEndpoint(ProjectEndpoint): + publish_status = { + "GET": ApiPublishStatus.PRIVATE, + "POST": ApiPublishStatus.PRIVATE, + } + owner = ApiOwner.GDX + + permission_classes = (TempestCredentialsPermission,) + + def has_feature(self, request: Request, project: Project) -> bool: + return features.has( + "organizations:tempest-access", project.organization, actor=request.user + ) + + def get(self, request: Request, project: Project) -> Response: + if not self.has_feature(request, project): + raise NotFound + + tempest_credentials_qs = TempestCredentials.objects.filter(project=project) + return self.paginate( + 
request=request, + queryset=tempest_credentials_qs, + on_results=lambda x: serialize(x, request.user, TempestCredentialsSerializer()), + paginator_cls=OffsetPaginator, + ) + + def post(self, request: Request, project: Project) -> Response: + if not self.has_feature(request, project): + raise NotFound + + serializer = DRFTempestCredentialsSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + try: + serializer.save(created_by_id=request.user.id, project=project) + except IntegrityError: + return Response( + {"detail": "A credential with this client ID already exists."}, status=400 + ) + return Response(serializer.data, status=201) diff --git a/src/sentry/tempest/endpoints/tempest_credentials_details.py b/src/sentry/tempest/endpoints/tempest_credentials_details.py new file mode 100644 index 00000000000000..abebd916ae8071 --- /dev/null +++ b/src/sentry/tempest/endpoints/tempest_credentials_details.py @@ -0,0 +1,34 @@ +from rest_framework.exceptions import NotFound +from rest_framework.request import Request +from rest_framework.response import Response + +from sentry import features +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.base import region_silo_endpoint +from sentry.api.bases import ProjectEndpoint +from sentry.models.project import Project +from sentry.tempest.models import TempestCredentials +from sentry.tempest.permissions import TempestCredentialsPermission + + +@region_silo_endpoint +class TempestCredentialsDetailsEndpoint(ProjectEndpoint): + publish_status = { + "DELETE": ApiPublishStatus.PRIVATE, + } + owner = ApiOwner.GDX + + permission_classes = (TempestCredentialsPermission,) + + def has_feature(self, request: Request, project: Project) -> bool: + return features.has( + "organizations:tempest-access", project.organization, actor=request.user + ) + + def delete(self, request: Request, project: Project, tempest_credentials_id: int) -> Response: + if not self.has_feature(request, project): + raise NotFound + + TempestCredentials.objects.filter(project=project, id=tempest_credentials_id).delete() + return Response(status=204) diff --git a/src/sentry/tempest/migrations/0001_create_tempest_credentials_model.py b/src/sentry/tempest/migrations/0001_create_tempest_credentials_model.py new file mode 100644 index 00000000000000..e528576fa44d1c --- /dev/null +++ b/src/sentry/tempest/migrations/0001_create_tempest_credentials_model.py @@ -0,0 +1,72 @@ +# Generated by Django 5.1.4 on 2024-12-17 00:21 + +import django.db.models.deletion +from django.db import migrations, models + +import sentry.db.models.fields.bounded +import sentry.db.models.fields.foreignkey +import sentry.db.models.fields.hybrid_cloud_foreign_key +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. 
Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + initial = True + + dependencies = [ + ("sentry", "0802_remove_grouping_auto_update_option"), + ] + + operations = [ + migrations.CreateModel( + name="TempestCredentials", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ( + "created_by_id", + sentry.db.models.fields.hybrid_cloud_foreign_key.HybridCloudForeignKey( + "sentry.User", db_index=True, null=True, on_delete="SET_NULL" + ), + ), + ("message", models.TextField()), + ("message_type", models.CharField(default="error", max_length=20)), + ("client_id", models.CharField()), + ("client_secret", models.CharField()), + ("latest_fetched_item_id", models.CharField(null=True)), + ( + "project", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="sentry.project" + ), + ), + ], + options={ + "constraints": [ + models.UniqueConstraint( + fields=("client_id", "project"), + name="sentry_tempestcredentials_client_project_uniq", + ) + ], + }, + ), + ] diff --git a/tests/sentry/hybridcloud/test_organization_provisioning.py b/src/sentry/tempest/migrations/__init__.py similarity index 100% rename from tests/sentry/hybridcloud/test_organization_provisioning.py rename to src/sentry/tempest/migrations/__init__.py diff --git a/src/sentry/tempest/models.py b/src/sentry/tempest/models.py new file mode 100644 index 00000000000000..63e019ea5a59e7 --- /dev/null +++ b/src/sentry/tempest/models.py @@ -0,0 +1,44 @@ +from django.conf import settings +from django.db import models +from django.db.models import UniqueConstraint + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model +from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey + + +class MessageType(models.TextChoices): + ERROR = "error" + WARNING = "warning" + SUCCESS = "success" + INFO = "info" + + +@region_silo_model +class TempestCredentials(DefaultFieldsModel): + # Contains sensitive information which we don't want to export/import - it should be configured again manually + __relocation_scope__ = RelocationScope.Excluded + + created_by_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL") + project = FlexibleForeignKey("sentry.Project", on_delete=models.CASCADE) + + # message that is shown next to the client id/secret pair + # used to communicate the status of the latest actions with credentials + message = models.TextField() + message_type = models.CharField( + max_length=20, choices=MessageType.choices, default=MessageType.ERROR + ) + + client_id = models.CharField() + client_secret = models.CharField() + + # id of the latest item fetched via tempest + latest_fetched_item_id = models.CharField(null=True) + + class Meta: + constraints = [ + UniqueConstraint( + fields=["client_id", "project"], + name="sentry_tempestcredentials_client_project_uniq", + ) + ] diff --git a/src/sentry/tempest/permissions.py 
b/src/sentry/tempest/permissions.py new file mode 100644 index 00000000000000..9e64eee7f0628e --- /dev/null +++ b/src/sentry/tempest/permissions.py @@ -0,0 +1,16 @@ +from sentry.api.bases.project import ProjectPermission + + +class TempestCredentialsPermission(ProjectPermission): + scope_map = { + "GET": [ + "project:read", + "project:write", + "project:admin", + "org:read", + "org:write", + "org:admin", + ], + "POST": ["org:admin"], + "DELETE": ["org:admin"], + } diff --git a/src/sentry/tempest/serializers.py b/src/sentry/tempest/serializers.py new file mode 100644 index 00000000000000..625e636be2de86 --- /dev/null +++ b/src/sentry/tempest/serializers.py @@ -0,0 +1,69 @@ +from rest_framework import serializers + +from sentry.api.serializers.base import Serializer, register +from sentry.tempest.models import TempestCredentials +from sentry.users.services.user.service import user_service + + +@register(TempestCredentials) +class TempestCredentialsSerializer(Serializer): + def _obfuscate_client_secret(self, client_secret: str) -> str: + return "*" * len(client_secret) + + def get_attrs( + self, + item_list, + user, + **kwargs, + ): + users_mapping = {} + user_ids = [item.created_by_id for item in item_list if item.created_by_id is not None] + users = user_service.get_many_by_id(ids=user_ids) + for rpc_user in users: + users_mapping[rpc_user.id] = rpc_user + + attrs = {} + for item in item_list: + attrs[item] = users_mapping.get(item.created_by_id) + + return attrs + + def serialize(self, obj, attrs, user, **kwargs): + rpc_user = attrs + return { + "id": obj.id, + "clientId": obj.client_id, + "clientSecret": self._obfuscate_client_secret(obj.client_secret), + "message": obj.message, + "messageType": obj.message_type, + "latestFetchedItemId": obj.latest_fetched_item_id, + "createdById": obj.created_by_id, + "createdByEmail": rpc_user.email if rpc_user else None, + "dateAdded": obj.date_added, + "dateUpdated": obj.date_updated, + } + + +class DRFTempestCredentialsSerializer(serializers.ModelSerializer): + clientId = serializers.CharField(source="client_id") + clientSecret = serializers.CharField(source="client_secret") + message = serializers.CharField(read_only=True) + messageType = serializers.CharField(source="message_type", read_only=True) + latestFetchedItemId = serializers.CharField(source="latest_fetched_item_id", read_only=True) + createdById = serializers.CharField(source="created_by_id", read_only=True) + dateAdded = serializers.DateTimeField(source="date_added", read_only=True) + dateUpdated = serializers.DateTimeField(source="date_updated", read_only=True) + + class Meta: + model = TempestCredentials + fields = [ + "id", + "clientId", + "clientSecret", + "message", + "messageType", + "latestFetchedItemId", + "createdById", + "dateAdded", + "dateUpdated", + ] diff --git a/src/sentry/templates/sentry/base-react.html b/src/sentry/templates/sentry/base-react.html index d8c0ffd686f3b1..ab3e218318e48a 100644 --- a/src/sentry/templates/sentry/base-react.html +++ b/src/sentry/templates/sentry/base-react.html @@ -18,3 +18,5 @@ {% endblock %} + +{% block wrapperclass %}{{ user_theme }}{% endblock %} diff --git a/src/sentry/templates/sentry/bases/forceauth_modal.html b/src/sentry/templates/sentry/bases/forceauth_modal.html deleted file mode 100644 index a9adbebfe622a8..00000000000000 --- a/src/sentry/templates/sentry/bases/forceauth_modal.html +++ /dev/null @@ -1,11 +0,0 @@ -{% extends "sentry/layout.html" %} - -{% load i18n %} - -{% block wrapperclass %}{{ block.super }} narrow hide-sidebar{% 
endblock %} - -{% block content %} - Account: {{ request.user.get_display_name }} -
- {% block main %}{% endblock %} -{% endblock %} diff --git a/src/sentry/templates/sentry/emails/mfa-too-many-attempts.html b/src/sentry/templates/sentry/emails/mfa-too-many-attempts.html index a1b36df88d2900..8a17b70a2e02ea 100644 --- a/src/sentry/templates/sentry/emails/mfa-too-many-attempts.html +++ b/src/sentry/templates/sentry/emails/mfa-too-many-attempts.html @@ -13,4 +13,5 @@

Suspicious Activity Detected
Date: {{ datetime|date:"N j, Y, P e" }}
If you have lost your 2FA credentials, you can follow our account recovery steps here.
If these logins are not from you, we recommend you log in to your Sentry account and reset your password under your account security settings. On the same account security page, we also recommend you click the “Sign out of all devices” button to remove all currently logged-in sessions of your account.
+ If you are unable to log in to your Sentry account for the password reset, you can use Password Recovery.
{% endblock %} diff --git a/src/sentry/templates/sentry/emails/mfa-too-many-attempts.txt b/src/sentry/templates/sentry/emails/mfa-too-many-attempts.txt index 7ec831bc6473f2..192300b5ef7543 100644 --- a/src/sentry/templates/sentry/emails/mfa-too-many-attempts.txt +++ b/src/sentry/templates/sentry/emails/mfa-too-many-attempts.txt @@ -13,3 +13,6 @@ If these logins are not from you, we recommend you log in to your Sentry account {{ url }} On the same account security page, we also recommend you click the “Sign out of all devices” button to remove all currently logged-in sessions of your account. + +If you are unable to log in to your Sentry account for the password reset, you can use Password Recovery: +{{ recover_url }} diff --git a/src/sentry/templates/sentry/integrations/discord/linked.html b/src/sentry/templates/sentry/integrations/discord/linked.html index 4ec0bd00bcd2d5..5a834df64dff19 100644 --- a/src/sentry/templates/sentry/integrations/discord/linked.html +++ b/src/sentry/templates/sentry/integrations/discord/linked.html @@ -1,14 +1,16 @@ {% extends "sentry/bases/modal.html" %} - {% load i18n %} - {% block title %}{% trans "Discord Linked" %} | {{ block.super }}{% endblock %} {% block wrapperclass %}narrow auth{% endblock %} - {% block main %}

{% trans "Your Discord account has been associated with your Sentry account. You may now take Sentry actions through Discord." %}

+

+ +

{% endblock %} diff --git a/src/sentry/templates/sentry/partial/alerts.html b/src/sentry/templates/sentry/partial/alerts.html index 2cf4bf1be3ecee..7a41552430bb52 100644 --- a/src/sentry/templates/sentry/partial/alerts.html +++ b/src/sentry/templates/sentry/partial/alerts.html @@ -77,7 +77,7 @@ {% elif show_login_banner %}
- Join our workshop to for a live demo of Sentry's new AI tools on Dec. 10.  RSVP + Join our live workshop: Smarter Tools and Best Practices for Mobile Debugging on Jan 14.  RSVP
{% endif %} diff --git a/src/sentry/templates/sentry/toolbar/iframe.html b/src/sentry/templates/sentry/toolbar/iframe.html index f35185ab3311a1..a488d33d093668 100644 --- a/src/sentry/templates/sentry/toolbar/iframe.html +++ b/src/sentry/templates/sentry/toolbar/iframe.html @@ -1,11 +1,13 @@ {% comment %} Template returned for requests to /iframe. The iframe serves as a proxy for Sentry API requests. Required context variables: -- referrer: string. HTTP header from the request object. -- state: string. One of: `logged-out`, `missing-project`, `invalid-domain` or `success`. +- referrer: string. HTTP header from the request object. May have trailing `/`. +- state: string. One of: `logged-out`, `missing-project`, `invalid-domain` or `logged-in`. - logging: any. If the value is truthy in JavaScript then debug logging will be enabled. - organization_slug: string. The org named in the url params - project_id_or_slug: string | int. The project named in the url params +- organizationUrl: string. Result of generate_organization_url() +- regionUrl: string. Result of generate_region_url() {% endcomment %} {% load sentry_helpers %} {% load sentry_assets %} @@ -24,6 +26,11 @@ const logging = '{{ logging|escapejs }}'; const organizationSlug = '{{ organization_slug|escapejs }}'; const projectIdOrSlug = '{{ project_id_or_slug|escapejs }}'; + const organizationUrl = '{{ organization_url|escapejs }}'; + const regionUrl = '{{ region_url|escapejs }}'; + + // Strip the trailing `/` from the url + const referrerOrigin = new URL(referrer).origin; function log(...args) { if (logging) { @@ -31,145 +38,161 @@ } } - function requestAuthn(delay_ms) { - const origin = window.location.origin.endsWith('.sentry.io') - ? 'https://sentry.io' - : window.location.origin; - - window.open( - `${origin}/toolbar/${organizationSlug}/${projectIdOrSlug}/login-success/?delay=${delay_ms ?? '0'}`, - 'sentry-toolbar-auth-popup', - 'popup=true,innerWidth=800,innerHeight=550,noopener=false' - ); - } - /** - * This should only be called on pageload, which is when the server has - * checked for auth, project validity, and domain config first. - * - * Also to be called when we clear auth tokens. + * This is called on pageload, and whenever login tokens are cleared. 
+ * Pageload when the server has checked for auth, project validity, and + * domain config first, so we can trust a state that is elevated above logged-out */ - function sendStateMessage(state) { - log('sendStateMessage(state)', { state }); - window.parent.postMessage({ - source: 'sentry-toolbar', - message: state - }, referrer); + function postStateMessage(state) { + log('parent.postMessage()', { state, referrerOrigin }); + window.parent.postMessage({ source: 'sentry-toolbar', message: state }, referrerOrigin); } - function listenForLoginSuccess() { - window.addEventListener('message', messageEvent => { - if (messageEvent.origin !== document.location.origin || messageEvent.data.source !== 'sentry-toolbar') { - return; - } - - log('window.onMessage', messageEvent.data, messageEvent); - if (messageEvent.data.message === 'did-login') { - saveAccessToken(messageEvent.data); - window.location.reload(); - } - }); + function handleLoginWindowMessage(messageEvent) { + handleWindowMessage(messageEvent, document.location.origin, loginWindowMessageDispatch); } - function getCookieValue(cookie) { - return `${cookie}; domain=${window.location.hostname}; path=/; max-age=31536000; SameSite=none; partitioned; secure`; + function handleParentWindowMessage(messageEvent) { + handleWindowMessage(messageEvent, referrerOrigin, parentWindowMessageDispatch); } - function saveAccessToken(data) { - log('saveAccessToken', data) - if (data.cookie) { - document.cookie = getCookieValue(data.cookie); - log('Saved a cookie', document.cookie.indexOf(data.cookie) >= 0); - } - if (data.token) { - localStorage.setItem('accessToken', data.token); - log('Saved an accessToken to localStorage'); + function handleWindowMessage(messageEvent, requiredOrigin, dispatch) { + const isValidOrigin = messageEvent.origin === requiredOrigin; + if (!isValidOrigin) { + return; } - if (!data.cookie && !data.token) { - log('Unexpected: No access token found!'); + log('window.onMessage', messageEvent); + const { message, source } = messageEvent.data; + if (source !== 'sentry-toolbar' || !message || !(Object.hasOwn(dispatch, message))) { + return; } + dispatch[message].call(undefined, messageEvent.data); } - function clearAuthn() { - document.cookie = getCookieValue(document.cookie.split('=').at(0) + '='); - log('Cleared the current cookie'); - const accessToken = localStorage.removeItem('accessToken') - log('Removed accessToken from localStorage'); - - sendStateMessage('logged-out'); - } - - async function fetchProxy(url, init) { - // If we have an accessToken lets use it. Otherwise we presume a cookie will be set. - const accessToken = localStorage.getItem('accessToken'); - const bearer = accessToken ? { 'Authorization': `Bearer ${accessToken}` } : {}; - - // If either of these is invalid, or both are missing, we will - // forward the resulting 401 to the application, which will request - // tokens be destroyed and reload the iframe in an unauth state. 
- log('Has access info', { cookie: Boolean(document.cookie), accessToken: Boolean(accessToken) }); - - const initWithCreds = { - ...init, - headers: { ...init.headers, ...bearer }, - credentials: 'same-origin', - }; - log({ initWithCreds }); - - const response = await fetch(url, initWithCreds); - return { - ok: response.ok, - status: response.status, - statusText: response.statusText, - url: response.url, - headers: Object.fromEntries(response.headers.entries()), - text: await response.text(), - }; - } - - function setupMessageChannel() { + function getMessagePort() { log('setupMessageChannel()'); const { port1, port2 } = new MessageChannel(); - const messageDispatch = { - 'log': log, - 'request-authn': requestAuthn, - 'clear-authn': clearAuthn, - 'fetch': fetchProxy, - }; - - port1.addEventListener('message', messageEvent => { + const handlePortMessage = (messageEvent) => { log('port.onMessage', messageEvent.data); const { $id, message } = messageEvent.data; - if (!$id) { - return; // MessageEvent is malformed, missing $id - } - - if (!message.$function || !(Object.hasOwn(messageDispatch, message.$function))) { - return; // No-op without a $function to call + if (!$id || !message.$function || !(Object.hasOwn(postMessageDispatch, message.$function))) { + return; } - Promise.resolve(messageDispatch[message.$function] + Promise.resolve(postMessageDispatch[message.$function] .apply(undefined, message.$args || [])) .then($result => port1.postMessage({ $id, $result })) .catch(error => port1.postMessage({ $id, $error: error })); - }); + }; + + port1.addEventListener('message', handlePortMessage); port1.start(); - window.parent.postMessage({ - source: 'sentry-toolbar', - message: 'port-connect', - }, referrer, [port2]); + return port2; + } - log('Sent', { message: 'port-connect', referrer }); + function getCookieValue(cookie, domain) { + return `${cookie}; domain=${domain}; path=/; max-age=31536000; SameSite=none; partitioned; secure`; } - log('Init', { referrer, state }); + const loginWindowMessageDispatch = { + 'did-login': ({ cookie, token }) => { + if (cookie) { + document.cookie = getCookieValue(cookie, window.location.hostname); + log('Saved a cookie', document.cookie.indexOf(cookie) >= 0); + } + if (token) { + localStorage.setItem('accessToken', token); + log('Saved an accessToken to localStorage'); + } + if (!cookie && !token) { + log('Unexpected: No access token found!'); + } + + postStateMessage('stale'); + }, + }; + + const parentWindowMessageDispatch = { + 'request-login': ({ delay_ms }) => { + const origin = window.location.origin.endsWith('.sentry.io') + ? 'https://sentry.io' + : window.location.origin; + + window.open( + `${origin}/toolbar/${organizationSlug}/${projectIdOrSlug}/login-success/?delay=${delay_ms ?? '0'}`, + 'sentry-toolbar-auth-popup', + 'popup=true,innerWidth=800,innerHeight=550,noopener=false' + ); + log('Opened /login-success/', { delay_ms }); + }, + + 'request-logout': () => { + const cookie = document.cookie.split('=').at(0) + '='; + document.cookie = getCookieValue(cookie, window.location.hostname); + document.cookie = getCookieValue(cookie, regionUrl); + log('Cleared the current cookie'); + + const accessToken = localStorage.removeItem('accessToken') + log('Removed accessToken from localStorage'); + + postStateMessage('stale'); + }, + }; + + const postMessageDispatch = { + 'log': log, + + 'fetch': async (path, init) => { + // If we have an accessToken lets use it. Otherwise we presume a cookie will be set. 
+ const accessToken = localStorage.getItem('accessToken'); + const bearer = accessToken ? { 'Authorization': `Bearer ${accessToken}` } : {}; + + // If either of these is invalid, or both are missing, we will + // forward the resulting 401 to the application, which will request + // tokens be destroyed and reload the iframe in an unauth state. + log('Has access info', { cookie: Boolean(document.cookie), accessToken: Boolean(accessToken) }); + + const url = new URL('/api/0' + path, organizationUrl); + const initWithCreds = { + ...init, + headers: { ...init.headers, ...bearer }, + credentials: 'include', + }; + const response = await fetch(url, initWithCreds); + return { + ok: response.ok, + status: response.status, + statusText: response.statusText, + url: response.url, + headers: Object.fromEntries(response.headers.entries()), + text: await response.text(), + }; + }, + }; + + log('Init', { referrerOrigin, state }); + + if (state === 'logged-out') { + const cookie = document.cookie.split('=').at(0) + '='; + document.cookie = getCookieValue(cookie, window.location.hostname); + document.cookie = getCookieValue(cookie, regionUrl); + } - setupMessageChannel(); - listenForLoginSuccess(); - sendStateMessage(state); + window.addEventListener('message', handleLoginWindowMessage); + window.addEventListener('message', handleParentWindowMessage); + postStateMessage(state); + + if (state === 'logged-in') { + const port = getMessagePort(); + window.parent.postMessage({ + source: 'sentry-toolbar', + message: 'port-connect', + }, referrerOrigin, [port]); + log('parent.postMessage()', { message: 'port-connect', referrerOrigin }); + } })(); {% endscript %} diff --git a/src/sentry/testutils/asserts.py b/src/sentry/testutils/asserts.py index 062fd52e90a127..59b8c64e243235 100644 --- a/src/sentry/testutils/asserts.py +++ b/src/sentry/testutils/asserts.py @@ -55,14 +55,6 @@ def assert_org_audit_log_exists(**kwargs): assert org_audit_log_exists(**kwargs) -def assert_org_audit_log_does_not_exist(**kwargs): - assert not org_audit_log_exists(**kwargs) - - -def delete_all_org_audit_logs(): - return AuditLogEntry.objects.all().delete() - - """ Helper functions to assert integration SLO metrics """ @@ -93,3 +85,12 @@ def assert_success_metric(mock_record): call for call in mock_record.mock_calls if call.args[0] == EventLifecycleOutcome.SUCCESS ) assert event_success + + +def assert_slo_metric( + mock_record, event_outcome: EventLifecycleOutcome = EventLifecycleOutcome.SUCCESS +): + assert len(mock_record.mock_calls) == 2 + start, end = mock_record.mock_calls + assert start.args[0] == EventLifecycleOutcome.STARTED + assert end.args[0] == event_outcome diff --git a/src/sentry/testutils/cases.py b/src/sentry/testutils/cases.py index a5404c1e9d82a7..dee1be41de057b 100644 --- a/src/sentry/testutils/cases.py +++ b/src/sentry/testutils/cases.py @@ -27,14 +27,13 @@ from django.contrib.auth.models import AnonymousUser from django.core import signing from django.core.cache import cache -from django.db import DEFAULT_DB_ALIAS, connection, connections +from django.db import connection, connections from django.db.migrations.executor import MigrationExecutor from django.http import HttpRequest from django.test import RequestFactory from django.test import TestCase as DjangoTestCase from django.test import TransactionTestCase as DjangoTransactionTestCase from django.test import override_settings -from django.test.utils import CaptureQueriesContext from django.urls import resolve, reverse from django.utils import timezone from 
django.utils.functional import cached_property @@ -90,11 +89,7 @@ from sentry.models.commit import Commit from sentry.models.commitauthor import CommitAuthor from sentry.models.dashboard import Dashboard -from sentry.models.dashboard_widget import ( - DashboardWidget, - DashboardWidgetDisplayTypes, - DashboardWidgetQuery, -) +from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetDisplayTypes from sentry.models.deletedorganization import DeletedOrganization from sentry.models.deploy import Deploy from sentry.models.environment import Environment @@ -118,7 +113,6 @@ from sentry.projects.project_rules.creator import ProjectRuleCreator from sentry.replays.lib.event_linking import transform_event_for_linking_payload from sentry.replays.models import ReplayRecordingSegment -from sentry.rules.base import RuleBase from sentry.search.events.constants import ( METRIC_FRUSTRATED_TAG_VALUE, METRIC_SATISFACTION_TAG_KEY, @@ -138,11 +132,10 @@ from sentry.snuba.metrics.naming_layer.public import TransactionMetricKey from sentry.tagstore.snuba.backend import SnubaTagStorage from sentry.testutils.factories import get_fixture_path -from sentry.testutils.helpers.datetime import before_now, iso_format +from sentry.testutils.helpers.datetime import before_now from sentry.testutils.helpers.notifications import TEST_ISSUE_OCCURRENCE from sentry.testutils.helpers.slack import install_slack from sentry.testutils.pytest.selenium import Browser -from sentry.types.condition_activity import ConditionActivity, ConditionActivityType from sentry.users.models.identity import Identity, IdentityProvider, IdentityStatus from sentry.users.models.user import User from sentry.users.models.user_option import UserOption @@ -168,7 +161,7 @@ from .asserts import assert_status_code from .factories import Factories from .fixtures import Fixtures -from .helpers import AuthProvider, Feature, TaskRunner, override_options, parse_queries +from .helpers import AuthProvider, Feature, TaskRunner, override_options from .silo import assume_test_silo_mode from .skips import requires_snuba @@ -215,11 +208,6 @@ class BaseTestCase(Fixtures): - def assertRequiresAuthentication(self, path, method="GET"): - resp = getattr(self.client, method.lower())(path) - assert resp.status_code == 302 - assert resp["Location"].startswith("http://testserver" + reverse("sentry-login")) - @pytest.fixture(autouse=True) def setup_dummy_auth_provider(self): auth.register("dummy", DummyProvider) @@ -431,18 +419,6 @@ def assert_valid_deleted_log(self, deleted_log, original_object): assert deleted_log.date_created == original_object.date_added assert deleted_log.date_deleted >= deleted_log.date_created - def assertWriteQueries(self, queries, debug=False, *args, **kwargs): - func = kwargs.pop("func", None) - using = kwargs.pop("using", DEFAULT_DB_ALIAS) - conn = connections[using] - - context = _AssertQueriesContext(self, queries, debug, conn) - if func is None: - return context - - with context: - func(*args, **kwargs) - def get_mock_uuid(self): class uuid: hex = "abc123" @@ -451,52 +427,6 @@ class uuid: return uuid -class _AssertQueriesContext(CaptureQueriesContext): - def __init__(self, test_case, queries, debug, connection): - self.test_case = test_case - self.queries = queries - self.debug = debug - super().__init__(connection) - - def __exit__(self, exc_type, exc_value, traceback): - super().__exit__(exc_type, exc_value, traceback) - if exc_type is not None: - return - - parsed_queries = parse_queries(self.captured_queries) - - if self.debug: - 
import pprint - - pprint.pprint("====================== Raw Queries ======================") - pprint.pprint(self.captured_queries) - pprint.pprint("====================== Table writes ======================") - pprint.pprint(parsed_queries) - - for table, num in parsed_queries.items(): - expected = self.queries.get(table, 0) - if expected == 0: - import pprint - - pprint.pprint( - "WARNING: no query against %s emitted, add debug=True to see all the queries" - % (table) - ) - else: - self.test_case.assertTrue( - num == expected, - "%d write queries expected on `%s`, got %d, add debug=True to see all the queries" - % (expected, table, num), - ) - - for table, num in self.queries.items(): - executed = parsed_queries.get(table, None) - self.test_case.assertFalse( - executed is None, - "no query against %s emitted, add debug=True to see all the queries" % (table), - ) - - @override_settings(ROOT_URLCONF="sentry.web.urls") class TestCase(BaseTestCase, DjangoTestCase): # We need Django to flush all databases. @@ -952,24 +882,6 @@ def get_state(self, **kwargs): kwargs.setdefault("has_escalated", False) return EventState(**kwargs) - def get_condition_activity(self, **kwargs) -> ConditionActivity: - kwargs.setdefault("group_id", self.event.group.id) - kwargs.setdefault("type", ConditionActivityType.CREATE_ISSUE) - kwargs.setdefault("timestamp", self.event.datetime) - return ConditionActivity(**kwargs) - - def passes_activity( - self, - rule: RuleBase, - condition_activity: ConditionActivity | None = None, - event_map: dict[str, Any] | None = None, - ): - if condition_activity is None: - condition_activity = self.get_condition_activity() - if event_map is None: - event_map = {} - return rule.passes_activity(condition_activity, event_map) - def assertPasses(self, rule, event=None, **kwargs): if event is None: event = self.event @@ -1026,33 +938,9 @@ def assert_member_can_access(self, path, **kwargs): def assert_manager_can_access(self, path, **kwargs): return self.assert_role_can_access(path, "manager", **kwargs) - def assert_teamless_member_can_access(self, path, **kwargs): - user = self.create_user(is_superuser=False) - self.create_member(user=user, organization=self.organization, role="member", teams=[]) - - self.assert_can_access(user, path, **kwargs) - def assert_member_cannot_access(self, path, **kwargs): return self.assert_role_cannot_access(path, "member", **kwargs) - def assert_manager_cannot_access(self, path, **kwargs): - return self.assert_role_cannot_access(path, "manager", **kwargs) - - def assert_teamless_member_cannot_access(self, path, **kwargs): - user = self.create_user(is_superuser=False) - self.create_member(user=user, organization=self.organization, role="member", teams=[]) - - self.assert_cannot_access(user, path, **kwargs) - - def assert_team_admin_can_access(self, path, **kwargs): - return self.assert_role_can_access(path, "admin", **kwargs) - - def assert_teamless_admin_can_access(self, path, **kwargs): - user = self.create_user(is_superuser=False) - self.create_member(user=user, organization=self.organization, role="admin", teams=[]) - - self.assert_can_access(user, path, **kwargs) - def assert_team_admin_cannot_access(self, path, **kwargs): return self.assert_role_cannot_access(path, "admin", **kwargs) @@ -1062,19 +950,9 @@ def assert_teamless_admin_cannot_access(self, path, **kwargs): self.assert_cannot_access(user, path, **kwargs) - def assert_team_owner_can_access(self, path, **kwargs): - return self.assert_role_can_access(path, "owner", **kwargs) - def 
assert_owner_can_access(self, path, **kwargs): return self.assert_role_can_access(path, "owner", **kwargs) - def assert_owner_cannot_access(self, path, **kwargs): - return self.assert_role_cannot_access(path, "owner", **kwargs) - - def assert_non_member_cannot_access(self, path, **kwargs): - user = self.create_user(is_superuser=False) - self.assert_cannot_access(user, path, **kwargs) - def assert_role_can_access(self, path, role, **kwargs): user = self.create_user(is_superuser=False) self.create_member(user=user, organization=self.organization, role=role, teams=[self.team]) @@ -1253,20 +1131,6 @@ def setUp(self): def initialize(self, reset_snuba, call_snuba): self.call_snuba = call_snuba - @contextmanager - def disable_snuba_query_cache(self): - self.snuba_update_config({"use_readthrough_query_cache": 0, "use_cache": 0}) - yield - self.snuba_update_config({"use_readthrough_query_cache": None, "use_cache": None}) - - @classmethod - def snuba_get_config(cls): - return _snuba_pool.request("GET", "/config.json").data - - @classmethod - def snuba_update_config(cls, config_vals): - return _snuba_pool.request("POST", "/config.json", body=json.dumps(config_vals)) - def create_project(self, **kwargs) -> Project: if "flags" not in kwargs: # We insert events directly into snuba in tests, so we need to set has_transactions to True so the @@ -1368,16 +1232,6 @@ def store_group(self, group): == 200 ) - def store_outcome(self, group): - data = [self.__wrap_group(group)] - assert ( - requests.post( - settings.SENTRY_SNUBA + "/tests/entities/outcomes/insert", - data=json.dumps(data), - ).status_code - == 200 - ) - def store_span(self, span, is_eap=False): span["ingest_in_eap"] = is_eap assert ( @@ -2272,7 +2126,7 @@ def create_event(self, timestamp, fingerprint=None, user=None): data = { "event_id": event_id, "fingerprint": [fingerprint], - "timestamp": iso_format(timestamp), + "timestamp": timestamp.isoformat(), "type": "error", # This is necessary because event type error should not exist without # an exception being in the payload @@ -2677,32 +2531,6 @@ def do_request(self, method, url, data=None): func = getattr(self.client, method) return func(url, data=data) - def assert_widget_queries(self, widget_id, data): - result_queries = DashboardWidgetQuery.objects.filter(widget_id=widget_id).order_by("order") - for ds, expected_ds in zip(result_queries, data): - assert ds.name == expected_ds["name"] - assert ds.fields == expected_ds["fields"] - assert ds.conditions == expected_ds["conditions"] - - def assert_widget(self, widget, order, title, display_type, queries=None): - assert widget.order == order - assert widget.display_type == display_type - assert widget.title == title - - if not queries: - return - - self.assert_widget_queries(widget.id, queries) - - def assert_widget_data(self, data, title, display_type, queries=None): - assert data["displayType"] == display_type - assert data["title"] == title - - if not queries: - return - - self.assert_widget_queries(data["id"], queries) - def assert_serialized_widget_query(self, data, widget_data_source): if "id" in data: assert data["id"] == str(widget_data_source.id) @@ -2968,50 +2796,6 @@ def mock_chat_postMessage(self): ) as self.mock_post: yield - def assert_performance_issue_attachments( - self, attachment, project_slug, referrer, alert_type="workflow" - ): - assert "N+1 Query" in attachment["text"] - assert ( - "db - SELECT `books_author`.`id`, `books_author`.`name` FROM `books_author` WHERE `books_author`.`id` = %s LIMIT 21" - in 
attachment["blocks"][1]["text"]["text"] - ) - title_link = attachment["blocks"][0]["text"]["text"][13:][1:-1] - notification_uuid = self.get_notification_uuid(title_link) - assert ( - attachment["blocks"][-2]["elements"][0]["text"] - == f"{project_slug} | production | " - ) - - def assert_performance_issue_blocks( - self, - blocks, - org: Organization, - project_slug: str, - group, - referrer, - alert_type: FineTuningAPIKey = FineTuningAPIKey.WORKFLOW, - issue_link_extra_params=None, - ): - notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"]) - issue_link = f"http://testserver/organizations/{org.slug}/issues/{group.id}/?referrer={referrer}¬ification_uuid={notification_uuid}" - if issue_link_extra_params is not None: - issue_link += issue_link_extra_params - assert ( - blocks[1]["text"]["text"] - == f":large_blue_circle: :chart_with_upwards_trend: <{issue_link}|*N+1 Query*>" - ) - assert ( - blocks[2]["text"]["text"] - == "```db - SELECT `books_author`.`id`, `books_author`.`name` FROM `books_author` WHERE `books_author`.`id` = %s LIMIT 21```" - ) - assert blocks[3]["elements"][0]["text"] == "State: *New* First Seen: *10\xa0minutes ago*" - optional_org_id = f"&organizationId={org.id}" if alert_page_needs_org_id(alert_type) else "" - assert ( - blocks[4]["elements"][0]["text"] - == f"{project_slug} | production | " - ) - def assert_performance_issue_blocks_with_culprit_blocks( self, blocks, @@ -3042,17 +2826,6 @@ def assert_performance_issue_blocks_with_culprit_blocks( == f"{project_slug} | production | " ) - def assert_generic_issue_attachments( - self, attachment, project_slug, referrer, alert_type="workflow" - ): - assert attachment["title"] == TEST_ISSUE_OCCURRENCE.issue_title - assert attachment["text"] == TEST_ISSUE_OCCURRENCE.evidence_display[0].value - notification_uuid = self.get_notification_uuid(attachment["title_link"]) - assert ( - attachment["footer"] - == f"{project_slug} | " - ) - def assert_generic_issue_blocks( self, blocks, @@ -3615,7 +3388,7 @@ def load_default(self) -> Event: start, _ = self.get_start_end_from_day_ago(1000) return self.store_event( { - "timestamp": iso_format(start), + "timestamp": start.isoformat(), "contexts": { "trace": { "type": "trace", diff --git a/src/sentry/testutils/factories.py b/src/sentry/testutils/factories.py index 96f402a651f2cc..96f198c52f59fa 100644 --- a/src/sentry/testutils/factories.py +++ b/src/sentry/testutils/factories.py @@ -146,6 +146,8 @@ from sentry.silo.base import SiloMode from sentry.snuba.dataset import Dataset from sentry.snuba.models import QuerySubscription, QuerySubscriptionDataSourceHandler +from sentry.tempest.models import MessageType as TempestMessageType +from sentry.tempest.models import TempestCredentials from sentry.testutils.outbox import outbox_runner from sentry.testutils.silo import assume_test_silo_mode from sentry.types.activity import ActivityType @@ -158,6 +160,7 @@ ProjectUptimeSubscriptionMode, UptimeStatus, UptimeSubscription, + UptimeSubscriptionRegion, ) from sentry.users.models.identity import Identity, IdentityProvider, IdentityStatus from sentry.users.models.user import User @@ -604,6 +607,34 @@ def create_slack_project_rule(project, integration_id, channel_id=None, channel_ def create_project_key(project): return project.key_set.get_or_create()[0] + @staticmethod + @assume_test_silo_mode(SiloMode.REGION) + def create_tempest_credentials( + project: Project, + created_by: User | None = None, + client_id: str | None = None, + client_secret: str | None = None, + message: str = 
"", + message_type: str | None = None, + latest_fetched_item_id: str | None = None, + ): + if client_id is None: + client_id = str(uuid4()) + if client_secret is None: + client_secret = str(uuid4()) + if message_type is None: + message_type = TempestMessageType.ERROR + + return TempestCredentials.objects.create( + project=project, + created_by_id=created_by.id if created_by else None, + client_id=client_id, + client_secret=client_secret, + message=message, + message_type=message_type, + latest_fetched_item_id=latest_fetched_item_id, + ) + @staticmethod @assume_test_silo_mode(SiloMode.REGION) def create_release( @@ -1003,6 +1034,9 @@ def store_event( @staticmethod @assume_test_silo_mode(SiloMode.REGION) def create_group(project, **kwargs): + from sentry.models.group import GroupStatus + from sentry.types.group import GroupSubStatus + kwargs.setdefault("message", "Hello world") kwargs.setdefault("data", {}) if "type" not in kwargs["data"]: @@ -1012,6 +1046,10 @@ def create_group(project, **kwargs): if "metadata" in kwargs: metadata = kwargs.pop("metadata") kwargs["data"].setdefault("metadata", {}).update(metadata) + if "status" not in kwargs: + kwargs["status"] = GroupStatus.UNRESOLVED + kwargs["substatus"] = GroupSubStatus.NEW + return Group.objects.create(project=project, **kwargs) @staticmethod @@ -1945,7 +1983,7 @@ def create_uptime_subscription( type: str, subscription_id: str | None, status: UptimeSubscription.Status, - url: str, + url: str | None, url_domain: str, url_domain_suffix: str, host_provider_id: str, @@ -1957,6 +1995,10 @@ def create_uptime_subscription( date_updated: datetime, trace_sampling: bool = False, ): + if url is None: + url = petname.generate().title() + url = f"http://{url}.com" + return UptimeSubscription.objects.create( type=type, subscription_id=subscription_id, @@ -1980,10 +2022,12 @@ def create_project_uptime_subscription( env: Environment | None, uptime_subscription: UptimeSubscription, mode: ProjectUptimeSubscriptionMode, - name: str, + name: str | None, owner: Actor | None, uptime_status: UptimeStatus, ): + if name is None: + name = petname.generate().title() owner_team_id = None owner_user_id = None if owner: @@ -2003,6 +2047,14 @@ def create_project_uptime_subscription( uptime_status=uptime_status, ) + @staticmethod + def create_uptime_subscription_region( + subscription: UptimeSubscription, region_slug: str + ) -> UptimeSubscriptionRegion: + return UptimeSubscriptionRegion.objects.create( + uptime_subscription=subscription, region_slug=region_slug + ) + @staticmethod @assume_test_silo_mode(SiloMode.REGION) def create_dashboard( @@ -2061,13 +2113,18 @@ def create_dashboard_widget_query( def create_workflow( name: str | None = None, organization: Organization | None = None, + config: dict[str, Any] | None = None, **kwargs, ) -> Workflow: if organization is None: organization = Factories.create_organization() if name is None: name = petname.generate(2, " ", letters=10).title() - return Workflow.objects.create(organization=organization, name=name, **kwargs) + if config is None: + config = {} + return Workflow.objects.create( + organization=organization, name=name, config=config, **kwargs + ) @staticmethod @assume_test_silo_mode(SiloMode.REGION) diff --git a/src/sentry/testutils/fixtures.py b/src/sentry/testutils/fixtures.py index bff1a03aa1ec3b..6c6b2c535c750a 100644 --- a/src/sentry/testutils/fixtures.py +++ b/src/sentry/testutils/fixtures.py @@ -12,6 +12,7 @@ from sentry.incidents.models.alert_rule import AlertRule, AlertRuleMonitorTypeInt from 
sentry.integrations.models.integration import Integration from sentry.integrations.models.organization_integration import OrganizationIntegration +from sentry.issues.grouptype import ErrorGroupType from sentry.models.activity import Activity from sentry.models.environment import Environment from sentry.models.grouprelease import GroupRelease @@ -26,8 +27,9 @@ from sentry.organizations.services.organization import RpcOrganization from sentry.silo.base import SiloMode from sentry.snuba.models import QuerySubscription +from sentry.tempest.models import TempestCredentials from sentry.testutils.factories import Factories -from sentry.testutils.helpers.datetime import before_now, iso_format +from sentry.testutils.helpers.datetime import before_now from sentry.testutils.silo import assume_test_silo_mode # XXX(dcramer): this is a compatibility layer to transition to pytest-based fixtures @@ -45,6 +47,7 @@ from sentry.users.models.user import User from sentry.users.services.user import RpcUser from sentry.workflow_engine.models import DataSource, Detector, DetectorState, Workflow +from sentry.workflow_engine.models.data_condition import Condition from sentry.workflow_engine.types import DetectorPriorityLevel @@ -101,7 +104,7 @@ def event(self): data={ "event_id": "a" * 32, "message": "\u3053\u3093\u306b\u3061\u306f", - "timestamp": iso_format(before_now(seconds=1)), + "timestamp": before_now(seconds=1).isoformat(), }, project_id=self.project.id, ) @@ -126,7 +129,7 @@ def integration(self): external_id="github:1", metadata={ "access_token": "xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx", - "expires_at": iso_format(timezone.now() + timedelta(days=14)), + "expires_at": (timezone.now() + timedelta(days=14)).isoformat(), }, ) integration.add_organization(self.organization, self.user) @@ -279,6 +282,9 @@ def create_usersocialauth( def store_event(self, *args, **kwargs) -> Event: return Factories.store_event(*args, **kwargs) + def create_tempest_credentials(self, project: Project, *args, **kwargs) -> TempestCredentials: + return Factories.create_tempest_credentials(project, *args, **kwargs) + def create_group(self, project=None, *args, **kwargs): if project is None: project = self.project @@ -610,9 +616,8 @@ def create_data_source(self, *args, **kwargs) -> DataSource: def create_data_condition( self, - condition="eq", comparison="10", - type="", + type=Condition.EQUAL, condition_result=None, condition_group=None, **kwargs, @@ -623,7 +628,6 @@ def create_data_condition( condition_group = self.create_data_condition_group() return Factories.create_data_condition( - condition=condition, comparison=comparison, type=type, condition_result=condition_result, @@ -635,12 +639,13 @@ def create_detector( self, *args, project=None, + type=ErrorGroupType.slug, **kwargs, ) -> Detector: if project is None: project = self.create_project(organization=self.organization) - return Factories.create_detector(*args, project=project, **kwargs) + return Factories.create_detector(*args, project=project, type=type, **kwargs) def create_detector_state(self, *args, **kwargs) -> DetectorState: return Factories.create_detector_state(*args, **kwargs) @@ -672,7 +677,7 @@ def create_uptime_subscription( type: str = "test", subscription_id: str | None = None, status: UptimeSubscription.Status = UptimeSubscription.Status.ACTIVE, - url="http://sentry.io/", + url: str | None = None, host_provider_id="TEST", url_domain="sentry", url_domain_suffix="io", @@ -683,13 +688,16 @@ def create_uptime_subscription( body=None, date_updated: None | datetime = 
None, trace_sampling: bool = False, + region_slugs: list[str] | None = None, ) -> UptimeSubscription: if date_updated is None: date_updated = timezone.now() if headers is None: headers = [] + if region_slugs is None: + region_slugs = [] - return Factories.create_uptime_subscription( + subscription = Factories.create_uptime_subscription( type=type, subscription_id=subscription_id, status=status, @@ -705,6 +713,10 @@ def create_uptime_subscription( body=body, trace_sampling=trace_sampling, ) + for region_slug in region_slugs: + Factories.create_uptime_subscription_region(subscription, region_slug) + + return subscription def create_project_uptime_subscription( self, @@ -712,7 +724,7 @@ def create_project_uptime_subscription( env: Environment | None = None, uptime_subscription: UptimeSubscription | None = None, mode=ProjectUptimeSubscriptionMode.AUTO_DETECTED_ACTIVE, - name="Test Name", + name: str | None = None, owner: User | Team | None = None, uptime_status=UptimeStatus.OK, ) -> ProjectUptimeSubscription: diff --git a/src/sentry/testutils/helpers/__init__.py b/src/sentry/testutils/helpers/__init__.py index 215689798ba602..1e69c4a8f87230 100644 --- a/src/sentry/testutils/helpers/__init__.py +++ b/src/sentry/testutils/helpers/__init__.py @@ -3,7 +3,6 @@ from .features import * # NOQA from .link_header import * # NOQA from .options import * # NOQA -from .query import * # NOQA from .slack import * # NOQA from .socket import * # NOQA from .task_runner import * # NOQA diff --git a/src/sentry/testutils/helpers/backups.py b/src/sentry/testutils/helpers/backups.py index 76c8c2f48a1585..504ea3bf0a1634 100644 --- a/src/sentry/testutils/helpers/backups.py +++ b/src/sentry/testutils/helpers/backups.py @@ -101,6 +101,7 @@ from sentry.sentry_apps.models.sentry_app import SentryApp from sentry.silo.base import SiloMode from sentry.silo.safety import unguarded_write +from sentry.tempest.models import TempestCredentials from sentry.testutils.cases import TestCase, TransactionTestCase from sentry.testutils.factories import get_fixture_path from sentry.testutils.fixtures import Fixtures @@ -667,18 +668,15 @@ def create_exhaustive_organization( organization=org, ) - send_notification_action = self.create_action(type=Action.Type.NOTIFICATION, data="") + send_notification_action = self.create_action(type=Action.Type.SLACK, data="") self.create_data_condition_group_action( action=send_notification_action, condition_group=notification_condition_group, ) - # TODO @saponifi3d: Update comparison to be DetectorState.Critical data_condition = self.create_data_condition( - condition="eq", - comparison="critical", - type="WorkflowCondition", - condition_result="True", + comparison=75, + condition_result=True, condition_group=notification_condition_group, ) @@ -694,16 +692,13 @@ def create_exhaustive_organization( organization=org, ) - # TODO @saponifi3d: Create or define trigger workflow action type trigger_workflows_action = self.create_action(type=Action.Type.WEBHOOK, data="") self.create_data_condition_group_action( action=trigger_workflows_action, condition_group=detector_conditions ) self.create_data_condition( - condition="eq", - comparison="critical", - type="DetectorCondition", - condition_result="True", + comparison=75, + condition_result=True, condition_group=detector_conditions, ) detector.workflow_condition_group = detector_conditions @@ -714,6 +709,15 @@ def create_exhaustive_organization( alert_rule_trigger=trigger, data_condition=data_condition ) + TempestCredentials.objects.create( + project=project, + 
created_by_id=owner_id, + client_id="test_client_id", + client_secret="test_client_secret", + message="test_message", + latest_fetched_item_id="test_latest_fetched_item_id", + ) + return org @assume_test_silo_mode(SiloMode.CONTROL) diff --git a/src/sentry/testutils/helpers/datetime.py b/src/sentry/testutils/helpers/datetime.py index da1af95d8c5c51..206634b893e024 100644 --- a/src/sentry/testutils/helpers/datetime.py +++ b/src/sentry/testutils/helpers/datetime.py @@ -1,15 +1,10 @@ from __future__ import annotations -import time from datetime import UTC, datetime, timedelta import time_machine -__all__ = ["iso_format", "before_now", "timestamp_format"] - - -def iso_format(date: datetime) -> str: - return date.isoformat()[:19] +__all__ = ["before_now"] def before_now(**kwargs: float) -> datetime: @@ -17,10 +12,6 @@ def before_now(**kwargs: float) -> datetime: return date - timedelta(microseconds=date.microsecond % 1000) -def timestamp_format(datetime): - return time.mktime(datetime.utctimetuple()) + datetime.microsecond / 1e6 - - class MockClock: """Returns a distinct, increasing timestamp each time it is called.""" diff --git a/src/sentry/testutils/helpers/query.py b/src/sentry/testutils/helpers/query.py deleted file mode 100644 index d9e8e9759e3ebb..00000000000000 --- a/src/sentry/testutils/helpers/query.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import annotations - -from typing import Any - -import sqlparse -from sqlparse.tokens import DML - -__all__ = ("parse_queries",) - - -def parse_queries(captured_queries: list[dict[str, Any]]) -> dict[str, int]: - write_ops = ["INSERT", "UPDATE", "DELETE"] - - real_queries: dict[str, int] = {} - - for query in captured_queries: - raw_sql = query["sql"] - parsed = sqlparse.parse(raw_sql) - for token_index, token in enumerate(parsed[0].tokens): - if token.ttype is DML: - if token.value.upper() in write_ops: - for t in parsed[0].tokens[token_index + 1 :]: - if isinstance(t, sqlparse.sql.Identifier): - table_name = t.get_real_name() - if real_queries.get(table_name) is None: - real_queries[table_name] = 0 - real_queries[table_name] += 1 - break - - return real_queries diff --git a/src/sentry/testutils/helpers/redis.py b/src/sentry/testutils/helpers/redis.py index dd41b06dfd4e67..9e4bdc0303c5be 100644 --- a/src/sentry/testutils/helpers/redis.py +++ b/src/sentry/testutils/helpers/redis.py @@ -9,11 +9,8 @@ from sentry.testutils.helpers import override_options -@contextmanager def mock_redis_buffer(): - buffer = RedisBuffer() - with patch("sentry.buffer.backend", new=buffer): - yield buffer + return patch("sentry.buffer.backend", new=RedisBuffer()) @contextmanager diff --git a/src/sentry/testutils/helpers/slack.py b/src/sentry/testutils/helpers/slack.py index 000a0155d50827..bb5d3458c242c0 100644 --- a/src/sentry/testutils/helpers/slack.py +++ b/src/sentry/testutils/helpers/slack.py @@ -57,17 +57,6 @@ def add_identity( return idp -def find_identity(idp: IdentityProvider, user: User) -> Identity | None: - identities = Identity.objects.filter( - idp=idp, - user=user, - status=IdentityStatus.VALID, - ) - if not identities: - return None - return identities[0] - - @assume_test_silo_mode(SiloMode.CONTROL) def link_user(user: User, idp: IdentityProvider, slack_id: str) -> None: Identity.objects.create( diff --git a/src/sentry/testutils/pytest/relay.py b/src/sentry/testutils/pytest/relay.py index 76dff65c2da813..5bff85dcf2cd53 100644 --- a/src/sentry/testutils/pytest/relay.py +++ b/src/sentry/testutils/pytest/relay.py @@ -69,6 +69,7 @@ def 
relay_server_setup(live_server, tmpdir_factory): relay_port = 33331 redis_db = TEST_REDIS_DB + use_new_dev_services = environ.get("USE_NEW_DEVSERVICES", "0") == "1" from sentry.relay import projectconfig_cache from sentry.relay.projectconfig_cache.redis import RedisProjectConfigCache @@ -80,8 +81,8 @@ def relay_server_setup(live_server, tmpdir_factory): template_vars = { "SENTRY_HOST": f"http://host.docker.internal:{port}/", "RELAY_PORT": relay_port, - "KAFKA_HOST": "sentry_kafka", - "REDIS_HOST": "sentry_redis", + "KAFKA_HOST": "kafka-kafka-1" if use_new_dev_services else "sentry_kafka", + "REDIS_HOST": "redis-redis-1" if use_new_dev_services else "sentry_redis", "REDIS_DB": redis_db, } @@ -106,7 +107,7 @@ def relay_server_setup(live_server, tmpdir_factory): options = { "image": RELAY_TEST_IMAGE, "ports": {"%s/tcp" % relay_port: relay_port}, - "network": "sentry", + "network": "devservices" if use_new_dev_services else "sentry", "detach": True, "name": container_name, "volumes": {config_path: {"bind": "/etc/relay"}}, diff --git a/src/sentry/testutils/relay.py b/src/sentry/testutils/relay.py index 793e69a9207d80..25232a63af448e 100644 --- a/src/sentry/testutils/relay.py +++ b/src/sentry/testutils/relay.py @@ -35,9 +35,6 @@ class RelayStoreHelper(RequiredBaseclass): get_relay_minidump_url: Any get_relay_unreal_url: Any - def use_relay(self): - return True - def post_and_retrieve_event(self, data): url = self.get_relay_store_url(self.project.id) responses.add_passthru(url) diff --git a/src/sentry/testutils/requests.py b/src/sentry/testutils/requests.py index a7adf1f9ce4b0e..4ce41b0355d860 100644 --- a/src/sentry/testutils/requests.py +++ b/src/sentry/testutils/requests.py @@ -7,6 +7,8 @@ from django.contrib.auth.models import AnonymousUser from django.core.cache import cache from django.http import HttpRequest +from rest_framework.request import Request +from rest_framework.views import APIView from sentry.app import env from sentry.middleware.auth import AuthenticationMiddleware @@ -67,3 +69,13 @@ def make_user_request_from_org(org=None): request, user = make_user_request(org) request.session["activeorg"] = org.slug return request, user + + +def drf_request_from_request(request: HttpRequest) -> Request: + ret = APIView().initialize_request(request) + # reattach these if missing + # XXX: technically `HttpRequest` shouldn't have auth but our tests do!) + for attr in ("auth", "user"): + if hasattr(request, attr): + setattr(ret, attr, getattr(request, attr)) + return ret diff --git a/src/sentry/testutils/skips.py b/src/sentry/testutils/skips.py index 6937880a079417..d77514e19231e9 100644 --- a/src/sentry/testutils/skips.py +++ b/src/sentry/testutils/skips.py @@ -1,50 +1,11 @@ from __future__ import annotations -import os import socket -from collections.abc import Callable -from typing import Any, TypeVar from urllib.parse import urlparse import pytest from django.conf import settings -T = TypeVar("T", bound=Callable[..., Any]) - - -def is_arm64() -> bool: - return os.uname().machine == "arm64" - - -requires_not_arm64 = pytest.mark.skipif( - is_arm64(), reason="this test fails in our arm64 testing env" -) - - -def xfail_if_not_postgres(reason: str) -> Callable[[T], T]: - def decorator(function: T) -> T: - return pytest.mark.xfail(os.environ.get("TEST_SUITE") != "postgres", reason=reason)( - function - ) - - return decorator - - -def skip_for_relay_store(reason: str) -> Callable[[T], T]: - """ - Decorator factory will skip marked tests if Relay is enabled. 
- A test decorated with @skip_for_relay_store("this test has been moved in relay") - Will not be executed when the settings SENTRY_USE_RELAY = True - :param reason: the reason the test should be skipped - - Note: Eventually, when Relay becomes compulsory, tests marked with this decorator will be deleted. - """ - - def decorator(function: T) -> T: - return pytest.mark.skipif(settings.SENTRY_USE_RELAY, reason=reason)(function) - - return decorator - def _service_available(host: str, port: int) -> bool: try: diff --git a/src/sentry/toolbar/views/iframe_view.py b/src/sentry/toolbar/views/iframe_view.py index 58abcb5439cff1..a73af6dfb9a90d 100644 --- a/src/sentry/toolbar/views/iframe_view.py +++ b/src/sentry/toolbar/views/iframe_view.py @@ -3,8 +3,10 @@ from django.http import HttpRequest, HttpResponse from django.http.response import HttpResponseBase +from sentry.api.utils import generate_region_url from sentry.models.organization import Organization from sentry.models.project import Project +from sentry.organizations.absolute_url import generate_organization_url from sentry.toolbar.utils.url import is_origin_allowed from sentry.web.frontend.base import ProjectView, region_silo_view @@ -61,6 +63,8 @@ def _respond_with_state(self, state: str): "logging": self.request.GET.get("logging", ""), "organization_slug": self.organization_slug, "project_id_or_slug": self.project_id_or_slug, + "organization_url": generate_organization_url(self.organization_slug), + "region_url": generate_region_url(), }, ) diff --git a/src/sentry/types/activity.py b/src/sentry/types/activity.py index e850e61e854b47..1dd4a3c2373ad7 100644 --- a/src/sentry/types/activity.py +++ b/src/sentry/types/activity.py @@ -67,3 +67,16 @@ class ActivityType(Enum): ActivityType.DELETED_ATTACHMENT, # 27 ] ) + + +STATUS_CHANGE_ACTIVITY_TYPES = ( + ActivityType.SET_RESOLVED, + ActivityType.SET_UNRESOLVED, + ActivityType.SET_IGNORED, + ActivityType.SET_REGRESSION, + ActivityType.SET_RESOLVED_IN_RELEASE, + ActivityType.SET_RESOLVED_BY_AGE, + ActivityType.SET_RESOLVED_IN_COMMIT, + ActivityType.SET_RESOLVED_IN_PULL_REQUEST, + ActivityType.SET_ESCALATING, +) diff --git a/src/sentry/uptime/config_producer.py b/src/sentry/uptime/config_producer.py index 90ea85cc65e486..dbf486fa188cf1 100644 --- a/src/sentry/uptime/config_producer.py +++ b/src/sentry/uptime/config_producer.py @@ -1,5 +1,6 @@ from __future__ import annotations +import logging from uuid import UUID from arroyo import Topic as ArroyoTopic @@ -8,9 +9,12 @@ from sentry_kafka_schemas.schema_types.uptime_configs_v1 import CheckConfig from sentry.conf.types.kafka_definition import Topic, get_topic_codec +from sentry.uptime.subscriptions.regions import get_region_config from sentry.utils.arroyo_producer import SingletonProducer from sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition +logger = logging.getLogger(__name__) + UPTIME_CONFIGS_CODEC: Codec[CheckConfig] = get_topic_codec(Topic.UPTIME_CONFIGS) @@ -25,16 +29,30 @@ def _get_producer() -> KafkaProducer: _configs_producer = SingletonProducer(_get_producer) -def produce_config(config: CheckConfig): - _produce_to_kafka(UUID(config["subscription_id"]), UPTIME_CONFIGS_CODEC.encode(config)) +def produce_config(destination_region_slug: str, config: CheckConfig): + _produce_to_kafka( + destination_region_slug, + UUID(config["subscription_id"]), + UPTIME_CONFIGS_CODEC.encode(config), + ) + +def produce_config_removal(destination_region_slug: str, subscription_id: str): + 
_produce_to_kafka(destination_region_slug, UUID(subscription_id), None) -def produce_config_removal(subscription_id: str): - _produce_to_kafka(UUID(subscription_id), None) +def _produce_to_kafka( + destination_region_slug: str, subscription_id: UUID, value: bytes | None +) -> None: + region_config = get_region_config(destination_region_slug) + if region_config is None: + logger.error( + "Attempted to create uptime subscription with invalid region slug", + extra={"region_slug": destination_region_slug, "subscription_id": subscription_id}, + ) + return -def _produce_to_kafka(subscription_id: UUID, value: bytes | None) -> None: - topic = get_topic_definition(Topic.UPTIME_CONFIGS)["real_topic_name"] + topic = get_topic_definition(region_config.config_topic)["real_topic_name"] payload = KafkaPayload( subscription_id.bytes, # Typically None is not allowed for the arroyo payload, but in this diff --git a/src/sentry/uptime/consumers/results_consumer.py b/src/sentry/uptime/consumers/results_consumer.py index 7af6259ddacd85..87be1361ae67cb 100644 --- a/src/sentry/uptime/consumers/results_consumer.py +++ b/src/sentry/uptime/consumers/results_consumer.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +import random from datetime import datetime, timedelta, timezone from sentry_kafka_schemas.schema_types.uptime_results_v1 import ( @@ -24,13 +25,18 @@ ProjectUptimeSubscriptionMode, UptimeStatus, UptimeSubscription, + UptimeSubscriptionRegion, ) +from sentry.uptime.subscriptions.regions import get_active_region_configs from sentry.uptime.subscriptions.subscriptions import ( delete_uptime_subscriptions_for_project, get_or_create_uptime_subscription, remove_uptime_subscription_if_unused, ) -from sentry.uptime.subscriptions.tasks import send_uptime_config_deletion +from sentry.uptime.subscriptions.tasks import ( + send_uptime_config_deletion, + update_remote_uptime_subscription, +) from sentry.utils import metrics logger = logging.getLogger(__name__) @@ -73,16 +79,64 @@ class UptimeResultProcessor(ResultProcessor[CheckResult, UptimeSubscription]): def get_subscription_id(self, result: CheckResult) -> str: return result["subscription_id"] + def check_and_update_regions(self, subscription: UptimeSubscription): + """ + This method will check if regions have been added or removed from our region configuration, + and updates regions associated with this uptime monitor to reflect the new state. This is + done probabilistically, so that the check is performed roughly once an hour for each uptime + monitor. + """ + # Run region checks and updates roughly once an hour + chance_to_run = subscription.interval_seconds / timedelta(hours=1).total_seconds() + if random.random() >= chance_to_run: + return + + subscription_region_slugs = {r.region_slug for r in subscription.regions.all()} + active_region_slugs = {c.slug for c in get_active_region_configs()} + if subscription_region_slugs == active_region_slugs: + # Regions haven't changed, exit early. 
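# Illustrative aside (not part of the patch): a minimal sketch of the probabilistic gate
# described in the docstring above, assuming an example interval_seconds of 300, i.e. a
# five-minute check interval.
#
#     chance_to_run = 300 / timedelta(hours=1).total_seconds()  # 300 / 3600 ≈ 0.083
#     runs_now = random.random() < chance_to_run                # ~1 in 12 checks, ≈ once per hour
#
# Both `random` and `timedelta` are already imported by this module in the hunk above, so the
# sketch only reuses names that exist in the surrounding code.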
+ return + + new_region_slugs = active_region_slugs - subscription_region_slugs + removed_region_slugs = subscription_region_slugs - active_region_slugs + if new_region_slugs: + new_regions = [ + UptimeSubscriptionRegion(uptime_subscription=subscription, region_slug=slug) + for slug in new_region_slugs + ] + UptimeSubscriptionRegion.objects.bulk_create(new_regions, ignore_conflicts=True) + + if removed_region_slugs: + for deleted_region in UptimeSubscriptionRegion.objects.filter( + uptime_subscription=subscription, region_slug__in=removed_region_slugs + ): + if subscription.subscription_id: + # We need to explicitly send deletes here before we remove the region + send_uptime_config_deletion( + deleted_region.region_slug, subscription.subscription_id + ) + deleted_region.delete() + + # Regardless of whether we added or removed regions, we need to send an updated config to all active + # regions for this subscription so that they all get an updated set of currently active regions. + subscription.update(status=UptimeSubscription.Status.UPDATING.value) + update_remote_uptime_subscription.delay(subscription.id) + def handle_result(self, subscription: UptimeSubscription | None, result: CheckResult): logger.info("process_result", extra=result) if subscription is None: # If no subscription in the Postgres, this subscription has been orphaned. Remove # from the checker - send_uptime_config_deletion(result["subscription_id"]) + # TODO: Send to region specifically from this check result once we update the schema + send_uptime_config_deletion( + get_active_region_configs()[0].slug, result["subscription_id"] + ) metrics.incr("uptime.result_processor.subscription_not_found", sample_rate=1.0) return + self.check_and_update_regions(subscription) + project_subscriptions = list(subscription.projectuptimesubscription_set.all()) cluster = _get_cluster() @@ -333,3 +387,7 @@ def has_reached_status_threshold( class UptimeResultsStrategyFactory(ResultsStrategyFactory[CheckResult, UptimeSubscription]): result_processor_cls = UptimeResultProcessor topic_for_codec = Topic.UPTIME_RESULTS + identifier = "uptime" + + def build_payload_grouping_key(self, result: CheckResult) -> str: + return self.result_processor.get_subscription_id(result) diff --git a/src/sentry/uptime/detectors/tasks.py b/src/sentry/uptime/detectors/tasks.py index 1e37a61d457997..836cae9b82e8a4 100644 --- a/src/sentry/uptime/detectors/tasks.py +++ b/src/sentry/uptime/detectors/tasks.py @@ -6,7 +6,7 @@ from django.utils import timezone -from sentry import features +from sentry import audit_log, features from sentry.locks import locks from sentry.models.organization import Organization from sentry.models.project import Project @@ -22,7 +22,7 @@ should_detect_for_organization, should_detect_for_project, ) -from sentry.uptime.models import ProjectUptimeSubscriptionMode +from sentry.uptime.models import ProjectUptimeSubscription, ProjectUptimeSubscriptionMode from sentry.uptime.subscriptions.subscriptions import ( delete_uptime_subscriptions_for_project, get_auto_monitored_subscriptions_for_project, @@ -30,6 +30,7 @@ is_url_auto_monitored_for_project, ) from sentry.utils import metrics +from sentry.utils.audit import create_system_audit_entry from sentry.utils.hashlib import md5_text from sentry.utils.locking import UnableToAcquireLock @@ -221,16 +222,22 @@ def process_candidate_url( ) if features.has("organizations:uptime-automatic-subscription-creation", project.organization): # If we hit this point, then the url looks worth monitoring. 
Create an uptime subscription in monitor mode. - monitor_url_for_project(project, url) + uptime_monitor = monitor_url_for_project(project, url) # Disable auto-detection on this project and organization now that we've successfully found a hostname project.update_option("sentry:uptime_autodetection", False) project.organization.update_option("sentry:uptime_autodetection", False) + create_system_audit_entry( + organization=project.organization, + target_object=uptime_monitor.id, + event=audit_log.get_event_id("UPTIME_MONITOR_ADD"), + data=uptime_monitor.get_audit_log_data(), + ) metrics.incr("uptime.detectors.candidate_url.succeeded", sample_rate=1.0) return True -def monitor_url_for_project(project: Project, url: str): +def monitor_url_for_project(project: Project, url: str) -> ProjectUptimeSubscription: """ Start monitoring a url for a project. Creates a subscription using our onboarding interval and links the project to it. Also deletes any other auto-detected monitors since this one should replace them. @@ -244,7 +251,8 @@ def monitor_url_for_project(project: Project, url: str): ProjectUptimeSubscriptionMode.AUTO_DETECTED_ACTIVE, ], ) - get_or_create_project_uptime_subscription( + metrics.incr("uptime.detectors.candidate_url.monitor_created", sample_rate=1.0) + return get_or_create_project_uptime_subscription( project, # TODO(epurkhiser): This is where we would put the environment object # from autodetection if we decide to do that. @@ -253,8 +261,7 @@ def monitor_url_for_project(project: Project, url: str): interval_seconds=ONBOARDING_SUBSCRIPTION_INTERVAL_SECONDS, timeout_ms=ONBOARDING_SUBSCRIPTION_TIMEOUT_MS, mode=ProjectUptimeSubscriptionMode.AUTO_DETECTED_ONBOARDING, - ) - metrics.incr("uptime.detectors.candidate_url.monitor_created", sample_rate=1.0) + )[0] def is_failed_url(url: str) -> bool: diff --git a/src/sentry/uptime/endpoints/organiation_uptime_alert_index.py b/src/sentry/uptime/endpoints/organiation_uptime_alert_index.py new file mode 100644 index 00000000000000..3961bb0f93656a --- /dev/null +++ b/src/sentry/uptime/endpoints/organiation_uptime_alert_index.py @@ -0,0 +1,120 @@ +from django.db.models import Q +from drf_spectacular.utils import extend_schema +from rest_framework.request import Request +from rest_framework.response import Response + +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.base import region_silo_endpoint +from sentry.api.bases import NoProjects +from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission +from sentry.api.helpers.teams import get_teams +from sentry.api.paginator import OffsetPaginator +from sentry.api.serializers import serialize +from sentry.apidocs.constants import RESPONSE_FORBIDDEN, RESPONSE_NOT_FOUND, RESPONSE_UNAUTHORIZED +from sentry.apidocs.parameters import GlobalParams, OrganizationParams, UptimeParams +from sentry.apidocs.utils import inline_sentry_response_serializer +from sentry.db.models.query import in_iexact +from sentry.models.organization import Organization +from sentry.search.utils import tokenize_query +from sentry.types.actor import Actor +from sentry.uptime.endpoints.serializers import ( + ProjectUptimeSubscriptionSerializer, + ProjectUptimeSubscriptionSerializerResponse, +) +from sentry.uptime.models import ProjectUptimeSubscription + + +@region_silo_endpoint +@extend_schema(tags=["Crons"]) +class OrganizationUptimeAlertIndexEndpoint(OrganizationEndpoint): + publish_status = { + "GET": ApiPublishStatus.EXPERIMENTAL, 
+ } + owner = ApiOwner.CRONS + permission_classes = (OrganizationPermission,) + + @extend_schema( + operation_id="Retrieve Uptime Alerts for an Organization", + parameters=[ + GlobalParams.ORG_ID_OR_SLUG, + OrganizationParams.PROJECT, + GlobalParams.ENVIRONMENT, + UptimeParams.OWNER, + ], + responses={ + 200: inline_sentry_response_serializer( + "UptimeAlertList", list[ProjectUptimeSubscriptionSerializerResponse] + ), + 401: RESPONSE_UNAUTHORIZED, + 403: RESPONSE_FORBIDDEN, + 404: RESPONSE_NOT_FOUND, + }, + ) + def get(self, request: Request, organization: Organization) -> Response: + """ + Lists uptime alerts. May be filtered to a project or environment. + """ + try: + filter_params = self.get_filter_params(request, organization, date_filter_optional=True) + except NoProjects: + return self.respond([]) + + queryset = ProjectUptimeSubscription.objects.filter( + project__organization_id=organization.id, project_id__in=filter_params["project_id"] + ) + query = request.GET.get("query") + owners = request.GET.getlist("owner") + + if "environment" in filter_params: + queryset = queryset.filter(environment__in=filter_params["environment_objects"]) + + if owners: + owners_set = set(owners) + + # Remove special values from owners, this can't be parsed as an Actor + include_myteams = "myteams" in owners_set + owners_set.discard("myteams") + include_unassigned = "unassigned" in owners_set + owners_set.discard("unassigned") + + actors = [Actor.from_identifier(identifier) for identifier in owners_set] + + user_ids = [actor.id for actor in actors if actor.is_user] + team_ids = [actor.id for actor in actors if actor.is_team] + + teams = get_teams( + request, + organization, + teams=[*team_ids, *(["myteams"] if include_myteams else [])], + ) + team_ids = [team.id for team in teams] + + owner_filter = Q(owner_user_id__in=user_ids) | Q(owner_team_id__in=team_ids) + + if include_unassigned: + unassigned_filter = Q(owner_user_id=None) & Q(owner_team_id=None) + queryset = queryset.filter(unassigned_filter | owner_filter) + else: + queryset = queryset.filter(owner_filter) + + if query: + tokens = tokenize_query(query) + for key, value in tokens.items(): + if key == "query": + query_value = " ".join(value) + queryset = queryset.filter( + Q(name__icontains=query_value) + | Q(uptime_subscription__url__icontains=query_value) + ) + elif key == "name": + queryset = queryset.filter(in_iexact("name", value)) + else: + queryset = queryset.none() + + return self.paginate( + request=request, + queryset=queryset, + on_results=lambda x: serialize(x, request.user, ProjectUptimeSubscriptionSerializer()), + paginator_cls=OffsetPaginator, + ) diff --git a/src/sentry/uptime/issue_platform.py b/src/sentry/uptime/issue_platform.py index 9b72df0aa80dab..ef812b8330741e 100644 --- a/src/sentry/uptime/issue_platform.py +++ b/src/sentry/uptime/issue_platform.py @@ -45,7 +45,7 @@ def build_occurrence_from_result( ), IssueEvidence( name="Duration", - value=str(result["duration_ms"]), + value=f"{result["duration_ms"]}ms", important=False, ), ] diff --git a/src/sentry/uptime/migrations/0019_uptime_region.py b/src/sentry/uptime/migrations/0019_uptime_region.py new file mode 100644 index 00000000000000..5d0e9b64971321 --- /dev/null +++ b/src/sentry/uptime/migrations/0019_uptime_region.py @@ -0,0 +1,92 @@ +# Generated by Django 5.1.4 on 2024-12-17 23:47 + +import django.db.models.deletion +from django.db import migrations, models + +import sentry.db.models.fields.bounded +import sentry.db.models.fields.foreignkey +from 
sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("uptime", "0018_add_trace_sampling_field_to_uptime"), + ] + + operations = [ + migrations.CreateModel( + name="Region", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ("slug", models.CharField(max_length=255, unique=True)), + ("name", models.CharField(max_length=255)), + ], + options={ + "db_table": "uptime_region", + }, + ), + migrations.CreateModel( + name="UptimeSubscriptionRegion", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ( + "region", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="uptime.region" + ), + ), + ( + "uptime_subscription", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="uptime.uptimesubscription" + ), + ), + ], + options={ + "db_table": "uptime_uptimesubscriptionregion", + }, + ), + migrations.AddField( + model_name="uptimesubscription", + name="regions", + field=models.ManyToManyField( + through="uptime.UptimeSubscriptionRegion", to="uptime.region" + ), + ), + migrations.AddConstraint( + model_name="uptimesubscriptionregion", + constraint=models.UniqueConstraint( + models.F("uptime_subscription"), + models.F("region"), + name="uptime_uptimesubscription_region_unique", + ), + ), + ] diff --git a/src/sentry/uptime/migrations/0020_drop_region.py b/src/sentry/uptime/migrations/0020_drop_region.py new file mode 100644 index 00000000000000..ead4c8e9c26479 --- /dev/null +++ b/src/sentry/uptime/migrations/0020_drop_region.py @@ -0,0 +1,84 @@ +# Generated by Django 5.1.4 on 2024-12-18 22:26 + +import django.db.models.deletion +from django.db import migrations, models + +import sentry.db.models.fields.foreignkey +from sentry.new_migrations.migrations import CheckedMigration +from sentry.new_migrations.monkey.fields import SafeRemoveField +from sentry.new_migrations.monkey.models import SafeDeleteModel +from sentry.new_migrations.monkey.state import DeletionAction + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. 
+ # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("uptime", "0019_uptime_region"), + ] + + operations = [ + migrations.SeparateDatabaseAndState( + state_operations=[ + migrations.RemoveField( + model_name="uptimesubscription", + name="regions", + ), + ] + ), + migrations.AlterField( + model_name="uptimesubscriptionregion", + name="uptime_subscription", + field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="regions", + to="uptime.uptimesubscription", + ), + ), + migrations.RemoveConstraint( + model_name="uptimesubscriptionregion", + name="uptime_uptimesubscription_region_unique", + ), + migrations.AddField( + model_name="uptimesubscriptionregion", + name="region_slug", + field=models.CharField(db_index=True, default="", db_default="", max_length=255), + preserve_default=False, + ), + migrations.AlterField( + model_name="uptimesubscriptionregion", + name="region", + field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( + null=True, + db_constraint=False, + on_delete=django.db.models.deletion.CASCADE, + to="uptime.region", + ), + ), + migrations.AddConstraint( + model_name="uptimesubscriptionregion", + constraint=models.UniqueConstraint( + models.F("uptime_subscription"), + models.F("region_slug"), + name="uptime_uptimesubscription_region_slug_unique", + ), + ), + SafeRemoveField( + model_name="uptimesubscriptionregion", + name="region", + deletion_action=DeletionAction.MOVE_TO_PENDING, + ), + SafeDeleteModel(name="Region", deletion_action=DeletionAction.MOVE_TO_PENDING), + ] diff --git a/src/sentry/uptime/migrations/0021_drop_region_table_col.py b/src/sentry/uptime/migrations/0021_drop_region_table_col.py new file mode 100644 index 00000000000000..b12fd5f85aa529 --- /dev/null +++ b/src/sentry/uptime/migrations/0021_drop_region_table_col.py @@ -0,0 +1,35 @@ +# Generated by Django 5.1.4 on 2024-12-18 23:29 + +from sentry.new_migrations.migrations import CheckedMigration +from sentry.new_migrations.monkey.fields import SafeRemoveField +from sentry.new_migrations.monkey.models import SafeDeleteModel +from sentry.new_migrations.monkey.state import DeletionAction + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. 
Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("uptime", "0020_drop_region"), + ] + + operations = [ + SafeRemoveField( + model_name="uptimesubscriptionregion", + name="region", + deletion_action=DeletionAction.DELETE, + ), + SafeDeleteModel(name="Region", deletion_action=DeletionAction.DELETE), + ] diff --git a/src/sentry/uptime/models.py b/src/sentry/uptime/models.py index 9f53ee68766cd1..77e4ca0cb745e8 100644 --- a/src/sentry/uptime/models.py +++ b/src/sentry/uptime/models.py @@ -7,9 +7,11 @@ from django.db.models import Q from django.db.models.expressions import Value from django.db.models.functions import MD5, Coalesce +from sentry_kafka_schemas.schema_types.uptime_configs_v1 import REGIONSCHEDULEMODE_ROUND_ROBIN from sentry.backup.scopes import RelocationScope from sentry.db.models import ( + DefaultFieldsModel, DefaultFieldsModelExisting, FlexibleForeignKey, JSONField, @@ -107,6 +109,26 @@ class Meta: ] +@region_silo_model +class UptimeSubscriptionRegion(DefaultFieldsModel): + __relocation_scope__ = RelocationScope.Excluded + + uptime_subscription = FlexibleForeignKey("uptime.UptimeSubscription", related_name="regions") + region_slug = models.CharField(max_length=255, db_index=True, db_default="") + + class Meta: + app_label = "uptime" + db_table = "uptime_uptimesubscriptionregion" + + constraints = [ + models.UniqueConstraint( + "uptime_subscription", + "region_slug", + name="uptime_uptimesubscription_region_slug_unique", + ), + ] + + class ProjectUptimeSubscriptionMode(enum.IntEnum): # Manually created by a user MANUAL = 1 @@ -205,3 +227,7 @@ def get_active_auto_monitor_count_for_org(organization: Organization) -> int: ProjectUptimeSubscriptionMode.AUTO_DETECTED_ACTIVE, ], ).count() + + +class UptimeRegionScheduleMode(enum.StrEnum): + ROUND_ROBIN = REGIONSCHEDULEMODE_ROUND_ROBIN diff --git a/src/sentry/uptime/subscriptions/regions.py b/src/sentry/uptime/subscriptions/regions.py new file mode 100644 index 00000000000000..be954095a48d26 --- /dev/null +++ b/src/sentry/uptime/subscriptions/regions.py @@ -0,0 +1,15 @@ +from django.conf import settings + +from sentry.conf.types.uptime import UptimeRegionConfig + + +def get_active_region_configs() -> list[UptimeRegionConfig]: + return [v for v in settings.UPTIME_REGIONS if v.enabled] + + +def get_region_config(region_slug: str) -> UptimeRegionConfig | None: + region = next((r for r in settings.UPTIME_REGIONS if r.slug == region_slug), None) + if region is None: + # XXX: Temporary hack to guarantee we get a config + region = get_active_region_configs()[0] + return region diff --git a/src/sentry/uptime/subscriptions/subscriptions.py b/src/sentry/uptime/subscriptions/subscriptions.py index 2a0e78b7794819..0c3b58ee869648 100644 --- a/src/sentry/uptime/subscriptions/subscriptions.py +++ b/src/sentry/uptime/subscriptions/subscriptions.py @@ -15,9 +15,11 @@ ProjectUptimeSubscription, ProjectUptimeSubscriptionMode, UptimeSubscription, + UptimeSubscriptionRegion, headers_json_encoder, ) from 
sentry.uptime.rdap.tasks import fetch_subscription_rdap_info +from sentry.uptime.subscriptions.regions import get_active_region_configs from sentry.uptime.subscriptions.tasks import ( create_remote_uptime_subscription, delete_remote_uptime_subscription, @@ -132,6 +134,13 @@ def get_or_create_uptime_subscription( subscription.update(status=UptimeSubscription.Status.CREATING.value) created = True + # Associate active regions with this subscription + for region_config in get_active_region_configs(): + # If we add a region here we need to resend the subscriptions + created |= UptimeSubscriptionRegion.objects.get_or_create( + uptime_subscription=subscription, region_slug=region_config.slug + )[1] + if created: create_remote_uptime_subscription.delay(subscription.id) fetch_subscription_rdap_info.delay(subscription.id) diff --git a/src/sentry/uptime/subscriptions/tasks.py b/src/sentry/uptime/subscriptions/tasks.py index c0936b840ddce4..d28948577f27b9 100644 --- a/src/sentry/uptime/subscriptions/tasks.py +++ b/src/sentry/uptime/subscriptions/tasks.py @@ -10,7 +10,8 @@ from sentry.snuba.models import QuerySubscription from sentry.tasks.base import instrumented_task from sentry.uptime.config_producer import produce_config, produce_config_removal -from sentry.uptime.models import UptimeSubscription +from sentry.uptime.models import UptimeRegionScheduleMode, UptimeSubscription +from sentry.uptime.subscriptions.regions import get_active_region_configs from sentry.utils import metrics logger = logging.getLogger(__name__) @@ -35,10 +36,50 @@ def create_remote_uptime_subscription(uptime_subscription_id, **kwargs): metrics.incr("uptime.subscriptions.create.incorrect_status", sample_rate=1.0) return - subscription_id = send_uptime_subscription_config(subscription) - # TODO: Ideally this should actually be `PENDING_FIRST_UPDATE` so we can validate it's really working as expected + region_slugs = [s.region_slug for s in subscription.regions.all()] + if not region_slugs: + # XXX: Hack to make sure that region configs are sent even if we don't have region rows present. + # Remove once everything is in place + region_slugs = [get_active_region_configs()[0].slug] + + for region_slug in region_slugs: + send_uptime_subscription_config(region_slug, subscription) subscription.update( - status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id + status=QuerySubscription.Status.ACTIVE.value, + subscription_id=subscription.subscription_id, + ) + + +@instrumented_task( + name="sentry.uptime.subscriptions.tasks.update_remote_uptime_subscription", + queue="uptime", + default_retry_delay=5, + max_retries=5, +) +def update_remote_uptime_subscription(uptime_subscription_id, **kwargs): + """ + Pushes details of an uptime subscription to uptime subscription regions. + """ + try: + subscription = UptimeSubscription.objects.get(id=uptime_subscription_id) + except UptimeSubscription.DoesNotExist: + metrics.incr("uptime.subscriptions.update.subscription_does_not_exist", sample_rate=1.0) + return + if subscription.status != UptimeSubscription.Status.UPDATING.value: + metrics.incr("uptime.subscriptions.update.incorrect_status", sample_rate=1.0) + return + + region_slugs = [s.region_slug for s in subscription.regions.all()] + if not region_slugs: + # XXX: Hack to make sure that region configs are sent even if we don't have region rows present. 
+ # Remove once everything is in place + region_slugs = [get_active_region_configs()[0].slug] + + for region_slug in region_slugs: + send_uptime_subscription_config(region_slug, subscription) + subscription.update( + status=QuerySubscription.Status.ACTIVE.value, + subscription_id=subscription.subscription_id, ) @@ -62,6 +103,12 @@ def delete_remote_uptime_subscription(uptime_subscription_id, **kwargs): metrics.incr("uptime.subscriptions.delete.incorrect_status", sample_rate=1.0) return + region_slugs = [s.region_slug for s in subscription.regions.all()] + if not region_slugs: + # XXX: Hack to make sure that region configs are sent even if we don't have regions present. + # Remove once everything is in place + region_slugs = [get_active_region_configs()[0].slug] + subscription_id = subscription.subscription_id if subscription.status == QuerySubscription.Status.DELETING.value: subscription.delete() @@ -69,15 +116,16 @@ def delete_remote_uptime_subscription(uptime_subscription_id, **kwargs): subscription.update(subscription_id=None) if subscription_id is not None: - send_uptime_config_deletion(subscription_id) + for region_slug in region_slugs: + send_uptime_config_deletion(region_slug, subscription_id) -def send_uptime_subscription_config(subscription: UptimeSubscription) -> str: - # Whenever we create/update a config we always want to generate a new subscription id. This allows us to validate - # that the config took effect - subscription_id = uuid4().hex - produce_config(uptime_subscription_to_check_config(subscription, subscription_id)) - return subscription_id +def send_uptime_subscription_config(region_slug: str, subscription: UptimeSubscription): + if subscription.subscription_id is None: + subscription.subscription_id = uuid4().hex + produce_config( + region_slug, uptime_subscription_to_check_config(subscription, subscription.subscription_id) + ) def uptime_subscription_to_check_config( @@ -96,14 +144,16 @@ def uptime_subscription_to_check_config( "request_method": subscription.method, "request_headers": headers, "trace_sampling": subscription.trace_sampling, + "active_regions": [r.region_slug for r in subscription.regions.all()], + "region_schedule_mode": UptimeRegionScheduleMode.ROUND_ROBIN.value, } if subscription.body is not None: config["request_body"] = subscription.body return config -def send_uptime_config_deletion(subscription_id: str) -> None: - produce_config_removal(subscription_id) +def send_uptime_config_deletion(destination_region_slug: str, subscription_id: str) -> None: + produce_config_removal(destination_region_slug, subscription_id) @instrumented_task( @@ -120,6 +170,7 @@ def subscription_checker(**kwargs): for subscription in UptimeSubscription.objects.filter( status__in=( UptimeSubscription.Status.CREATING.value, + UptimeSubscription.Status.UPDATING.value, UptimeSubscription.Status.DELETING.value, ), date_updated__lt=timezone.now() - SUBSCRIPTION_STATUS_MAX_AGE, diff --git a/src/sentry/users/api/endpoints/user_authenticator_enroll.py b/src/sentry/users/api/endpoints/user_authenticator_enroll.py index e1fdfea5d73fd9..0fe89df27321f6 100644 --- a/src/sentry/users/api/endpoints/user_authenticator_enroll.py +++ b/src/sentry/users/api/endpoints/user_authenticator_enroll.py @@ -13,7 +13,7 @@ from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import control_silo_endpoint -from sentry.api.decorators import email_verification_required, sudo_required +from sentry.api.decorators import 
primary_email_verification_required, sudo_required from sentry.api.invite_helper import ApiInviteHelper, remove_invite_details_from_session from sentry.api.serializers import serialize from sentry.auth.authenticators.base import EnrollmentStatus, NewEnrollmentDisallowed @@ -175,7 +175,7 @@ def get(self, request: Request, user: User, interface_id: str) -> HttpResponse: return Response(response) @sudo_required - @email_verification_required + @primary_email_verification_required def post(self, request: Request, user: User, interface_id: str) -> HttpResponse: """ Enroll in authenticator interface diff --git a/src/sentry/users/api/endpoints/user_password.py b/src/sentry/users/api/endpoints/user_password.py index a3a67682131c36..fbf5a9d29cd54b 100644 --- a/src/sentry/users/api/endpoints/user_password.py +++ b/src/sentry/users/api/endpoints/user_password.py @@ -13,6 +13,7 @@ from sentry.types.ratelimit import RateLimit, RateLimitCategory from sentry.users.api.bases.user import UserEndpoint from sentry.users.models.user import User +from sentry.web.frontend.twofactor import reset_2fa_rate_limits class UserPasswordSerializer(serializers.Serializer[User]): @@ -89,4 +90,7 @@ def put(self, request: Request, user: User) -> Response: ip_address=request.META["REMOTE_ADDR"], send_email=True, ) + + reset_2fa_rate_limits(user.id) + return Response(status=status.HTTP_204_NO_CONTENT) diff --git a/src/sentry/users/models/user.py b/src/sentry/users/models/user.py index a84b99c4cd93a0..736f96d0fa50a8 100644 --- a/src/sentry/users/models/user.py +++ b/src/sentry/users/models/user.py @@ -244,6 +244,9 @@ def has_verified_emails(self) -> bool: def has_unverified_emails(self) -> bool: return self.get_unverified_emails().exists() + def has_verified_primary_email(self) -> bool: + return self.emails.filter(is_verified=True, email=self.email).exists() + def has_usable_password(self) -> bool: if self.password == "" or self.password is None: # This is the behavior we've been relying on from Django 1.6 - 2.0. 
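Below is a minimal sketch (not part of this patch) of how a decorator like the `primary_email_verification_required` used in the enroll endpoint above could be built on the new `has_verified_primary_email()` helper; the real decorator lives in `sentry.api.decorators` and may differ, so the `_sketch` name, response body, and status code here are assumptions for illustration only.

from functools import wraps

from rest_framework import status
from rest_framework.response import Response


def primary_email_verification_required_sketch(view_method):
    """Illustrative only: reject requests whose user has not verified their primary email."""

    @wraps(view_method)
    def wrapped(self, request, *args, **kwargs):
        user = request.user
        # has_verified_primary_email() is the helper added in this diff on both the ORM User
        # model and the RpcUser service model, so the check works from either silo context.
        if not user.is_authenticated or not user.has_verified_primary_email():
            return Response(
                {"detail": "A verified primary email address is required."},
                status=status.HTTP_401_UNAUTHORIZED,
            )
        return view_method(self, request, *args, **kwargs)

    return wrapped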
diff --git a/src/sentry/users/services/user/model.py b/src/sentry/users/services/user/model.py index b2c4a23d403264..f4d5082c63d6ed 100644 --- a/src/sentry/users/services/user/model.py +++ b/src/sentry/users/services/user/model.py @@ -87,6 +87,9 @@ def has_unverified_emails(self) -> bool: def has_verified_emails(self) -> bool: return len(self.get_verified_emails()) > 0 + def has_verified_primary_email(self) -> bool: + return bool([e for e in self.useremails if e.is_verified and e.email == self.email]) + def get_unverified_emails(self) -> list[RpcUserEmail]: return [e for e in self.useremails if not e.is_verified] diff --git a/src/sentry/users/web/accounts.py b/src/sentry/users/web/accounts.py index 24619470f4f708..0b669ee44f869f 100644 --- a/src/sentry/users/web/accounts.py +++ b/src/sentry/users/web/accounts.py @@ -28,6 +28,7 @@ ) from sentry.utils import auth from sentry.web.decorators import login_required, set_referrer_policy +from sentry.web.frontend.twofactor import reset_2fa_rate_limits from sentry.web.helpers import render_to_response logger = logging.getLogger("sentry.accounts") @@ -273,6 +274,8 @@ def recover_confirm( send_email=True, ) + reset_2fa_rate_limits(user.id) + return login_redirect(request) else: form = form_cls(user=user) diff --git a/src/sentry/utils/batching_kafka_consumer.py b/src/sentry/utils/batching_kafka_consumer.py index eb4e4b7c154c5f..47f0530e32b42c 100644 --- a/src/sentry/utils/batching_kafka_consumer.py +++ b/src/sentry/utils/batching_kafka_consumer.py @@ -8,9 +8,6 @@ logger = logging.getLogger("sentry.batching-kafka-consumer") -DEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 50000 -DEFAULT_QUEUED_MIN_MESSAGES = 10000 - def wait_for_topics(admin_client: AdminClient, topics: list[str], timeout: int = 10) -> None: """ diff --git a/src/sentry/utils/memory.py b/src/sentry/utils/memory.py new file mode 100644 index 00000000000000..a1fb5809b1432a --- /dev/null +++ b/src/sentry/utils/memory.py @@ -0,0 +1,17 @@ +import resource +from contextlib import contextmanager + +from sentry.utils import metrics + + +def get_rss_usage(): + return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + + +@contextmanager +def track_memory_usage(metric, **kwargs): + before = get_rss_usage() + try: + yield + finally: + metrics.distribution(metric, get_rss_usage() - before, unit="byte", **kwargs) diff --git a/src/sentry/utils/performance_issues/base.py b/src/sentry/utils/performance_issues/base.py index fb4e7812ee4328..e9063c98408628 100644 --- a/src/sentry/utils/performance_issues/base.py +++ b/src/sentry/utils/performance_issues/base.py @@ -240,8 +240,6 @@ def total_span_time(span_list: list[dict[str, Any]]) -> float: return total_duration * 1000 -PARAMETERIZED_SQL_QUERY_REGEX = re.compile(r"\?|\$1|%s") - PARAMETERIZED_URL_REGEX = re.compile( r"""(?x) (?P diff --git a/src/sentry/utils/platform_categories.py b/src/sentry/utils/platform_categories.py index 25c3e65b992809..3baeb173bbb51f 100644 --- a/src/sentry/utils/platform_categories.py +++ b/src/sentry/utils/platform_categories.py @@ -25,6 +25,7 @@ # Mirrors `const mobile` in sentry/static/app/data/platformCategories.tsx # When changing this file, make sure to keep sentry/static/app/data/platformCategories.tsx in sync. 
+ MOBILE = { "android", "apple-ios", @@ -165,3 +166,12 @@ {id: "serverless", "name": _("Serverless"), "platforms": SERVERLESS}, {id: "temporary", "name": _("Temporary"), "platforms": TEMPORARY}, ] + +# Mirrors `const sourceMaps` in sentry/static/app/data/platformCategories.tsx +# When changing this file, make sure to keep sentry/static/app/data/platformCategories.tsx in sync. +SOURCE_MAPS = { + *FRONTEND, + "react-native", + "cordova", + "electron", +} diff --git a/src/sentry/utils/registry.py b/src/sentry/utils/registry.py index 0ff110e268e054..3eff6865e82051 100644 --- a/src/sentry/utils/registry.py +++ b/src/sentry/utils/registry.py @@ -15,9 +15,15 @@ class NoRegistrationExistsError(ValueError): class Registry(Generic[T]): - def __init__(self): + """ + A simple generic registry that allows for registering and retrieving items by key. Reverse lookup by value is enabled by default. + If you have duplicate values, you may want to disable reverse lookup. + """ + + def __init__(self, enable_reverse_lookup=True): self.registrations: dict[str, T] = {} self.reverse_lookup: dict[T, str] = {} + self.enable_reverse_lookup = enable_reverse_lookup def register(self, key: str): def inner(item: T) -> T: @@ -26,13 +32,14 @@ def inner(item: T) -> T: f"A registration already exists for {key}: {self.registrations[key]}" ) - if item in self.reverse_lookup: - raise AlreadyRegisteredError( - f"A registration already exists for {item}: {self.reverse_lookup[item]}" - ) + if self.enable_reverse_lookup: + if item in self.reverse_lookup: + raise AlreadyRegisteredError( + f"A registration already exists for {item}: {self.reverse_lookup[item]}" + ) + self.reverse_lookup[item] = key self.registrations[key] = item - self.reverse_lookup[item] = key return item @@ -44,6 +51,8 @@ def get(self, key: str) -> T: return self.registrations[key] def get_key(self, item: T) -> str: + if not self.enable_reverse_lookup: + raise NotImplementedError("Reverse lookup is not enabled") if item not in self.reverse_lookup: raise NoRegistrationExistsError(f"No registration exists for {item}") return self.reverse_lookup[item] diff --git a/src/sentry/utils/samples.py b/src/sentry/utils/samples.py index 00ce2b9158651f..f3ccda8e798758 100644 --- a/src/sentry/utils/samples.py +++ b/src/sentry/utils/samples.py @@ -114,7 +114,6 @@ def load_data( trace_context=None, fingerprint=None, event_id=None, - metrics_summary=None, ): # NOTE: Before editing this data, make sure you understand the context # in which its being used. 
It is NOT only used for local development and @@ -193,9 +192,6 @@ def load_data( start_timestamp = start_timestamp.replace(tzinfo=timezone.utc) data["start_timestamp"] = start_timestamp.timestamp() - if metrics_summary is not None: - data["_metrics_summary"] = metrics_summary - if trace is None: trace = uuid4().hex if span_id is None: @@ -411,15 +407,6 @@ def create_sample_event( spans, ) - if not data: - logger.info( - "create_sample_event: no data loaded", - extra={ - "project_id": project.id, - "sample_event": True, - }, - ) - return for key in ["parent_span_id", "hash", "exclusive_time"]: if key in kwargs: data["contexts"]["trace"][key] = kwargs.pop(key) diff --git a/src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py b/src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py index 0053f191a524c8..22c592ce31e73a 100644 --- a/src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py +++ b/src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py @@ -57,6 +57,10 @@ class SDKCrashDetectionConfig: """Whether to report fatal errors. If true, both unhandled and fatal errors are reported. If false, only unhandled errors are reported.""" report_fatal_errors: bool + """The mechanism types to ignore. For example, {"console", "unhandledrejection"}. If empty, all mechanism types are captured.""" + ignore_mechanism_type: set[str] + """The mechanism types to capture. For example, {"ANR", "AppExitInfo"}. Useful when you want to detect events that are neither unhandled nor fatal.""" + allow_mechanism_type: set[str] """The system library path patterns to detect system frames. For example, `System/Library/*` """ system_library_path_patterns: set[str] """The configuration for detecting SDK frames.""" @@ -100,6 +104,8 @@ def build_sdk_crash_detection_configs() -> Sequence[SDKCrashDetectionConfig]: "sentry.cocoa.unreal": cocoa_min_sdk_version, }, report_fatal_errors=False, + ignore_mechanism_type=set(), + allow_mechanism_type=set(), system_library_path_patterns={r"/System/Library/**", r"/usr/lib/**"}, sdk_frame_config=SDKFrameConfig( function_patterns={ @@ -132,6 +138,10 @@ def build_sdk_crash_detection_configs() -> Sequence[SDKCrashDetectionConfig]: "sentry.javascript.react-native": "4.0.0", }, report_fatal_errors=False, + # used by the JS/RN SDKs + # https://github.com/getsentry/sentry-javascript/blob/dafd51054d8b2ab2030fa0b16ad0fd70493b6e08/packages/core/src/integrations/captureconsole.ts#L60 + ignore_mechanism_type={"console"}, + allow_mechanism_type=set(), system_library_path_patterns={ r"**/react-native/Libraries/**", r"**/react-native-community/**", @@ -204,6 +214,8 @@ def build_sdk_crash_detection_configs() -> Sequence[SDKCrashDetectionConfig]: "sentry.native.android": native_min_sdk_version, }, report_fatal_errors=False, + ignore_mechanism_type=set(), + allow_mechanism_type={"ANR", "AppExitInfo"}, system_library_path_patterns={ r"java.**", r"javax.**", @@ -227,7 +239,7 @@ def build_sdk_crash_detection_configs() -> Sequence[SDKCrashDetectionConfig]: function_and_path_patterns=[ FunctionAndPathPattern( function_pattern=r"*pthread_getcpuclockid*", - path_pattern=r"/apex/com.android.art/lib64/bionic/libc.so", + path_pattern=r"/apex/com.android.runtime/lib64/bionic/libc.so", ), FunctionAndPathPattern( function_pattern=r"*art::Trace::StopTracing*", @@ -265,6 +277,8 @@ def build_sdk_crash_detection_configs() -> Sequence[SDKCrashDetectionConfig]: "sentry.native.unreal": native_min_sdk_version, }, report_fatal_errors=False, + ignore_mechanism_type=set(), + allow_mechanism_type=set(), 
system_library_path_patterns={ # well known locations for unix paths r"/lib/**", @@ -315,6 +329,8 @@ def build_sdk_crash_detection_configs() -> Sequence[SDKCrashDetectionConfig]: "sentry.dart.flutter": dart_min_sdk_version, }, report_fatal_errors=True, + ignore_mechanism_type=set(), + allow_mechanism_type=set(), system_library_path_patterns={ # Dart r"org-dartlang-sdk:///**", diff --git a/src/sentry/utils/sdk_crashes/sdk_crash_detector.py b/src/sentry/utils/sdk_crashes/sdk_crash_detector.py index 83f9d9834c7636..d5130d8140b4a7 100644 --- a/src/sentry/utils/sdk_crashes/sdk_crash_detector.py +++ b/src/sentry/utils/sdk_crashes/sdk_crash_detector.py @@ -49,6 +49,13 @@ def should_detect_sdk_crash( if not self.is_sdk_supported(sdk_name, sdk_version): return False + mechanism_type = get_path(event_data, "exception", "values", -1, "mechanism", "type") + if mechanism_type and mechanism_type in self.config.ignore_mechanism_type: + return False + + if mechanism_type and mechanism_type in self.config.allow_mechanism_type: + return True + is_unhandled = ( get_path(event_data, "exception", "values", -1, "mechanism", "handled") is False ) diff --git a/src/sentry/utils/snuba_rpc.py b/src/sentry/utils/snuba_rpc.py index afdb03b9b2e031..5a75c188150715 100644 --- a/src/sentry/utils/snuba_rpc.py +++ b/src/sentry/utils/snuba_rpc.py @@ -146,6 +146,7 @@ def _make_rpc_request( with sentry_sdk.start_span(op="snuba_rpc.run", name=req.__class__.__name__) as span: if referrer: span.set_tag("snuba.referrer", referrer) + span.set_data("snuba.query", req) http_resp = _snuba_pool.urlopen( "POST", f"/rpc/{endpoint_name}/{class_version}", diff --git a/src/sentry/utils/strings.py b/src/sentry/utils/strings.py index 79a797e9a87c6a..b9da4f573b7b8d 100644 --- a/src/sentry/utils/strings.py +++ b/src/sentry/utils/strings.py @@ -9,8 +9,6 @@ from collections.abc import Callable from typing import overload -from django.utils.encoding import smart_str - _sprintf_placeholder_re = re.compile( r"%(?:\d+\$)?[+-]?(?:[ 0]|\'.{1})?-?\d*(?:\.\d+)?[bcdeEufFgGosxX]" ) @@ -83,7 +81,7 @@ def decompress(value: str) -> bytes: def strip(value: str | None) -> str: if not value: return "" - return smart_str(value).strip() + return value.strip() def soft_hyphenate(value: str, length: int, hyphen: str = "\u00ad") -> str: diff --git a/src/sentry/utils/tag_normalization.py b/src/sentry/utils/tag_normalization.py index b68776c1ff5f3e..bce6efa1b9047d 100644 --- a/src/sentry/utils/tag_normalization.py +++ b/src/sentry/utils/tag_normalization.py @@ -92,7 +92,7 @@ def normalize_sdk_tag(tag: str) -> str: # collapse tags other than JavaScript / Native to their top-level SDK - if not tag.split(".")[1] in {"javascript", "native"}: + if tag.split(".")[1] not in {"javascript", "native"}: tag = ".".join(tag.split(".", 2)[0:2]) if tag.split(".")[1] == "native": @@ -115,7 +115,7 @@ def normalized_sdk_tag_from_event(data: Mapping[str, Any]) -> str: Note: Some platforms may keep their framework-specific values, as needed for analytics. This is done to reduce the cardinality of the `sdk.name` tag, while keeping - the ones interesinting to us as granual as possible. + the ones interesting to us as granular as possible. 
""" try: return normalize_sdk_tag((data.get("sdk") or {}).get("name") or "other") diff --git a/src/sentry/utils/types.py b/src/sentry/utils/types.py index c0fdd43bf663b4..b4ed4aa90fa0bf 100644 --- a/src/sentry/utils/types.py +++ b/src/sentry/utils/types.py @@ -215,9 +215,3 @@ def type_from_value(value): AnyCallable = typing.Callable[..., AnyType] - - -def NonNone(value: T | None) -> T: - """A hacked version of TS's non-null assertion operator""" - assert value is not None - return value diff --git a/src/sentry/web/frontend/auth_login.py b/src/sentry/web/frontend/auth_login.py index 4fb8f4eb2fdbde..cf19698091d73c 100644 --- a/src/sentry/web/frontend/auth_login.py +++ b/src/sentry/web/frontend/auth_login.py @@ -40,6 +40,7 @@ from sentry.utils.http import absolute_uri from sentry.utils.sdk import capture_exception from sentry.utils.urls import add_params_to_url +from sentry.web.client_config import get_client_config from sentry.web.forms.accounts import AuthenticationForm, RegistrationForm from sentry.web.frontend.base import BaseView, control_silo_view @@ -431,7 +432,7 @@ def get_ratelimited_login_form( ] metrics.incr("login.attempt", instance="rate_limited", skip_internal=True, sample_rate=1.0) - context = { + context = self.get_default_context(request=request) | { "op": "login", "login_form": login_form, "referrer": request.GET.get("referrer"), @@ -526,11 +527,10 @@ def get_default_context(self, request: Request, **kwargs) -> dict: default_context = { "server_hostname": get_server_hostname(), "login_form": None, - "organization": kwargs.pop( - "organization", None - ), # NOTE: not utilized in basic login page (only org login) + "organization": organization, # NOTE: not utilized in basic login page (only org login) "register_form": None, "CAN_REGISTER": False, + "react_config": get_client_config(request, self.active_organization), "join_request_link": self.get_join_request_link( organization=organization, request=request ), # NOTE: not utilized in basic login page (only org login) @@ -702,18 +702,11 @@ def handle_basic_auth(self, request: Request, **kwargs) -> HttpResponseBase: "login.attempt", instance="failure", skip_internal=True, sample_rate=1.0 ) - context = { + context = self.get_default_context(request=request, organization=organization) | { "op": op or "login", - "server_hostname": get_server_hostname(), "login_form": login_form, - "organization": organization, "register_form": register_form, "CAN_REGISTER": can_register, - "join_request_link": self.get_join_request_link( - organization=organization, request=request - ), - "show_login_banner": settings.SHOW_LOGIN_BANNER, - "referrer": request.GET.get("referrer"), } context.update(additional_context.run_callbacks(request)) diff --git a/src/sentry/web/frontend/auth_organization_login.py b/src/sentry/web/frontend/auth_organization_login.py index 3e2de690770ea6..18af32980bcdc5 100644 --- a/src/sentry/web/frontend/auth_organization_login.py +++ b/src/sentry/web/frontend/auth_organization_login.py @@ -23,14 +23,15 @@ def respond_login(self, request: Request, context, *args, **kwargs) -> HttpRespo return self.respond("sentry/organization-login.html", context) def handle_sso(self, request: Request, organization: RpcOrganization, auth_provider): - referrer = request.GET.get("referrer") if request.method == "POST": helper = AuthHelper( request=request, organization=organization, auth_provider=auth_provider, flow=AuthHelper.FLOW_LOGIN, - referrer=referrer, # TODO: get referrer from the form submit - not the query parms + 
referrer=request.GET.get( + "referrer" + ), # TODO: get referrer from the form submit - not the query parms ) if request.POST.get("init"): @@ -47,13 +48,10 @@ def handle_sso(self, request: Request, organization: RpcOrganization, auth_provi provider = auth_provider.get_provider() - context = { - "CAN_REGISTER": False, - "organization": organization, + context = self.get_default_context(request, organization=organization) | { "provider_key": provider.key, "provider_name": provider.name, "authenticated": request.user.is_authenticated, - "referrer": referrer, } return self.respond("sentry/organization-login.html", context) diff --git a/src/sentry/web/frontend/oauth_authorize.py b/src/sentry/web/frontend/oauth_authorize.py index e9ed3e3543fc99..163b2f973440c6 100644 --- a/src/sentry/web/frontend/oauth_authorize.py +++ b/src/sentry/web/frontend/oauth_authorize.py @@ -233,13 +233,14 @@ def get(self, request: HttpRequest, **kwargs) -> HttpResponseBase: # If application is not org level we should not show organizations to choose from at all organization_options = [] - context = { + context = self.get_default_context(request) | { "user": request.user, "application": application, "scopes": scopes, "permissions": permissions, "organization_options": organization_options, } + return self.respond("sentry/oauth-authorize.html", context) def post(self, request: HttpRequest, **kwargs) -> HttpResponseBase: diff --git a/src/sentry/web/frontend/react_page.py b/src/sentry/web/frontend/react_page.py index 81e5c4d26b8051..68e65ed8d01463 100644 --- a/src/sentry/web/frontend/react_page.py +++ b/src/sentry/web/frontend/react_page.py @@ -88,6 +88,14 @@ def dns_prefetch(self) -> list[str]: def handle_react(self, request: Request, **kwargs) -> HttpResponse: org_context = getattr(self, "active_organization", None) + react_config = get_client_config(request, org_context) + + user_theme = "" + if react_config.get("user", None) and react_config["user"].get("options", {}).get( + "theme", None + ): + user_theme = f"theme-{react_config['user']['options']['theme']}" + context = { "CSRF_COOKIE_NAME": settings.CSRF_COOKIE_NAME, "meta_tags": [ @@ -100,7 +108,8 @@ def handle_react(self, request: Request, **kwargs) -> HttpResponse: # Since we already have it here from the OrganizationMixin, we can # save some work and render it faster. 
"org_context": org_context, - "react_config": get_client_config(request, org_context), + "react_config": react_config, + "user_theme": user_theme, } # Force a new CSRF token to be generated and set in user's diff --git a/src/sentry/web/frontend/twofactor.py b/src/sentry/web/frontend/twofactor.py index 5018e624860b80..a07c041b988ca8 100644 --- a/src/sentry/web/frontend/twofactor.py +++ b/src/sentry/web/frontend/twofactor.py @@ -1,6 +1,7 @@ import logging import time from base64 import b64encode +from urllib.parse import urlencode from django.http import HttpRequest, HttpResponse, HttpResponseRedirect from django.urls import reverse @@ -17,6 +18,7 @@ from sentry.utils.email import MessageBuilder from sentry.utils.geo import geo_by_addr from sentry.utils.http import absolute_uri +from sentry.web.client_config import get_client_config from sentry.web.forms.accounts import TwoFactorForm from sentry.web.frontend.base import BaseView, control_silo_view from sentry.web.helpers import render_to_response @@ -26,6 +28,36 @@ logger = logging.getLogger(__name__) +MFA_RATE_LIMITS = { + "auth-2fa:user:{user_id}": { + "limit": 5, + "window": 20, + }, + "auth-2fa-long:user:{user_id}": { + "limit": 20, + "window": 60 * 60, + }, +} + + +def is_rate_limited(user_id: int) -> bool: + result = False + for key_template, rl in MFA_RATE_LIMITS.items(): + result = result or ratelimiter.backend.is_limited( + key_template.format(user_id=user_id), + limit=rl["limit"], + window=rl["window"], + ) + return result + + +def reset_2fa_rate_limits(user_id: int): + for key_template, rl in MFA_RATE_LIMITS.items(): + ratelimiter.backend.reset( + key_template.format(user_id=user_id), + window=rl["window"], + ) + @control_silo_view class TwoFactorAuthView(BaseView): @@ -112,12 +144,16 @@ def validate_otp(self, otp, selected_interface, all_interfaces=None): return interface def send_notification_email(self, email, ip_address): + recover_uri = "{path}?{query}".format( + path=reverse("sentry-account-recover"), query=urlencode({"email": email}) + ) context = { "datetime": timezone.now(), "email": email, "geo": geo_by_addr(ip_address), "ip_address": ip_address, "url": absolute_uri(reverse("sentry-account-settings-security")), + "recover_url": absolute_uri(recover_uri), } subject = "Suspicious Activity Detected" @@ -146,13 +182,7 @@ def handle(self, request: HttpRequest) -> HttpResponse: challenge = activation = None interface = self.negotiate_interface(request, interfaces) - is_rate_limited = ratelimiter.backend.is_limited( - f"auth-2fa:user:{user.id}", limit=5, window=20 - ) or ratelimiter.backend.is_limited( - f"auth-2fa-long:user:{user.id}", limit=20, window=60 * 60 - ) - - if request.method == "POST" and is_rate_limited: + if request.method == "POST" and is_rate_limited(user.id): # prevent spamming due to failed 2FA attempts if not ratelimiter.backend.is_limited( f"auth-2fa-failed-notification:user:{user.id}", limit=1, window=30 * 60 @@ -220,6 +250,7 @@ def handle(self, request: HttpRequest) -> HttpResponse: "interface": interface, "other_interfaces": self.get_other_interfaces(interface, interfaces), "activation": activation, + "react_config": get_client_config(request, self.active_organization), }, request, status=200, diff --git a/src/sentry/web/urls.py b/src/sentry/web/urls.py index dd5361143698c0..69cbedc6987a02 100644 --- a/src/sentry/web/urls.py +++ b/src/sentry/web/urls.py @@ -623,6 +623,11 @@ react_page_view, name="sentry-customer-domain-audit-log-settings", ), + re_path( + r"^rate-limits/", + react_page_view, + 
name="sentry-customer-domain-rate-limits-settings", + ), re_path( r"^relay/", react_page_view, @@ -638,6 +643,16 @@ react_page_view, name="sentry-customer-domain-integrations-settings", ), + re_path( + r"^dynamic-sampling/", + react_page_view, + name="sentry-customer-domain-dynamic-sampling-settings", + ), + re_path( + r"^feature-flags/", + react_page_view, + name="sentry-customer-domain-feature-flags-settings", + ), re_path( r"^developer-settings/", react_page_view, @@ -678,11 +693,6 @@ react_page_view, name="sentry-customer-domain-legal-settings", ), - re_path( - r"^dynamic-sampling/", - react_page_view, - name="sentry-customer-domain-dynamic-sampling-settings", - ), re_path( r"^(?P[\w_-]+)/$", react_page_view, diff --git a/src/sentry/workflow_engine/endpoints/serializers.py b/src/sentry/workflow_engine/endpoints/serializers.py index af736bb054be54..e3975982f4e8fa 100644 --- a/src/sentry/workflow_engine/endpoints/serializers.py +++ b/src/sentry/workflow_engine/endpoints/serializers.py @@ -58,7 +58,7 @@ class DataConditionSerializer(Serializer): def serialize(self, obj: DataCondition, *args, **kwargs) -> dict[str, Any]: return { "id": str(obj.id), - "condition": obj.condition, + "condition": obj.type, "comparison": obj.comparison, "result": obj.condition_result, } diff --git a/src/sentry/workflow_engine/endpoints/validators.py b/src/sentry/workflow_engine/endpoints/validators.py index 79312091bb79be..4a1829a7f971c6 100644 --- a/src/sentry/workflow_engine/endpoints/validators.py +++ b/src/sentry/workflow_engine/endpoints/validators.py @@ -37,7 +37,7 @@ def create(self) -> T: class BaseDataConditionValidator(CamelSnakeSerializer): - condition = serializers.CharField( + type = serializers.CharField( required=True, max_length=200, help_text="Condition used to compare data value to the stored comparison value", @@ -51,14 +51,8 @@ def comparison(self) -> Field: def result(self) -> Field: raise NotImplementedError - @property - def type(self) -> str: - # TODO: This should probably at least be an enum - raise NotImplementedError - def validate(self, attrs): attrs = super().validate(attrs) - attrs["type"] = self.type return attrs @@ -84,15 +78,15 @@ def supported_conditions(self) -> frozenset[Condition]: def supported_results(self) -> frozenset[DetectorPriorityLevel]: raise NotImplementedError - def validate_condition(self, value: str) -> Condition: + def validate_type(self, value: str) -> Condition: try: - condition = Condition(value) + type = Condition(value) except ValueError: - condition = None + type = None - if condition not in self.supported_conditions: - raise serializers.ValidationError(f"Unsupported condition {value}") - return condition + if type not in self.supported_conditions: + raise serializers.ValidationError(f"Unsupported type {value}") + return type def validate_result(self, value: str) -> DetectorPriorityLevel: try: @@ -160,7 +154,6 @@ def create(self, validated_data): ) for condition in validated_data["data_conditions"]: DataCondition.objects.create( - condition=condition["condition"], comparison=condition["comparison"], condition_result=condition["result"], type=condition["type"], diff --git a/src/sentry/workflow_engine/handlers/__init__.py b/src/sentry/workflow_engine/handlers/__init__.py index 4fc3428a0ce1e4..50363cc9ce97a4 100644 --- a/src/sentry/workflow_engine/handlers/__init__.py +++ b/src/sentry/workflow_engine/handlers/__init__.py @@ -1,5 +1,9 @@ # Export any handlers we want to include into the registry -__all__ = ["NotificationActionHandler", 
"GroupEventConditionHandler"] +__all__ = [ + "NotificationActionHandler", + "EventCreatedByDetectorConditionHandler", + "EventSeenCountConditionHandler", +] from .action import NotificationActionHandler -from .condition import GroupEventConditionHandler +from .condition import EventCreatedByDetectorConditionHandler, EventSeenCountConditionHandler diff --git a/src/sentry/workflow_engine/handlers/action/notification.py b/src/sentry/workflow_engine/handlers/action/notification.py index 97e8cf84f39818..91b8bfcc96e718 100644 --- a/src/sentry/workflow_engine/handlers/action/notification.py +++ b/src/sentry/workflow_engine/handlers/action/notification.py @@ -1,14 +1,15 @@ -from sentry.eventstore.models import GroupEvent from sentry.workflow_engine.models import Action, Detector from sentry.workflow_engine.registry import action_handler_registry -from sentry.workflow_engine.types import ActionHandler +from sentry.workflow_engine.types import ActionHandler, WorkflowJob -@action_handler_registry.register(Action.Type.NOTIFICATION) +# TODO - Enable once the PR to allow for multiple of the same funcs is merged +# @action_handler_registry.register(Action.Type.PAGERDUTY) +@action_handler_registry.register(Action.Type.SLACK) class NotificationActionHandler(ActionHandler): @staticmethod def execute( - evt: GroupEvent, + job: WorkflowJob, action: Action, detector: Detector, ) -> None: diff --git a/src/sentry/workflow_engine/handlers/condition/__init__.py b/src/sentry/workflow_engine/handlers/condition/__init__.py index 85a4596d38b75e..cc0ca9879d61bb 100644 --- a/src/sentry/workflow_engine/handlers/condition/__init__.py +++ b/src/sentry/workflow_engine/handlers/condition/__init__.py @@ -1,5 +1,25 @@ __all__ = [ - "GroupEventConditionHandler", + "EventCreatedByDetectorConditionHandler", + "EventSeenCountConditionHandler", + "EveryEventConditionHandler", + "ReappearedEventConditionHandler", + "RegressionEventConditionHandler", + "ExistingHighPriorityIssueConditionHandler", + "EventAttributeConditionHandler", + "FirstSeenEventConditionHandler", + "NewHighPriorityIssueConditionHandler", ] -from .group_event import GroupEventConditionHandler +from .group_event_handlers import ( + EventAttributeConditionHandler, + EventCreatedByDetectorConditionHandler, + EventSeenCountConditionHandler, + EveryEventConditionHandler, +) +from .group_state_handlers import ( + ExistingHighPriorityIssueConditionHandler, + FirstSeenEventConditionHandler, + NewHighPriorityIssueConditionHandler, + ReappearedEventConditionHandler, + RegressionEventConditionHandler, +) diff --git a/src/sentry/workflow_engine/handlers/condition/group_event.py b/src/sentry/workflow_engine/handlers/condition/group_event.py deleted file mode 100644 index e392db084cfdd9..00000000000000 --- a/src/sentry/workflow_engine/handlers/condition/group_event.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Any - -from sentry.eventstore.models import GroupEvent -from sentry.workflow_engine.models.data_condition import Condition -from sentry.workflow_engine.registry import condition_handler_registry -from sentry.workflow_engine.types import DataConditionHandler - - -def get_nested_value(data: Any, path: str, default: Any = None) -> Any | None: - try: - value = data - for part in path.split("."): - if hasattr(value, part): - value = getattr(value, part) - elif hasattr(value, "get"): - value = value.get(part) - else: - return default - return value - except Exception: - return default - - -@condition_handler_registry.register(Condition.GROUP_EVENT_ATTR_COMPARISON) 
-class GroupEventConditionHandler(DataConditionHandler[GroupEvent]): - @staticmethod - def evaluate_value(data: GroupEvent, comparison: Any, data_filter: str) -> bool: - event_value = get_nested_value(data, data_filter) - return event_value == comparison diff --git a/src/sentry/workflow_engine/handlers/condition/group_event_handlers.py b/src/sentry/workflow_engine/handlers/condition/group_event_handlers.py new file mode 100644 index 00000000000000..f825f7eb900154 --- /dev/null +++ b/src/sentry/workflow_engine/handlers/condition/group_event_handlers.py @@ -0,0 +1,85 @@ +from typing import Any + +import sentry_sdk + +from sentry.eventstore.models import GroupEvent +from sentry.rules import MatchType, match_values +from sentry.rules.conditions.event_attribute import attribute_registry +from sentry.utils.registry import NoRegistrationExistsError +from sentry.workflow_engine.models.data_condition import Condition +from sentry.workflow_engine.registry import condition_handler_registry +from sentry.workflow_engine.types import DataConditionHandler, WorkflowJob + + +@condition_handler_registry.register(Condition.EVENT_CREATED_BY_DETECTOR) +class EventCreatedByDetectorConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + event = job["event"] + if event.occurrence is None or event.occurrence.evidence_data is None: + return False + + return event.occurrence.evidence_data.get("detector_id", None) == comparison + + +@condition_handler_registry.register(Condition.EVERY_EVENT) +class EveryEventConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + return True + + +@condition_handler_registry.register(Condition.EVENT_SEEN_COUNT) +class EventSeenCountConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + event = job["event"] + return event.group.times_seen == comparison + + +@condition_handler_registry.register(Condition.EVENT_ATTRIBUTE) +class EventAttributeConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def get_attribute_values(event: GroupEvent, attribute: str) -> list[str]: + path = attribute.split(".") + first_attribute = path[0] + try: + attribute_handler = attribute_registry.get(first_attribute) + except NoRegistrationExistsError: + attribute_handler = None + + if not attribute_handler: + attribute_values = [] + else: + try: + attribute_values = attribute_handler.handle(path, event) + except KeyError as e: + attribute_values = [] + sentry_sdk.capture_exception(e) + + attribute_values = [str(value).lower() for value in attribute_values if value is not None] + + return attribute_values + + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + event = job["event"] + attribute = comparison.get("attribute", "") + attribute_values = EventAttributeConditionHandler.get_attribute_values(event, attribute) + + match = comparison.get("match") + desired_value = comparison.get("value") + if not (match and desired_value) and not (match in (MatchType.IS_SET, MatchType.NOT_SET)): + return False + + desired_value = str(desired_value).lower() + + # NOTE: IS_SET condition differs btw tagged_event and event_attribute so not handled by match_values + if match == MatchType.IS_SET: + return bool(attribute_values) + elif match == MatchType.NOT_SET: + return not attribute_values + + return match_values( + group_values=attribute_values, 
match_value=desired_value, match_type=match + ) diff --git a/src/sentry/workflow_engine/handlers/condition/group_state_handlers.py b/src/sentry/workflow_engine/handlers/condition/group_state_handlers.py new file mode 100644 index 00000000000000..481de774e49243 --- /dev/null +++ b/src/sentry/workflow_engine/handlers/condition/group_state_handlers.py @@ -0,0 +1,73 @@ +from typing import Any + +from sentry.types.group import PriorityLevel +from sentry.workflow_engine.models.data_condition import Condition +from sentry.workflow_engine.registry import condition_handler_registry +from sentry.workflow_engine.types import DataConditionHandler, WorkflowJob + + +def is_new_event(job: WorkflowJob) -> bool: + state = job.get("group_state") + if state is None: + return False + + workflow = job.get("workflow") + if workflow is None or workflow.environment_id is None: + return state["is_new"] + + return state["is_new_group_environment"] + + +@condition_handler_registry.register(Condition.REGRESSION_EVENT) +class RegressionEventConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + state = job.get("group_state") + if state is None: + return False + + return state["is_regression"] == comparison + + +@condition_handler_registry.register(Condition.REAPPEARED_EVENT) +class ReappearedEventConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + has_reappeared = job.get("has_reappeared") + if has_reappeared is None: + return False + + return has_reappeared == comparison + + +@condition_handler_registry.register(Condition.EXISTING_HIGH_PRIORITY_ISSUE) +class ExistingHighPriorityIssueConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + state = job.get("group_state") + if state is None or state["is_new"]: + return False + + has_reappeared = job.get("has_reappeared", False) + has_escalated = job.get("has_escalated", False) + is_escalating = has_reappeared or has_escalated + return is_escalating and job["event"].group.priority == PriorityLevel.HIGH + + +@condition_handler_registry.register(Condition.FIRST_SEEN_EVENT) +class FirstSeenEventConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + return is_new_event(job) + + +@condition_handler_registry.register(Condition.NEW_HIGH_PRIORITY_ISSUE) +class NewHighPriorityIssueConditionHandler(DataConditionHandler[WorkflowJob]): + @staticmethod + def evaluate_value(job: WorkflowJob, comparison: Any) -> bool: + is_new = is_new_event(job) + event = job["event"] + if not event.project.flags.has_high_priority_alerts: + return is_new + + return is_new and event.group.priority == PriorityLevel.HIGH diff --git a/src/sentry/workflow_engine/handlers/detector/stateful.py b/src/sentry/workflow_engine/handlers/detector/stateful.py index 8171787ad38d46..67781e8795ad41 100644 --- a/src/sentry/workflow_engine/handlers/detector/stateful.py +++ b/src/sentry/workflow_engine/handlers/detector/stateful.py @@ -54,7 +54,7 @@ def get_dedupe_value(self, data_packet: DataPacket[T]) -> int: pass @abc.abstractmethod - def get_group_key_values(self, data_packet: DataPacket[T]) -> dict[str, int]: + def get_group_key_values(self, data_packet: DataPacket[T]) -> dict[DetectorGroupKey, int]: """ Extracts the values for all the group keys that exist in the given data packet, and returns 
then as a dict keyed by group_key. @@ -70,6 +70,9 @@ def build_occurrence_and_event_data( def build_fingerprint(self, group_key) -> list[str]: """ Builds a fingerprint to uniquely identify a detected issue + + TODO - Take into account the data source / query that triggered the detector, + we'll want to create a new issue if the query changes. """ return [f"{self.detector.id}{':' + group_key if group_key is not None else ''}"] @@ -84,13 +87,17 @@ def get_state_data( group_key_detectors = self.bulk_get_detector_state(group_keys) dedupe_keys = [self.build_dedupe_value_key(gk) for gk in group_keys] pipeline = get_redis_client().pipeline() + for dk in dedupe_keys: pipeline.get(dk) + group_key_dedupe_values = { gk: int(dv) if dv else 0 for gk, dv in zip(group_keys, pipeline.execute()) } + pipeline.reset() counter_updates = {} + if self.counter_names: counter_keys = [ self.build_counter_value_key(gk, name) @@ -117,7 +124,7 @@ def get_state_data( else DetectorPriorityLevel.OK ), dedupe_value=group_key_dedupe_values[gk], - counter_updates=counter_updates[gk], + counter_updates=counter_updates.get(gk, {}), ) return results diff --git a/src/sentry/workflow_engine/migration_helpers/alert_rule.py b/src/sentry/workflow_engine/migration_helpers/alert_rule.py new file mode 100644 index 00000000000000..79f2fdb43452bd --- /dev/null +++ b/src/sentry/workflow_engine/migration_helpers/alert_rule.py @@ -0,0 +1,157 @@ +from sentry.incidents.grouptype import MetricAlertFire +from sentry.incidents.models.alert_rule import AlertRule +from sentry.snuba.models import QuerySubscription, SnubaQuery +from sentry.users.services.user import RpcUser +from sentry.workflow_engine.models import ( + AlertRuleDetector, + AlertRuleWorkflow, + DataConditionGroup, + DataSource, + Detector, + DetectorState, + DetectorWorkflow, + Workflow, + WorkflowDataConditionGroup, +) +from sentry.workflow_engine.types import DetectorPriorityLevel + + +def create_metric_alert_lookup_tables( + alert_rule: AlertRule, + detector: Detector, + workflow: Workflow, + data_source: DataSource, + data_condition_group: DataConditionGroup, +) -> tuple[AlertRuleDetector, AlertRuleWorkflow, DetectorWorkflow, WorkflowDataConditionGroup]: + alert_rule_detector = AlertRuleDetector.objects.create(alert_rule=alert_rule, detector=detector) + alert_rule_workflow = AlertRuleWorkflow.objects.create(alert_rule=alert_rule, workflow=workflow) + detector_workflow = DetectorWorkflow.objects.create(detector=detector, workflow=workflow) + workflow_data_condition_group = WorkflowDataConditionGroup.objects.create( + condition_group=data_condition_group, workflow=workflow + ) + return ( + alert_rule_detector, + alert_rule_workflow, + detector_workflow, + workflow_data_condition_group, + ) + + +def create_data_source( + organization_id: int, snuba_query: SnubaQuery | None = None +) -> DataSource | None: + if not snuba_query: + return None + + try: + query_subscription = QuerySubscription.objects.get(snuba_query=snuba_query.id) + except QuerySubscription.DoesNotExist: + return None + + return DataSource.objects.create( + organization_id=organization_id, + query_id=query_subscription.id, + type="snuba_query_subscription", + ) + + +def create_data_condition_group(organization_id: int) -> DataConditionGroup: + return DataConditionGroup.objects.create( + logic_type=DataConditionGroup.Type.ANY, + organization_id=organization_id, + ) + + +def create_workflow( + name: str, + organization_id: int, + data_condition_group: DataConditionGroup, + user: RpcUser | None = None, +) -> Workflow: 
+ return Workflow.objects.create( + name=name, + organization_id=organization_id, + when_condition_group=data_condition_group, + enabled=True, + created_by_id=user.id if user else None, + config={}, + ) + + +def create_detector( + alert_rule: AlertRule, + project_id: int, + data_condition_group: DataConditionGroup, + user: RpcUser | None = None, +) -> Detector: + return Detector.objects.create( + project_id=project_id, + enabled=True, + created_by_id=user.id if user else None, + name=alert_rule.name, + workflow_condition_group=data_condition_group, + type=MetricAlertFire.slug, + description=alert_rule.description, + owner_user_id=alert_rule.user_id, + owner_team=alert_rule.team, + config={ # TODO create a schema + "threshold_period": alert_rule.threshold_period, + "sensitivity": alert_rule.sensitivity, + "seasonality": alert_rule.seasonality, + "comparison_delta": alert_rule.comparison_delta, + }, + ) + + +def migrate_alert_rule( + alert_rule: AlertRule, + user: RpcUser | None = None, +) -> ( + tuple[ + DataSource, + DataConditionGroup, + Workflow, + Detector, + DetectorState, + AlertRuleDetector, + AlertRuleWorkflow, + DetectorWorkflow, + WorkflowDataConditionGroup, + ] + | None +): + organization_id = alert_rule.organization_id + project = alert_rule.projects.first() + if not project: + return None + + data_source = create_data_source(organization_id, alert_rule.snuba_query) + if not data_source: + return None + + data_condition_group = create_data_condition_group(organization_id) + workflow = create_workflow(alert_rule.name, organization_id, data_condition_group, user) + detector = create_detector(alert_rule, project.id, data_condition_group, user) + + data_source.detectors.set([detector]) + detector_state = DetectorState.objects.create( + detector=detector, + active=False, + state=DetectorPriorityLevel.OK, + ) + alert_rule_detector, alert_rule_workflow, detector_workflow, workflow_data_condition_group = ( + create_metric_alert_lookup_tables( + alert_rule, detector, workflow, data_source, data_condition_group + ) + ) + return ( + data_source, + data_condition_group, + workflow, + detector, + detector_state, + alert_rule_detector, + alert_rule_workflow, + detector_workflow, + workflow_data_condition_group, + ) diff --git a/src/sentry/workflow_engine/migration_helpers/issue_alert_conditions.py b/src/sentry/workflow_engine/migration_helpers/issue_alert_conditions.py new file mode 100644 index 00000000000000..bd6c7bfd1e1484 --- /dev/null +++ b/src/sentry/workflow_engine/migration_helpers/issue_alert_conditions.py @@ -0,0 +1,107 @@ +from collections.abc import Callable +from typing import Any + +from sentry.rules.conditions.event_attribute import EventAttributeCondition +from sentry.rules.conditions.every_event import EveryEventCondition +from sentry.rules.conditions.existing_high_priority_issue import ExistingHighPriorityIssueCondition +from sentry.rules.conditions.first_seen_event import FirstSeenEventCondition +from sentry.rules.conditions.new_high_priority_issue import NewHighPriorityIssueCondition +from sentry.rules.conditions.reappeared_event import ReappearedEventCondition +from sentry.rules.conditions.regression_event import RegressionEventCondition +from sentry.utils.registry import Registry +from sentry.workflow_engine.models.data_condition import Condition, DataCondition +from sentry.workflow_engine.models.data_condition_group import DataConditionGroup + +data_condition_translator_registry = Registry[ + Callable[[dict[str, Any], DataConditionGroup], DataCondition] +]() + + +def 
translate_to_data_condition(data: dict[str, Any], dcg: DataConditionGroup): + translator = data_condition_translator_registry.get(data["id"]) + return translator(data, dcg) + + +@data_condition_translator_registry.register(ReappearedEventCondition.id) +def create_reappeared_event_data_condition( + data: dict[str, Any], dcg: DataConditionGroup +) -> DataCondition: + return DataCondition.objects.create( + type=Condition.REAPPEARED_EVENT, + comparison=True, + condition_result=True, + condition_group=dcg, + ) + + +@data_condition_translator_registry.register(RegressionEventCondition.id) +def create_regressed_event_data_condition( + data: dict[str, Any], dcg: DataConditionGroup +) -> DataCondition: + return DataCondition.objects.create( + type=Condition.REGRESSION_EVENT, + comparison=True, + condition_result=True, + condition_group=dcg, + ) + + +@data_condition_translator_registry.register(EveryEventCondition.id) +def create_every_event_data_condition( + data: dict[str, Any], dcg: DataConditionGroup +) -> DataCondition: + return DataCondition.objects.create( + type=Condition.EVERY_EVENT, + comparison=True, + condition_result=True, + condition_group=dcg, + ) + + +@data_condition_translator_registry.register(ExistingHighPriorityIssueCondition.id) +def create_existing_high_priority_issue_data_condition( + data: dict[str, Any], dcg: DataConditionGroup +) -> DataCondition: + return DataCondition.objects.create( + type=Condition.EXISTING_HIGH_PRIORITY_ISSUE, + comparison=True, + condition_result=True, + condition_group=dcg, + ) + + +@data_condition_translator_registry.register(EventAttributeCondition.id) +def create_event_attribute_data_condition( + data: dict[str, Any], dcg: DataConditionGroup +) -> DataCondition: + comparison = {"match": data["match"], "value": data["value"], "attribute": data["attribute"]} + return DataCondition.objects.create( + type=Condition.EVENT_ATTRIBUTE, + comparison=comparison, + condition_result=True, + condition_group=dcg, + ) + + +@data_condition_translator_registry.register(FirstSeenEventCondition.id) +def create_first_seen_event_data_condition( + data: dict[str, Any], dcg: DataConditionGroup +) -> DataCondition: + return DataCondition.objects.create( + type=Condition.FIRST_SEEN_EVENT, + comparison=True, + condition_result=True, + condition_group=dcg, + ) + + +@data_condition_translator_registry.register(NewHighPriorityIssueCondition.id) +def create_new_high_priority_issue_condition( + data: dict[str, Any], dcg: DataConditionGroup +) -> DataCondition: + return DataCondition.objects.create( + type=Condition.NEW_HIGH_PRIORITY_ISSUE, + comparison=True, + condition_result=True, + condition_group=dcg, + ) diff --git a/src/sentry/workflow_engine/migrations/0016_refactor_action_model.py b/src/sentry/workflow_engine/migrations/0016_refactor_action_model.py new file mode 100644 index 00000000000000..15511e36ffd139 --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0016_refactor_action_model.py @@ -0,0 +1,33 @@ +# Generated by Django 5.1.4 on 2024-12-17 03:31 + +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. 
+ # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("workflow_engine", "0015_create_rule_lookup_tables"), + ] + + operations = [ + migrations.AddField( + model_name="action", + name="legacy_notification_type", + field=models.TextField(null=True), + ), + ] diff --git a/src/sentry/workflow_engine/migrations/0017_ref_data_condition.py b/src/sentry/workflow_engine/migrations/0017_ref_data_condition.py new file mode 100644 index 00000000000000..45cc499ced28fb --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0017_ref_data_condition.py @@ -0,0 +1,38 @@ +# Generated by Django 5.1.4 on 2024-12-18 05:36 + +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("workflow_engine", "0016_refactor_action_model"), + ] + + operations = [ + migrations.AlterField( + model_name="datacondition", + name="condition", + field=models.CharField(max_length=200, null=True), + ), + migrations.AlterField( + model_name="datacondition", + name="type", + field=models.CharField(default="eq", max_length=200), + ), + ] diff --git a/src/sentry/workflow_engine/migrations/0018_rm_data_condition_condition.py b/src/sentry/workflow_engine/migrations/0018_rm_data_condition_condition.py new file mode 100644 index 00000000000000..bc82885d5bdaaf --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0018_rm_data_condition_condition.py @@ -0,0 +1,33 @@ +# Generated by Django 5.1.4 on 2024-12-18 22:21 + +from sentry.new_migrations.migrations import CheckedMigration +from sentry.new_migrations.monkey.fields import SafeRemoveField +from sentry.new_migrations.monkey.state import DeletionAction + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. 
So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("workflow_engine", "0017_ref_data_condition"), + ] + + operations = [ + SafeRemoveField( + model_name="datacondition", + name="condition", + deletion_action=DeletionAction.MOVE_TO_PENDING, + ), + ] diff --git a/src/sentry/workflow_engine/migrations/0019_drop_dataconditions_condition.py b/src/sentry/workflow_engine/migrations/0019_drop_dataconditions_condition.py new file mode 100644 index 00000000000000..9c96936d1ec154 --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0019_drop_dataconditions_condition.py @@ -0,0 +1,33 @@ +# Generated by Django 5.1.4 on 2024-12-19 19:56 + +from sentry.new_migrations.migrations import CheckedMigration +from sentry.new_migrations.monkey.fields import SafeRemoveField +from sentry.new_migrations.monkey.state import DeletionAction + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("workflow_engine", "0018_rm_data_condition_condition"), + ] + + operations = [ + SafeRemoveField( + model_name="datacondition", + name="condition", + deletion_action=DeletionAction.DELETE, + ), + ] diff --git a/src/sentry/workflow_engine/models/action.py b/src/sentry/workflow_engine/models/action.py index 027ef5cf11c933..efc7a1e3dabd9f 100644 --- a/src/sentry/workflow_engine/models/action.py +++ b/src/sentry/workflow_engine/models/action.py @@ -7,10 +7,9 @@ from sentry.backup.scopes import RelocationScope from sentry.db.models import DefaultFieldsModel, region_silo_model, sane_repr from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey -from sentry.eventstore.models import GroupEvent from sentry.notifications.models.notificationaction import ActionTarget from sentry.workflow_engine.registry import action_handler_registry -from sentry.workflow_engine.types import ActionHandler +from sentry.workflow_engine.types import ActionHandler, WorkflowJob if TYPE_CHECKING: from sentry.workflow_engine.models import Detector @@ -30,9 +29,15 @@ class Action(DefaultFieldsModel): __repr__ = sane_repr("id", "type") class Type(models.TextChoices): - NOTIFICATION = "notification" + EMAIL = "email" + SLACK = "slack" + PAGERDUTY = "pagerduty" WEBHOOK = "webhook" + class LegacyNotificationType(models.TextChoices): + ISSUE_ALERT = "issue" + METRIC_ALERT = "metric" + # The type field is used to denote the type of action we want to trigger type = models.TextField(choices=Type.choices) data = models.JSONField(default=dict) @@ -46,6 +51,13 @@ class Type(models.TextChoices): "sentry.Integration", blank=True, null=True, on_delete="CASCADE" ) + # LEGACY: The legacy_notification_type is used to denote if this notification was for an issue alert, metric alert, etc. 
+ # We need this because of how tightly coupled the notification system is with the legacy alert models + legacy_notification_type = models.TextField( + null=True, + choices=LegacyNotificationType.choices, + ) + # LEGACY: The target_display is used to display the target's name in notifications target_display = models.TextField(null=True) @@ -59,7 +71,7 @@ def get_handler(self) -> ActionHandler: action_type = Action.Type(self.type) return action_handler_registry.get(action_type) - def trigger(self, evt: GroupEvent, detector: Detector) -> None: + def trigger(self, job: WorkflowJob, detector: Detector) -> None: # get the handler for the action type handler = self.get_handler() - handler.execute(evt, self, detector) + handler.execute(job, self, detector) diff --git a/src/sentry/workflow_engine/models/data_condition.py b/src/sentry/workflow_engine/models/data_condition.py index 9ed3efc66c5c55..22aecf98180a67 100644 --- a/src/sentry/workflow_engine/models/data_condition.py +++ b/src/sentry/workflow_engine/models/data_condition.py @@ -1,7 +1,5 @@ import logging import operator -from collections.abc import Callable -from enum import StrEnum from typing import Any, TypeVar, cast from django.db import models @@ -10,23 +8,27 @@ from sentry.db.models import DefaultFieldsModel, region_silo_model, sane_repr from sentry.utils.registry import NoRegistrationExistsError from sentry.workflow_engine.registry import condition_handler_registry -from sentry.workflow_engine.types import ( - DataConditionHandler, - DataConditionResult, - DetectorPriorityLevel, -) +from sentry.workflow_engine.types import DataConditionResult, DetectorPriorityLevel logger = logging.getLogger(__name__) -class Condition(StrEnum): +class Condition(models.TextChoices): EQUAL = "eq" GREATER_OR_EQUAL = "gte" GREATER = "gt" LESS_OR_EQUAL = "lte" LESS = "lt" NOT_EQUAL = "ne" - GROUP_EVENT_ATTR_COMPARISON = "group_event_attr_comparison" + EVENT_ATTRIBUTE = "event_attribute" + EVENT_CREATED_BY_DETECTOR = "event_created_by_detector" + EVENT_SEEN_COUNT = "event_seen_count" + EVERY_EVENT = "every_event" + EXISTING_HIGH_PRIORITY_ISSUE = "existing_high_priority_issue" + FIRST_SEEN_EVENT = "first_seen_event" + NEW_HIGH_PRIORITY_ISSUE = "new_high_priority_issue" + REGRESSION_EVENT = "regression_event" + REAPPEARED_EVENT = "reappeared_event" condition_ops = { @@ -50,9 +52,6 @@ class DataCondition(DefaultFieldsModel): __relocation_scope__ = RelocationScope.Organization __repr__ = sane_repr("type", "condition", "condition_group") - # The condition is the logic condition that needs to be met, gt, lt, eq, etc. - condition = models.CharField(max_length=200) - # The comparison is the value that the condition is compared to for the evaluation, this must be a primitive value comparison = models.JSONField() @@ -60,7 +59,7 @@ class DataCondition(DefaultFieldsModel): condition_result = models.JSONField() # The type of condition, this is used to initialize the condition classes - type = models.CharField(max_length=200) + type = models.CharField(max_length=200, choices=Condition.choices, default=Condition.EQUAL) condition_group = models.ForeignKey( "workflow_engine.DataConditionGroup", @@ -85,44 +84,31 @@ def get_condition_result(self) -> DataConditionResult: return None - def get_condition_handler(self) -> DataConditionHandler[T] | None: + def evaluate_value(self, value: T) -> DataConditionResult: try: condition_type = Condition(self.type) except ValueError: - # If the type isn't in the condition, then it won't be in the registry either. 
- raise NoRegistrationExistsError(f"No registration exists for {self.type}") - - return condition_handler_registry.get(condition_type) + logger.exception( + "Invalid condition type", + extra={"type": self.type, "id": self.id}, + ) + return None - def evaluate_value(self, value: T) -> DataConditionResult: - condition_handler: DataConditionHandler[T] | None = None - op: Callable | None = None + if condition_type in condition_ops: + # If the condition is a base type, handle it directly + op = condition_ops[Condition(self.type)] + result = op(cast(Any, value), self.comparison) + return self.get_condition_result() if result else None + # Otherwise, we need to get the handler and evaluate the value try: - # Use a custom hanler - condition_handler = self.get_condition_handler() + handler = condition_handler_registry.get(condition_type) except NoRegistrationExistsError: - # If it's not a custom handler, use the default operators - condition = Condition(self.condition) - op = condition_ops.get(condition, None) - - if condition_handler is not None: - result = condition_handler.evaluate_value(value, self.comparison, self.condition) - elif op is not None: - result = op(cast(Any, value), self.comparison) - else: - logger.error( - "Invalid Data Condition Evaluation", - extra={ - "id": self.id, - "type": self.type, - "condition": self.condition, - }, + logger.exception( + "No registration exists for condition", + extra={"type": self.type, "id": self.id}, ) - return None - if result: - return self.get_condition_result() - - return None + result = handler.evaluate_value(value, self.comparison) + return self.get_condition_result() if result else None diff --git a/src/sentry/workflow_engine/models/data_source.py b/src/sentry/workflow_engine/models/data_source.py index ebfbc63fd18f14..0ebf62f600ddd4 100644 --- a/src/sentry/workflow_engine/models/data_source.py +++ b/src/sentry/workflow_engine/models/data_source.py @@ -3,6 +3,8 @@ from typing import Generic, TypeVar from django.db import models +from django.db.models.signals import pre_save +from django.dispatch import receiver from sentry.backup.scopes import RelocationScope from sentry.db.models import ( @@ -11,6 +13,7 @@ FlexibleForeignKey, region_silo_model, ) +from sentry.utils.registry import NoRegistrationExistsError from sentry.workflow_engine.models.data_source_detector import DataSourceDetector from sentry.workflow_engine.registry import data_source_type_registry from sentry.workflow_engine.types import DataSourceTypeHandler @@ -29,7 +32,11 @@ class DataSource(DefaultFieldsModel): __relocation_scope__ = RelocationScope.Organization organization = FlexibleForeignKey("sentry.Organization") + + # Should this be a string so we can support UUID / ints? 
query_id = BoundedBigIntegerField() + + # This is a dynamic field, depending on the type in the data_source_type_registry type = models.TextField() detectors = models.ManyToManyField("workflow_engine.Detector", through=DataSourceDetector) @@ -45,3 +52,19 @@ def type_handler(self) -> builtins.type[DataSourceTypeHandler]: if not handler: raise ValueError(f"Unknown data source type: {self.type}") return handler + + +@receiver(pre_save, sender=DataSource) +def ensure_type_handler_registered(sender, instance: DataSource, **kwargs): + """ + Ensure that the type of the data source is valid and registered in the data_source_type_registry + """ + data_source_type = instance.type + + if not data_source_type: + raise ValueError(f"No group type found with type {instance.type}") + + try: + data_source_type_registry.get(data_source_type) + except NoRegistrationExistsError: + raise ValueError(f"No data source type found with type {data_source_type}") diff --git a/src/sentry/workflow_engine/models/detector.py b/src/sentry/workflow_engine/models/detector.py index c11a3851b608ce..0cc2145a06a1c5 100644 --- a/src/sentry/workflow_engine/models/detector.py +++ b/src/sentry/workflow_engine/models/detector.py @@ -7,6 +7,8 @@ from django.conf import settings from django.db import models from django.db.models import UniqueConstraint +from django.db.models.signals import pre_save +from django.dispatch import receiver from sentry.backup.scopes import RelocationScope from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model @@ -53,17 +55,12 @@ class Detector(DefaultFieldsModel, OwnerModel, JSONConfigBase): on_delete=models.SET_NULL, ) - # The type of detector that is being used, this is used to determine the class - # to load for the detector + # maps to registry (sentry.issues.grouptype.registry) entries for GroupType.slug in sentry.issues.grouptype.GroupType type = models.CharField(max_length=200) # The user that created the detector created_by_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL") - @property - def CONFIG_SCHEMA(self) -> dict[str, Any]: - raise NotImplementedError('Subclasses must define a "CONFIG_SCHEMA" attribute') - class Meta(OwnerModel.Meta): constraints = OwnerModel.Meta.constraints + [ UniqueConstraint( @@ -83,7 +80,6 @@ def detector_handler(self) -> DetectorHandler | None: logger.error( "No registered grouptype for detector", extra={ - "group_type": str(group_type), "detector_id": self.id, "detector_type": self.type, }, @@ -105,3 +101,18 @@ def detector_handler(self) -> DetectorHandler | None: def get_audit_log_data(self) -> dict[str, Any]: # TODO: Create proper audit log data for the detector, group and conditions return {} + + +@receiver(pre_save, sender=Detector) +def enforce_config_schema(sender, instance: Detector, **kwargs): + """ + Ensures the detector type is valid in the grouptype registry. + This needs to be a signal because the grouptype registry's entries are not available at import time. 
+ """ + group_type = instance.group_type + + if not group_type: + raise ValueError(f"No group type found with type {instance.type}") + + config_schema = group_type.detector_config_schema + instance.validate_config(config_schema) diff --git a/src/sentry/workflow_engine/models/json_config.py b/src/sentry/workflow_engine/models/json_config.py index 1b353ccf18cbcb..b62b936d9c4007 100644 --- a/src/sentry/workflow_engine/models/json_config.py +++ b/src/sentry/workflow_engine/models/json_config.py @@ -1,4 +1,3 @@ -from abc import abstractproperty from typing import Any from django.db import models @@ -8,13 +7,9 @@ class JSONConfigBase(models.Model): config = models.JSONField(db_default={}) - @abstractproperty - def CONFIG_SCHEMA(self) -> dict[str, Any]: - pass - - def validate_config(self) -> None: + def validate_config(self, schema: dict[str, Any]) -> None: try: - validate(self.config, self.CONFIG_SCHEMA) + validate(self.config, schema) except ValidationError as e: raise ValidationError(f"Invalid config: {e.message}") diff --git a/src/sentry/workflow_engine/models/workflow.py b/src/sentry/workflow_engine/models/workflow.py index 13483e258f1532..b37dece9419179 100644 --- a/src/sentry/workflow_engine/models/workflow.py +++ b/src/sentry/workflow_engine/models/workflow.py @@ -2,13 +2,15 @@ from django.conf import settings from django.db import models +from django.db.models.signals import pre_save +from django.dispatch import receiver from sentry.backup.scopes import RelocationScope from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model, sane_repr from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey -from sentry.eventstore.models import GroupEvent from sentry.models.owner_base import OwnerModel from sentry.workflow_engine.processors.data_condition_group import evaluate_condition_group +from sentry.workflow_engine.types import WorkflowJob from .json_config import JSONConfigBase @@ -35,8 +37,20 @@ class Workflow(DefaultFieldsModel, OwnerModel, JSONConfigBase): created_by_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL") @property - def CONFIG_SCHEMA(self) -> dict[str, Any]: - raise NotImplementedError('Subclasses must define a "CONFIG_SCHEMA" attribute') + def config_schema(self) -> dict[str, Any]: + return { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Workflow Schema", + "type": "object", + "properties": { + "frequency": { + "description": "How often the workflow should fire for a Group (minutes)", + "type": "integer", + "minimum": 0, + }, + }, + "additionalProperties": False, + } __repr__ = sane_repr("name", "organization_id") @@ -50,7 +64,7 @@ class Meta: ) ] - def evaluate_trigger_conditions(self, evt: GroupEvent) -> bool: + def evaluate_trigger_conditions(self, job: WorkflowJob) -> bool: """ Evaluate the conditions for the workflow trigger and return if the evaluation was successful. If there aren't any workflow trigger conditions, the workflow is considered triggered. 
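As a rough, self-contained illustration of what the `config_schema` above accepts (assuming `validate_config` delegates to the `jsonschema` package, as the `validate`/`ValidationError` usage in `json_config.py` suggests; the schema literal below simply mirrors the `frequency` property and is not taken from this patch):

from jsonschema import ValidationError, validate

# Mirrors the Workflow config schema: only an optional, non-negative integer "frequency".
schema = {
    "type": "object",
    "properties": {"frequency": {"type": "integer", "minimum": 0}},
    "additionalProperties": False,
}

validate({"frequency": 30}, schema)  # passes
validate({}, schema)                 # passes; "frequency" is optional

try:
    validate({"cadence": 30}, schema)  # unknown keys are rejected
except ValidationError:
    pass

With the pre_save signal shown in the next hunk wiring this schema into `validate_config`, an invalid `config` would be rejected before the Workflow row is saved.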
@@ -58,5 +72,11 @@ def evaluate_trigger_conditions(self, evt: GroupEvent) -> bool: if self.when_condition_group is None: return True - evaluation, _ = evaluate_condition_group(self.when_condition_group, evt) + job["workflow"] = self + evaluation, _ = evaluate_condition_group(self.when_condition_group, job) return evaluation + + +@receiver(pre_save, sender=Workflow) +def enforce_config_schema(sender, instance: Workflow, **kwargs): + instance.validate_config(instance.config_schema) diff --git a/src/sentry/workflow_engine/processors/__init__.py b/src/sentry/workflow_engine/processors/__init__.py index 700cd48361de44..0dca1394898aeb 100644 --- a/src/sentry/workflow_engine/processors/__init__.py +++ b/src/sentry/workflow_engine/processors/__init__.py @@ -1,6 +1,8 @@ __all__ = [ "process_data_sources", "process_detectors", + "process_workflows", + "process_data_packet", ] from .data_source import process_data_sources diff --git a/src/sentry/workflow_engine/processors/action.py b/src/sentry/workflow_engine/processors/action.py index 0e57ee44441aea..bf03015bf37444 100644 --- a/src/sentry/workflow_engine/processors/action.py +++ b/src/sentry/workflow_engine/processors/action.py @@ -1,11 +1,11 @@ from sentry.db.models.manager.base_query_set import BaseQuerySet -from sentry.eventstore.models import GroupEvent from sentry.workflow_engine.models import Action, DataConditionGroup, Workflow from sentry.workflow_engine.processors.data_condition_group import evaluate_condition_group +from sentry.workflow_engine.types import WorkflowJob def evaluate_workflow_action_filters( - workflows: set[Workflow], evt: GroupEvent + workflows: set[Workflow], job: WorkflowJob ) -> BaseQuerySet[Action]: filtered_action_groups: set[DataConditionGroup] = set() @@ -17,7 +17,7 @@ def evaluate_workflow_action_filters( ).distinct() for action_condition in action_conditions: - evaluation, result = evaluate_condition_group(action_condition, evt) + evaluation, result = evaluate_condition_group(action_condition, job) if evaluation: filtered_action_groups.add(action_condition) diff --git a/src/sentry/workflow_engine/processors/data_packet.py b/src/sentry/workflow_engine/processors/data_packet.py new file mode 100644 index 00000000000000..35997e02f627e3 --- /dev/null +++ b/src/sentry/workflow_engine/processors/data_packet.py @@ -0,0 +1,24 @@ +from sentry.workflow_engine.handlers.detector import DetectorEvaluationResult +from sentry.workflow_engine.models import DataPacket, Detector +from sentry.workflow_engine.processors.data_source import process_data_sources +from sentry.workflow_engine.processors.detector import process_detectors +from sentry.workflow_engine.types import DetectorGroupKey + + +def process_data_packets( + data_packets: list[DataPacket], query_type: str +) -> list[tuple[Detector, dict[DetectorGroupKey, DetectorEvaluationResult]]]: + """ + This method ties the two main pre-processing methods together to process + the incoming data and create issue occurrences. 
+ """ + processed_sources = process_data_sources(data_packets, query_type) + + results: list[tuple[Detector, dict[DetectorGroupKey, DetectorEvaluationResult]]] = [] + for data_packet, detectors in processed_sources: + detector_results = process_detectors(data_packet, detectors) + + for detector, detector_state in detector_results: + results.append((detector, detector_state)) + + return results diff --git a/src/sentry/workflow_engine/processors/data_source.py b/src/sentry/workflow_engine/processors/data_source.py index f7f375ccf59892..5df9711f4b7775 100644 --- a/src/sentry/workflow_engine/processors/data_source.py +++ b/src/sentry/workflow_engine/processors/data_source.py @@ -14,7 +14,8 @@ def process_data_sources( ) -> list[tuple[DataPacket, list[Detector]]]: metrics.incr("sentry.workflow_engine.process_data_sources", tags={"query_type": query_type}) - data_packet_ids = {packet.query_id for packet in data_packets} + # TODO - change data_source.query_id to be a string to support UUIDs + data_packet_ids = {int(packet.query_id) for packet in data_packets} # Fetch all data sources and associated detectors for the given data packets with sentry_sdk.start_span(op="sentry.workflow_engine.process_data_sources.fetch_data_sources"): @@ -23,12 +24,12 @@ def process_data_sources( ).prefetch_related(Prefetch("detectors")) # Build a lookup dict for query_id to detectors - query_id_to_detectors = {ds.query_id: list(ds.detectors.all()) for ds in data_sources} + query_id_to_detectors = {int(ds.query_id): list(ds.detectors.all()) for ds in data_sources} # Create the result tuples result = [] for packet in data_packets: - detectors = query_id_to_detectors.get(packet.query_id) + detectors = query_id_to_detectors.get(int(packet.query_id)) if detectors: data_packet_tuple = (packet, detectors) diff --git a/src/sentry/workflow_engine/processors/detector.py b/src/sentry/workflow_engine/processors/detector.py index 60bb85e0988190..b0a038fdbcb5c6 100644 --- a/src/sentry/workflow_engine/processors/detector.py +++ b/src/sentry/workflow_engine/processors/detector.py @@ -2,22 +2,23 @@ import logging -from sentry.eventstore.models import GroupEvent +from sentry.issues.grouptype import ErrorGroupType from sentry.issues.issue_occurrence import IssueOccurrence from sentry.issues.producer import PayloadType, produce_occurrence_to_kafka from sentry.workflow_engine.handlers.detector import DetectorEvaluationResult from sentry.workflow_engine.models import DataPacket, Detector -from sentry.workflow_engine.types import DetectorGroupKey, DetectorType +from sentry.workflow_engine.types import DetectorGroupKey, WorkflowJob logger = logging.getLogger(__name__) # TODO - cache these by evt.group_id? 
:thinking: -def get_detector_by_event(evt: GroupEvent) -> Detector: +def get_detector_by_event(job: WorkflowJob) -> Detector: + evt = job["event"] issue_occurrence = evt.occurrence if issue_occurrence is None: - detector = Detector.objects.get(project_id=evt.project_id, type=DetectorType.ERROR) + detector = Detector.objects.get(project_id=evt.project_id, type=ErrorGroupType.slug) else: detector = Detector.objects.get(id=issue_occurrence.evidence_data.get("detector_id", None)) diff --git a/src/sentry/workflow_engine/processors/workflow.py b/src/sentry/workflow_engine/processors/workflow.py index effc18173780a6..be2fe0f88e4e92 100644 --- a/src/sentry/workflow_engine/processors/workflow.py +++ b/src/sentry/workflow_engine/processors/workflow.py @@ -2,26 +2,26 @@ import sentry_sdk -from sentry.eventstore.models import GroupEvent from sentry.utils import metrics from sentry.workflow_engine.models import Detector, Workflow from sentry.workflow_engine.processors.action import evaluate_workflow_action_filters from sentry.workflow_engine.processors.detector import get_detector_by_event +from sentry.workflow_engine.types import WorkflowJob logger = logging.getLogger(__name__) -def evaluate_workflow_triggers(workflows: set[Workflow], evt: GroupEvent) -> set[Workflow]: +def evaluate_workflow_triggers(workflows: set[Workflow], job: WorkflowJob) -> set[Workflow]: triggered_workflows: set[Workflow] = set() for workflow in workflows: - if workflow.evaluate_trigger_conditions(evt): + if workflow.evaluate_trigger_conditions(job): triggered_workflows.add(workflow) return triggered_workflows -def process_workflows(evt: GroupEvent) -> set[Workflow]: +def process_workflows(job: WorkflowJob) -> set[Workflow]: """ This method will get the detector based on the event, and then gather the associated workflows. 
Next, it will evaluate the "when" (or trigger) conditions for each workflow, if the conditions are met, @@ -31,19 +31,19 @@ def process_workflows(evt: GroupEvent) -> set[Workflow]: """ # Check to see if the GroupEvent has an issue occurrence try: - detector = get_detector_by_event(evt) + detector = get_detector_by_event(job) except Detector.DoesNotExist: metrics.incr("workflow_engine.process_workflows.error") - logger.exception("Detector not found for event", extra={"event_id": evt.event_id}) + logger.exception("Detector not found for event", extra={"event_id": job["event"].event_id}) return set() # Get the workflows, evaluate the when_condition_group, finally evaluate the actions for workflows that are triggered workflows = set(Workflow.objects.filter(detectorworkflow__detector_id=detector.id).distinct()) - triggered_workflows = evaluate_workflow_triggers(workflows, evt) - actions = evaluate_workflow_action_filters(triggered_workflows, evt) + triggered_workflows = evaluate_workflow_triggers(workflows, job) + actions = evaluate_workflow_action_filters(triggered_workflows, job) with sentry_sdk.start_span(op="workflow_engine.process_workflows.trigger_actions"): for action in actions: - action.trigger(evt, detector) + action.trigger(job, detector) return triggered_workflows diff --git a/src/sentry/workflow_engine/registry.py b/src/sentry/workflow_engine/registry.py index 2af6b070a1bcf8..ece9577feadd00 100644 --- a/src/sentry/workflow_engine/registry.py +++ b/src/sentry/workflow_engine/registry.py @@ -1,8 +1,6 @@ -from typing import Any - from sentry.utils.registry import Registry from sentry.workflow_engine.types import ActionHandler, DataConditionHandler, DataSourceTypeHandler data_source_type_registry = Registry[type[DataSourceTypeHandler]]() -condition_handler_registry = Registry[DataConditionHandler[Any]]() +condition_handler_registry = Registry[DataConditionHandler]() action_handler_registry = Registry[ActionHandler]() diff --git a/src/sentry/workflow_engine/types.py b/src/sentry/workflow_engine/types.py index 50dc15a7010f22..b40f54c2a97448 100644 --- a/src/sentry/workflow_engine/types.py +++ b/src/sentry/workflow_engine/types.py @@ -1,13 +1,14 @@ from __future__ import annotations -from enum import IntEnum, StrEnum -from typing import TYPE_CHECKING, Any, Generic, TypeVar +from enum import IntEnum +from typing import TYPE_CHECKING, Any, Generic, TypedDict, TypeVar from sentry.types.group import PriorityLevel if TYPE_CHECKING: from sentry.eventstore.models import GroupEvent - from sentry.workflow_engine.models import Action, Detector + from sentry.eventstream.base import GroupState + from sentry.workflow_engine.models import Action, Detector, Workflow T = TypeVar("T") @@ -28,9 +29,22 @@ class DetectorPriorityLevel(IntEnum): ProcessedDataConditionResult = tuple[bool, list[DataConditionResult]] +class EventJob(TypedDict): + event: GroupEvent + + +class WorkflowJob(EventJob, total=False): + group_state: GroupState + is_reprocessed: bool + has_reappeared: bool + has_alert: bool + has_escalated: bool + workflow: Workflow + + class ActionHandler: @staticmethod - def execute(group_event: GroupEvent, action: Action, detector: Detector) -> None: + def execute(job: WorkflowJob, action: Action, detector: Detector) -> None: raise NotImplementedError @@ -42,9 +56,5 @@ def bulk_get_query_object(data_sources) -> dict[int, T | None]: class DataConditionHandler(Generic[T]): @staticmethod - def evaluate_value(value: T, comparison: Any, condition: str) -> DataConditionResult: + def evaluate_value(value: T, 
comparison: Any) -> DataConditionResult: raise NotImplementedError - - -class DetectorType(StrEnum): - ERROR = "ErrorDetector" diff --git a/src/sentry_plugins/heroku/plugin.py b/src/sentry_plugins/heroku/plugin.py index 12d587dd348ee4..6c535bb6a9dfff 100644 --- a/src/sentry_plugins/heroku/plugin.py +++ b/src/sentry_plugins/heroku/plugin.py @@ -11,7 +11,6 @@ from sentry.models.apikey import ApiKey from sentry.models.options.project_option import ProjectOption from sentry.models.repository import Repository -from sentry.plugins.base.configuration import react_plugin_config from sentry.plugins.bases.releasetracking import ReleaseTrackingPlugin from sentry.plugins.interfaces.releasehook import ReleaseHook from sentry.users.services.user.service import user_service @@ -152,9 +151,6 @@ class HerokuPlugin(CorePluginMixin, ReleaseTrackingPlugin): ) ] - def configure(self, project, request): - return react_plugin_config(self, project, request) - def can_enable_for_projects(self): return True diff --git a/src/sentry_plugins/sessionstack/plugin.py b/src/sentry_plugins/sessionstack/plugin.py index d38b36f6e6f36b..54603def4efc8b 100644 --- a/src/sentry_plugins/sessionstack/plugin.py +++ b/src/sentry_plugins/sessionstack/plugin.py @@ -7,7 +7,6 @@ from sentry.integrations.base import FeatureDescription, IntegrationFeatures from sentry.interfaces.contexts import ContextType from sentry.models.project import Project -from sentry.plugins.base.configuration import react_plugin_config from sentry.plugins.base.v2 import EventPreprocessor, Plugin2 from sentry.utils.settings import is_self_hosted from sentry_plugins.base import CorePluginMixin @@ -50,9 +49,6 @@ class SessionStackPlugin(CorePluginMixin, Plugin2): def get_resource_links(self): return self.resource_links + self.sessionstack_resource_links - def configure(self, project, request): - return react_plugin_config(self, project, request) - def has_project_conf(self): return True diff --git a/static/app/__mocks__/react-lazyload.tsx b/static/app/__mocks__/react-lazyload.tsx index f4d66de2bb7605..a5be44b10d53e9 100644 --- a/static/app/__mocks__/react-lazyload.tsx +++ b/static/app/__mocks__/react-lazyload.tsx @@ -4,7 +4,7 @@ * These mocks are simple no-ops to make testing lazy-loaded components simpler. */ -const LazyLoad = ({children}) => children; +const LazyLoad = ({children}: {children: React.ReactNode}) => children; export const forceCheck = jest.fn(); diff --git a/static/app/actionCreators/dashboards.tsx b/static/app/actionCreators/dashboards.tsx index 845fe1c8227db6..3c7a6c902bde9b 100644 --- a/static/app/actionCreators/dashboards.tsx +++ b/static/app/actionCreators/dashboards.tsx @@ -1,6 +1,6 @@ import omit from 'lodash/omit'; -import {addErrorMessage} from 'sentry/actionCreators/indicator'; +import {addErrorMessage, addSuccessMessage} from 'sentry/actionCreators/indicator'; import type {Client} from 'sentry/api'; import {ALL_ACCESS_PROJECTS} from 'sentry/constants/pageFilters'; import {t} from 'sentry/locale'; @@ -27,7 +27,7 @@ export function fetchDashboards(api: Client, orgSlug: string) { if (errorResponse) { const errors = flattenErrors(errorResponse, {}); - addErrorMessage(errors[Object.keys(errors)[0]] as string); + addErrorMessage(errors[Object.keys(errors)[0]!] 
as string); } else { addErrorMessage(t('Unable to fetch dashboards')); } @@ -73,7 +73,7 @@ export function createDashboard( if (errorResponse) { const errors = flattenErrors(errorResponse, {}); - addErrorMessage(errors[Object.keys(errors)[0]] as string); + addErrorMessage(errors[Object.keys(errors)[0]!] as string); } else { addErrorMessage(t('Unable to create dashboard')); } @@ -113,11 +113,12 @@ export async function updateDashboardFavorite( }, } ); + addSuccessMessage(isFavorited ? t('Added as favorite') : t('Removed as favorite')); } catch (response) { const errorResponse = response?.responseJSON ?? null; if (errorResponse) { const errors = flattenErrors(errorResponse, {}); - addErrorMessage(errors[Object.keys(errors)[0]] as string); + addErrorMessage(errors[Object.keys(errors)[0]!]! as string); } else if (isFavorited) { addErrorMessage(t('Unable to favorite dashboard')); } else { @@ -144,7 +145,7 @@ export function fetchDashboard( if (errorResponse) { const errors = flattenErrors(errorResponse, {}); - addErrorMessage(errors[Object.keys(errors)[0]] as string); + addErrorMessage(errors[Object.keys(errors)[0]!] as string); } else { addErrorMessage(t('Unable to load dashboard')); } @@ -191,7 +192,7 @@ export function updateDashboard( if (errorResponse) { const errors = flattenErrors(errorResponse, {}); - addErrorMessage(errors[Object.keys(errors)[0]] as string); + addErrorMessage(errors[Object.keys(errors)[0]!] as string); } else { addErrorMessage(t('Unable to update dashboard')); } @@ -217,7 +218,7 @@ export function deleteDashboard( if (errorResponse) { const errors = flattenErrors(errorResponse, {}); - addErrorMessage(errors[Object.keys(errors)[0]] as string); + addErrorMessage(errors[Object.keys(errors)[0]!] as string); } else { addErrorMessage(t('Unable to delete dashboard')); } @@ -269,7 +270,7 @@ export function updateDashboardPermissions( if (errorResponse) { const errors = flattenErrors(errorResponse, {}); - addErrorMessage(errors[Object.keys(errors)[0]] as string); + addErrorMessage(errors[Object.keys(errors)[0]!]! as string); } else { addErrorMessage(t('Unable to update dashboard permissions')); } diff --git a/static/app/actionCreators/integrations.tsx b/static/app/actionCreators/integrations.tsx index 3cc29123dcd076..ca6da8c9c2b959 100644 --- a/static/app/actionCreators/integrations.tsx +++ b/static/app/actionCreators/integrations.tsx @@ -4,84 +4,10 @@ import { addSuccessMessage, clearIndicators, } from 'sentry/actionCreators/indicator'; -import {Client} from 'sentry/api'; +import type {Client} from 'sentry/api'; import {t, tct} from 'sentry/locale'; import type {Integration, Repository} from 'sentry/types/integrations'; -const api = new Client(); - -/** - * Removes an integration from a project. 
- * - * @param orgSlug Organization Slug - * @param projectId Project Slug - * @param integration The organization integration to remove - */ -export function removeIntegrationFromProject( - orgSlug: string, - projectId: string, - integration: Integration -) { - const endpoint = `/projects/${orgSlug}/${projectId}/integrations/${integration.id}/`; - addLoadingMessage(); - - return api.requestPromise(endpoint, {method: 'DELETE'}).then( - () => { - addSuccessMessage(t('Disabled %s for %s', integration.name, projectId)); - }, - () => { - addErrorMessage(t('Failed to disable %s for %s', integration.name, projectId)); - } - ); -} - -/** - * Add an integration to a project - * - * @param orgSlug Organization Slug - * @param projectId Project Slug - * @param integration The organization integration to add - */ -export function addIntegrationToProject( - orgSlug: string, - projectId: string, - integration: Integration -) { - const endpoint = `/projects/${orgSlug}/${projectId}/integrations/${integration.id}/`; - addLoadingMessage(); - - return api.requestPromise(endpoint, {method: 'PUT'}).then( - () => { - addSuccessMessage(t('Enabled %s for %s', integration.name, projectId)); - }, - () => { - addErrorMessage(t('Failed to enabled %s for %s', integration.name, projectId)); - } - ); -} - -/** - * Delete a respository - * - * @param client ApiClient - * @param orgSlug Organization Slug - * @param repositoryId Repository ID - */ -export function deleteRepository(client: Client, orgSlug: string, repositoryId: string) { - addLoadingMessage(); - const promise = client.requestPromise( - `/organizations/${orgSlug}/repos/${repositoryId}/`, - { - method: 'DELETE', - } - ); - promise.then( - () => clearIndicators(), - () => addErrorMessage(t('Unable to delete repository.')) - ); - return promise; -} - /** * Cancel the deletion of a respository * diff --git a/static/app/actionCreators/members.tsx b/static/app/actionCreators/members.tsx index 61bf7ef0ff92ed..27b48e37389a9e 100644 --- a/static/app/actionCreators/members.tsx +++ b/static/app/actionCreators/members.tsx @@ -67,7 +67,7 @@ export function indexMembersByProject(members: Member[]): IndexedMembersByProjec acc[project] = []; } if (member.user) { - acc[project].push(member.user); + acc[project]!.push(member.user); } } return acc; diff --git a/static/app/actionCreators/modal.tsx b/static/app/actionCreators/modal.tsx index f94ad494a63351..255d41d9330cbf 100644 --- a/static/app/actionCreators/modal.tsx +++ b/static/app/actionCreators/modal.tsx @@ -1,12 +1,15 @@ import type {Location} from 'history'; import type {ModalTypes} from 'sentry/components/globalModal'; +import type {AddToDashboardModalProps as CreateDashboardFromMetricsModalProps} from 'sentry/components/modals/createDashboardFromMetricsModal'; import type {CreateNewIntegrationModalOptions} from 'sentry/components/modals/createNewIntegrationModal'; import type {CreateReleaseIntegrationModalOptions} from 'sentry/components/modals/createReleaseIntegrationModal'; import type {DashboardWidgetQuerySelectorModalOptions} from 'sentry/components/modals/dashboardWidgetQuerySelectorModal'; +import type {ImportDashboardFromFileModalProps} from 'sentry/components/modals/importDashboardFromFileModal'; import type {InsightChartModalOptions} from 'sentry/components/modals/insightChartModal'; import type {InviteRow} from 'sentry/components/modals/inviteMembersModal/types'; import type {ReprocessEventModalOptions} from 'sentry/components/modals/reprocessEventModal'; +import type {AddToDashboardModalProps} from 
'sentry/components/modals/widgetBuilder/addToDashboardModal'; import type {OverwriteWidgetModalProps} from 'sentry/components/modals/widgetBuilder/overwriteWidgetModal'; import type {WidgetViewerModalOptions} from 'sentry/components/modals/widgetViewerModal'; import type {Category} from 'sentry/components/platformPicker'; @@ -245,7 +248,7 @@ export async function openWidgetBuilderOverwriteModal( }); } -export async function openAddToDashboardModal(options) { +export async function openAddToDashboardModal(options: AddToDashboardModalProps) { const mod = await import('sentry/components/modals/widgetBuilder/addToDashboardModal'); const {default: Modal, modalCss} = mod; @@ -255,7 +258,9 @@ export async function openAddToDashboardModal(options) { }); } -export async function openImportDashboardFromFileModal(options) { +export async function openImportDashboardFromFileModal( + options: ImportDashboardFromFileModalProps +) { const mod = await import('sentry/components/modals/importDashboardFromFileModal'); const {default: Modal, modalCss} = mod; @@ -265,7 +270,9 @@ export async function openImportDashboardFromFileModal(options) { }); } -export async function openCreateDashboardFromMetrics(options) { +export async function openCreateDashboardFromMetrics( + options: CreateDashboardFromMetricsModalProps +) { const mod = await import('sentry/components/modals/createDashboardFromMetricsModal'); const {default: Modal, modalCss} = mod; @@ -388,3 +395,13 @@ export async function openInsightChartModal(options: InsightChartModalOptions) { openModal(deps => , {modalCss}); } + +export async function openAddTempestCredentialsModal(options: { + organization: Organization; + project: Project; +}) { + const mod = await import('sentry/components/modals/addTempestCredentialsModal'); + const {default: Modal} = mod; + + openModal(deps => ); +} diff --git a/static/app/actionCreators/monitors.tsx b/static/app/actionCreators/monitors.tsx index 400dcaa98654c7..0a98c36450abad 100644 --- a/static/app/actionCreators/monitors.tsx +++ b/static/app/actionCreators/monitors.tsx @@ -70,7 +70,7 @@ export async function updateMonitor( // If we are updating a single value in the monitor we can read the // validation error for that key, otherwise fallback to the default error const validationError = - updateKeys.length === 1 ? respError.responseJSON?.[updateKeys[0]]?.[0] : undefined; + updateKeys.length === 1 ? respError.responseJSON?.[updateKeys[0]!]?.[0] : undefined; logException(err); addErrorMessage(validationError ?? 
t('Unable to update monitor.')); diff --git a/static/app/actionCreators/organization.tsx b/static/app/actionCreators/organization.tsx index e76f2c6e13a3a4..7d6845c1eaf6b1 100644 --- a/static/app/actionCreators/organization.tsx +++ b/static/app/actionCreators/organization.tsx @@ -15,9 +15,13 @@ import TeamStore from 'sentry/stores/teamStore'; import type {Organization, Team} from 'sentry/types/organization'; import type {Project} from 'sentry/types/project'; import FeatureFlagOverrides from 'sentry/utils/featureFlagOverrides'; -import FeatureObserver from 'sentry/utils/featureObserver'; +import { + addOrganizationFeaturesHandler, + buildSentryFeaturesHandler, +} from 'sentry/utils/featureFlags'; import {getPreloadedDataPromise} from 'sentry/utils/getPreloadedData'; import parseLinkHeader from 'sentry/utils/parseLinkHeader'; +import type RequestError from 'sentry/utils/requestError/requestError'; async function fetchOrg( api: Client, @@ -42,8 +46,9 @@ async function fetchOrg( } FeatureFlagOverrides.singleton().loadOrg(org); - FeatureObserver.singleton({}).observeOrganizationFlags({ + addOrganizationFeaturesHandler({ organization: org, + handler: buildSentryFeaturesHandler('feature.organizations:'), }); OrganizationStore.onUpdate(org, {replace: true}); @@ -139,7 +144,7 @@ export function fetchOrganizationDetails( PageFiltersStore.onReset(); } - const getErrorMessage = err => { + const getErrorMessage = (err: RequestError) => { if (typeof err.responseJSON?.detail === 'string') { return err.responseJSON?.detail; } diff --git a/static/app/actionCreators/organizations.spec.tsx b/static/app/actionCreators/organizations.spec.tsx index deceb5f403dd28..822f1bdbaae26d 100644 --- a/static/app/actionCreators/organizations.spec.tsx +++ b/static/app/actionCreators/organizations.spec.tsx @@ -2,7 +2,6 @@ import {OrganizationFixture} from 'sentry-fixture/organization'; import {fetchOrganizations} from 'sentry/actionCreators/organizations'; import ConfigStore from 'sentry/stores/configStore'; -import {browserHistory} from 'sentry/utils/browserHistory'; describe('fetchOrganizations', function () { const api = new MockApiClient(); @@ -76,6 +75,5 @@ describe('fetchOrganizations', function () { expect(usMock).toHaveBeenCalledTimes(1); expect(deMock).toHaveBeenCalledTimes(1); expect(window.location.reload).not.toHaveBeenCalled(); - expect(browserHistory.replace).not.toHaveBeenCalled(); }); }); diff --git a/static/app/actionCreators/organizations.tsx b/static/app/actionCreators/organizations.tsx index b26bec04438edf..5be559be701a4e 100644 --- a/static/app/actionCreators/organizations.tsx +++ b/static/app/actionCreators/organizations.tsx @@ -1,3 +1,5 @@ +import type {NavigateFunction} from 'react-router-dom'; + import {addErrorMessage, addSuccessMessage} from 'sentry/actionCreators/indicator'; import {resetPageFilters} from 'sentry/actionCreators/pageFilters'; import type {Client} from 'sentry/api'; @@ -18,7 +20,10 @@ type RedirectRemainingOrganizationParams = { * The organization slug */ orgId: string; - + /** + * navigate function from useNavigate + */ + navigate?: NavigateFunction; /** * Should remove org? */ @@ -32,6 +37,7 @@ type RedirectRemainingOrganizationParams = { * Can optionally remove organization from organizations store. 
*/ export function redirectToRemainingOrganization({ + navigate, orgId, removeOrg, }: RedirectRemainingOrganizationParams) { @@ -40,12 +46,17 @@ export function redirectToRemainingOrganization({ org => org.status.id === 'active' && org.slug !== orgId ); if (!allOrgs.length) { - browserHistory.push('/organizations/new/'); + if (navigate) { + navigate('/organizations/new/'); + } else { + browserHistory.push('/organizations/new/'); + } + return; } // Let's be smart and select the best org to redirect to - const firstRemainingOrg = allOrgs[0]; + const firstRemainingOrg = allOrgs[0]!; const route = `/organizations/${firstRemainingOrg.slug}/issues/`; if (USING_CUSTOMER_DOMAIN) { @@ -54,7 +65,11 @@ export function redirectToRemainingOrganization({ return; } - browserHistory.push(route); + if (navigate) { + navigate(route); + } else { + browserHistory.push(route); + } // Remove org from SidebarDropdown if (removeOrg) { diff --git a/static/app/actionCreators/pageFilters.spec.tsx b/static/app/actionCreators/pageFilters.spec.tsx index 5fb56853b7a600..1c587d4c71e3dc 100644 --- a/static/app/actionCreators/pageFilters.spec.tsx +++ b/static/app/actionCreators/pageFilters.spec.tsx @@ -32,7 +32,7 @@ describe('PageFilters ActionCreators', function () { }); describe('initializeUrlState', function () { - let router; + let router: ReturnType; const key = `global-selection:${organization.slug}`; beforeEach(() => { diff --git a/static/app/actionCreators/pageFilters.tsx b/static/app/actionCreators/pageFilters.tsx index 54783e899fcbb2..ecd873d685661f 100644 --- a/static/app/actionCreators/pageFilters.tsx +++ b/static/app/actionCreators/pageFilters.tsx @@ -319,7 +319,7 @@ export function initializeUrlState({ if (projects && projects.length > 0) { // If there is a list of projects from URL params, select first project // from that list - newProject = typeof projects === 'string' ? [Number(projects)] : [projects[0]]; + newProject = typeof projects === 'string' ? [Number(projects)] : [projects[0]!]; } else { // When we have finished loading the organization into the props, i.e. 
// the organization slug is consistent with the URL param--Sentry will diff --git a/static/app/actionCreators/projects.spec.tsx b/static/app/actionCreators/projects.spec.tsx index 6b3f3e3cb79b1f..c7eef90e1537de 100644 --- a/static/app/actionCreators/projects.spec.tsx +++ b/static/app/actionCreators/projects.spec.tsx @@ -14,7 +14,7 @@ describe('Projects ActionCreators', function () { expect(mock).not.toHaveBeenCalled(); _debouncedLoadStats(api, new Set([...Array(50)].map((_, i) => String(i))), { - projectId: project.id, + projectId: project!.id, orgId: organization.slug, }); @@ -38,7 +38,7 @@ describe('Projects ActionCreators', function () { expect(mock).not.toHaveBeenCalled(); _debouncedLoadStats(api, new Set(['1', '2', '3']), { - projectId: project.id, + projectId: project!.id, orgId: organization.slug, query: {transactionStats: '1'}, }); diff --git a/static/app/actionCreators/repositories.spec.tsx b/static/app/actionCreators/repositories.spec.tsx index 79531d204b8568..00d72c56a2c909 100644 --- a/static/app/actionCreators/repositories.spec.tsx +++ b/static/app/actionCreators/repositories.spec.tsx @@ -7,7 +7,7 @@ describe('RepositoryActionCreator', function () { const api = new MockApiClient(); const mockData = [{id: '1'}]; - let mockResponse; + let mockResponse: jest.Mock; beforeEach(() => { MockApiClient.clearMockResponses(); @@ -50,15 +50,15 @@ describe('RepositoryActionCreator', function () { expect(RepositoryStore.state.orgSlug).toEqual(orgSlug); expect(RepositoryStore.state.repositories).toEqual(mockData); - expect(RepositoryStore.state.repositoriesLoading).toEqual(false); + expect(RepositoryStore.state.repositoriesLoading).toBe(false); }); it('short-circuits the JS event loop', () => { - expect(RepositoryStore.state.repositoriesLoading).toEqual(undefined); + expect(RepositoryStore.state.repositoriesLoading).toBeUndefined(); getRepositories(api, {orgSlug}); // Fire Action.loadRepositories expect(RepositoryStore.loadRepositories).toHaveBeenCalled(); // expect(RepositoryStore.loadRepositories).not.toHaveBeenCalled(); - expect(RepositoryStore.state.repositoriesLoading).toEqual(true); // Short-circuit + expect(RepositoryStore.state.repositoriesLoading).toBe(true); // Short-circuit }); }); diff --git a/static/app/actionCreators/tags.tsx b/static/app/actionCreators/tags.tsx index 7a4224ef8560f9..58ad9a03ac3690 100644 --- a/static/app/actionCreators/tags.tsx +++ b/static/app/actionCreators/tags.tsx @@ -248,7 +248,7 @@ export const makeFetchOrganizationTags = ({ if (end) { query.end = end; } - return [`/organizations/${orgSlug}/tags/`, {query: query}]; + return [`/organizations/${orgSlug}/tags/`, {query}]; }; export const useFetchOrganizationTags = ( diff --git a/static/app/api.spec.tsx b/static/app/api.spec.tsx index 000801ab10375a..5886128d50cdd6 100644 --- a/static/app/api.spec.tsx +++ b/static/app/api.spec.tsx @@ -1,5 +1,6 @@ import {OrganizationFixture} from 'sentry-fixture/organization'; +import type {Client, ResponseMeta} from 'sentry/api'; import {isSimilarOrigin, Request, resolveHostname} from 'sentry/api'; import {PROJECT_MOVED} from 'sentry/constants/apiErrorCodes'; @@ -9,7 +10,7 @@ import OrganizationStore from './stores/organizationStore'; jest.unmock('sentry/api'); describe('api', function () { - let api; + let api: Client; beforeEach(function () { api = new MockApiClient(); @@ -41,7 +42,9 @@ describe('api', function () { it('does not call success callback if 302 was returned because of a project slug change', function () { const successCb = jest.fn(); - api.activeRequests = 
{id: {alive: true}}; + api.activeRequests = { + id: {alive: true, requestPromise: new Promise(() => null), cancel: jest.fn()}, + }; api.wrapCallback( 'id', successCb @@ -60,9 +63,9 @@ describe('api', function () { }); it('handles error callback', function () { - jest.spyOn(api, 'wrapCallback').mockImplementation((_id, func) => func); + jest.spyOn(api, 'wrapCallback').mockImplementation((_id: string, func: any) => func); const errorCb = jest.fn(); - const args = ['test', true, 1]; + const args = ['test', true, 1] as unknown as [ResponseMeta, string, string]; api.handleRequestError( { id: 'test', @@ -83,15 +86,18 @@ describe('api', function () { path: 'test', requestOptions: {}, }, - {}, - {} + {} as ResponseMeta, + '', + 'test' ) ).not.toThrow(); }); }); describe('resolveHostname', function () { - let devUi, location, configstate; + let devUi: boolean | undefined; + let location: Location; + let configstate: ReturnType; const controlPath = '/api/0/broadcasts/'; const regionPath = '/api/0/organizations/slug/issues/'; @@ -103,7 +109,7 @@ describe('resolveHostname', function () { ConfigStore.loadInitialData({ ...configstate, - features: ['system:multi-region'], + features: new Set(['system:multi-region']), links: { organizationUrl: 'https://acme.sentry.io', sentryUrl: 'https://sentry.io', @@ -122,7 +128,7 @@ describe('resolveHostname', function () { ConfigStore.loadInitialData({ ...configstate, // Remove the feature flag - features: [], + features: new Set(), }); let result = resolveHostname(controlPath); diff --git a/static/app/bootstrap/exportGlobals.tsx b/static/app/bootstrap/exportGlobals.tsx index 7b62c39158c546..69b9a121d592d9 100644 --- a/static/app/bootstrap/exportGlobals.tsx +++ b/static/app/bootstrap/exportGlobals.tsx @@ -1,18 +1,17 @@ import * as React from 'react'; -import {findDOMNode} from 'react-dom'; import {createRoot} from 'react-dom/client'; import * as Sentry from '@sentry/react'; import moment from 'moment-timezone'; import plugins from 'sentry/plugins'; -const globals = { +const globals: Record = { // The following globals are used in sentry-plugins webpack externals // configuration. 
React, Sentry, moment, - ReactDOM: {findDOMNode, createRoot}, + ReactDOM: {createRoot}, // django templates make use of these globals SentryApp: {}, @@ -44,6 +43,11 @@ const SentryApp = { }; globals.SentryApp = SentryApp; -Object.keys(globals).forEach(name => (window[name] = globals[name])); +Object.keys(globals).forEach(name => { + Object.defineProperty(window, name, { + value: globals[name], + writable: true, + }); +}); export {globals as exportedGlobals}; diff --git a/static/app/bootstrap/initializeSdk.spec.tsx b/static/app/bootstrap/initializeSdk.spec.tsx index ee8e6497143b46..886da24167208e 100644 --- a/static/app/bootstrap/initializeSdk.spec.tsx +++ b/static/app/bootstrap/initializeSdk.spec.tsx @@ -9,7 +9,7 @@ import { isFilteredRequestErrorEvent, } from './initializeSdk'; -const ERROR_MAP = { +const ERROR_MAP: Record = { ...origErrorMap, // remove `UndefinedResponseBodyError` since we don't filter those 200: undefined, diff --git a/static/app/bootstrap/initializeSdk.tsx b/static/app/bootstrap/initializeSdk.tsx index 80ecff878cc382..222fda527d0715 100644 --- a/static/app/bootstrap/initializeSdk.tsx +++ b/static/app/bootstrap/initializeSdk.tsx @@ -1,7 +1,6 @@ // eslint-disable-next-line simple-import-sort/imports import * as Sentry from '@sentry/react'; -import {_browserPerformanceTimeOriginMode} from '@sentry/utils'; -import type {Event} from '@sentry/types'; +import {type Event, _browserPerformanceTimeOriginMode} from '@sentry/core'; import {SENTRY_RELEASE_VERSION, SPA_DSN} from 'sentry/constants'; import type {Config} from 'sentry/types/system'; @@ -15,7 +14,6 @@ import { useNavigationType, } from 'react-router-dom'; import {useEffect} from 'react'; -import FeatureObserver from 'sentry/utils/featureObserver'; const SPA_MODE_ALLOW_URLS = [ 'localhost', @@ -62,17 +60,18 @@ function getSentryIntegrations() { depth: 6, }), Sentry.reactRouterV6BrowserTracingIntegration({ - useEffect: useEffect, - useLocation: useLocation, - useNavigationType: useNavigationType, - createRoutesFromChildren: createRoutesFromChildren, - matchRoutes: matchRoutes, + useEffect, + useLocation, + useNavigationType, + createRoutesFromChildren, + matchRoutes, }), Sentry.browserProfilingIntegration(), Sentry.thirdPartyErrorFilterIntegration({ filterKeys: ['sentry-spa'], behaviour: 'apply-tag-if-contains-third-party-frames', }), + Sentry.featureFlagsIntegration(), ]; return integrations; @@ -180,15 +179,8 @@ export function initializeSdk(config: Config) { handlePossibleUndefinedResponseBodyErrors(event); addEndpointTagToRequestError(event); - lastEventId = event.event_id || hint.event_id; - // attach feature flags to the event context - if (event.contexts) { - const flags = FeatureObserver.singleton({}).getFeatureFlags(); - event.contexts.flags = flags; - } - return event; }, }); @@ -225,7 +217,7 @@ export function initializeSdk(config: Config) { images.push({ type: 'sourcemap', code_file: filename, - debug_id: debugIdMap[filename], + debug_id: debugIdMap[filename]!, }); }); } catch (e) { @@ -318,7 +310,7 @@ function handlePossibleUndefinedResponseBodyErrors(event: Event): void { const causeErrorIsURBE = causeError?.type === 'UndefinedResponseBodyError'; if (mainErrorIsURBE || causeErrorIsURBE) { - mainError.type = 'UndefinedResponseBodyError'; + mainError!.type = 'UndefinedResponseBodyError'; event.tags = {...event.tags, undefinedResponseBody: true}; event.fingerprint = mainErrorIsURBE ? 
['UndefinedResponseBodyError as main error'] @@ -327,7 +319,7 @@ function handlePossibleUndefinedResponseBodyErrors(event: Event): void { } export function addEndpointTagToRequestError(event: Event): void { - const errorMessage = event.exception?.values?.[0].value || ''; + const errorMessage = event.exception?.values?.[0]!.value || ''; // The capturing group here turns `GET /dogs/are/great 500` into just `GET /dogs/are/great` const requestErrorRegex = new RegExp('^([A-Za-z]+ (/[^/]+)+/) \\d+$'); diff --git a/static/app/chartcuterie/config.tsx b/static/app/chartcuterie/config.tsx index 7a659d73952ecf..af2a13061dd446 100644 --- a/static/app/chartcuterie/config.tsx +++ b/static/app/chartcuterie/config.tsx @@ -8,7 +8,6 @@ * into the configuration file loaded by the service. */ -// eslint-disable-next-line import/no-named-default import {discoverCharts} from './discover'; import {metricAlertCharts} from './metricAlert'; import {performanceCharts} from './performance'; diff --git a/static/app/chartcuterie/discover.tsx b/static/app/chartcuterie/discover.tsx index 50959c108139fa..ade3f7bd7c8f6f 100644 --- a/static/app/chartcuterie/discover.tsx +++ b/static/app/chartcuterie/discover.tsx @@ -60,10 +60,12 @@ discoverCharts.push({ AreaSeries({ name: s.key, stack: 'area', - data: s.data.map(([timestamp, countsForTimestamp]) => [ - timestamp * 1000, - countsForTimestamp.reduce((acc, {count}) => acc + count, 0), - ]), + data: s.data.map( + ([timestamp, countsForTimestamp]: [number, {count: number}[]]) => [ + timestamp * 1000, + countsForTimestamp.reduce((acc, {count}) => acc + count, 0), + ] + ), lineStyle: {color: color?.[i], opacity: 1, width: 0.4}, areaStyle: {color: color?.[i], opacity: 1}, }) @@ -121,12 +123,14 @@ discoverCharts.push({ BarSeries({ name: s.key, stack: 'area', - data: s.data.map(([timestamp, countsForTimestamp]) => ({ - value: [ - timestamp * 1000, - countsForTimestamp.reduce((acc, {count}) => acc + count, 0), - ], - })), + data: s.data.map( + ([timestamp, countsForTimestamp]: [number, {count: number}[]]) => ({ + value: [ + timestamp * 1000, + countsForTimestamp.reduce((acc, {count}) => acc + count, 0), + ], + }) + ), itemStyle: {color: color?.[i], opacity: 1}, }) ); @@ -179,10 +183,12 @@ discoverCharts.push({ .map((topSeries, i) => AreaSeries({ stack: 'area', - data: topSeries.data.map(([timestamp, countsForTimestamp]) => [ - timestamp * 1000, - countsForTimestamp.reduce((acc, {count}) => acc + count, 0), - ]), + data: topSeries.data.map( + ([timestamp, countsForTimestamp]: [number, {count: number}[]]) => [ + timestamp * 1000, + countsForTimestamp.reduce((acc, {count}) => acc + count, 0), + ] + ), lineStyle: {color: color?.[i], opacity: 1, width: 0.4}, areaStyle: {color: color?.[i], opacity: 1}, }) @@ -235,10 +241,12 @@ discoverCharts.push({ .sort((a, b) => (a.order ?? 0) - (b.order ?? 
0)) .map((topSeries, i) => LineSeries({ - data: topSeries.data.map(([timestamp, countsForTimestamp]) => [ - timestamp * 1000, - countsForTimestamp.reduce((acc, {count}) => acc + count, 0), - ]), + data: topSeries.data.map( + ([timestamp, countsForTimestamp]: [number, {count: number}[]]) => [ + timestamp * 1000, + countsForTimestamp.reduce((acc, {count}) => acc + count, 0), + ] + ), lineStyle: {color: color?.[i], opacity: 1}, itemStyle: {color: color?.[i]}, }) @@ -292,10 +300,12 @@ discoverCharts.push({ .map((topSeries, i) => BarSeries({ stack: 'area', - data: topSeries.data.map(([timestamp, countsForTimestamp]) => [ - timestamp * 1000, - countsForTimestamp.reduce((acc, {count}) => acc + count, 0), - ]), + data: topSeries.data.map( + ([timestamp, countsForTimestamp]: [number, {count: number}[]]) => [ + timestamp * 1000, + countsForTimestamp.reduce((acc, {count}) => acc + count, 0), + ] + ), itemStyle: {color: color?.[i], opacity: 1}, }) ); @@ -336,7 +346,7 @@ discoverCharts.push({ const previousPeriod = LineSeries({ name: t('previous %s', data.seriesName), data: previous.map(([_, countsForTimestamp], i) => [ - current[i][0] * 1000, + current[i]![0] * 1000, countsForTimestamp.reduce((acc, {count}) => acc + count, 0), ]), lineStyle: {color: theme.gray200, type: 'dotted'}, @@ -372,7 +382,7 @@ discoverCharts.push({ stack: 'area', data: s.data .slice(dataMiddleIndex) - .map(([timestamp, countsForTimestamp]) => [ + .map(([timestamp, countsForTimestamp]: [number, {count: number}[]]) => [ timestamp * 1000, countsForTimestamp.reduce((acc, {count}) => acc + count, 0), ]), @@ -384,10 +394,12 @@ discoverCharts.push({ LineSeries({ name: t('previous %s', s.key), stack: 'previous', - data: previous.map(([_, countsForTimestamp], index) => [ - current[index][0] * 1000, - countsForTimestamp.reduce((acc, {count}) => acc + count, 0), - ]), + data: previous.map( + ([_, countsForTimestamp]: [number, {count: number}[]], index: number) => [ + current[index][0] * 1000, + countsForTimestamp.reduce((acc, {count}) => acc + count, 0), + ] + ), lineStyle: {color: previousPeriodColor?.[i], type: 'dotted'}, itemStyle: {color: previousPeriodColor?.[i]}, }) diff --git a/static/app/components/acl/feature.spec.tsx b/static/app/components/acl/feature.spec.tsx index e86b80fb1269a7..35fe469956882f 100644 --- a/static/app/components/acl/feature.spec.tsx +++ b/static/app/components/acl/feature.spec.tsx @@ -285,7 +285,7 @@ describe('Feature', function () { }); describe('using HookStore for renderDisabled', function () { - let hookFn; + let hookFn: jest.Mock; beforeEach(function () { hookFn = jest.fn(() => null); diff --git a/static/app/components/acl/feature.tsx b/static/app/components/acl/feature.tsx index 83547b2d78acbe..77a3f5b91061a0 100644 --- a/static/app/components/acl/feature.tsx +++ b/static/app/components/acl/feature.tsx @@ -141,12 +141,12 @@ class Feature extends Component { const shouldMatchOnlyProject = feature.match(/^projects:(.+)/); if (shouldMatchOnlyProject) { - return project.includes(shouldMatchOnlyProject[1]); + return project.includes(shouldMatchOnlyProject[1]!); } const shouldMatchOnlyOrg = feature.match(/^organizations:(.+)/); if (shouldMatchOnlyOrg) { - return organization.includes(shouldMatchOnlyOrg[1]); + return organization.includes(shouldMatchOnlyOrg[1]!); } // default, check all feature arrays @@ -186,7 +186,7 @@ class Feature extends Component { const hooks = HookStore.get(hookName); if (hooks.length > 0) { - customDisabledRender = hooks[0]; + customDisabledRender = hooks[0]!; } } const renderProps = 
{ diff --git a/static/app/components/acl/featureDisabledModal.spec.tsx b/static/app/components/acl/featureDisabledModal.spec.tsx index 18e40d393f9f00..d283812ee36e5a 100644 --- a/static/app/components/acl/featureDisabledModal.spec.tsx +++ b/static/app/components/acl/featureDisabledModal.spec.tsx @@ -1,4 +1,4 @@ -import type {ComponentProps} from 'react'; +import type {ComponentProps, PropsWithChildren} from 'react'; import styled from '@emotion/styled'; import {render, screen} from 'sentry-test/reactTestingLibrary'; @@ -8,7 +8,7 @@ import ModalStore from 'sentry/stores/modalStore'; describe('FeatureTourModal', function () { const onCloseModal = jest.fn(); - const styledWrapper = styled(c => c.children); + const styledWrapper = styled((c: PropsWithChildren) => c.children); const renderComponent = ( props: Partial> = {} ) => diff --git a/static/app/components/actions/archive.spec.tsx b/static/app/components/actions/archive.spec.tsx index 742681dfb0ea90..9e03ae6198d901 100644 --- a/static/app/components/actions/archive.spec.tsx +++ b/static/app/components/actions/archive.spec.tsx @@ -82,10 +82,10 @@ describe('ArchiveActions', () => { render(); await userEvent.click(screen.getByRole('button', {name: 'Archive options'})); expect( - screen.queryByRole('menuitemradio', {name: 'Until this occurs again\u2026'}) + screen.getByRole('menuitemradio', {name: 'Until this occurs again\u2026'}) ).toBeInTheDocument(); expect( - screen.queryByRole('menuitemradio', { + screen.getByRole('menuitemradio', { name: 'Until this affects an additional\u2026', }) ).toBeInTheDocument(); diff --git a/static/app/components/activity/note/input.tsx b/static/app/components/activity/note/input.tsx index 5e64720754d9ce..a1ac2f35ffcc8f 100644 --- a/static/app/components/activity/note/input.tsx +++ b/static/app/components/activity/note/input.tsx @@ -132,7 +132,7 @@ function NoteInput({ ); const handleChange: MentionsInputProps['onChange'] = useCallback( - e => { + (e: MentionChangeEvent) => { setValue(e.target.value); onChange?.(e, {updating: existingItem}); }, @@ -140,7 +140,7 @@ function NoteInput({ ); const handleKeyDown: MentionsInputProps['onKeyDown'] = useCallback( - e => { + (e: React.KeyboardEvent) => { // Auto submit the form on [meta,ctrl] + Enter if (e.key === 'Enter' && (e.metaKey || e.ctrlKey) && canSubmit) { submitForm(); diff --git a/static/app/components/activity/note/inputWithStorage.tsx b/static/app/components/activity/note/inputWithStorage.tsx index cef5b960a8eb1c..284b8b2493dab5 100644 --- a/static/app/components/activity/note/inputWithStorage.tsx +++ b/static/app/components/activity/note/inputWithStorage.tsx @@ -129,7 +129,7 @@ function NoteInputWithStorage({ } // Remove `itemKey` from stored object and save to storage - // eslint-disable-next-line no-unused-vars + const {[itemKey]: _oldItem, ...newStorageObj} = storageObj; saveToStorage(storageKey, newStorageObj); }, diff --git a/static/app/components/alerts/notificationBar.tsx b/static/app/components/alerts/notificationBar.tsx deleted file mode 100644 index ab7dc6666cd47b..00000000000000 --- a/static/app/components/alerts/notificationBar.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import styled from '@emotion/styled'; - -import {IconInfo} from 'sentry/icons'; -import {space} from 'sentry/styles/space'; - -const StyledNotificationBarIconInfo = styled(IconInfo)` - margin-right: ${space(1)}; - color: ${p => p.theme.alert.info.color}; -`; - -export const NotificationBar = styled('div')` - display: flex; - align-items: center; - color: ${p => p.theme.textColor}; - 
background-color: ${p => p.theme.alert.info.backgroundLight}; - border-bottom: 1px solid ${p => p.theme.alert.info.border}; - padding: ${space(1.5)}; - font-size: 14px; - line-height: normal; - ${StyledNotificationBarIconInfo} { - color: ${p => p.theme.alert.info.color}; - } -`; diff --git a/static/app/components/alerts/snoozeAlert.tsx b/static/app/components/alerts/snoozeAlert.tsx index 890b48bfab5515..046fb3e43529b1 100644 --- a/static/app/components/alerts/snoozeAlert.tsx +++ b/static/app/components/alerts/snoozeAlert.tsx @@ -9,9 +9,9 @@ import {DropdownMenu} from 'sentry/components/dropdownMenu'; import {IconChevron, IconMute, IconSound} from 'sentry/icons'; import {t} from 'sentry/locale'; import {RuleActionsCategories} from 'sentry/types/alerts'; -import {browserHistory} from 'sentry/utils/browserHistory'; import useApi from 'sentry/utils/useApi'; import {useLocation} from 'sentry/utils/useLocation'; +import {useNavigate} from 'sentry/utils/useNavigate'; import useOrganization from 'sentry/utils/useOrganization'; type Props = { @@ -42,6 +42,7 @@ function SnoozeAlert({ const organization = useOrganization(); const api = useApi(); const location = useLocation(); + const navigate = useNavigate(); const [disabled, setDisabled] = useState(false); @@ -62,10 +63,13 @@ function SnoozeAlert({ ); if (autoMute) { - browserHistory.replace({ - pathname: location.pathname, - query: {...location.query, mute: undefined}, - }); + navigate( + { + pathname: location.pathname, + query: {...location.query, mute: undefined}, + }, + {replace: true} + ); } setDisabled(false); @@ -87,6 +91,7 @@ function SnoozeAlert({ }, [ api, + navigate, isSnoozed, location.pathname, location.query, diff --git a/static/app/components/analyticsArea.spec.tsx b/static/app/components/analyticsArea.spec.tsx index f44d5163a4c73a..061861bde7f180 100644 --- a/static/app/components/analyticsArea.spec.tsx +++ b/static/app/components/analyticsArea.spec.tsx @@ -15,7 +15,7 @@ function TestButton({org}: {org: Organization}) { onClick={() => { analytics.trackAnalytics('button-clicked', { organization: org, - area: area, + area, }); }} /> @@ -41,7 +41,7 @@ describe('AnalyticsAreaProvider', function () { await userEvent.click(button); expect(analyticsSpy).toHaveBeenCalledWith('button-clicked', { - organization: organization, + organization, area: 'feedback.details.activity', }); }); @@ -61,7 +61,7 @@ describe('AnalyticsAreaProvider', function () { await userEvent.click(button); expect(analyticsSpy).toHaveBeenCalledWith('button-clicked', { - organization: organization, + organization, area: 'my-modal', }); }); diff --git a/static/app/components/arithmeticInput/parser.spec.tsx b/static/app/components/arithmeticInput/parser.spec.tsx index d20321ae27ac3b..e58d1ae75350c7 100644 --- a/static/app/components/arithmeticInput/parser.spec.tsx +++ b/static/app/components/arithmeticInput/parser.spec.tsx @@ -2,17 +2,17 @@ import {Operation, parseArithmetic} from 'sentry/components/arithmeticInput/pars describe('arithmeticInput/parser', function () { it('errors on too many operators', () => { - expect(parseArithmetic('1+1+1+1+1+1+1+1+1+1+1+1').error).toEqual( + expect(parseArithmetic('1+1+1+1+1+1+1+1+1+1+1+1').error).toBe( 'Maximum operators exceeded' ); }); it('errors on divide by 0', () => { - expect(parseArithmetic('1/0').error).toEqual('Division by 0 is not allowed'); + expect(parseArithmetic('1/0').error).toBe('Division by 0 is not allowed'); }); it('handles one term', () => { - expect(parseArithmetic('1').result).toStrictEqual('1'); + 
expect(parseArithmetic('1').result).toBe('1'); }); it('handles some addition', () => { diff --git a/static/app/components/arithmeticInput/parser.tsx b/static/app/components/arithmeticInput/parser.tsx index 1c22966f4837ba..c73635d051f99f 100644 --- a/static/app/components/arithmeticInput/parser.tsx +++ b/static/app/components/arithmeticInput/parser.tsx @@ -54,7 +54,7 @@ export class TokenConverter { tokenTerm = (maybeFactor: Expression, remainingAdds: Array): Expression => { if (remainingAdds.length > 0) { - remainingAdds[0].lhs = maybeFactor; + remainingAdds[0]!.lhs = maybeFactor; return flatten(remainingAdds); } return maybeFactor; @@ -75,7 +75,7 @@ export class TokenConverter { }; tokenFactor = (primary: Expression, remaining: Array): Operation => { - remaining[0].lhs = primary; + remaining[0]!.lhs = primary; return flatten(remaining); }; diff --git a/static/app/components/assigneeBadge.stories.tsx b/static/app/components/assigneeBadge.stories.tsx index 3786ba961aaf48..d087d6e37455f4 100644 --- a/static/app/components/assigneeBadge.stories.tsx +++ b/static/app/components/assigneeBadge.stories.tsx @@ -42,7 +42,7 @@ export default storyBook('AssigneeBadge', story => { const [chevron2Toggle, setChevron2Toggle] = useState<'up' | 'down'>('down'); const team: Team = teams.length - ? teams[0] + ? teams[0]! : { id: '1', slug: 'team-slug', diff --git a/static/app/components/assigneeSelectorDropdown.spec.tsx b/static/app/components/assigneeSelectorDropdown.spec.tsx index 675aef098928c5..8c28fb3c99255b 100644 --- a/static/app/components/assigneeSelectorDropdown.spec.tsx +++ b/static/app/components/assigneeSelectorDropdown.spec.tsx @@ -572,7 +572,7 @@ describe('AssigneeSelectorDropdown', () => { // Suggested assignee initials expect(options[0]).toHaveTextContent('AB'); - await userEvent.click(options[0]); + await userEvent.click(options[0]!); await waitFor(() => expect(assignGroup2Mock).toHaveBeenCalledWith( diff --git a/static/app/components/assigneeSelectorDropdown.tsx b/static/app/components/assigneeSelectorDropdown.tsx index 62ccdd19f9a339..0615ae973d26e7 100644 --- a/static/app/components/assigneeSelectorDropdown.tsx +++ b/static/app/components/assigneeSelectorDropdown.tsx @@ -155,6 +155,7 @@ export function AssigneeAvatar({ } if (suggestedActors.length > 0) { + const actor = suggestedActors[0]!; return (
{tct('Suggestion: [name]', { - name: - suggestedActors[0].type === 'team' - ? `#${suggestedActors[0].name}` - : suggestedActors[0].name, + name: actor.type === 'team' ? `#${actor.name}` : actor.name, })} {suggestedActors.length > 1 && tn(' + %s other', ' + %s others', suggestedActors.length - 1)}
- - {suggestedReasons[suggestedActors[0].suggestedReason]} - + {suggestedReasons[actor.suggestedReason]} } /> @@ -265,7 +261,10 @@ export default function AssigneeSelectorDropdown({ const uniqueSuggestions = uniqBy(suggestedOwners, owner => owner.owner); return uniqueSuggestions .map(suggestion => { - const [suggestionType, suggestionId] = suggestion.owner.split(':'); + const [suggestionType, suggestionId] = suggestion.owner.split(':') as [ + string, + string, + ]; const suggestedReasonText = suggestedReasonTable[suggestion.type]; if (suggestionType === 'user') { const member = currentMemberList.find(user => user.id === suggestionId); @@ -322,7 +321,7 @@ export default function AssigneeSelectorDropdown({ } // See makeMemberOption and makeTeamOption for how the value is formatted const type = selectedOption.value.startsWith('user:') ? 'user' : 'team'; - const assigneeId = selectedOption.value.split(':')[1]; + const assigneeId = selectedOption.value.split(':')[1]!; let assignee: User | Actor; if (type === 'user') { @@ -344,10 +343,10 @@ export default function AssigneeSelectorDropdown({ actor => actor.type === type && actor.id === assignee.id ); onAssign({ - assignee: assignee, + assignee, id: assigneeId, - type: type, - suggestedAssignee: suggestedAssignee, + type, + suggestedAssignee, }); } }; diff --git a/static/app/components/assistant/guideAnchor.tsx b/static/app/components/assistant/guideAnchor.tsx index 81ac2c234cfc8c..efbd244f529460 100644 --- a/static/app/components/assistant/guideAnchor.tsx +++ b/static/app/components/assistant/guideAnchor.tsx @@ -1,4 +1,4 @@ -import {Component, createRef, Fragment, useEffect} from 'react'; +import {Component, Fragment, useEffect, useRef} from 'react'; import styled from '@emotion/styled'; import * as Sentry from '@sentry/react'; import type {Query} from 'history'; @@ -44,7 +44,7 @@ type Props = { }; function ScrollToGuide({children}: {children: React.ReactNode}) { - const containerElement = createRef(); + const containerElement = useRef(null); useEffect(() => { if (containerElement.current) { @@ -155,7 +155,7 @@ class BaseGuideAnchor extends Component { const totalStepCount = currentGuide.steps.length; const currentStepCount = step + 1; - const currentStep = currentGuide.steps[step]; + const currentStep = currentGuide.steps[step]!; const lastStep = currentStepCount === totalStepCount; const hasManySteps = totalStepCount > 1; diff --git a/static/app/components/autoComplete.spec.tsx b/static/app/components/autoComplete.spec.tsx index e85f910f870f6a..fb87a2fe581697 100644 --- a/static/app/components/autoComplete.spec.tsx +++ b/static/app/components/autoComplete.spec.tsx @@ -23,7 +23,7 @@ const items = [ * "controlled" props where does not handle state */ describe('AutoComplete', function () { - let input; + let input: HTMLInputElement; let autoCompleteState: any[] = []; const mocks = { onSelect: jest.fn(), @@ -36,12 +36,30 @@ describe('AutoComplete', function () { autoCompleteState = []; }); - function List({registerItemCount, itemCount, ...props}) { + function List({ + registerItemCount, + itemCount, + ...props + }: { + children: React.ReactNode; + itemCount: number; + registerItemCount: (count?: number) => void; + }) { useEffect(() => void registerItemCount(itemCount), [itemCount, registerItemCount]); return