Options to suppress Prometheus metrics and auth-check warnings in the log #29

Open. Wants to merge 1 commit into base: main.
8 changes: 6 additions & 2 deletions ols/app/main.py
@@ -74,8 +74,12 @@ async def log_requests_responses(
     request: Request, call_next: Callable[[Request], Awaitable[Response]]
 ) -> Response:
     """Middleware for logging of HTTP requests and responses, at debug level."""
-    # Bail out early if not logging
-    if not logger.isEnabledFor(logging.DEBUG):
+    # Bail out early if not logging or Prometheus metrics logging is suppressed
+    if (
+        not logger.isEnabledFor(logging.DEBUG)
+        or config.ols_config.logging_config.suppress_metrics_in_log
+        and request.url.path == "/metrics"
+    ):
         return await call_next(request)

     # retrieve client host and port if provided in request object
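A note for reviewers on the new guard: `and` binds tighter than `or` in Python, so the middleware returns early either when debug logging is disabled, or when suppression is enabled and the request targets `/metrics`; all other requests are still logged at debug level. A minimal sketch of the resulting truth table, using a hypothetical helper that is not part of this PR:

```python
# Hypothetical helper mirroring the guard above, for illustration only.
def should_skip_logging(debug_enabled: bool, suppress_metrics: bool, path: str) -> bool:
    # `and` binds tighter than `or`, matching the condition in the diff
    return not debug_enabled or suppress_metrics and path == "/metrics"


assert should_skip_logging(False, False, "/v1/query")    # debug off: always skip
assert should_skip_logging(True, True, "/metrics")       # suppressed scrape: skip
assert not should_skip_logging(True, True, "/v1/query")  # other requests still logged
assert not should_skip_logging(True, False, "/metrics")  # suppression off: scrapes logged
```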
4 changes: 3 additions & 1 deletion ols/app/models/config.py
@@ -843,12 +843,14 @@ class LoggingConfig(BaseModel):
     app_log_level: int = logging.INFO
     lib_log_level: int = logging.WARNING
     uvicorn_log_level: int = logging.WARNING
+    suppress_metrics_in_log: bool = False
+    suppress_auth_checks_warning_in_log: bool = False

     def __init__(self, **data: Optional[dict]) -> None:
         """Initialize configuration and perform basic validation."""
         # convert input strings (level names, eg. debug/info,...) to
         # logging level names (integer values) for defined model fields
-        for field in self.model_fields:
+        for field in filter(lambda x: x.endswith("_log_level"), self.model_fields):
             if field in data:
                 data[field] = _get_log_level(data[field])  # type: ignore[assignment]
         super().__init__(**data)
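For context, a quick sketch of how the updated model behaves, assuming the `LoggingConfig` shown above: string level names are still converted to integer levels, but only for fields ending in `_log_level`, so the two new boolean flags are left to Pydantic's normal validation:

```python
import logging

from ols.app.models.config import LoggingConfig

cfg = LoggingConfig(
    app_log_level="debug",         # converted by _get_log_level to an int level
    suppress_metrics_in_log=True,  # not a *_log_level field, passes through as a bool
)
assert cfg.app_log_level == logging.DEBUG
assert cfg.suppress_metrics_in_log is True
assert cfg.suppress_auth_checks_warning_in_log is False  # default
```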
6 changes: 5 additions & 1 deletion ols/utils/auth_dependency.py
@@ -241,7 +241,11 @@ async def __call__(self, request: Request) -> tuple[str, str]:
             HTTPException: If authentication fails or the user does not have access.
         """
         if config.dev_config.disable_auth:
-            logger.warning("Auth checks disabled, skipping")
+            if (
+                config.ols_config.logging_config is not None
+                and not config.ols_config.logging_config.suppress_auth_checks_warning_in_log
+            ):
+                logger.warning("Auth checks disabled, skipping")
             # Use constant user ID and user name in case auth. is disabled
             # It will be needed for testing purposes because (for example)
             # conversation history and user feedback depend on having any
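The `is not None` check matters here: this condition dereferences `logging_config` directly, and the guard implies the field can be unset, in which case the warning is skipped as well. A condensed restatement of the added logic (illustration only, assuming the surrounding module's `config` and `logger`):

```python
# Equivalent formulation of the added condition, for illustration only.
logging_config = config.ols_config.logging_config
warning_enabled = (
    logging_config is not None
    and not logging_config.suppress_auth_checks_warning_in_log
)
if config.dev_config.disable_auth and warning_enabled:
    logger.warning("Auth checks disabled, skipping")
```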
40 changes: 39 additions & 1 deletion tests/integration/test_authorized.py
@@ -1,12 +1,15 @@
"""Integration tests for /livenss and /readiness REST API endpoints."""

import logging
from unittest.mock import patch

import pytest
import requests
from fastapi.testclient import TestClient

from ols import config, constants
from ols.app.models.config import LoggingConfig
from ols.utils.logging import configure_logging
from tests.mock_classes.mock_k8s_api import (
mock_subject_access_review_response,
mock_token_review_response,
@@ -42,9 +45,15 @@ def _enabled_auth():


 @pytest.mark.usefixtures("_disabled_auth")
-def test_post_authorized_disabled():
+def test_post_authorized_disabled(caplog):
     """Check the REST API /v1/query with POST HTTP method when no payload is posted."""
     # perform POST request with authentication disabled
+    logging_config = LoggingConfig(app_log_level="warning")
+
+    configure_logging(logging_config)
+    logger = logging.getLogger("ols")
+    logger.handlers = [caplog.handler]  # add caplog handler to logger
+
     response = client.post("/authorized")
     assert response.status_code == requests.codes.ok

@@ -54,6 +63,35 @@ def test_post_authorized_disabled():
"username": constants.DEFAULT_USER_NAME,
}

# check if the auth checks warning message is found in the log
captured_out = caplog.text
assert "Auth checks disabled, skipping" in captured_out


@pytest.mark.usefixtures("_disabled_auth")
def test_post_authorized_disabled_with_logging_suppressed(caplog):
"""Check the REST API /v1/query with POST HTTP method when no payload is posted."""
# perform POST request with authentication disabled
logging_config = LoggingConfig(app_log_level="warning")
config.ols_config.logging_config.suppress_auth_checks_warning_in_log = True

configure_logging(logging_config)
logger = logging.getLogger("ols")
logger.handlers = [caplog.handler] # add caplog handler to logger

response = client.post("/authorized")
assert response.status_code == requests.codes.ok

# check the response payload
assert response.json() == {
"user_id": constants.DEFAULT_USER_UID,
"username": constants.DEFAULT_USER_NAME,
}

# check if the auth checks warning message is NOT found in the log
captured_out = caplog.text
assert "Auth checks disabled, skipping" not in captured_out


@pytest.mark.usefixtures("_enabled_auth")
def test_post_authorized_no_token():
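One wiring detail these tests share: `configure_logging()` installs its own handlers on the "ols" logger, so pytest's `caplog` fixture would not see the records by default; reassigning `logger.handlers` to `[caplog.handler]` routes them back into the capture. A standalone sketch of the pattern (hypothetical test, assuming only pytest):

```python
import logging


def test_caplog_wiring(caplog):
    """Show how records reach caplog once its handler is attached."""
    logger = logging.getLogger("ols")
    logger.handlers = [caplog.handler]  # route records into caplog's capture
    logger.warning("Auth checks disabled, skipping")
    assert "Auth checks disabled, skipping" in caplog.text
```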
69 changes: 59 additions & 10 deletions tests/integration/test_metrics.py
@@ -1,5 +1,6 @@
"""Integration tests for metrics exposed by the service."""

import logging
import os
import re
from unittest.mock import patch
@@ -9,9 +10,22 @@
 from fastapi.testclient import TestClient

 from ols import config
+from ols.app.models.config import LoggingConfig
+from ols.utils.logging import configure_logging

 client: TestClient

+# counters that are expected to be part of metrics
+expected_counters = (
+    "ols_rest_api_calls_total",
+    "ols_llm_calls_total",
+    "ols_llm_calls_failures_total",
+    "ols_llm_validation_errors_total",
+    "ols_llm_token_sent_total",
+    "ols_llm_token_received_total",
+    "ols_provider_model_configuration",
+)
+

 # we need to patch the config file path to point to the test
 # config file before we import anything from main.py
@@ -45,23 +59,58 @@ def test_metrics():
"""Check if service provides metrics endpoint with some expected counters."""
response_text = retrieve_metrics(client)

# counters that are expected to be part of metrics
expected_counters = (
"ols_rest_api_calls_total",
"ols_llm_calls_total",
"ols_llm_calls_failures_total",
"ols_llm_validation_errors_total",
"ols_llm_token_sent_total",
"ols_llm_token_received_total",
"ols_provider_model_configuration",
)
# check if all counters are present
for expected_counter in expected_counters:
assert (
f"{expected_counter} " in response_text
), f"Counter {expected_counter} not found in {response_text}"


def test_metrics_with_debug_log(caplog):
"""Check if service provides metrics endpoint with some expected counters."""
logging_config = LoggingConfig(app_log_level="debug")

configure_logging(logging_config)
logger = logging.getLogger("ols")
logger.handlers = [caplog.handler] # add caplog handler to logger

response_text = retrieve_metrics(client)

# check if all counters are present
for expected_counter in expected_counters:
assert (
f"{expected_counter} " in response_text
), f"Counter {expected_counter} not found in {response_text}"

# check if the metrics are also found in the log
captured_out = caplog.text
for expected_counter in expected_counters:
assert expected_counter in captured_out


def test_metrics_with_debug_logging_suppressed(caplog):
"""Check if service provides metrics endpoint with some expected counters."""
logging_config = LoggingConfig(app_log_level="debug")
config.ols_config.logging_config.suppress_metrics_in_log = True

configure_logging(logging_config)

logger = logging.getLogger("ols")
logger.handlers = [caplog.handler] # add caplog handler to logger

response_text = retrieve_metrics(client)

# check if all counters are present
for expected_counter in expected_counters:
assert (
f"{expected_counter} " in response_text
), f"Counter {expected_counter} not found in {response_text}"

# check if the metrics are NOT found in the log
captured_out = caplog.text
for expected_counter in expected_counters:
assert expected_counter not in captured_out


def get_counter_value(client, counter_name, path, status_code):
"""Retrieve counter value from metrics."""
2 changes: 2 additions & 0 deletions tests/unit/app/models/test_config.py
@@ -1408,6 +1408,8 @@ def test_valid_values():
     assert logging_config.app_log_level == logging.INFO
     assert logging_config.lib_log_level == logging.WARNING
     assert logging_config.uvicorn_log_level == logging.WARNING
+    assert not logging_config.suppress_metrics_in_log
+    assert not logging_config.suppress_auth_checks_warning_in_log

     # test custom values
     logging_config = LoggingConfig(