Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added generator for OpenRouter.ai #1051

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
159 changes: 159 additions & 0 deletions garak/generators/openrouter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
"""OpenRouter.ai API Generator

Supports various LLMs through OpenRouter.ai's API. Put your API key in
the OPENROUTER_API_KEY environment variable. Put the name of the
model you want in either the --model_name command line parameter, or
pass it as an argument to the Generator constructor.

Usage:
export OPENROUTER_API_KEY='your-api-key-here'
garak --model_type openrouter --model_name MODEL_NAME

Example:
garak --model_type openrouter --model_name anthropic/claude-3-opus

For available models, see: https://openrouter.ai/docs#models
"""

import logging
from typing import List, Union

from garak import _config
from garak.generators.openai import OpenAICompatible

# Default context lengths for common models
# These are just examples - any model from OpenRouter will work
# NOTE(review): values are maintained by hand — presumably mirrored from
# https://openrouter.ai model listings; verify before relying on them.
context_lengths = {
    "openai/gpt-4-turbo-preview": 128000,
    "openai/gpt-3.5-turbo": 16385,
    "anthropic/claude-3-opus": 200000,
    "anthropic/claude-3-sonnet": 200000,
    "anthropic/claude-2.1": 200000,
    "google/gemini-pro": 32000,
    "meta/llama-2-70b-chat": 4096,
    "mistral/mistral-medium": 32000,
    "mistral/mistral-small": 32000
}

class OpenRouterGenerator(OpenAICompatible):
    """Generator wrapper for OpenRouter.ai models. Expects API key in the OPENROUTER_API_KEY environment variable"""

    # Environment variable holding the API key; the OpenAICompatible base
    # class reads keys from ENV_VAR by convention.
    ENV_VAR = "OPENROUTER_API_KEY"
    active = True
    generator_family_name = "OpenRouter"
    # Extend the OpenAI-compatible defaults: point at OpenRouter's
    # OpenAI-style endpoint and cap response length at 2000 tokens.
    DEFAULT_PARAMS = OpenAICompatible.DEFAULT_PARAMS | {
        "uri": "https://openrouter.ai/api/v1",
        "max_tokens": 2000,
    }

def __init__(self, name="", config_root=_config):
    """Set up the generator for the given OpenRouter model name.

    name: OpenRouter model identifier, e.g. "anthropic/claude-3-opus".
    config_root: garak configuration root used by _load_config.
    """
    # NOTE(review): name is assigned and _load_config run here, then
    # super().__init__ receives the same name — the base __init__ likely
    # repeats part of this work; confirm against OpenAICompatible.
    self.name = name
    self._load_config(config_root)
    # Known models get a model-specific context window; unknown models are
    # presumably given a default elsewhere (see _validate_config) — confirm
    # that path is actually invoked during construction.
    if self.name in context_lengths:
        self.context_len = context_lengths[self.name]

    super().__init__(self.name, config_root=config_root)

def _load_client(self):
    """Initialize the OpenAI client pointed at the OpenRouter.ai endpoint.

    Uses the configured ``uri`` (seeded by DEFAULT_PARAMS, overridable via
    garak config) instead of a hard-coded URL, so the endpoint stays a
    single source of truth.
    """
    import openai

    self.client = openai.OpenAI(
        api_key=self._get_api_key(),
        # was hard-coded "https://openrouter.ai/api/v1", duplicating
        # DEFAULT_PARAMS["uri"]; use the configured value instead
        base_url=self.uri,
    )

    # OpenRouter models are driven through the chat-completions API here.
    # NOTE(review): no chat-vs-completion detection is performed; a pure
    # completion model would need self.client.completions instead.
    self.generator = self.client.chat.completions

def _get_api_key(self):
    """Fetch the OpenRouter API key from the environment.

    Returns the key string; raises ValueError when the variable is unset
    or empty.
    """
    import os

    api_key = os.environ.get(self.ENV_VAR)
    if api_key:
        return api_key
    raise ValueError(f"Please set the {self.ENV_VAR} environment variable with your OpenRouter API key")

Comment on lines +57 to +75
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This code is not needed: the api_key handling is built into the base class via the ENV_VAR set above, and the uri value should be set via DEFAULT_PARAMS, as noted in another comment.

Code for _load_client() seems incomplete as there is no detection of chat vs completion for setting self.generator.

Suggested change
def _load_client(self):
"""Initialize the OpenAI client with OpenRouter.ai base URL"""
import openai
self.client = openai.OpenAI(
api_key=self._get_api_key(),
base_url="https://openrouter.ai/api/v1"
)
# Determine if we're using chat or completion based on model
self.generator = self.client.chat.completions
def _get_api_key(self):
"""Get API key from environment variable"""
import os
key = os.getenv(self.ENV_VAR)
if not key:
raise ValueError(f"Please set the {self.ENV_VAR} environment variable with your OpenRouter API key")
return key

def _validate_config(self):
    """Validate the configuration.

    Raises:
        ValueError: if no model name was supplied.
    """
    if not self.name:
        raise ValueError("Model name must be specified")

    # Fall back to a conservative context window for unrecognized models.
    if self.name not in context_lengths:
        # lazy %-style args: the message is only formatted if INFO is enabled
        logging.info(
            "Model %s not in list of known context lengths. Using default of 4096 tokens.",
            self.name,
        )
        self.context_len = 4096

def _log_completion_details(self, prompt, response):
    """Log completion details at DEBUG level

    prompt: either a raw string or a list of chat-message dicts.
    response: an OpenAI-style response object (choices, optional usage).
    """
    # --- input side: string prompt vs chat message list ---
    logging.debug("=== Model Input ===")
    if isinstance(prompt, str):
        logging.debug(f"Prompt: {prompt}")
    else:
        logging.debug("Messages:")
        for msg in prompt:
            logging.debug(f"- Role: {msg.get('role', 'unknown')}")
            logging.debug(f" Content: {msg.get('content', '')}")

    # --- output side: token accounting, if the response reports usage ---
    logging.debug("\n=== Model Output ===")
    if hasattr(response, 'usage'):
        logging.debug(f"Prompt Tokens: {response.usage.prompt_tokens}")
        logging.debug(f"Completion Tokens: {response.usage.completion_tokens}")
        logging.debug(f"Total Tokens: {response.usage.total_tokens}")

    logging.debug("\nGenerated Text:")
    # OpenAI response object always has choices
    for choice in response.choices:
        # chat-style choices carry .message; legacy completions carry .text
        if hasattr(choice, 'message'):
            logging.debug(f"- Message Content: {choice.message.content}")
            if hasattr(choice.message, 'role'):
                logging.debug(f" Role: {choice.message.role}")
            if hasattr(choice.message, 'function_call'):
                logging.debug(f" Function Call: {choice.message.function_call}")
        elif hasattr(choice, 'text'):
            logging.debug(f"- Text: {choice.text}")

        # Log additional choice attributes if present
        if hasattr(choice, 'finish_reason'):
            logging.debug(f" Finish Reason: {choice.finish_reason}")
        if hasattr(choice, 'index'):
            logging.debug(f" Choice Index: {choice.index}")

    # Log model info if present
    if hasattr(response, 'model'):
        logging.debug(f"\nModel: {response.model}")
    if hasattr(response, 'system_fingerprint'):
        logging.debug(f"System Fingerprint: {response.system_fingerprint}")

    logging.debug("==================")

def _call_model(self, prompt: Union[str, List[dict]], generations_this_call: int = 1):
    """Call the model, log the exchange at DEBUG level, and return outputs.

    prompt: a raw string (wrapped as a single user chat turn) or a list of
        chat-message dicts passed through as-is.
    generations_this_call: number of completions requested, honored unless
        "n" is in self.suppressed_params.
    Returns a list of generated strings, or [None] on failure.
    """
    try:
        # Ensure client is initialized
        if self.client is None or self.generator is None:
            self._load_client()

        # A bare string becomes a single-turn chat conversation
        messages = [{"role": "user", "content": prompt}] if isinstance(prompt, str) else prompt

        # Build kwargs conditionally: previously n=None / max_tokens=None
        # were sent explicitly, which OpenAI-style endpoints may reject.
        create_args = {"model": self.name, "messages": messages}
        if "n" not in self.suppressed_params:
            create_args["n"] = generations_this_call
        if getattr(self, "max_tokens", None) is not None:
            create_args["max_tokens"] = self.max_tokens

        raw_response = self.generator.create(**create_args)

        # Log the completion details
        self._log_completion_details(prompt, raw_response)

        # Return the full response content
        return [choice.message.content for choice in raw_response.choices]

    except Exception as e:
        # garak expects a list of outputs; signal failure with [None] but
        # keep the traceback in the log instead of discarding it
        logging.exception("Error in model call: %s", e)
        return [None]

Comment on lines +88 to +158
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Again, this code is not needed: it adds a large amount of noise to the logs and is less comprehensive in handling chat vs. completion than the default _call_model() implementation inherited from OpenAICompatible.

Suggested change
def _log_completion_details(self, prompt, response):
"""Log completion details at DEBUG level"""
logging.debug("=== Model Input ===")
if isinstance(prompt, str):
logging.debug(f"Prompt: {prompt}")
else:
logging.debug("Messages:")
for msg in prompt:
logging.debug(f"- Role: {msg.get('role', 'unknown')}")
logging.debug(f" Content: {msg.get('content', '')}")
logging.debug("\n=== Model Output ===")
if hasattr(response, 'usage'):
logging.debug(f"Prompt Tokens: {response.usage.prompt_tokens}")
logging.debug(f"Completion Tokens: {response.usage.completion_tokens}")
logging.debug(f"Total Tokens: {response.usage.total_tokens}")
logging.debug("\nGenerated Text:")
# OpenAI response object always has choices
for choice in response.choices:
if hasattr(choice, 'message'):
logging.debug(f"- Message Content: {choice.message.content}")
if hasattr(choice.message, 'role'):
logging.debug(f" Role: {choice.message.role}")
if hasattr(choice.message, 'function_call'):
logging.debug(f" Function Call: {choice.message.function_call}")
elif hasattr(choice, 'text'):
logging.debug(f"- Text: {choice.text}")
# Log additional choice attributes if present
if hasattr(choice, 'finish_reason'):
logging.debug(f" Finish Reason: {choice.finish_reason}")
if hasattr(choice, 'index'):
logging.debug(f" Choice Index: {choice.index}")
# Log model info if present
if hasattr(response, 'model'):
logging.debug(f"\nModel: {response.model}")
if hasattr(response, 'system_fingerprint'):
logging.debug(f"System Fingerprint: {response.system_fingerprint}")
logging.debug("==================")
def _call_model(self, prompt: Union[str, List[dict]], generations_this_call: int = 1):
"""Call model and handle both logging and response"""
try:
# Ensure client is initialized
if self.client is None or self.generator is None:
self._load_client()
# Create messages format for the API call
messages = [{"role": "user", "content": prompt}] if isinstance(prompt, str) else prompt
# Make a single API call to get the response
raw_response = self.generator.create(
model=self.name,
messages=messages,
n=generations_this_call if "n" not in self.suppressed_params else None,
max_tokens=self.max_tokens if hasattr(self, 'max_tokens') else None
)
# Log the completion details
self._log_completion_details(prompt, raw_response)
# Return the full response content
return [choice.message.content for choice in raw_response.choices]
except Exception as e:
logging.error(f"Error in model call: {str(e)}")
return [None]

DEFAULT_CLASS = "OpenRouterGenerator"
74 changes: 74 additions & 0 deletions tests/generators/test_openrouter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
"""Tests for OpenRouter.ai API Generator"""

import os
import httpx
import pytest

import openai

import garak.exception
from garak.generators.openrouter import OpenRouterGenerator


@pytest.fixture
def set_fake_env(request) -> None:
    """Point OPENROUTER_API_KEY at a fake value for the test, then restore."""
    saved_value = os.environ.get(OpenRouterGenerator.ENV_VAR)

    def restore_env():
        # Put back the original value, or remove the variable if it was unset
        if saved_value is not None:
            os.environ[OpenRouterGenerator.ENV_VAR] = saved_value
        else:
            del os.environ[OpenRouterGenerator.ENV_VAR]

    # Any non-empty string works; this file's path is conveniently unique
    os.environ[OpenRouterGenerator.ENV_VAR] = os.path.abspath(__file__)

    request.addfinalizer(restore_env)


def test_openai_version():
    """The generator targets the v1.x openai client API."""
    major_version = openai.__version__.split(".")[0]
    assert major_version == "1"  # expect openai module v1.x


@pytest.mark.usefixtures("set_fake_env")
def test_openrouter_invalid_model_names():
    """An empty model name must be rejected at construction time."""
    with pytest.raises(ValueError) as e_info:
        OpenRouterGenerator(name="")
    # assert must sit OUTSIDE the `with` block: code after the raising
    # statement inside the block never executes
    assert "Model name must be specified" in str(e_info.value)


@pytest.mark.skipif(
    os.getenv(OpenRouterGenerator.ENV_VAR, None) is None,
    reason=f"OpenRouter API key is not set in {OpenRouterGenerator.ENV_VAR}",
)
def test_openrouter_chat():
    """Live round-trip against the OpenRouter API (runs only with a real key)."""
    generator = OpenRouterGenerator(name="anthropic/claude-3-sonnet")
    assert generator.name == "anthropic/claude-3-sonnet"
    assert isinstance(generator.max_tokens, int)
    generator.max_tokens = 99
    assert generator.max_tokens == 99
    generator.temperature = 0.5
    assert generator.temperature == 0.5

    chat_messages = [
        {"role": "user", "content": "Hello OpenRouter!"},
        {"role": "assistant", "content": "Hello! How can I help you today?"},
        {"role": "user", "content": "How do I write a sonnet?"},
    ]
    # Exercise both input shapes: a plain string and a chat-message list
    for prompt in ("Hello OpenRouter!", chat_messages):
        output = generator.generate(prompt)
        assert len(output) == 1  # expect 1 generation by default
        for item in output:
            assert isinstance(item, str)


@pytest.mark.usefixtures("set_fake_env")
def test_context_lengths():
    """Context-length lookup should work without a real API key.

    Without the set_fake_env fixture this test would construct generators
    with no OPENROUTER_API_KEY present, like the other constructor tests do
    guard against — keep it offline and deterministic.
    """
    # Known model: picked up from the context_lengths table
    generator = OpenRouterGenerator(name="anthropic/claude-3-sonnet")
    assert generator.context_len == 200000

    # Unknown model: falls back to the default
    generator = OpenRouterGenerator(name="unknown/model")
    assert generator.context_len == 4096  # default context length
Loading