From 8f9f747a3ae5d73ffa3803a9845bc24ce195170e Mon Sep 17 00:00:00 2001 From: Rodion Mostovoi Date: Sun, 26 Nov 2023 00:57:30 +0800 Subject: [PATCH] Replace IOpenAiClient with IAiClient and refactor clients The 'IOpenAiClient' interface has been replaced by 'IAiClient', and all related classes have been refactored accordingly. `IAiClient` more accurately reflects that the client might not necessarily be specific to OpenAI and allows for broader API client development. Additional configuration files for AzureOpenAi and OpenRouter have also been added for more flexibility and future support. The version has been bumped to 4.0.0 due to this breaking change. Other changes include: - The Model property in ChatGPTConfig is no longer transformed and is now assigned as-is. - Some variations in hard-coded strings have been fixed for consistency. - Documentation generation and some editor settings have been added to the build properties. These changes will help better support various AI providers, allow more flexibility for developers and improve overall code quality. This update is part of an ongoing effort to refactor and improve the codebase. 
--- OpenAI_DotNet.sln | 1 + README.md | 2 +- .../ChatGpt.BlazorExample/appsettings.json | 11 +- src/Directory.Build.props | 3 +- .../AiClientFactory.cs | 62 +++++ .../AiClientFromConfiguration.cs | 119 +++++++++ ...lientStartupValidationBackgroundService.cs | 15 ++ .../ChatGPTFactory.cs | 17 +- .../Extensions/ServiceCollectionExtensions.cs | 63 +++-- .../Models/AzureOpenAICredentials.cs | 42 ++++ .../Models/OpenAICredentials.cs | 13 +- .../Models/OpenRouterCredentials.cs | 36 +++ .../Extensions/ServiceCollectionExtensions.cs | 24 +- src/OpenAI.ChatGpt/AzureOpenAiClient.cs | 25 +- src/OpenAI.ChatGpt/ChatGPT.cs | 6 +- src/OpenAI.ChatGpt/ChatService.cs | 4 +- src/OpenAI.ChatGpt/IAiClient.cs | 230 ++++++++++++++++++ src/OpenAI.ChatGpt/IOpenAiClient.cs | 129 +--------- .../ChatCompletion/ChatCompletionModels.cs | 18 +- src/OpenAI.ChatGpt/Models/ChatGPTConfig.cs | 4 +- src/OpenAI.ChatGpt/OpenAiClient.cs | 4 +- src/OpenAI.ChatGpt/OpenRouterClient.cs | 13 +- ...iClientExtensions.GetStructuredResponse.cs | 4 +- .../ChatGPTTranslatorService.cs | 4 +- .../OpenAiClientExtensions.Translations.cs | 4 +- .../ChatGptEntityFrameworkIntegrationTests.cs | 2 +- .../ChatGptTranslatorServiceTests.cs | 2 +- .../ClientTests/AzureOpenAiClientTests.cs | 8 +- .../ClientTests/ChatCompletionsApiTests.cs | 2 +- .../ClientTests/ChatCompletionsVisionTests.cs | 2 +- .../Fixtures/AzureOpenAiClientFixture.cs | 2 +- .../Fixtures/OpenAiClientFixture.cs | 2 +- .../Fixtures/OpenRouterClientFixture.cs | 2 +- .../OpenAiClient_GetStructuredResponse.cs | 2 +- .../ClientTests/OpenRouterClientTests.cs | 2 +- .../ChatGptTranslatorServiceTests.cs | 4 +- .../ChatGptServicesIntegrationTests.cs | 6 +- 37 files changed, 652 insertions(+), 237 deletions(-) create mode 100644 src/OpenAI.ChatGpt.AspNetCore/AiClientFactory.cs create mode 100644 src/OpenAI.ChatGpt.AspNetCore/AiClientFromConfiguration.cs create mode 100644 src/OpenAI.ChatGpt.AspNetCore/AiClientStartupValidationBackgroundService.cs create mode 100644 
src/OpenAI.ChatGpt.AspNetCore/Models/AzureOpenAICredentials.cs create mode 100644 src/OpenAI.ChatGpt.AspNetCore/Models/OpenRouterCredentials.cs create mode 100644 src/OpenAI.ChatGpt/IAiClient.cs diff --git a/OpenAI_DotNet.sln b/OpenAI_DotNet.sln index 101a143..a5f4f1d 100644 --- a/OpenAI_DotNet.sln +++ b/OpenAI_DotNet.sln @@ -36,6 +36,7 @@ EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "configs", "configs", "{77B5B4CD-2299-4FEE-B6C3-1090A8A8F2C2}" ProjectSection(SolutionItems) = preProject src\Directory.Build.props = src\Directory.Build.props + src\.editorconfig = src\.editorconfig EndProjectSection EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{130D40E9-8E33-4EBA-8AE1-8B9479BC286A}" diff --git a/README.md b/README.md index f202fe0..1f5b555 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ If you want to configure request parameters, you can do it in `appsettings.json` ```json { "ChatGPTConfig": { - "InitialSystemMessage": null, + "InitialSystemMessage": "You are a helpful and kind assistant.", "InitialUserMessage": null, "MaxTokens": null, "Model": null, diff --git a/samples/ChatGpt.BlazorExample/appsettings.json b/samples/ChatGpt.BlazorExample/appsettings.json index 2ea2195..203b107 100644 --- a/samples/ChatGpt.BlazorExample/appsettings.json +++ b/samples/ChatGpt.BlazorExample/appsettings.json @@ -1,6 +1,15 @@ { "OpenAICredentials": { - "ApiKey": "** Your OpenAI ApiKey **" + "ApiKey": "** Your OpenAI ApiKey **. 
But better in " + }, + "ChatGPTConfig": { + "Provider": "openai", + "InitialSystemMessage": "You are a helpful and kind assistant.", + "InitialUserMessage": null, + "MaxTokens": null, + "Model": null, + "Temperature": null, + "PassUserIdToOpenAiRequests": true }, "Logging": { "LogLevel": { diff --git a/src/Directory.Build.props b/src/Directory.Build.props index def2249..aeac60a 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -1,11 +1,12 @@ - 3.3.0 + 4.0.0 net6.0;net7.0;net8.0 enable enable 12 true + true diff --git a/src/OpenAI.ChatGpt.AspNetCore/AiClientFactory.cs b/src/OpenAI.ChatGpt.AspNetCore/AiClientFactory.cs new file mode 100644 index 0000000..cd08afe --- /dev/null +++ b/src/OpenAI.ChatGpt.AspNetCore/AiClientFactory.cs @@ -0,0 +1,62 @@ +using Microsoft.Extensions.Options; + +namespace OpenAI.ChatGpt.AspNetCore; + +internal class AiClientFactory +{ + private readonly IHttpClientFactory _httpClientFactory; + private readonly OpenAICredentials _openAiCredentials; + private readonly AzureOpenAICredentials _azureOpenAiCredentials; + private readonly OpenRouterCredentials _openRouterCredentials; + + public AiClientFactory( + IHttpClientFactory httpClientFactory, + IOptions openAiCredentialsOptions, + IOptions azureOpenAiCredentialsOptions, + IOptions openRouterCredentialsOptions) + { + ArgumentNullException.ThrowIfNull(openAiCredentialsOptions); + ArgumentNullException.ThrowIfNull(azureOpenAiCredentialsOptions); + ArgumentNullException.ThrowIfNull(openRouterCredentialsOptions); + _httpClientFactory = httpClientFactory ?? 
throw new ArgumentNullException(nameof(httpClientFactory)); + _openAiCredentials = openAiCredentialsOptions.Value; + _azureOpenAiCredentials = azureOpenAiCredentialsOptions.Value; + _openRouterCredentials = openRouterCredentialsOptions.Value; + } + + public OpenAiClient GetOpenAiClient() + { + var httpClient = _httpClientFactory.CreateClient(nameof(OpenAiClient)); + if (_openAiCredentials.ApiKey is null) + { + throw new InvalidOperationException( + $"OpenAI API key is not configured. Please configure it in {nameof(OpenAICredentials)}"); + } + _openAiCredentials.SetupHttpClient(httpClient); + return new OpenAiClient(httpClient); + } + + public AzureOpenAiClient GetAzureOpenAiClient() + { + var httpClient = _httpClientFactory.CreateClient(nameof(AzureOpenAiClient)); + if (_azureOpenAiCredentials.ApiKey is null) + { + throw new InvalidOperationException( + $"Azure OpenAI API key is not configured. Please configure it in {nameof(AzureOpenAICredentials)}"); + } + _azureOpenAiCredentials.SetupHttpClient(httpClient); + return new AzureOpenAiClient(httpClient); + } + + public OpenRouterClient GetOpenRouterClient() + { + var httpClient = _httpClientFactory.CreateClient(nameof(OpenRouterClient)); + if (_openRouterCredentials.ApiKey is null) + { + throw new InvalidOperationException( + $"OpenRouter API key is not configured. 
Please configure it in {nameof(OpenRouterCredentials)}"); } + _openRouterCredentials.SetupHttpClient(httpClient); + return new OpenRouterClient(httpClient); + } +} \ No newline at end of file diff --git a/src/OpenAI.ChatGpt.AspNetCore/AiClientFromConfiguration.cs b/src/OpenAI.ChatGpt.AspNetCore/AiClientFromConfiguration.cs new file mode 100644 index 0000000..1ae1d07 --- /dev/null +++ b/src/OpenAI.ChatGpt.AspNetCore/AiClientFromConfiguration.cs @@ -0,0 +1,119 @@ +using Microsoft.Extensions.Configuration; + +namespace OpenAI.ChatGpt.AspNetCore; + +#pragma warning disable CS0618 // Type or member is obsolete +internal class AiClientFromConfiguration : IAiClient, IOpenAiClient +#pragma warning restore CS0618 // Type or member is obsolete +{ + private const string OpenAiProvider = "openai"; + private const string AzureOpenAiProvider = "azure_openai"; + private const string OpenRouterProvider = "openrouter"; + + private static readonly string[] Providers = + { + OpenAiProvider, AzureOpenAiProvider, OpenRouterProvider + }; + private readonly IAiClient _client; + + public AiClientFromConfiguration( + AiClientFactory clientFactory, + IConfiguration configuration) + { + ArgumentNullException.ThrowIfNull(clientFactory); + ArgumentNullException.ThrowIfNull(configuration); + var provider = configuration.GetValue("AIProvider")?.ToLower(); + provider ??= OpenAiProvider; + if (!Providers.Contains(provider)) + { + ThrowUnknownProviderException(provider); + } + _client = provider switch + { + OpenAiProvider => clientFactory.GetOpenAiClient(), + AzureOpenAiProvider => clientFactory.GetAzureOpenAiClient(), + OpenRouterProvider => clientFactory.GetOpenRouterClient(), + _ => throw new InvalidOperationException($"Unknown provider: {provider}") + }; + } + + + private static void ThrowUnknownProviderException(string provider) + { + throw new ArgumentException($"Unknown AI provider: {provider}. 
" + + $"Supported providers: {string.Join(", ", Providers)}"); + } + + /// + public Task GetChatCompletions(UserOrSystemMessage dialog, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, float temperature = ChatCompletionTemperatures.Default, + string? user = null, bool jsonMode = false, long? seed = null, + Action? requestModifier = null, + Action? rawResponseGetter = null, CancellationToken cancellationToken = default) + { + return _client.GetChatCompletions(dialog, maxTokens, model, temperature, user, jsonMode, seed, + requestModifier, rawResponseGetter, cancellationToken); + } + + /// + public Task GetChatCompletions(IEnumerable messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, float temperature = ChatCompletionTemperatures.Default, + string? user = null, bool jsonMode = false, long? seed = null, + Action? requestModifier = null, + Action? rawResponseGetter = null, CancellationToken cancellationToken = default) + { + return _client.GetChatCompletions(messages, maxTokens, model, temperature, user, jsonMode, seed, + requestModifier, rawResponseGetter, cancellationToken); + } + + /// + public Task GetChatCompletionsRaw(IEnumerable messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, float temperature = ChatCompletionTemperatures.Default, + string? user = null, bool jsonMode = false, long? seed = null, + Action? requestModifier = null, + CancellationToken cancellationToken = default) + { + return _client.GetChatCompletionsRaw(messages, maxTokens, model, temperature, user, jsonMode, seed, + requestModifier, cancellationToken); + } + + /// + public IAsyncEnumerable StreamChatCompletions(IEnumerable messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, float temperature = ChatCompletionTemperatures.Default, + string? 
user = null, bool jsonMode = false, long? seed = null, + Action? requestModifier = null, + CancellationToken cancellationToken = default) + { + return _client.StreamChatCompletions(messages, maxTokens, model, temperature, user, jsonMode, seed, + requestModifier, cancellationToken); + } + + /// + public IAsyncEnumerable StreamChatCompletions(UserOrSystemMessage messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, string model = ChatCompletionModels.Default, + float temperature = ChatCompletionTemperatures.Default, string? user = null, bool jsonMode = false, + long? seed = null, Action? requestModifier = null, + CancellationToken cancellationToken = default) + { + return _client.StreamChatCompletions(messages, maxTokens, model, temperature, user, jsonMode, seed, + requestModifier, cancellationToken); + } + + /// + public IAsyncEnumerable StreamChatCompletions(ChatCompletionRequest request, + CancellationToken cancellationToken = default) + { + return _client.StreamChatCompletions(request, cancellationToken); + } + + /// + public IAsyncEnumerable StreamChatCompletionsRaw(ChatCompletionRequest request, + CancellationToken cancellationToken = default) + { + return _client.StreamChatCompletionsRaw(request, cancellationToken); + } +} \ No newline at end of file diff --git a/src/OpenAI.ChatGpt.AspNetCore/AiClientStartupValidationBackgroundService.cs b/src/OpenAI.ChatGpt.AspNetCore/AiClientStartupValidationBackgroundService.cs new file mode 100644 index 0000000..0b0bfd8 --- /dev/null +++ b/src/OpenAI.ChatGpt.AspNetCore/AiClientStartupValidationBackgroundService.cs @@ -0,0 +1,15 @@ +using Microsoft.Extensions.Hosting; + +namespace OpenAI.ChatGpt.AspNetCore; + +internal class AiClientStartupValidationBackgroundService : BackgroundService +{ + private readonly AiClientFromConfiguration _aiClient; + + public AiClientStartupValidationBackgroundService(AiClientFromConfiguration aiClient) + { + _aiClient = aiClient ?? 
throw new ArgumentNullException(nameof(aiClient)); + } + + protected override Task ExecuteAsync(CancellationToken stoppingToken) => Task.CompletedTask; +} \ No newline at end of file diff --git a/src/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs b/src/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs index 88539d0..6c6d3fc 100644 --- a/src/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs +++ b/src/OpenAI.ChatGpt.AspNetCore/ChatGPTFactory.cs @@ -9,7 +9,7 @@ namespace OpenAI.ChatGpt.AspNetCore; // ReSharper disable once InconsistentNaming public class ChatGPTFactory : IDisposable { - private readonly IOpenAiClient _client; + private readonly IAiClient _client; private readonly ChatGPTConfig _config; private readonly IChatHistoryStorage _chatHistoryStorage; private readonly ITimeProvider _clock; @@ -17,7 +17,7 @@ public class ChatGPTFactory : IDisposable private volatile bool _ensureStorageCreatedCalled; public ChatGPTFactory( - IOpenAiClient client, + IAiClient client, IOptions config, IChatHistoryStorage chatHistoryStorage, ITimeProvider clock) @@ -29,19 +29,6 @@ public ChatGPTFactory( _isHttpClientInjected = true; } - internal ChatGPTFactory( - IOptions credentials, - IOptions config, - IChatHistoryStorage chatHistoryStorage, - ITimeProvider clock) - { - if (credentials?.Value == null) throw new ArgumentNullException(nameof(credentials)); - _config = config?.Value ?? throw new ArgumentNullException(nameof(config)); - _chatHistoryStorage = chatHistoryStorage ?? throw new ArgumentNullException(nameof(chatHistoryStorage)); - _clock = clock ?? 
throw new ArgumentNullException(nameof(clock)); - _client = new OpenAiClient(credentials.Value.ApiKey); - } - public ChatGPTFactory( string apiKey, IChatHistoryStorage chatHistoryStorage, diff --git a/src/OpenAI.ChatGpt.AspNetCore/Extensions/ServiceCollectionExtensions.cs b/src/OpenAI.ChatGpt.AspNetCore/Extensions/ServiceCollectionExtensions.cs index b260297..4c0d3d5 100644 --- a/src/OpenAI.ChatGpt.AspNetCore/Extensions/ServiceCollectionExtensions.cs +++ b/src/OpenAI.ChatGpt.AspNetCore/Extensions/ServiceCollectionExtensions.cs @@ -5,16 +5,19 @@ namespace OpenAI.ChatGpt.AspNetCore.Extensions; public static class ServiceCollectionExtensions { - public const string CredentialsConfigSectionPathDefault = "OpenAICredentials"; + public const string OpenAiCredentialsConfigSectionPathDefault = "OpenAICredentials"; + public const string AzureOpenAiCredentialsConfigSectionPathDefault = "AzureOpenAICredentials"; + public const string OpenRouterCredentialsConfigSectionPathDefault = "OpenRouterCredentials"; // ReSharper disable once InconsistentNaming public const string ChatGPTConfigSectionPathDefault = "ChatGPTConfig"; - public static IHttpClientBuilder AddChatGptInMemoryIntegration( + public static IServiceCollection AddChatGptInMemoryIntegration( this IServiceCollection services, bool injectInMemoryChatService = true, - string credentialsConfigSectionPath = CredentialsConfigSectionPathDefault, - string completionsConfigSectionPath = ChatGPTConfigSectionPathDefault) + string credentialsConfigSectionPath = OpenAiCredentialsConfigSectionPathDefault, + string completionsConfigSectionPath = ChatGPTConfigSectionPathDefault, + bool validateAiClientProviderOnStart = true) { ArgumentNullException.ThrowIfNull(services); if (string.IsNullOrWhiteSpace(credentialsConfigSectionPath)) @@ -36,8 +39,9 @@ public static IHttpClientBuilder AddChatGptInMemoryIntegration( } return services.AddChatGptIntegrationCore( - credentialsConfigSectionPath, - completionsConfigSectionPath + 
credentialsConfigSectionPath: credentialsConfigSectionPath, + completionsConfigSectionPath: completionsConfigSectionPath, + validateAiClientProviderOnStart: validateAiClientProviderOnStart ); } @@ -64,12 +68,13 @@ private static ChatService CreateChatService(IServiceProvider provider) return chat; } - public static IHttpClientBuilder AddChatGptIntegrationCore( - this IServiceCollection services, - string credentialsConfigSectionPath = CredentialsConfigSectionPathDefault, + public static IServiceCollection AddChatGptIntegrationCore(this IServiceCollection services, + string credentialsConfigSectionPath = OpenAiCredentialsConfigSectionPathDefault, string completionsConfigSectionPath = ChatGPTConfigSectionPathDefault, - ServiceLifetime serviceLifetime = ServiceLifetime.Scoped - ) + string azureOpenAiCredentialsConfigSectionPath = AzureOpenAiCredentialsConfigSectionPathDefault, + string openRouterCredentialsConfigSectionPath = OpenRouterCredentialsConfigSectionPathDefault, + ServiceLifetime gptFactoryLifetime = ServiceLifetime.Scoped, + bool validateAiClientProviderOnStart = true) { ArgumentNullException.ThrowIfNull(services); if (string.IsNullOrWhiteSpace(credentialsConfigSectionPath)) @@ -84,10 +89,23 @@ public static IHttpClientBuilder AddChatGptIntegrationCore( nameof(completionsConfigSectionPath)); } + services.AddOptions() .BindConfiguration(credentialsConfigSectionPath) + .Configure(_ => { }) //make optional + .ValidateDataAnnotations() + .ValidateOnStart(); + services.AddOptions() + .BindConfiguration(azureOpenAiCredentialsConfigSectionPath) + .Configure(_ => { }) //make optional + .ValidateDataAnnotations() + .ValidateOnStart(); + services.AddOptions() + .BindConfiguration(openRouterCredentialsConfigSectionPath) + .Configure(_ => { }) //make optional .ValidateDataAnnotations() .ValidateOnStart(); + services.AddOptions() .BindConfiguration(completionsConfigSectionPath) .Configure(_ => { }) //make optional @@ -95,17 +113,22 @@ public static IHttpClientBuilder 
AddChatGptIntegrationCore( .ValidateOnStart(); services.AddSingleton(); - services.Add(new ServiceDescriptor(typeof(ChatGPTFactory), typeof(ChatGPTFactory), serviceLifetime)); + services.Add(new ServiceDescriptor(typeof(ChatGPTFactory), typeof(ChatGPTFactory), gptFactoryLifetime)); - return AddOpenAiClient(services); - } + services.AddHttpClient(nameof(OpenAiClient)); + services.AddHttpClient(nameof(AzureOpenAiClient)); + services.AddHttpClient(nameof(OpenRouterClient)); - private static IHttpClientBuilder AddOpenAiClient(IServiceCollection services) - { - return services.AddHttpClient((provider, httpClient) => + services.AddSingleton(); +#pragma warning disable CS0618 // Type or member is obsolete + services.AddSingleton(); +#pragma warning restore CS0618 // Type or member is obsolete + + if (validateAiClientProviderOnStart) { - var credentials = provider.GetRequiredService>().Value; - credentials.SetupHttpClient(httpClient); - }); + services.AddHostedService(); + } + + return services; } } \ No newline at end of file diff --git a/src/OpenAI.ChatGpt.AspNetCore/Models/AzureOpenAICredentials.cs b/src/OpenAI.ChatGpt.AspNetCore/Models/AzureOpenAICredentials.cs new file mode 100644 index 0000000..73defe1 --- /dev/null +++ b/src/OpenAI.ChatGpt.AspNetCore/Models/AzureOpenAICredentials.cs @@ -0,0 +1,42 @@ +using System.ComponentModel.DataAnnotations; +using System.Net.Http.Headers; + +namespace OpenAI.ChatGpt.AspNetCore.Models; + +public class AzureOpenAICredentials +{ + /// + /// Azure OpenAI API key from Azure Portal. + /// + public string? ApiKey { get; set; } + + /// + /// Azure Open AI API endpoint url. + /// + [Url] + public string? ApiHost { get; set; } + + public string? 
DeploymentName { get; set; } + + public AuthenticationHeaderValue GetAuthHeader() + { + return new AuthenticationHeaderValue("Bearer", ApiKey); + } + + public void SetupHttpClient(HttpClient httpClient) + { + if (ApiKey is null) + { + throw new InvalidOperationException("ApiKey is null"); + } + if (ApiHost is null) + { + throw new InvalidOperationException("ApiHost is null"); + } + if (DeploymentName is null) + { + throw new InvalidOperationException("DeploymentName is null"); + } + AzureOpenAiClient.SetupHttpClient(httpClient, ApiHost, DeploymentName, ApiKey ?? throw new InvalidOperationException("ApiKey is null")); + } +} \ No newline at end of file diff --git a/src/OpenAI.ChatGpt.AspNetCore/Models/OpenAICredentials.cs b/src/OpenAI.ChatGpt.AspNetCore/Models/OpenAICredentials.cs index ca0bb04..ab19982 100644 --- a/src/OpenAI.ChatGpt.AspNetCore/Models/OpenAICredentials.cs +++ b/src/OpenAI.ChatGpt.AspNetCore/Models/OpenAICredentials.cs @@ -8,19 +8,18 @@ namespace OpenAI.ChatGpt.AspNetCore.Models; // ReSharper disable once InconsistentNaming public class OpenAICredentials { - private const string DefaultHost = "https://api.openai.com/v1/"; + private const string DefaultHost = OpenAiClient.DefaultHost; /// /// OpenAI API key. Can be issued here: https://platform.openai.com/account/api-keys /// - [Required] - public string ApiKey { get; set; } + public string? ApiKey { get; set; } /// /// Open AI API host. Default is: /// [Url] - public string ApiHost { get; set; } = DefaultHost; + public string? 
ApiHost { get; set; } = DefaultHost; public AuthenticationHeaderValue GetAuthHeader() { @@ -29,8 +28,12 @@ public AuthenticationHeaderValue GetAuthHeader() public void SetupHttpClient(HttpClient httpClient) { + if (ApiKey is null) + { + throw new InvalidOperationException("OpenAI ApiKey is not set"); + } ArgumentNullException.ThrowIfNull(httpClient); httpClient.DefaultRequestHeaders.Authorization = GetAuthHeader(); - httpClient.BaseAddress = new Uri(ApiHost); + httpClient.BaseAddress = new Uri(ApiHost ?? DefaultHost); } } \ No newline at end of file diff --git a/src/OpenAI.ChatGpt.AspNetCore/Models/OpenRouterCredentials.cs b/src/OpenAI.ChatGpt.AspNetCore/Models/OpenRouterCredentials.cs new file mode 100644 index 0000000..d169b5a --- /dev/null +++ b/src/OpenAI.ChatGpt.AspNetCore/Models/OpenRouterCredentials.cs @@ -0,0 +1,36 @@ +using System.ComponentModel.DataAnnotations; +using System.Net.Http.Headers; + +namespace OpenAI.ChatGpt.AspNetCore.Models; + +public class OpenRouterCredentials +{ + private const string DefaultHost = OpenRouterClient.DefaultHost; + + /// + /// OpenRouter API key. Can be issued here: https://openrouter.ai/keys + /// + public string? ApiKey { get; set; } + + /// + /// OpenRouter API host. Default is: + /// + [Url] + public string? ApiHost { get; set; } = DefaultHost; + + public AuthenticationHeaderValue GetAuthHeader() + { + return new AuthenticationHeaderValue("Bearer", ApiKey); + } + + public void SetupHttpClient(HttpClient httpClient) + { + if (ApiKey is null) + { + throw new InvalidOperationException("OpenRouter ApiKey is not set"); + } + ArgumentNullException.ThrowIfNull(httpClient); + httpClient.DefaultRequestHeaders.Authorization = GetAuthHeader(); + httpClient.BaseAddress = new Uri(ApiHost ?? 
DefaultHost); + } +} \ No newline at end of file diff --git a/src/OpenAI.ChatGpt.EntityFrameworkCore/Extensions/ServiceCollectionExtensions.cs b/src/OpenAI.ChatGpt.EntityFrameworkCore/Extensions/ServiceCollectionExtensions.cs index 8378aa3..781c4cd 100644 --- a/src/OpenAI.ChatGpt.EntityFrameworkCore/Extensions/ServiceCollectionExtensions.cs +++ b/src/OpenAI.ChatGpt.EntityFrameworkCore/Extensions/ServiceCollectionExtensions.cs @@ -9,12 +9,14 @@ public static class ServiceCollectionExtensions /// /// Adds the implementation using Entity Framework Core. /// - public static IHttpClientBuilder AddChatGptEntityFrameworkIntegration( - this IServiceCollection services, + public static IServiceCollection AddChatGptEntityFrameworkIntegration(this IServiceCollection services, Action optionsAction, - string credentialsConfigSectionPath = CredentialsConfigSectionPathDefault, + string credentialsConfigSectionPath = OpenAiCredentialsConfigSectionPathDefault, string completionsConfigSectionPath = ChatGPTConfigSectionPathDefault, - ServiceLifetime serviceLifetime = ServiceLifetime.Scoped) + string azureOpenAiCredentialsConfigSectionPath = AzureOpenAiCredentialsConfigSectionPathDefault, + string openRouterCredentialsConfigSectionPath = OpenRouterCredentialsConfigSectionPathDefault, + ServiceLifetime serviceLifetime = ServiceLifetime.Scoped, + bool validateAiClientProviderOnStart = true) { ArgumentNullException.ThrowIfNull(services); ArgumentNullException.ThrowIfNull(optionsAction); @@ -23,12 +25,13 @@ public static IHttpClientBuilder AddChatGptEntityFrameworkIntegration( throw new ArgumentException("Value cannot be null or whitespace.", nameof(credentialsConfigSectionPath)); } + if (string.IsNullOrWhiteSpace(completionsConfigSectionPath)) { throw new ArgumentException("Value cannot be null or whitespace.", nameof(completionsConfigSectionPath)); } - + services.AddDbContext(optionsAction, serviceLifetime); switch (serviceLifetime) { @@ -44,8 +47,13 @@ public static IHttpClientBuilder 
AddChatGptEntityFrameworkIntegration( default: throw new ArgumentOutOfRangeException(nameof(serviceLifetime), serviceLifetime, null); } - - return services.AddChatGptIntegrationCore( - credentialsConfigSectionPath, completionsConfigSectionPath, serviceLifetime); + + return services.AddChatGptIntegrationCore(credentialsConfigSectionPath: credentialsConfigSectionPath, + completionsConfigSectionPath: completionsConfigSectionPath, + azureOpenAiCredentialsConfigSectionPath: azureOpenAiCredentialsConfigSectionPath, + openRouterCredentialsConfigSectionPath: openRouterCredentialsConfigSectionPath, + serviceLifetime, + validateAiClientProviderOnStart: validateAiClientProviderOnStart + ); } } \ No newline at end of file diff --git a/src/OpenAI.ChatGpt/AzureOpenAiClient.cs b/src/OpenAI.ChatGpt/AzureOpenAiClient.cs index b57b99e..ecf5503 100644 --- a/src/OpenAI.ChatGpt/AzureOpenAiClient.cs +++ b/src/OpenAI.ChatGpt/AzureOpenAiClient.cs @@ -33,21 +33,30 @@ public AzureOpenAiClient( throw new ArgumentException("Value cannot be null or whitespace.", nameof(azureKey)); _apiVersion = apiVersion ?? 
throw new ArgumentNullException(nameof(apiVersion)); - HttpClient = new HttpClient() - { - BaseAddress = new Uri($"{endpointUrl}/openai/deployments/{deploymentName}/"), - DefaultRequestHeaders = - { - { "api-key", azureKey } - } - }; + HttpClient = new HttpClient(); + SetupHttpClient(HttpClient, endpointUrl, deploymentName, azureKey); IsHttpClientInjected = false; } + internal static void SetupHttpClient(HttpClient httpClient, string endpointUrl, string deploymentName, string azureKey) + { + ArgumentNullException.ThrowIfNull(httpClient); + ArgumentNullException.ThrowIfNull(endpointUrl); + ArgumentNullException.ThrowIfNull(deploymentName); + ArgumentNullException.ThrowIfNull(azureKey); + httpClient.BaseAddress = new Uri($"{endpointUrl}/openai/deployments/{deploymentName}/"); + httpClient.DefaultRequestHeaders.Add("api-key", azureKey); + } + public AzureOpenAiClient(HttpClient httpClient, string apiVersion) : base(httpClient) { _apiVersion = apiVersion ?? throw new ArgumentNullException(nameof(apiVersion)); } + + public AzureOpenAiClient(HttpClient httpClient) : base(httpClient) + { + _apiVersion = DefaultApiVersion; + } protected override string GetChatCompletionsEndpoint() { diff --git a/src/OpenAI.ChatGpt/ChatGPT.cs b/src/OpenAI.ChatGpt/ChatGPT.cs index d8bc28e..a5ce91c 100644 --- a/src/OpenAI.ChatGpt/ChatGPT.cs +++ b/src/OpenAI.ChatGpt/ChatGPT.cs @@ -14,7 +14,7 @@ public class ChatGPT : IDisposable private readonly IChatHistoryStorage _storage; private readonly ITimeProvider _clock; private readonly ChatGPTConfig? _config; - private readonly IOpenAiClient _client; + private readonly IAiClient _client; private ChatService? _currentChat; private static readonly string NoUser = Guid.Empty.ToString(); @@ -24,7 +24,7 @@ public class ChatGPT : IDisposable /// Use this constructor to create chat conversation provider for the specific user. 
/// public ChatGPT( - IOpenAiClient client, + IAiClient client, IChatHistoryStorage chatHistoryStorage, ITimeProvider clock, string userId, @@ -42,7 +42,7 @@ public ChatGPT( /// If you don't have users use this ChatGPT constructor. /// public ChatGPT( - IOpenAiClient client, + IAiClient client, IChatHistoryStorage chatHistoryStorage, ITimeProvider clock, ChatGPTConfig? config) diff --git a/src/OpenAI.ChatGpt/ChatService.cs b/src/OpenAI.ChatGpt/ChatService.cs index a654839..cd6c6d6 100644 --- a/src/OpenAI.ChatGpt/ChatService.cs +++ b/src/OpenAI.ChatGpt/ChatService.cs @@ -28,7 +28,7 @@ public class ChatService : IDisposable, IAsyncDisposable private readonly IChatHistoryStorage _chatHistoryStorage; private readonly ITimeProvider _clock; - private readonly IOpenAiClient _client; + private readonly IAiClient _client; private readonly bool _clearOnDisposal; private CancellationTokenSource? _cts; private bool _isNew; @@ -36,7 +36,7 @@ public class ChatService : IDisposable, IAsyncDisposable internal ChatService( IChatHistoryStorage chatHistoryStorage, ITimeProvider clock, - IOpenAiClient client, + IAiClient client, string userId, Topic topic, bool isNew, diff --git a/src/OpenAI.ChatGpt/IAiClient.cs b/src/OpenAI.ChatGpt/IAiClient.cs new file mode 100644 index 0000000..5b22fab --- /dev/null +++ b/src/OpenAI.ChatGpt/IAiClient.cs @@ -0,0 +1,230 @@ +using OpenAI.ChatGpt.Models.ChatCompletion; +using OpenAI.ChatGpt.Models.ChatCompletion.Messaging; + +namespace OpenAI.ChatGpt; + +/// +/// AI clients interface. +/// +public interface IAiClient +{ + /// + /// Get a chat completion response as a string + /// + /// The dialog history + /// The length of the response + /// One of + /// + /// What sampling temperature to use, between 0 and 2. + /// Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. 
+ /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor + /// and detect abuse. + /// + /// + /// If true, the response will be returned as a JSON object. + /// When using JSON mode, always instruct the model to produce JSON via some message in the conversation, + /// for example via your system message. + /// See: https://platform.openai.com/docs/guides/text-generation/json-mode + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// See: https://platform.openai.com/docs/guides/text-generation/reproducible-outputs + /// This feature is in Beta. + /// + /// A modifier of the raw request. Allows to specify any custom properties. + /// A delegate to get the raw response. + /// Cancellation token. + /// The chat completion response as a string + Task GetChatCompletions( + UserOrSystemMessage dialog, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, + float temperature = ChatCompletionTemperatures.Default, + string? user = null, + bool jsonMode = false, + long? seed = null, + Action? requestModifier = null, + Action? rawResponseGetter = null, + CancellationToken cancellationToken = default); + + /// + /// Get a chat completion response as a string + /// + /// The dialog history + /// The length of the response + /// One of + /// + /// What sampling temperature to use, between 0 and 2. + /// Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor + /// and detect abuse. 
+ /// + /// + /// If true, the response will be returned as a JSON object. + /// When using JSON mode, always instruct the model to produce JSON via some message in the conversation, + /// for example via your system message. + /// See: https://platform.openai.com/docs/guides/text-generation/json-mode + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// See: https://platform.openai.com/docs/guides/text-generation/reproducible-outputs + /// This feature is in Beta. + /// + /// A modifier of the raw request. Allows to specify any custom properties. + /// A delegate to get the raw response. + /// Cancellation token. + /// The chat completion response as a string + Task GetChatCompletions( + IEnumerable messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, + float temperature = ChatCompletionTemperatures.Default, + string? user = null, + bool jsonMode = false, + long? seed = null, + Action? requestModifier = null, + Action? rawResponseGetter = null, + CancellationToken cancellationToken = default); + + /// + /// Get a raw chat completion response + /// + /// The dialog history + /// The length of the response + /// One of + /// + /// What sampling temperature to use, between 0 and 2. + /// Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor + /// and detect abuse. + /// + /// + /// If true, the response will be returned as a JSON object. 
+ /// When using JSON mode, always instruct the model to produce JSON via some message in the conversation, + /// for example via your system message. + /// See: https://platform.openai.com/docs/guides/text-generation/json-mode + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// See: https://platform.openai.com/docs/guides/text-generation/reproducible-outputs + /// This feature is in Beta. + /// + /// A modifier of the raw request. Allows to specify any custom properties. + /// Cancellation token. + /// The raw chat completion response + Task GetChatCompletionsRaw( + IEnumerable messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, + float temperature = ChatCompletionTemperatures.Default, + string? user = null, + bool jsonMode = false, + long? seed = null, + Action? requestModifier = null, + CancellationToken cancellationToken = default); + + /// + /// Start streaming chat completions like ChatGPT + /// + /// The history of messaging + /// The length of the response + /// One of + /// + /// What sampling temperature to use, between 0 and 2. + /// Higher values like 0.8 will make the output more random, + /// while lower values like 0.2 will make it more focused and deterministic. + /// + /// + /// A unique identifier representing your end-user, which can help OpenAI to monitor + /// and detect abuse. + /// + /// + /// If true, the response will be returned as a JSON object. + /// When using JSON mode, always instruct the model to produce JSON via some message in the conversation, + /// for example via your system message. 
+ /// See: https://platform.openai.com/docs/guides/text-generation/json-mode + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// See: https://platform.openai.com/docs/guides/text-generation/reproducible-outputs + /// This feature is in Beta. + /// + /// A modifier of the raw request. Allows to specify any custom properties. + /// Cancellation token. + /// Chunks of LLM's response, one by one. + IAsyncEnumerable StreamChatCompletions( + IEnumerable messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, + float temperature = ChatCompletionTemperatures.Default, + string? user = null, + bool jsonMode = false, + long? seed = null, + Action? requestModifier = null, + CancellationToken cancellationToken = default); + + /// + /// Start streaming chat completions like ChatGPT + /// + /// The history of messaging + /// The length of the response + /// One of + /// > + /// + /// + /// If true, the response will be returned as a JSON object. + /// When using JSON mode, always instruct the model to produce JSON via some message in the conversation, + /// for example via your system message. + /// See: https://platform.openai.com/docs/guides/text-generation/json-mode + /// + /// + /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// This feature is in Beta. 
+ /// See: https://platform.openai.com/docs/guides/text-generation/reproducible-outputs + /// + /// Request modifier + /// Cancellation token + /// Chunks of LLM's response, one by one + IAsyncEnumerable StreamChatCompletions( + UserOrSystemMessage messages, + int maxTokens = ChatCompletionRequest.MaxTokensDefault, + string model = ChatCompletionModels.Default, + float temperature = ChatCompletionTemperatures.Default, + string? user = null, + bool jsonMode = false, + long? seed = null, + Action? requestModifier = null, + CancellationToken cancellationToken = default); + + /// + /// Start streaming raw chat completion responses + /// + /// The chat completion request + /// Cancellation token + /// A stream of raw chat completion responses + IAsyncEnumerable StreamChatCompletions( + ChatCompletionRequest request, CancellationToken cancellationToken = default); + + /// + /// Start streaming raw chat completion responses + /// + /// The chat completion request + /// Cancellation token + /// A stream of raw chat completion responses + IAsyncEnumerable StreamChatCompletionsRaw( + ChatCompletionRequest request, CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/src/OpenAI.ChatGpt/IOpenAiClient.cs b/src/OpenAI.ChatGpt/IOpenAiClient.cs index aa2eef1..6d286e5 100644 --- a/src/OpenAI.ChatGpt/IOpenAiClient.cs +++ b/src/OpenAI.ChatGpt/IOpenAiClient.cs @@ -1,124 +1,7 @@ -using OpenAI.ChatGpt.Models.ChatCompletion; -using OpenAI.ChatGpt.Models.ChatCompletion.Messaging; +namespace OpenAI.ChatGpt; -namespace OpenAI.ChatGpt; - -public interface IOpenAiClient -{ - - Task GetChatCompletions( - UserOrSystemMessage dialog, - int maxTokens = ChatCompletionRequest.MaxTokensDefault, - string model = ChatCompletionModels.Default, - float temperature = ChatCompletionTemperatures.Default, - string? user = null, - bool jsonMode = false, - long? seed = null, - Action? requestModifier = null, - Action? 
rawResponseGetter = null, - CancellationToken cancellationToken = default); - - Task GetChatCompletions( - IEnumerable messages, - int maxTokens = ChatCompletionRequest.MaxTokensDefault, - string model = ChatCompletionModels.Default, - float temperature = ChatCompletionTemperatures.Default, - string? user = null, - bool jsonMode = false, - long? seed = null, - Action? requestModifier = null, - Action? rawResponseGetter = null, - CancellationToken cancellationToken = default); - - Task GetChatCompletionsRaw( - IEnumerable messages, - int maxTokens = ChatCompletionRequest.MaxTokensDefault, - string model = ChatCompletionModels.Default, - float temperature = ChatCompletionTemperatures.Default, - string? user = null, - bool jsonMode = false, - long? seed = null, - Action? requestModifier = null, - CancellationToken cancellationToken = default); - - /// - /// Start streaming chat completions like ChatGPT - /// - /// The history of messaging - /// The length of the response - /// One of - /// - /// What sampling temperature to use, between 0 and 2. - /// Higher values like 0.8 will make the output more random, - /// while lower values like 0.2 will make it more focused and deterministic. - /// - /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor - /// and detect abuse. - /// - /// - /// If true, the response will be returned as a JSON object. - /// When using JSON mode, always instruct the model to produce JSON via some message in the conversation, - /// for example via your system message. - /// See: https://platform.openai.com/docs/guides/text-generation/json-mode - /// - /// - /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. 
- /// See: https://platform.openai.com/docs/guides/text-generation/reproducible-outputs - /// - /// A modifier of the raw request. Allows to specify any custom properties. - /// Cancellation token. - /// Chunks of ChatGPT's response, one by one. - IAsyncEnumerable StreamChatCompletions( - IEnumerable messages, - int maxTokens = ChatCompletionRequest.MaxTokensDefault, - string model = ChatCompletionModels.Default, - float temperature = ChatCompletionTemperatures.Default, - string? user = null, - bool jsonMode = false, - long? seed = null, - Action? requestModifier = null, - CancellationToken cancellationToken = default); - - /// - /// Start streaming chat completions like ChatGPT - /// - /// The history of messaging - /// The length of the response - /// One of - /// > - /// - /// - /// If true, the response will be returned as a JSON object. - /// When using JSON mode, always instruct the model to produce JSON via some message in the conversation, - /// for example via your system message. - /// See: https://platform.openai.com/docs/guides/text-generation/json-mode - /// - /// - /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - /// See: https://platform.openai.com/docs/guides/text-generation/reproducible-outputs - /// - /// Request modifier - /// Cancellation token - /// Chunks of ChatGPT's response, one by one - IAsyncEnumerable StreamChatCompletions( - UserOrSystemMessage messages, - int maxTokens = ChatCompletionRequest.MaxTokensDefault, - string model = ChatCompletionModels.Default, - float temperature = ChatCompletionTemperatures.Default, - string? user = null, - bool jsonMode = false, - long? seed = null, - Action? 
requestModifier = null, - CancellationToken cancellationToken = default); - - IAsyncEnumerable StreamChatCompletions( - ChatCompletionRequest request, CancellationToken cancellationToken = default); - - IAsyncEnumerable StreamChatCompletionsRaw( - ChatCompletionRequest request, CancellationToken cancellationToken = default); -} \ No newline at end of file +/// +/// The OpenAI client interface. +/// +[Obsolete($"Will be removed in the next major version. Use {nameof(IAiClient)} instead.")] +public interface IOpenAiClient : IAiClient; \ No newline at end of file diff --git a/src/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs b/src/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs index 8a98d76..98642d1 100644 --- a/src/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs +++ b/src/OpenAI.ChatGpt/Models/ChatCompletion/ChatCompletionModels.cs @@ -150,7 +150,7 @@ public static class ChatCompletionModels { Gpt3_5_Turbo_0301, 4096 }, }; - private static int _validateModelName = 1; + private static int _validateModelName = 0; /// @@ -186,7 +186,7 @@ public static string FromString(string model) if (model == null) throw new ArgumentNullException(nameof(model)); if (string.IsNullOrWhiteSpace(model)) { - throw new ArgumentException("Value cannot be null or whitespace.", nameof(model)); + throw new ArgumentException("Model cannot be empty or whitespace.", nameof(model)); } if (_validateModelName == 1 && !MaxTokensLimits.ContainsKey(model)) { @@ -195,20 +195,6 @@ public static string FromString(string model) return model; } - // TODO move to IOpenAiClient - [Obsolete("This method will be removed in the next major version. Use DisableModelNameValidation from IOpenAiClient instead.")] - public static void DisableModelNameValidation() - { - Interlocked.CompareExchange(ref _validateModelName, 0, 1); - } - - // TODO move to IOpenAiClient - [Obsolete("This method will be removed in the next major version. 
Use EnableModelNameValidation from IOpenAiClient instead.")] - public static void EnableModelNameValidation() - { - Interlocked.CompareExchange(ref _validateModelName, 1, 0); - } - public static void EnsureMaxTokensIsSupported(string model, int maxTokens) { if (model == null) throw new ArgumentNullException(nameof(model)); diff --git a/src/OpenAI.ChatGpt/Models/ChatGPTConfig.cs b/src/OpenAI.ChatGpt/Models/ChatGPTConfig.cs index 6916868..7eca7e2 100644 --- a/src/OpenAI.ChatGpt/Models/ChatGPTConfig.cs +++ b/src/OpenAI.ChatGpt/Models/ChatGPTConfig.cs @@ -96,7 +96,7 @@ public int? MaxTokens public string? Model { get => _model; - set => _model = value != null ? ChatCompletionModels.FromString(value) : null; + set => _model = value; } /// @@ -167,7 +167,7 @@ internal void ModifyRequest(ChatCompletionRequest request) PassUserIdToOpenAiRequests = config.PassUserIdToOpenAiRequests ?? baseConfig.PassUserIdToOpenAiRequests, InitialSystemMessage = config.InitialSystemMessage ?? baseConfig.InitialSystemMessage, - InitialUserMessage = config.InitialUserMessage ?? baseConfig.InitialUserMessage + InitialUserMessage = config.InitialUserMessage ?? baseConfig.InitialUserMessage, } }; } diff --git a/src/OpenAI.ChatGpt/OpenAiClient.cs b/src/OpenAI.ChatGpt/OpenAiClient.cs index 6a73f8b..7800363 100644 --- a/src/OpenAI.ChatGpt/OpenAiClient.cs +++ b/src/OpenAI.ChatGpt/OpenAiClient.cs @@ -13,9 +13,9 @@ namespace OpenAI.ChatGpt; /// Thread-safe OpenAI client. 
/// https://github.com/openai/openai-openapi/blob/master/openapi.yaml [Fody.ConfigureAwait(false)] -public class OpenAiClient : IOpenAiClient, IDisposable +public class OpenAiClient : IAiClient, IDisposable { - private const string DefaultHost = "https://api.openai.com/v1/"; + internal const string DefaultHost = "https://api.openai.com/v1/"; private const string ChatCompletionsEndpoint = "chat/completions"; private static readonly Uri DefaultHostUri = new(DefaultHost); diff --git a/src/OpenAI.ChatGpt/OpenRouterClient.cs b/src/OpenAI.ChatGpt/OpenRouterClient.cs index 1c0072a..2baa68b 100644 --- a/src/OpenAI.ChatGpt/OpenRouterClient.cs +++ b/src/OpenAI.ChatGpt/OpenRouterClient.cs @@ -10,20 +10,19 @@ namespace OpenAI.ChatGpt; /// public class OpenRouterClient : OpenAiClient { - private const string DefaultHost = "https://openrouter.ai/api/v1/"; + internal new const string DefaultHost = "https://openrouter.ai/api/v1/"; + /// + /// Creates a new instance of + /// + /// OpenRouter API key. Can be issued here: https://openrouter.ai/keys + /// OpenRouter API host. Default is: public OpenRouterClient(string apiKey, string? host = DefaultHost) : base(apiKey, host ?? 
DefaultHost) { -#pragma warning disable CS0618 // Type or member is obsolete - ChatCompletionModels.DisableModelNameValidation(); -#pragma warning restore CS0618 // Type or member is obsolete } public OpenRouterClient(HttpClient httpClient) : base(httpClient) { -#pragma warning disable CS0618 // Type or member is obsolete - ChatCompletionModels.DisableModelNameValidation(); -#pragma warning restore CS0618 // Type or member is obsolete } } \ No newline at end of file diff --git a/src/modules/OpenAI.ChatGpt.Modules.StructuredResponse/OpenAiClientExtensions.GetStructuredResponse.cs b/src/modules/OpenAI.ChatGpt.Modules.StructuredResponse/OpenAiClientExtensions.GetStructuredResponse.cs index eeeb954..2748624 100644 --- a/src/modules/OpenAI.ChatGpt.Modules.StructuredResponse/OpenAiClientExtensions.GetStructuredResponse.cs +++ b/src/modules/OpenAI.ChatGpt.Modules.StructuredResponse/OpenAiClientExtensions.GetStructuredResponse.cs @@ -54,7 +54,7 @@ public static class OpenAiClientExtensions /// The original message content is restored after the API call. /// public static Task GetStructuredResponse( - this IOpenAiClient client, + this IAiClient client, UserOrSystemMessage dialog, int? maxTokens = null, string? model = null, @@ -89,7 +89,7 @@ public static Task GetStructuredResponse( } internal static async Task GetStructuredResponse( - this IOpenAiClient client, + this IAiClient client, UserOrSystemMessage dialog, string responseFormat, int? 
maxTokens = null, diff --git a/src/modules/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs b/src/modules/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs index 26a68a2..6f67934 100644 --- a/src/modules/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs +++ b/src/modules/OpenAI.ChatGpt.Modules.Translator/ChatGPTTranslatorService.cs @@ -10,14 +10,14 @@ namespace OpenAI.ChatGpt.Modules.Translator; // ReSharper disable once InconsistentNaming public class ChatGPTTranslatorService : IDisposable, IChatGPTTranslatorService { - private readonly IOpenAiClient _client; + private readonly IAiClient _client; private readonly string? _defaultSourceLanguage; private readonly string? _defaultTargetLanguage; private readonly string? _extraPrompt; private readonly bool _isHttpClientInjected; public ChatGPTTranslatorService( - IOpenAiClient client, + IAiClient client, string? defaultSourceLanguage = null, string? defaultTargetLanguage = null, string? extraPrompt = null) diff --git a/src/modules/OpenAI.ChatGpt.Modules.Translator/OpenAiClientExtensions.Translations.cs b/src/modules/OpenAI.ChatGpt.Modules.Translator/OpenAiClientExtensions.Translations.cs index 3700317..7b15cab 100644 --- a/src/modules/OpenAI.ChatGpt.Modules.Translator/OpenAiClientExtensions.Translations.cs +++ b/src/modules/OpenAI.ChatGpt.Modules.Translator/OpenAiClientExtensions.Translations.cs @@ -7,7 +7,7 @@ namespace OpenAI.ChatGpt.Modules.Translator; public static class OpenAiClientExtensions { public static Task TranslateText( - this IOpenAiClient client, + this IAiClient client, string text, string sourceLanguage, string targetLanguage, @@ -40,7 +40,7 @@ public static Task TranslateText( } public static Task TranslateObject( - this IOpenAiClient client, + this IAiClient client, TObject objectToTranslate, string sourceLanguage, string targetLanguage, diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptEntityFrameworkIntegrationTests.cs 
b/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptEntityFrameworkIntegrationTests.cs index e7cfe08..0ae3854 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptEntityFrameworkIntegrationTests.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptEntityFrameworkIntegrationTests.cs @@ -42,7 +42,7 @@ IConfiguration CreateConfiguration() var builder = new ConfigurationBuilder() .AddInMemoryCollection(new Dictionary() { - { $"{CredentialsConfigSectionPathDefault}:{nameof(OpenAICredentials.ApiKey)}", "test-api-key" }, + { $"{OpenAiCredentialsConfigSectionPathDefault}:{nameof(OpenAICredentials.ApiKey)}", "test-api-key" }, { ChatGPTConfigSectionPathDefault, ""}, }); diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptTranslatorServiceTests.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptTranslatorServiceTests.cs index 1f97931..017c022 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptTranslatorServiceTests.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ChatGptTranslatorServiceTests.cs @@ -6,7 +6,7 @@ namespace OpenAI.ChatGpt.IntegrationTests; [Collection("OpenAiTestCollection")] //to prevent parallel execution public class ChatGptTranslatorServiceTests : IClassFixture { - private readonly IOpenAiClient _client; + private readonly IAiClient _client; private const string GtpModel = ChatCompletionModels.Gpt4Turbo; diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/AzureOpenAiClientTests.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/AzureOpenAiClientTests.cs index 7e1c854..6d53b50 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/AzureOpenAiClientTests.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/AzureOpenAiClientTests.cs @@ -5,7 +5,7 @@ namespace OpenAI.ChatGpt.IntegrationTests.ClientTests; public class AzureOpenAiClientTests : IClassFixture { private readonly ITestOutputHelper _outputHelper; - private readonly IOpenAiClient _client; + private readonly IAiClient _client; public 
AzureOpenAiClientTests(ITestOutputHelper outputHelper, AzureOpenAiClientFixture fixture) { @@ -14,10 +14,12 @@ public AzureOpenAiClientTests(ITestOutputHelper outputHelper, AzureOpenAiClientF } [Fact] - public async void Get_chatgpt_response_for_one_message_works() + public async void Get_response_from_gpt4_32k_model_for_one_message_works() { string text = "Who are you? In two words."; - string response = await _client.GetChatCompletions(new UserMessage(text), 64); +#pragma warning disable CS0618 // Type or member is obsolete + string response = await _client.GetChatCompletions(new UserMessage(text), 64, model: ChatCompletionModels.Gpt4_32k); +#pragma warning restore CS0618 // Type or member is obsolete _outputHelper.WriteLine(response); response.Should().NotBeNullOrEmpty(); } diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsApiTests.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsApiTests.cs index 08a8580..448cc5c 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsApiTests.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsApiTests.cs @@ -6,7 +6,7 @@ namespace OpenAI.ChatGpt.IntegrationTests.ClientTests; public class ChatCompletionsApiTests : IClassFixture { private readonly ITestOutputHelper _outputHelper; - private readonly IOpenAiClient _client; + private readonly IAiClient _client; public ChatCompletionsApiTests(ITestOutputHelper outputHelper, OpenAiClientFixture fixture) { diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsVisionTests.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsVisionTests.cs index 131621e..b8bba0b 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsVisionTests.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/ChatCompletionsVisionTests.cs @@ -5,7 +5,7 @@ namespace OpenAI.ChatGpt.IntegrationTests.ClientTests; public class ChatCompletionsVisionTests : 
IClassFixture { private readonly ITestOutputHelper _outputHelper; - private readonly IOpenAiClient _client; + private readonly IAiClient _client; public ChatCompletionsVisionTests(ITestOutputHelper outputHelper, OpenAiClientFixture fixture) { diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/AzureOpenAiClientFixture.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/AzureOpenAiClientFixture.cs index 54b2b5d..3e66fcd 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/AzureOpenAiClientFixture.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/AzureOpenAiClientFixture.cs @@ -2,7 +2,7 @@ namespace OpenAI.ChatGpt.IntegrationTests.ClientTests.Fixtures; public class AzureOpenAiClientFixture { - public IOpenAiClient Client { get; } + public IAiClient Client { get; } public AzureOpenAiClientFixture() { diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenAiClientFixture.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenAiClientFixture.cs index 288c1aa..6daafcd 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenAiClientFixture.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenAiClientFixture.cs @@ -2,6 +2,6 @@ public class OpenAiClientFixture { - public IOpenAiClient Client { get; private set; } + public IAiClient Client { get; private set; } = new OpenAiClient(Helpers.GetOpenAiKey()); } \ No newline at end of file diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenRouterClientFixture.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenRouterClientFixture.cs index 0e49a68..317aa0d 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenRouterClientFixture.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/Fixtures/OpenRouterClientFixture.cs @@ -2,6 +2,6 @@ namespace OpenAI.ChatGpt.IntegrationTests.ClientTests.Fixtures; public class 
OpenRouterClientFixture { - public IOpenAiClient Client { get; private set; } + public IAiClient Client { get; private set; } = new OpenRouterClient(Helpers.GetOpenRouterKey()); } \ No newline at end of file diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenAiClient_GetStructuredResponse.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenAiClient_GetStructuredResponse.cs index 702f058..5a3f928 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenAiClient_GetStructuredResponse.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenAiClient_GetStructuredResponse.cs @@ -6,7 +6,7 @@ namespace OpenAI.ChatGpt.IntegrationTests.ClientTests; [Collection("OpenAiTestCollection")] //to prevent parallel execution public class OpenAiClientGetStructuredResponseTests : IClassFixture { - private readonly IOpenAiClient _client; + private readonly IAiClient _client; public OpenAiClientGetStructuredResponseTests(OpenAiClientFixture clientFixture) { diff --git a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenRouterClientTests.cs b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenRouterClientTests.cs index 759f48e..17d8294 100644 --- a/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenRouterClientTests.cs +++ b/tests/OpenAI.ChatGpt.IntegrationTests/ClientTests/OpenRouterClientTests.cs @@ -7,7 +7,7 @@ public class OpenRouterClientTests public class ChatCompletionsApiTests : IClassFixture { private readonly ITestOutputHelper _outputHelper; - private readonly IOpenAiClient _client; + private readonly IAiClient _client; public ChatCompletionsApiTests(ITestOutputHelper outputHelper, OpenRouterClientFixture fixture) { diff --git a/tests/OpenAI.ChatGpt.UnitTests/ChatGptTranslatorServiceTests.cs b/tests/OpenAI.ChatGpt.UnitTests/ChatGptTranslatorServiceTests.cs index dfc723f..e6044ce 100644 --- a/tests/OpenAI.ChatGpt.UnitTests/ChatGptTranslatorServiceTests.cs +++ b/tests/OpenAI.ChatGpt.UnitTests/ChatGptTranslatorServiceTests.cs @@ 
-34,7 +34,7 @@ public async Task Translate_without_source_and_target_languages_uses_default_lan var expectedSourceLanguage = "English"; var expectedTargetLanguage = "Russian"; var textToTranslate = "Hello, world!"; - var clientMock = new Mock(); + var clientMock = new Mock(); clientMock.Setup(client => client.GetChatCompletions( It.IsAny(), It.IsAny(), @@ -49,7 +49,7 @@ public async Task Translate_without_source_and_target_languages_uses_default_lan .ReturnsAsync("Привет, мир!"); var translatorServiceMock = new Mock( - (IOpenAiClient) clientMock.Object, + (IAiClient) clientMock.Object, expectedSourceLanguage, expectedTargetLanguage, (string) null!); diff --git a/tests/OpenAI.ChatGpt.UnitTests/DependencyInjectionTests/ChatGptServicesIntegrationTests.cs b/tests/OpenAI.ChatGpt.UnitTests/DependencyInjectionTests/ChatGptServicesIntegrationTests.cs index 9faba4d..0d38b34 100644 --- a/tests/OpenAI.ChatGpt.UnitTests/DependencyInjectionTests/ChatGptServicesIntegrationTests.cs +++ b/tests/OpenAI.ChatGpt.UnitTests/DependencyInjectionTests/ChatGptServicesIntegrationTests.cs @@ -27,8 +27,8 @@ public void AddChatGptCoreIntegration_added_expected_services() provider.GetRequiredService>(); provider.GetRequiredService(); - provider.GetRequiredService(); - provider.GetRequiredService(); + provider.GetRequiredService(); + provider.GetRequiredService(); } [Fact] @@ -101,7 +101,7 @@ IConfiguration CreateConfiguration() var builder = new ConfigurationBuilder() .AddInMemoryCollection(new Dictionary() { - { $"{CredentialsConfigSectionPathDefault}:{nameof(OpenAICredentials.ApiKey)}", "test-api-key" }, + { $"{OpenAiCredentialsConfigSectionPathDefault}:{nameof(OpenAICredentials.ApiKey)}", "test-api-key" }, { ChatGPTConfigSectionPathDefault, ""}, }); return builder.Build();