Merge pull request #21 from sergiobayona/1-implement-multiple-modes
1 implement multiple modes
sergiobayona authored May 22, 2024
2 parents 1902ca9 + b7c130e commit 4b59dc6
Showing 16 changed files with 223 additions and 203 deletions.
22 changes: 0 additions & 22 deletions .github/workflows/gem-release.yml

This file was deleted.

8 changes: 8 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,11 @@
## [0.1.3] - 2024-05-22
- Bumped OpenAI client version.
- Laying the groundwork for more modes. See https://python.useinstructor.com/concepts/patching/ for more information.
- Allow the OpenAI client to be used normally in case you just want to use other client features.

## [0.1.2] - 2024-05-17
- Improved the ability to customize the function name and the LLM function call description (instructions).

## [0.1.1] - 2024-05-07
- Improved documentation in /docs folder.
- Readme updates.
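As a quick illustration of the last 0.1.3 note, a patched client called without a `response_model` now falls through to the normal OpenAI request path. A minimal sketch, mirroring the standard-usage spec added in this PR:

require 'instructor'

client = Instructor.patch(OpenAI::Client).new

# No response_model, so chat short-circuits to json_post and returns
# the raw OpenAI response Hash, just like an unpatched client.
response = client.chat(
  parameters: {
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'How is the weather today in New York?' }]
  }
)

response.dig('choices', 0, 'message', 'content') # => a String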
2 changes: 1 addition & 1 deletion instructor-rb.gemspec
@@ -33,7 +33,7 @@ Gem::Specification.new do |spec|

spec.add_dependency 'activesupport', '~> 7.0'
spec.add_dependency 'easy_talk', '~> 0.2'
spec.add_dependency 'ruby-openai', '~> 6'
spec.add_dependency 'ruby-openai', '~> 7'
spec.add_development_dependency 'pry-byebug', '~> 3.10'
spec.add_development_dependency 'rake', '~> 13.1'
spec.add_development_dependency 'rspec', '~> 3.0'
11 changes: 10 additions & 1 deletion lib/instructor.rb
@@ -6,23 +6,32 @@
require_relative 'instructor/version'
require_relative 'instructor/openai/patch'
require_relative 'instructor/openai/response'
require_relative 'instructor/mode'

# Instructor makes it easy to reliably get structured data like JSON from Large Language Models (LLMs)
# like GPT-3.5, GPT-4, GPT-4-Vision
module Instructor
@mode = nil

class Error < ::StandardError; end

# The ValidationError class represents an error that occurs during validation.
class ValidationError < ::StandardError; end

def self.mode
@mode
end

# Patches the OpenAI client to add the following functionality:
# - Retries on exceptions
# - Accepts and validates a response model
# - Accepts a validation_context argument
#
# @param openai_client [OpenAI::Client] The OpenAI client to be patched.
# @param mode [String] The mode to be used. Defaults to `Instructor::Mode::TOOLS.function`.
# @return [OpenAI::Client] The patched OpenAI client.
def self.patch(openai_client)
def self.patch(openai_client, mode: Instructor::Mode::TOOLS.function)
@mode = mode
openai_client.prepend(Instructor::OpenAI::Patch)
end
end
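Patching now takes an optional `mode:` keyword. A minimal sketch of both forms, using modes that appear in the specs below:

# Default mode: the patched client pins tool_choice to the generated function.
client = Instructor.patch(OpenAI::Client).new

# Explicit mode: let the model decide whether to call the function at all.
client = Instructor.patch(OpenAI::Client, mode: Instructor::Mode::TOOLS.auto).new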
22 changes: 22 additions & 0 deletions lib/instructor/mode.rb
@@ -0,0 +1,22 @@
# frozen_string_literal: true

require 'ostruct'

module Instructor
# This module defines constants related to different modes of operation.
# It provides options for tool behavior, function types, and JSON modes.
# Currently supported modes are:
# - tools: select between function, auto, required, and none.
# More modes will be added in the near future.
module Mode
tool_options = %w[function auto required none].index_by(&:itself)
TOOL_BEHAVIOR = OpenStruct.new(tool_options)

FUNCTIONS = 'function_call'
PARALLEL_TOOLS = 'parallel_tool_call'
TOOLS = TOOL_BEHAVIOR
JSON = 'json_mode'
MD_JSON = 'markdown_json_mode'
JSON_SCHEMA = 'json_schema_mode'
end
end
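Because `index_by(&:itself)` maps each option string to itself, the `TOOLS` OpenStruct simply echoes the option names back as strings:

Instructor::Mode::TOOLS.function  # => "function"
Instructor::Mode::TOOLS.auto      # => "auto"
Instructor::Mode::TOOLS.required  # => "required"
Instructor::Mode::TOOLS.none      # => "none"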
31 changes: 26 additions & 5 deletions lib/instructor/openai/patch.rb
@@ -29,6 +29,8 @@ def with_retries(max_retries, exceptions, &block)
# @param validation_context [Hash] The validation context for the parameters. Optional.
# @return [Object] The processed response.
def chat(parameters:, response_model: nil, max_retries: 0, validation_context: nil)
return json_post(path: '/chat/completions', parameters:) if response_model.nil?

with_retries(max_retries, [JSON::ParserError, Instructor::ValidationError, Faraday::ParsingError]) do
model = determine_model(response_model)
function = build_function(model)
@@ -46,7 +48,22 @@ def chat(parameters:, response_model: nil, max_retries: 0, validation_context: nil)
# @return [Hash] The prepared parameters.
def prepare_parameters(parameters, validation_context, function)
parameters = apply_validation_context(parameters, validation_context)
parameters.merge(tools: [function])
parameters.merge!(tools: [function])
tool_choice = resolve_tool_choice(function)
parameters.merge!(tool_choice:)
end

def resolve_tool_choice(function)
case Instructor.mode
when Instructor::Mode::TOOLS.function
{ type: 'function', function: { name: function[:function][:name] } }
when Instructor::Mode::TOOLS.auto
'auto'
when Instructor::Mode::TOOLS.required
'required'
when Instructor::Mode::TOOLS.none
'none'
end
end

# Processes the API response.
@@ -56,7 +73,11 @@ def prepare_parameters(parameters, validation_context, function)
# @return [Object] The processed response.
def process_response(response, model)
parsed_response = Response.new(response).parse
iterable? ? process_multiple_responses(parsed_response, model) : process_single_response(parsed_response, model)
if iterable?(parsed_response)
process_multiple_responses(parsed_response, model)
else
process_single_response(parsed_response, model)
end
end

# Processes multiple responses from the API.
@@ -84,7 +105,7 @@ def process_single_response(parsed_response, model)
# Determines the response model based on the provided value.
#
# @param response_model [Class] The response model class or typed array.
# @return [Class] The determined response model class.
# @return [Class] The response model.
def determine_model(response_model)
if response_model.is_a?(T::Types::TypedArray)
@iterable = true
@@ -146,8 +167,8 @@ def generate_description(model)
# Checks if the response is iterable.
#
# @return [Boolean] `true` if the response is iterable, `false` otherwise.
def iterable?
@iterable
def iterable?(response)
@iterable && response.is_a?(Array)
end
end
end
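For reference, a sketch of what `resolve_tool_choice` contributes to the request parameters under each tool behavior (the `User` function name here is hypothetical):

function = { type: 'function', function: { name: 'User' } } # hypothetical

# TOOLS.function (the default) forces that specific function:
#   tool_choice: { type: 'function', function: { name: 'User' } }
#
# The other behaviors map straight onto OpenAI's string values:
#   TOOLS.auto     => 'auto'      # model decides whether to call a tool
#   TOOLS.required => 'required'  # model must call some tool
#   TOOLS.none     => 'none'      # model must not call any tool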
2 changes: 1 addition & 1 deletion lib/instructor/version.rb
@@ -1,5 +1,5 @@
# frozen_string_literal: true

module Instructor
VERSION = '0.1.1'
VERSION = '0.1.3'
end
6 changes: 3 additions & 3 deletions spec/features/iterable_spec.rb
@@ -8,7 +8,7 @@
include EasyTalk::Model

def self.name
'User'
'Users'
end

define_schema do
@@ -18,13 +18,13 @@ def self.name
end
end

let(:client) { Instructor.patch(OpenAI::Client).new }
let(:client) { Instructor.patch(OpenAI::Client, mode: Instructor::Mode::TOOLS.required).new }

let(:parameters) do
{
model: 'gpt-3.5-turbo',
messages: [
{ role: 'system', content: 'Extract the names and ages of the users' },
{ role: 'system', content: 'Extract the names and ages of all the users' },
{ role: 'user', content: 'Extract `Jason is 25 and Peter is 32`' }
]
}
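For context, the iterable path in the patch is triggered by passing a typed array as the response model; a sketch under that assumption:

users = client.chat(parameters: parameters, response_model: T::Array[User])
# determine_model sees a T::Types::TypedArray, sets @iterable, and
# process_multiple_responses validates each extracted user separately.
# => an array of validated User instances, one per extracted user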
15 changes: 15 additions & 0 deletions spec/instructor_spec.rb
@@ -0,0 +1,15 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Instructor, '.class' do
it 'returns the default mode after patching' do
described_class.patch(OpenAI::Client)
expect(described_class.mode).to eq(Instructor::Mode::TOOLS.function)
end

it 'changes the mode' do
described_class.patch(OpenAI::Client, mode: Instructor::Mode::TOOLS.auto)
expect(described_class.mode).to eq(Instructor::Mode::TOOLS.auto)
end
end
14 changes: 14 additions & 0 deletions spec/openai/patch_spec.rb
@@ -188,4 +188,18 @@ def self.name
end.to raise_error(Instructor::ValidationError)
end
end

describe 'when the client is used in a standard manner' do
it 'does not raise an error when the client is used in a standard manner', vcr: 'patching_spec/standard_usage' do
response = patched_client.new.chat(
parameters: {
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'How is the weather today in New York?' }]
}
)

expect(response).to be_a(Hash)
expect(response.dig('choices', 0, 'message', 'content')).to be_a(String)
end
end
end
42 changes: 18 additions & 24 deletions spec/vcr_cassettes/basic_spec/valid_response.yml

Some generated files are not rendered by default.
