Skip to content

Commit

Permalink
Merge pull request #22 from oddiz/gpt-3.5-turbo-16k
Browse files — browse the repository at this point in the history
feat: added model gpt-3.5-turbo-16k
  • Loading branch information
sean1832 authored Oct 29, 2023
2 parents 31d08eb + 2fd1124 commit 5590572
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 3 deletions.
6 changes: 4 additions & 2 deletions src/Components/sidebar.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,12 +110,14 @@ def sidebar():
persona_sum = ""

with st.expander('🔥 Advanced Options'):
model_options = ['gpt-3.5-turbo', 'gpt-4']
model_options = ['gpt-3.5-turbo','gpt-3.5-turbo-16k', 'gpt-4']
model_index = model_options.index(_set_config(config_file, "MODEL", 'gpt-3.5-turbo'))
model = st.selectbox("Model", options=model_options, index=model_index)

if model == 'gpt-4':
max_chunk = 4000
elif model == 'gpt-3.5-turbo-16k':
max_chunk = 16000
else:
max_chunk = 2500
chunk_size = st.slider('Chunk Size (word count)', min_value=0, max_value=max_chunk, step=20,
Expand All
@@ -137,7 +139,7 @@ def sidebar():
value=_set_config(config_file, "PRESENCE_PENALTY", 0.0))
if st_toggle_switch(label="Delay (free openAI API user)",
default_value=_set_config(config_file, "ENABLE_DELAY", False)):
delay = st.slider('Delay (seconds)', min_value=0, max_value=5, step=1,
delay = st.slider('Delay (seconds)', min_value=0, max_value=60, step=1,
value=_set_config(config_file, "DELAY_TIME", 1))
else:
delay = 0
Expand Down
4 changes: 3 additions & 1 deletion src/SumGPT.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,10 @@
st.markdown('**Note:** To access GPT-4, please [join the waitlist](https://openai.com/waitlist/gpt-4-api)'
" if you haven't already received an invitation from OpenAI.")
st.info("ℹ️️ Please keep in mind that GPT-4 is significantly **[more expensive](https://openai.com/pricing#language-models)** than GPT-3.5. ")
elif param.model == 'gpt-3.5-turbo-16k':
price = round(prompt_token * 0.000003 + completion_token *0.000004, 5)
else:
price = round((prompt_token + completion_token) * 0.000002, 5)
price = round(prompt_token * 0.0000015 + completion_token * 0.000002 , 5)
st.markdown(
f"Price Prediction: `${price}` || Total Prompt: `{prompt_token}`, Total Completion: `{completion_token}`")
# max tokens exceeded warning
Expand Down
2 changes: 2 additions & 0 deletions src/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,8 @@ def exceeded_token_handler(param, chunks) -> bool:
"""Handles the case where the user has exceeded the number of tokens."""
if param.model == 'gpt-4':
max_token = 8100
elif param.model == 'gpt-3.5-turbo-16k':
max_token = 16385
else:
max_token = 4096
info = GPT.misc.is_tokens_exceeded(param, chunks, max_token)
Expand Down

0 comments on commit 5590572

Please sign in to comment.