diff --git a/.env b/.env new file mode 100644 index 000000000..968faee06 --- /dev/null +++ b/.env @@ -0,0 +1,4 @@ +AZURE_OPENAI_ENDPOINT='' +AZURE_OPENAI_DEPLOYMENT='gpt-35-turbo' # deployment name you chose when you deployed the model +AZURE_OPENAI_KEY='' +AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT='text-embedding-ada-002' diff --git a/.env.copy b/.env.copy index 5ef493b54..566c05eb6 100644 --- a/.env.copy +++ b/.env.copy @@ -1,3 +1,4 @@ AZURE_OPENAI_ENDPOINT='' AZURE_OPENAI_DEPLOYMENT='' # deployment name you chose when you deployed the model AZURE_OPENAI_KEY='' +AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT='' # deployment name you chose when you deployed the embeddings model \ No newline at end of file diff --git a/08-building-search-applications/solution.ipynb b/08-building-search-applications/solution.ipynb index 046c2e791..fc4605a06 100644 --- a/08-building-search-applications/solution.ipynb +++ b/08-building-search-applications/solution.ipynb @@ -4,12 +4,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In order to run the following noteboooks, if you haven't done yet, you need to deploy a model that uses `text-embedding-ada-002` as base model and set his deployment name inside .env file as `AZURE_OPENAI_EMBEDDINGS_ENDPOINT`" + "In order to run the following notebooks, if you haven't done so yet, you need to deploy a model that uses `text-embedding-ada-002` as base model and set the deployment name inside the .env file as `AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT`" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -20,17 +20,19 @@ "from dotenv import load_dotenv\n", "\n", "from sklearn.metrics.pairwise import cosine_similarity\n", + "\n", + "# Load environment variables from .env file in the root directory\n", "load_dotenv()\n", "\n", "client = AzureOpenAI(\n", " api_key=os.environ['AZURE_OPENAI_KEY'], # this is also the default, it can be 
omitted\n", " api_version = \"2023-05-15\"\n", - " )\n", +")\n", "\n", "model = os.environ['AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT']\n", "\n", "SIMILARITIES_RESULTS_THRESHOLD = 0.75\n", - "DATASET_NAME = \"embedding_index_3m.json\"" + "DATASET_NAME = \"embedding_index_3m.json\"\n" ] }, { @@ -42,7 +44,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -67,7 +69,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -107,7 +109,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -151,7 +153,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -188,7 +190,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.9.6" } }, "nbformat": 4, diff --git a/09-building-image-applications/app.py b/09-building-image-applications/app.py index bc564f63f..73a4d75b1 100644 --- a/09-building-image-applications/app.py +++ b/09-building-image-applications/app.py @@ -1,4 +1,4 @@ -import openai +from openai import AzureOpenAI, BadRequestError import os import requests from PIL import Image @@ -7,18 +7,21 @@ # import dotenv dotenv.load_dotenv() -# Get endpoint and key from environment variables -openai.api_base = os.environ['AZURE_OPENAI_ENDPOINT'] -openai.api_key = os.environ['AZURE_OPENAI_KEY'] + # Assign the API version (DALL-E is currently supported for the 2023-06-01-preview API version only) -openai.api_version = '2023-06-01-preview' -openai.api_type = 'azure' +client = AzureOpenAI( + api_key=os.environ['AZURE_OPENAI_KEY'], # this is also the default, it can be omitted + api_version = "2023-06-01-preview", + azure_endpoint=os.environ['AZURE_OPENAI_ENDPOINT'] + ) + +model = os.environ['AZURE_OPENAI_DEPLOYMENT'] try: # Create an image by using the 
image generation API - generation_response = openai.Image.create( + generation_response = client.images.generate( prompt='Bunny on horse, holding a lollipop, on a foggy meadow where it grows daffodils', # Enter your prompt text here size='1024x1024', n=2, @@ -45,12 +48,12 @@ image.show() # catch exceptions -except openai.error.InvalidRequestError as err: +except BadRequestError as err: print(err) # ---creating variation below--- -response = openai.Image.create_variation( +response = client.images.create_variation( image=open(image_path, "rb"), n=1, size="1024x1024" diff --git a/09-building-image-applications/notebook-azureopenai.ipynb b/09-building-image-applications/notebook-azureopenai.ipynb index 17781138c..448d42c14 100644 --- a/09-building-image-applications/notebook-azureopenai.ipynb +++ b/09-building-image-applications/notebook-azureopenai.ipynb @@ -328,7 +328,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.11" + "version": "3.9.6" }, "orig_nbformat": 4 },