import os
from enum import Enum

from dotenv import load_dotenv
from openai import AzureOpenAI

# Load environment variables from config.txt (treated as a dotenv file).
found_dotenv = load_dotenv("config.txt", override=True)
if not found_dotenv:
    raise ValueError("Could not find or load the dotenv file 'config.txt'.")

AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")


class OpenAIModels(Enum):
    """Names of the Azure OpenAI deployments this project expects."""

    GPT_3 = "gpt3"
    GPT_4 = "gpt4"
    EMBED = "embed"

    @classmethod
    def get_all_values(cls):
        return [member.value for member in cls]


def get_openai_client(model: str) -> AzureOpenAI:
    """Return an AzureOpenAI client bound to the given deployment name."""
    if model not in OpenAIModels.get_all_values():
        raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")
    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
        # Report which settings are missing without echoing secret values.
        raise ValueError(
            "None of the following environment variables may be unset: "
            "AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION."
        )
    client = AzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        api_version=OPENAI_API_VERSION,
        azure_deployment=model,
    )
    return client


class ChatGPT:
    """Small stateful wrapper that keeps the running conversation history."""

    def __init__(self, model="gpt4"):
        self.model = model
        self.client = get_openai_client(model=model)
        self.messages = []

    def chat_with_gpt(self, user_input: str):
        # Append the user turn, then generate and return the assistant reply.
        self.messages.append({"role": "user", "content": user_input})
        response = self._generate_response(self.messages)
        return response

    def _generate_response(self, messages):
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,
            max_tokens=150,
            top_p=1.0,
        )
        response_message = response.choices[0].message
        # Store the assistant turn so follow-up calls keep the full context.
        self.messages.append({
            "role": response_message.role,
            "content": response_message.content,
        })
        return response_message.content
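

# A minimal usage sketch, not part of the original module: it assumes that
# config.txt defines AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT and
# OPENAI_API_VERSION, and that a deployment named "gpt4" exists on the
# Azure OpenAI resource.
if __name__ == "__main__":
    bot = ChatGPT(model=OpenAIModels.GPT_4.value)
    print(bot.chat_with_gpt("Explain in one sentence what an Azure OpenAI deployment is."))
    # A second call reuses the accumulated self.messages history,
    # so the model sees the previous question and answer.
    print(bot.chat_with_gpt("Now rephrase that for a non-technical reader."))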