Skip to content
Snippets Groups Projects
Commit 9782a559 authored by Julian Rasch's avatar Julian Rasch
Browse files

Merge branch 'dev' into 'dev'

Dev

See merge request !2
parents 382d5e48 cc492b0f
No related branches found
No related tags found
2 merge requests!6Finalized Jupyterlab for the sprint,!2Dev
Pipeline #182425 passed
*env/
.env
*.egg-*
*.pyc
*.txt
config.txt
......@@ -9,9 +9,13 @@ RUN conda env update -q -f /tmp/environment.yml && \
conda env export -n "root" && \
jupyter lab build
COPY jupyter_notebook_config.py ${HOME}/.jupyter/
RUN pip3 install --upgrade pip
# copy dash app
COPY app ${HOME}/app/
RUN chown -R jovyan ${HOME}/app/
COPY dash_proxy /tmp/dash_proxy/
RUN pip install /tmp/dash_proxy/
COPY llm_utils /llm_utils/
RUN pip install /llm_utils/
COPY app /dash/app/
RUN chown -R jovyan /dash/app/
# A Jupyterlab for LLM
In order to run Dash or use the client, the values AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and OPENAI_API_VERSION need to be stored in a config.txt file in the home directory.
import sys
sys.path.append("/home/jovyan")
sys.path.append("/home/jovyan/")
import argparse
import logging
......@@ -26,7 +26,7 @@ else:
# define Dash app
app = Dash(
name=__name__,
requests_pathname_prefix="/dash/"
requests_pathname_prefix="/Dash/"
)
# define layout
......
from datetime import datetime
from dash import (
html,
Dash
......@@ -8,16 +10,48 @@ from dash.dependencies import (
State
)
from llm_utils.client import ChatGPT
def format_chat_messages(chat_history):
    """Render a chat history as a list of Dash Divs.

    Each entry is expected to be a dict with 'sender', 'message' and
    'timestamp' keys (as produced by the chat callback); one Div with
    two paragraphs is emitted per entry.
    """
    return [
        html.Div([
            html.P(f'{entry["sender"]}: {entry["message"]}'),
            html.P(f'Sent at: {entry["timestamp"]}'),
        ])
        for entry in chat_history
    ]
def register_callbacks(app: Dash):
    """Attach the chat callback to *app*.

    Fix: the merged diff left the old callback (generate_response with
    output-container/input-text ids) interleaved with the new one, which
    is not valid Python; only the new-side callback is kept here.

    A single ChatGPT client is created once per registration so the
    model-side conversation state persists across callback invocations.
    """
    chat_gpt = ChatGPT(model="gpt4")

    @app.callback(
        [Output('chat-container', 'children'),
         Output('chat-history', 'data')],
        [Input('send-button', 'n_clicks')],
        [State('user-input', 'value'),
         State('chat-history', 'data')]
    )
    def update_chat(n_clicks, input_value, chat_history):
        # dcc.Store supplies None before any data has been written.
        if chat_history is None:
            chat_history = []

        # Ignore the initial render (n_clicks == 0) and empty inputs.
        if n_clicks > 0 and input_value:
            chat_history.append({
                'sender': 'User',
                'message': input_value,
                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })
            response = chat_gpt.chat_with_gpt(input_value)
            # Add response to chat history
            chat_history.append({
                'sender': 'Language Model',
                'message': response,
                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })

        return format_chat_messages(chat_history), chat_history
......@@ -3,12 +3,38 @@ from dash import (
dcc
)
# Fix: the merged diff kept two `layout = ...` assignments; the first
# (GPT Chat textarea version) was dead code immediately overwritten by
# the second. Only the live layout is kept.
layout = html.Div([
    # Client-side store holding the chat history across callbacks.
    dcc.Store(
        id='chat-history',
        data=[]
    ),
    html.H1(
        "Simple Chat App",
        style={'text-align': 'center'}
    ),
    # Scrollable message area filled by the chat callback.
    html.Div(
        id='chat-container',
        style={'overflowY': 'scroll', 'height': '70vh', 'padding': '10px'}
    ),
    # Input row pinned to the bottom of the viewport.
    html.Div([
        dcc.Input(
            id='user-input',
            type='text',
            placeholder='Type your message...',
            debounce=True
        ),
        html.Button(
            'Send',
            id='send-button',
            n_clicks=0
        )
    ], style={
        'display': 'flex',
        'alignItems': 'center',
        'justifyContent': 'center',
        'position': 'fixed',
        'bottom': 0,
        'width': '100%',
        'padding': '10px'
    })
], style={'position': 'relative'})
def setup_dash_proxy():
    """jupyter-server-proxy factory for launching the bundled Dash app.

    Returns the server-process specification consumed by
    jupyter-server-proxy: the launch command ('{port}' is substituted by
    Jupyter at start time) plus launcher/UI options.
    """
    return {
        "command": ['python', '/dash/app/app.py', '--port', '{port}'],
        # Serve inside the JupyterLab frame rather than a new tab.
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            # Label shown on the JupyterLab launcher tile.
            'title': 'Dash',
        },
    }
import setuptools

# Packaging metadata for the dash_proxy helper module.  Installing this
# package registers `setup_dash_proxy` as a jupyter-server-proxy entry
# point so JupyterLab can launch the Dash app from its launcher.
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="A small module to run Dash inside a dockerized Jupyterlab.",
    name="jupyter-dash-proxy",
    py_modules=["dash_proxy"],
    entry_points={
        "jupyter_serverproxy_servers": [
            # name = packagename:function_name
            "Dash = dash_proxy:setup_dash_proxy",
        ]
    },
    # Pinned: the entry-point contract is specific to this proxy version.
    install_requires=["jupyter-server-proxy==4.0.0"],
)
# Configuration file for jupyter-notebook.
# Registers the Dash app with jupyter-server-proxy so it is reachable
# under the notebook server (e.g. at /dash).  `c` is the traitlets
# config object injected by Jupyter when this file is loaded.
# NOTE(review): the command uses the relative path 'app/app.py' while
# dash_proxy uses '/dash/app/app.py' — presumably both resolve inside
# the container; verify against the Dockerfile COPY targets.
c.ServerProxy.servers = {
    'dash': {
        'command': [
            'python',
            'app/app.py',
            '--port',
            '{port}'  # substituted with a free port by jupyter-server-proxy
        ],
        'absolute_url': False,
        'new_browser_tab': False
    }
}
import setuptools

# Packaging metadata for the llm_utils package (src/ layout).  Provides
# the Azure OpenAI client helpers used by the Dash chat app.
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="Helper modules to work with LLMs.",
    name="llm_utils",
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    install_requires=[
        "openai",
        "python-dotenv"
    ]
)
import os
import logging
from openai import AzureOpenAI
from dotenv import load_dotenv
from enum import Enum
# Load the Azure OpenAI credentials from config.txt, preferring the
# container home directory and falling back to the current folder.
try:
    found_dotenv = load_dotenv(
        "/home/jovyan/config.txt",
        override=True
    )
except ValueError:
    # Fix: logging.warn is a deprecated alias of logging.warning.
    logging.warning("Could not detect config.txt in /home/jovyan/. Searching in current folder ...")
    found_dotenv = load_dotenv(
        "config.txt",
        override=True)
if not found_dotenv:
    # Fix: the old message claimed only /home/jovyan/ was searched.
    raise ValueError("Could not detect config.txt in /home/jovyan/ or the current folder.")

# Credentials required by get_openai_client(); any may be None if the
# config file was present but incomplete.
AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")
class OpenAIModels(Enum):
    """Azure OpenAI deployment names supported by this package."""

    GPT_3 = "gpt3"
    GPT_4 = "gpt4"
    EMBED = "embed"

    @classmethod
    def get_all_values(cls):
        """Return every deployment name as a plain list of strings."""
        return [model.value for model in cls]
def get_openai_client(model: str) -> AzureOpenAI:
    """Build an AzureOpenAI client bound to the given deployment.

    :param model: one of OpenAIModels.get_all_values() (e.g. "gpt4").
    :raises ValueError: if the model is unknown or any credential
        loaded from config.txt is missing.
    """
    if model not in OpenAIModels.get_all_values():
        raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")
    # Fix: the original guard tested AZURE_OPENAI_API_KEY twice and
    # never validated AZURE_OPENAI_ENDPOINT; the endpoint is checked now
    # and the error message reports each variable once.
    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
        raise ValueError(
            f"""None of the following parameters can be none:
            AZURE_OPENAI_API_KEY: {AZURE_OPENAI_API_KEY},
            AZURE_OPENAI_ENDPOINT: {AZURE_OPENAI_ENDPOINT},
            OPENAI_API_VERSION: {OPENAI_API_VERSION}
            """
        )
    client = AzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        api_version=OPENAI_API_VERSION,
        azure_deployment=model
    )
    return client
class ChatGPT:
    """Stateful conversational wrapper around an Azure OpenAI chat deployment.

    The full message history is kept on the instance, so successive
    chat_with_gpt() calls form one continuous conversation.
    """

    def __init__(self, model="gpt4"):
        # Build the client once and reuse it for every completion call.
        self.model = model
        self.client = get_openai_client(model=model)
        self.messages = []

    def chat_with_gpt(self, user_input: str):
        """Record *user_input* in the history and return the model's reply."""
        self.messages.append({
            "role": "user",
            "content": user_input
        })
        return self._generate_response(self.messages)

    def _generate_response(self, messages):
        # Low temperature keeps answers focused; max_tokens bounds reply size.
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,
            max_tokens=150,
            top_p=1.0
        )
        reply = completion.choices[0].message
        # Persist the assistant turn so the next call has full context.
        self.messages.append({
            "role": reply.role,
            "content": reply.content
        })
        return reply.content
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment