Skip to content
Snippets Groups Projects
Commit 9782a559 authored by Julian Rasch's avatar Julian Rasch
Browse files

Merge branch 'dev' into 'dev'

Dev

See merge request !2
parents 382d5e48 cc492b0f
No related branches found
No related tags found
2 merge requests!6Finalized Jupyterlab for the sprint,!2Dev
Pipeline #182425 passed
This commit is part of merge request !6. Comments created here will be created in the context of that merge request.
*env/ *env/
.env .env
*.egg-*
*.pyc
*.txt
config.txt
...@@ -9,9 +9,13 @@ RUN conda env update -q -f /tmp/environment.yml && \ ...@@ -9,9 +9,13 @@ RUN conda env update -q -f /tmp/environment.yml && \
conda env export -n "root" && \ conda env export -n "root" && \
jupyter lab build jupyter lab build
COPY jupyter_notebook_config.py ${HOME}/.jupyter/ RUN pip3 install --upgrade pip
# copy dash app COPY dash_proxy /tmp/dash_proxy/
COPY app ${HOME}/app/ RUN pip install /tmp/dash_proxy/
RUN chown -R jovyan ${HOME}/app/
COPY llm_utils /llm_utils/
RUN pip install /llm_utils/
COPY app /dash/app/
RUN chown -R jovyan /dash/app/
# A Jupyterlab for LLM # A Jupyterlab for LLM
In order to run the Dash app or use the client, the variables AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and OPENAI_API_VERSION need to be stored in a config.txt file in the home directory.
import sys import sys
sys.path.append("/home/jovyan") sys.path.append("/home/jovyan/")
import argparse import argparse
import logging import logging
...@@ -26,7 +26,7 @@ else: ...@@ -26,7 +26,7 @@ else:
# define Dash app # define Dash app
app = Dash( app = Dash(
name=__name__, name=__name__,
requests_pathname_prefix="/dash/" requests_pathname_prefix="/Dash/"
) )
# define layout # define layout
......
from datetime import datetime
from dash import ( from dash import (
html, html,
Dash Dash
...@@ -8,16 +10,48 @@ from dash.dependencies import ( ...@@ -8,16 +10,48 @@ from dash.dependencies import (
State State
) )
from llm_utils.client import ChatGPT
def format_chat_messages(chat_history):
    """Render the chat history as a list of Dash Divs.

    Each entry is expected to carry "sender", "message" and "timestamp"
    keys; every entry becomes a Div with two paragraphs (message line,
    then the timestamp line).
    """
    return [
        html.Div([
            html.P(f'{entry["sender"]}: {entry["message"]}'),
            html.P(f'Sent at: {entry["timestamp"]}')
        ])
        for entry in chat_history
    ]
def register_callbacks(app: Dash): def register_callbacks(app: Dash):
chat_gpt = ChatGPT(model="gpt4")
@app.callback( @app.callback(
Output("output-container", "children"), [Output('chat-container', 'children'),
[Input("send-button", "n_clicks")], Output('chat-history', 'data')],
[State("input-text", "value")] [Input('send-button', 'n_clicks')],
[State('user-input', 'value'),
State('chat-history', 'data')]
) )
def generate_response(n_clicks, input_text): def update_chat(n_clicks, input_value, chat_history):
if n_clicks > 0: if chat_history is None:
response = "You said: " + input_text chat_history = []
return html.Div(response)
else: if n_clicks > 0 and input_value:
return "" chat_history.append({
'sender': 'User',
'message': input_value,
'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
})
response = chat_gpt.chat_with_gpt(input_value)
# Add response to chat history
chat_history.append({
'sender': 'Language Model',
'message': response,
'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
})
return format_chat_messages(chat_history), chat_history
...@@ -3,12 +3,38 @@ from dash import ( ...@@ -3,12 +3,38 @@ from dash import (
dcc dcc
) )
layout = html.Div( layout = html.Div([
className="container", dcc.Store(
children=[ id='chat-history',
html.H1("GPT Chat", className="mt-5 mb-4"), data=[]
dcc.Textarea(id="input-text", placeholder="Enter your message:", className="form-control mb-3"), ),
html.Button("Send", id="send-button", n_clicks=0, className="btn btn-primary mb-3"), html.H1(
html.Div(id="output-container") "Simple Chat App",
] style={'text-align': 'center'}
) ),
html.Div(
id='chat-container',
style={'overflowY': 'scroll', 'height': '70vh', 'padding': '10px'}
),
html.Div([
dcc.Input(
id='user-input',
type='text',
placeholder='Type your message...',
debounce=True
),
html.Button(
'Send',
id='send-button',
n_clicks=0
)
], style={
'display': 'flex',
'alignItems': 'center',
'justifyContent': 'center',
'position': 'fixed',
'bottom': 0,
'width': '100%',
'padding': '10px'
})
], style={'position': 'relative'})
def setup_dash_proxy():
    """Return the jupyter-server-proxy server spec for the Dash app.

    jupyter-server-proxy substitutes "{port}" with the port it picks,
    launches the command, and shows a "Dash" tile in the launcher.
    """
    return {
        "command": ['python', '/dash/app/app.py', '--port', '{port}'],
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            'title': 'Dash'
        }
    }
# Packaging script for the jupyter-dash-proxy helper module.
import setuptools
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="A small module to run Dash inside a dockerized Jupyterlab.",
    name="jupyter-dash-proxy",
    # Single-module distribution: ships dash_proxy.py only.
    py_modules=["dash_proxy"],
    entry_points={
        # jupyter-server-proxy discovers servers through this entry-point
        # group; it calls dash_proxy.setup_dash_proxy() for the config.
        "jupyter_serverproxy_servers": [
            # name = packagename:function_name
            "Dash = dash_proxy:setup_dash_proxy",
        ]
    },
    # Pinned version of jupyter-server-proxy.
    install_requires=["jupyter-server-proxy==4.0.0"],
)
# Configuration file for jupyter-notebook.
# `c` is the config object injected by Jupyter when this file is loaded.
# Registers the Dash app as a jupyter-server-proxy server: the proxy
# starts the command below and substitutes "{port}" with a free port.
# NOTE(review): the command uses the relative path 'app/app.py' while the
# launcher entry point elsewhere uses '/dash/app/app.py' — confirm which
# working directory this runs from.
c.ServerProxy.servers = {
    'dash': {
        'command': [
            'python',
            'app/app.py',
            '--port',
            '{port}'
        ],
        'absolute_url': False,
        'new_browser_tab': False
    }
}
# Packaging script for the llm_utils helper package (src/ layout).
import setuptools
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="Helper modules to work with LLMs.",
    name="llm_utils",
    # Packages live under src/ (src-layout project).
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    install_requires=[
        "openai",
        "python-dotenv"
    ]
)
import os
import logging
from openai import AzureOpenAI
from dotenv import load_dotenv
from enum import Enum
# Load the Azure OpenAI credentials from config.txt.
# NOTE: load_dotenv() returns False when the file is missing — it does
# not raise — so the fallback must be driven by the return value, not
# only by an exception handler (the original except-ValueError fallback
# never fired for a missing file).
try:
    found_dotenv = load_dotenv(
        "/home/jovyan/config.txt",
        override=True
    )
except ValueError:
    found_dotenv = False
if not found_dotenv:
    # logging.warn is deprecated; logging.warning is the supported API.
    logging.warning("Could not detect config.txt in /home/jovyan/. Searching in current folder ...")
    found_dotenv = load_dotenv(
        "config.txt",
        override=True)
if not found_dotenv:
    raise ValueError("Could not detect config.txt in /home/jovyan/ or the current folder.")
# Credentials read by get_openai_client; any of these may be None if the
# corresponding key is absent from config.txt/environment.
AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")
class OpenAIModels(Enum):
    """Deployment names the client knows how to talk to."""
    GPT_3 = "gpt3"
    GPT_4 = "gpt4"
    EMBED = "embed"

    @classmethod
    def get_all_values(cls):
        """Return every member's string value, in declaration order."""
        return [m.value for m in cls]
def get_openai_client(model: str) -> AzureOpenAI:
    """Build an AzureOpenAI client for the given deployment name.

    Args:
        model: Deployment name; must be one of OpenAIModels' values
            ("gpt3", "gpt4", "embed").

    Returns:
        An AzureOpenAI client bound to that deployment.

    Raises:
        ValueError: If the model name is unknown, or any of the required
            credentials (API key, endpoint, API version) is None.
    """
    if model not in OpenAIModels.get_all_values():
        raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")
    # Bug fix: the original checked AZURE_OPENAI_API_KEY twice and never
    # validated AZURE_OPENAI_ENDPOINT (both in the guard and in the message).
    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
        raise ValueError(
            f"""None of the following parameters can be none:
            AZURE_OPENAI_API_KEY: {AZURE_OPENAI_API_KEY},
            AZURE_OPENAI_ENDPOINT: {AZURE_OPENAI_ENDPOINT},
            OPENAI_API_VERSION: {OPENAI_API_VERSION}
            """
        )
    client = AzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        api_version=OPENAI_API_VERSION,
        azure_deployment=model
    )
    return client
class ChatGPT:
    """Minimal stateful chat wrapper around an Azure OpenAI deployment.

    Keeps the running conversation in ``self.messages`` (a list of
    role/content dicts) so each call carries the full history.
    """

    def __init__(self, model="gpt4"):
        self.model = model
        self.client = get_openai_client(model=model)
        self.messages = []

    def chat_with_gpt(self, user_input: str):
        """Record the user's message, query the model, return its reply text."""
        self.messages.append({
            "role": "user",
            "content": user_input
        })
        return self._generate_response(self.messages)

    def _generate_response(self, messages):
        """Call the chat-completions API and append the reply to the history."""
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,
            max_tokens=150,
            top_p=1.0
        )
        reply = completion.choices[0].message
        self.messages.append({
            "role": reply.role,
            "content": reply.content
        })
        return reply.content
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment