Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
*env/
.env
*.egg-*
*.pyc
*.txt
config.txt
@@ -9,5 +9,5 @@ docker-build-master:
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
   script:
-    - docker build --pull -t "$CI_REGISTRY_IMAGE":prod .
-    - docker push "$CI_REGISTRY_IMAGE":prod
+    - docker build --pull -t "$CI_REGISTRY_IMAGE":test .
+    - docker push "$CI_REGISTRY_IMAGE":test
-FROM jupyter/scipy-notebook:hub-1.5.0
+FROM jupyter/datascience-notebook:hub-3.1.1
# Install from APT repository
USER root
# combine update and install in one layer so a cached, stale package index is never used
RUN apt-get update -y && apt-get install -y git
# Install basics
USER jovyan
RUN pip3 install --upgrade pip
COPY requirements.txt environment.yml /tmp/
RUN conda env update -q -f /tmp/environment.yml && \
    /opt/conda/bin/pip install -r /tmp/requirements.txt && \
    conda clean -y --all && \
    conda env export -n "root" && \
    jupyter lab build
# Install nice-to-have JupyterLab extensions
# RUN pip install --upgrade jupyterlab
RUN pip install jupyterlab-git==0.34.0
RUN pip install jupyterlab-gitlab==3.0.0
COPY dash_proxy /tmp/dash_proxy/
RUN pip install /tmp/dash_proxy/
COPY llm_utils /llm_utils/
RUN pip install /llm_utils/
ENV CONFIG_PATH=/home/jovyan/config.txt
COPY app /dash/app/
RUN chown -R jovyan /dash/app/
-# bücker
+# A JupyterLab for LLM
To run the Dash app or use the LLM client, the variables `AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_ENDPOINT`, and `OPENAI_API_VERSION` need to be stored in a `config.txt` file in the home directory.
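For example, `config.txt` could look like this (all values are placeholders):

AZURE_OPENAI_API_KEY=<your-api-key>
AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
OPENAI_API_VERSION=<api-version, e.g. 2023-05-15>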
import sys
sys.path.append("/home/jovyan/")
import argparse
import logging
from urllib.parse import urlparse, urljoin
from dash import Dash
from jupyter_server.serverapp import list_running_servers
from layout import layout
from callbacks import register_callbacks
logging.basicConfig(level=logging.INFO)
# find the base URL of the running JupyterLab instance (assumes the default port 8888)
def find_jupyterlab_base_url():
    servers = list_running_servers()
    for server in servers:
        if server["port"] == 8888:
            return server["url"]
    return None
# read the port assigned by jupyter-server-proxy
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
if not port:
    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
    logging.debug(f"Dash app running on port {port}.")
base_url = find_jupyterlab_base_url()
if base_url is None:
    raise ValueError("Base URL of JupyterLab could not be detected.")
logging.debug(f"Base URL: {base_url}")
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")
# define Dash app
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path
)
# define layout
app.layout = layout
# register all callback functions
register_callbacks(app=app)
# run the Dash app behind the JupyterLab proxy
app.run(
    jupyter_mode="jupyterlab",
    port=port,
    host="0.0.0.0",
    debug=True
)
import os
from datetime import datetime
from dash import (
html,
Dash
)
from dash.dependencies import (
Input,
Output,
State
)
from llm_utils.client import ChatGPT, get_openai_client
def format_chat_messages(chat_history):
    chat_messages = []
    for message in chat_history:
        chat_messages.append(html.Div([
            html.P(f'{message["sender"]}: {message["message"]}'),
            html.P(f'Sent at: {message["timestamp"]}')
        ]))
    return chat_messages
def register_callbacks(app: Dash):
    model = "gpt4"
    client = get_openai_client(
        model=model,
        config_path=os.environ.get("CONFIG_PATH")
    )
    chat_gpt = ChatGPT(
        client=client,
        model=model
    )

    @app.callback(
        [Output('chat-container', 'children'),
         Output('chat-history', 'data')],
        [Input('send-button', 'n_clicks')],
        [State('user-input', 'value'),
         State('chat-history', 'data')]
    )
    def update_chat(n_clicks, input_value, chat_history):
        if chat_history is None:
            chat_history = []
        if n_clicks > 0 and input_value:
            # add the user message to the chat history
            chat_history.append({
                'sender': 'User',
                'message': input_value,
                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })
            response = chat_gpt.chat_with_gpt(input_value)
            # add the model response to the chat history
            chat_history.append({
                'sender': 'Language Model',
                'message': response,
                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })
        return format_chat_messages(chat_history), chat_history
from dash import (
html,
dcc
)
layout = html.Div([
    dcc.Store(
        id='chat-history',
        data=[]
    ),
    html.H1(
        "Simple Chat App",
        style={'textAlign': 'center'}
    ),
    html.Div(
        id='chat-container',
        style={'overflowY': 'scroll', 'height': '70vh', 'padding': '10px'}
    ),
    html.Div([
        dcc.Input(
            id='user-input',
            type='text',
            placeholder='Type your message...',
            debounce=True
        ),
        html.Button(
            'Send',
            id='send-button',
            n_clicks=0
        )
    ], style={
        'display': 'flex',
        'alignItems': 'center',
        'justifyContent': 'center',
        'position': 'fixed',
        'bottom': 0,
        'width': '100%',
        'padding': '10px'
    })
], style={'position': 'relative'})
import sys
sys.path.append("/home/jovyan/")
import argparse
import logging
from urllib.parse import urlparse, urljoin
from dash import Dash
from jupyter_server.serverapp import list_running_servers
try:
    from my_layout import layout
    from my_callbacks import register_callbacks
except ModuleNotFoundError:
    # user code is missing; do not let Dash start
    sys.exit()
logging.basicConfig(level=logging.INFO)
# find the base URL of the running JupyterLab instance (assumes the default port 8888)
def find_jupyterlab_base_url():
    servers = list_running_servers()
    for server in servers:
        if server["port"] == 8888:
            return server["url"]
    return None
# read the port assigned by jupyter-server-proxy
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
if not port:
    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
    logging.debug(f"Dash app running on port {port}.")
base_url = find_jupyterlab_base_url()
if base_url is None:
    raise ValueError("Base URL of JupyterLab could not be detected.")
logging.debug(f"Base URL: {base_url}")
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")
# define Dash app
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path
)
# define layout
app.layout = layout
# register all callback functions
register_callbacks(app=app)
# run the Dash app behind the JupyterLab proxy
app.run(
    jupyter_mode="jupyterlab",
    port=port,
    host="0.0.0.0",
    debug=True
)
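Note that my_app.py expects user-provided modules `my_layout.py` and `my_callbacks.py` in /home/jovyan/ (which is appended to sys.path above); if they are missing, the app exits instead of starting. A minimal sketch of what these modules could contain (the content is entirely hypothetical):

# /home/jovyan/my_layout.py
from dash import html

layout = html.Div([html.H1("My custom app")])

# /home/jovyan/my_callbacks.py
from dash import Dash

def register_callbacks(app: Dash):
    # register your own callbacks on `app` here
    pass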
def setup_dash_proxy():
    command = [
        "python",
        "/dash/app/app.py",
        "--port",
        "{port}"
    ]

    return {
        "command": command,
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            "title": "Dash"
        }
    }
def setup_my_app_proxy():
    command = [
        "python",
        "/dash/app/my_app.py",
        "--port",
        "{port}"
    ]

    return {
        "command": command,
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            "title": "MyApp"
        }
    }
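For reference, jupyter-server-proxy substitutes the `{port}` placeholder with the port it allocates at launch time, so the effective command for the Dash entry looks roughly like this (the port value is illustrative):

# what jupyter-server-proxy effectively executes (port chosen at launch)
["python", "/dash/app/app.py", "--port", "45678"]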
import setuptools
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="A small module to run Dash inside a dockerized Jupyterlab.",
    name="jupyter-dash-proxy",
    py_modules=["dash_proxy", "my_app_proxy"],
    entry_points={
        "jupyter_serverproxy_servers": [
            # name = packagename:function_name
            "Dash = dash_proxy:setup_dash_proxy",
            "MyApp = my_app_proxy:setup_my_app_proxy"
        ]
    },
    install_requires=["jupyter-server-proxy==4.0.0"],
)
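Once this package is installed, jupyter-server-proxy discovers the two launchers through the `jupyter_serverproxy_servers` entry-point group. A quick way to check the registration (a sketch; the `group` keyword requires Python 3.10+):

from importlib.metadata import entry_points

# list the proxy servers registered by installed packages
eps = entry_points(group="jupyter_serverproxy_servers")
print(sorted(ep.name for ep in eps))  # expect ['Dash', 'MyApp'] after installation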
name: "base"
channels:
- defaults
# dependencies:
# - add packages here
# - one per line
prefix: "/opt/conda"
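For example, with the commented template filled in, the file could look like this (the package names are placeholders):

name: "base"
channels:
  - defaults
dependencies:
  - numpy
  - pandas
prefix: "/opt/conda"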
import setuptools
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="Helper modules to work with LLMs.",
    name="llm_utils",
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    install_requires=[
        "openai",
        "python-dotenv"
    ]
)
import os
from openai import AzureOpenAI
from dotenv import load_dotenv
from enum import Enum
class OpenAIModels(Enum):
    GPT_3 = "gpt3"
    GPT_4 = "gpt4"
    EMBED = "embed"

    @classmethod
    def get_all_values(cls):
        return [member.value for member in cls]
def get_openai_client(
    model: str,
    config_path: str
) -> AzureOpenAI:
    if model not in OpenAIModels.get_all_values():
        raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")

    load_dotenv(
        dotenv_path=config_path,
        override=True
    )

    AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
    AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
    OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")

    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
        raise ValueError(
            f"""None of the following parameters can be None:
            AZURE_OPENAI_API_KEY: {AZURE_OPENAI_API_KEY},
            AZURE_OPENAI_ENDPOINT: {AZURE_OPENAI_ENDPOINT},
            OPENAI_API_VERSION: {OPENAI_API_VERSION}
            """
        )

    client = AzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        api_version=OPENAI_API_VERSION,
        azure_deployment=model
    )
    return client
class ChatGPT:
    def __init__(self, client: AzureOpenAI, model: str):
        self.model = model
        self.client = client
        self.messages = []

    def chat_with_gpt(self, user_input: str):
        # record the user message, then generate and return the model reply
        self.messages.append({
            "role": "user",
            "content": user_input
        })
        response = self._generate_response(self.messages)
        return response

    def _generate_response(self, messages):
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,
            max_tokens=150,
            top_p=1.0
        )
        response_message = response.choices[0].message
        # keep the assistant reply in the running conversation history
        self.messages.append({
            "role": response_message.role,
            "content": response_message.content
        })
        return response_message.content
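A minimal usage sketch of these helpers (the config path matches the `CONFIG_PATH` default set in the Dockerfile; the prompt is illustrative):

import os
from llm_utils.client import ChatGPT, get_openai_client

# assumes a config.txt as described in the README
client = get_openai_client(
    model="gpt4",
    config_path=os.environ.get("CONFIG_PATH", "/home/jovyan/config.txt")
)
chat = ChatGPT(client=client, model="gpt4")
print(chat.chat_with_gpt("Hello! What can you do?"))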