Compare revisions
Commits on Source (76)
@@ -5,3 +5,8 @@
 *.pyc
 *.txt
 config.txt
+run.sh
+build.sh
+run_my_app.sh
+run_streamlit_app.sh
\ No newline at end of file
variables:
  DOCKER_TLS_CERTDIR: ""
  TIMEOUT: 3600

docker-build-master:
  # Official docker image.
  image: docker:latest
@@ -9,5 +10,6 @@ docker-build-master:
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
   script:
-    - docker build --pull -t "$CI_REGISTRY_IMAGE":test .
-    - docker push "$CI_REGISTRY_IMAGE":test
+    - cp $ENV_FILE .env
+    - docker build --pull -t "$CI_REGISTRY_IMAGE":prodcc4 .
+    - docker push "$CI_REGISTRY_IMAGE":prodcc4
-FROM jupyter/datascience-notebook:hub-3.1.1
+FROM quay.io/jupyter/datascience-notebook:hub-5.2.0
USER root
COPY requirements.txt environment.yml /tmp/
RUN conda env update -q -f /tmp/environment.yml && \
/opt/conda/bin/pip install -r /tmp/requirements.txt && \
conda clean -y --all && \
conda env export -n "root" && \
jupyter lab build
COPY dash_proxy /tmp/dash_proxy/
RUN pip install /tmp/dash_proxy/
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt && rm requirements.txt
# install some utilities for GPT
COPY llm_utils /llm_utils/
RUN pip install /llm_utils/
ENV CONFIG_PATH=/home/jovyan/config.txt
COPY app /dash/app/
RUN chown -R jovyan /dash/app/
# install some NLTK and spaCy data
RUN python -m nltk.downloader stopwords
RUN python -m nltk.downloader wordnet
RUN python -m spacy download en_core_web_sm
USER jovyan
\ No newline at end of file
# A Jupyterlab for LLM
In order to run the Dash app or use the OpenAI client, the values AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and OPENAI_API_VERSION need to be stored in a config.txt file in the home directory (the Dockerfile sets CONFIG_PATH to /home/jovyan/config.txt).
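For reference, a minimal config.txt could look like the following. The exact key/value syntax that llm_utils.client expects is not shown in this comparison, so treat the format below as an assumption:

```
AZURE_OPENAI_API_KEY=<your-api-key>
AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
OPENAI_API_VERSION=<your-api-version>
```

With that file in place, the client can also be used directly in a notebook. This is a sketch that mirrors the setup in callbacks.py and assumes the same "gpt4" deployment name used there:

```python
import os

from llm_utils.client import ChatGPT, get_openai_client

# CONFIG_PATH is set to /home/jovyan/config.txt in the Dockerfile
client = get_openai_client(
    model="gpt4",
    config_path=os.environ.get("CONFIG_PATH")
)
chat_gpt = ChatGPT(client=client, model="gpt4")

# send a single prompt and print the model's reply
print(chat_gpt.chat_with_gpt("Hello, are you there?"))
```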
import sys
sys.path.append("/home/jovyan/")

import argparse
import logging

from urllib.parse import urlparse, urljoin
from dash import Dash
from jupyter_server.serverapp import list_running_servers

from layout import layout
from callbacks import register_callbacks

logging.basicConfig(level=logging.INFO)


# weird trick to find base_url for the jupyterlab
def find_jupyterlab_base_url():
    servers = list_running_servers()
    for server in servers:
        if server["port"] == 8888:
            return server["url"]
    return None


# get the correct port from proxy
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
if not port:
    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
    logging.debug(f"Dash app running on port {port}.")

base_url = find_jupyterlab_base_url()
if base_url is None:
    raise ValueError("Base URL of Jupyterlab could not be detected.")
logging.debug(f"Base URL: {base_url}")

proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")

# define Dash app
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path
)

# define layout
app.layout = layout

# register all callback functions
register_callbacks(app=app)

# Run Dash app in the notebook
app.run(
    jupyter_mode="jupyterlab",
    port=port,
    host="0.0.0.0",
    debug=True
)
import os
from datetime import datetime

from dash import (
    html,
    Dash
)
from dash.dependencies import (
    Input,
    Output,
    State
)

from llm_utils.client import ChatGPT, get_openai_client


def format_chat_messages(chat_history):
    chat_messages = []
    for message in chat_history:
        chat_messages.append(html.Div([
            html.P(f'{message["sender"]}: {message["message"]}'),
            html.P(f'Sent at: {message["timestamp"]}')
        ]))
    return chat_messages


def register_callbacks(app: Dash):
    model = "gpt4"
    client = get_openai_client(
        model=model,
        config_path=os.environ.get("CONFIG_PATH")
    )
    chat_gpt = ChatGPT(
        client=client,
        model="gpt4"
    )

    @app.callback(
        [Output('chat-container', 'children'),
         Output('chat-history', 'data')],
        [Input('send-button', 'n_clicks')],
        [State('user-input', 'value'),
         State('chat-history', 'data')]
    )
    def update_chat(n_clicks, input_value, chat_history):
        if chat_history is None:
            chat_history = []

        if n_clicks > 0 and input_value:
            chat_history.append({
                'sender': 'User',
                'message': input_value,
                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })

            response = chat_gpt.chat_with_gpt(input_value)

            # Add response to chat history
            chat_history.append({
                'sender': 'Language Model',
                'message': response,
                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })

        return format_chat_messages(chat_history), chat_history
from dash import (
    html,
    dcc
)

layout = html.Div([
    dcc.Store(
        id='chat-history',
        data=[]
    ),
    html.H1(
        "Simple Chat App",
        style={'text-align': 'center'}
    ),
    html.Div(
        id='chat-container',
        style={'overflowY': 'scroll', 'height': '70vh', 'padding': '10px'}
    ),
    html.Div([
        dcc.Input(
            id='user-input',
            type='text',
            placeholder='Type your message...',
            debounce=True
        ),
        html.Button(
            'Send',
            id='send-button',
            n_clicks=0
        )
    ], style={
        'display': 'flex',
        'alignItems': 'center',
        'justifyContent': 'center',
        'position': 'fixed',
        'bottom': 0,
        'width': '100%',
        'padding': '10px'
    })
], style={'position': 'relative'})
import sys
sys.path.append("/home/jovyan/")

import argparse
import logging

from urllib.parse import urlparse, urljoin
from dash import Dash
from jupyter_server.serverapp import list_running_servers

try:
    from my_layout import layout
    from my_callbacks import register_callbacks
except ModuleNotFoundError:
    # do not let Dash start
    exit()

logging.basicConfig(level=logging.INFO)


# weird trick to find base_url for the jupyterlab
def find_jupyterlab_base_url():
    servers = list_running_servers()
    for server in servers:
        if server["port"] == 8888:
            return server["url"]
    return None


# get the correct port from proxy
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
if not port:
    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
    logging.debug(f"Dash app running on port {port}.")

base_url = find_jupyterlab_base_url()
if base_url is None:
    raise ValueError("Base URL of Jupyterlab could not be detected.")
logging.debug(f"Base URL: {base_url}")

proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")

# define Dash app
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path
)

# define layout
app.layout = layout

# register all callback functions
register_callbacks(app=app)

# Run Dash app in the notebook
app.run(
    jupyter_mode="jupyterlab",
    port=port,
    host="0.0.0.0",
    debug=True
)
def setup_dash_proxy():
    command = [
        'python',
        '/dash/app/app.py',
        '--port',
        '{port}'
    ]

    return {
        "command": command,
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            'title': 'Dash'
        }
    }
def setup_my_app_proxy():
    command = [
        'python',
        '/dash/app/my_app.py',
        '--port',
        '{port}'
    ]

    return {
        "command": command,
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            'title': 'MyApp'
        }
    }
import setuptools

setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="A small module to run Dash inside a dockerized Jupyterlab.",
    name="jupyter-dash-proxy",
    py_modules=["dash_proxy", "my_app_proxy"],
    entry_points={
        "jupyter_serverproxy_servers": [
            # name = packagename:function_name
            "Dash = dash_proxy:setup_dash_proxy",
            "MyApp = my_app_proxy:setup_my_app_proxy"
        ]
    },
    install_requires=["jupyter-server-proxy==4.0.0"],
)
name: "base"
channels:
- defaults
# dependencies:
# - add packages here
# - one per line
prefix: "/opt/conda"
@@ -8,6 +8,7 @@ from enum import Enum
 class OpenAIModels(Enum):
     GPT_3 = "gpt3"
     GPT_4 = "gpt4"
+    GPT_4o = "gpt-4o"
     EMBED = "embed"

     @classmethod
@@ -43,7 +44,7 @@ def get_openai_client(
     client = AzureOpenAI(
         api_key=AZURE_OPENAI_API_KEY,
         azure_endpoint=AZURE_OPENAI_ENDPOINT,
-        api_version=OPENAI_API_VERSION,
+        # api_version=OPENAI_API_VERSION,
         azure_deployment=model
     )
     return client
from dash.dependencies import (
    Input,
    Output
)
from dash import html


def register_callbacks(app):
    @app.callback(
        Output('output-container-button', 'children'),
        [Input('submit-btn', 'n_clicks')],
        [Input('input-text', 'value')]
    )
    def update_output(n_clicks, input_value):
        if n_clicks > 0:
            return html.Div([
                html.Label("You entered:"),
                html.P(input_value)
            ])
        else:
            return ''
from dash import html
from dash import dcc

layout = html.Div([
    html.H1("Yeay, my app!"),
    html.Div([
        html.Label("Enter your text:"),
        dcc.Input(id='input-text', type='text', value=''),
        html.Button('Submit', id='submit-btn', n_clicks=0),
    ]),
    html.Div(id='output-container-button')
])