Commit 933b9ab9 authored by Julian Rasch

Merge branch 'switch_jupyterlab_and_app' into 'main'

Switch jupyterlab and app

See merge request !7
parents b17b95ff 599dbc0b
Pipeline #232640 passed
@@ -5,3 +5,8 @@
*.pyc
*.txt
config.txt
run.sh
build.sh
run_my_app.sh
run_streamlit_app.sh
-FROM jupyter/datascience-notebook:hub-3.1.1
+FROM quay.io/jupyter/datascience-notebook:hub-5.2.0
USER root
COPY requirements.txt environment.yml /tmp/
RUN conda env update -q -f /tmp/environment.yml && \
/opt/conda/bin/pip install -r /tmp/requirements.txt && \
conda clean -y --all && \
conda env export -n "root" && \
jupyter lab build
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt && rm requirements.txt
COPY dash_proxy /tmp/dash_proxy/
RUN pip install /tmp/dash_proxy/
ENV HOME_PATH=/home/jovyan/
# add the proxy for streamlit
COPY streamlit_proxy /tmp/streamlit_proxy/
RUN pip install /tmp/streamlit_proxy/
# install some utilities for GPT
COPY llm_utils /llm_utils/
RUN pip install /llm_utils/
ENV CONFIG_PATH=/home/jovyan/config.txt
COPY app /dash/app/
RUN chown -R jovyan /dash/app/
# copy the apps into the container
COPY app /streamlit/app/
RUN chown -R jovyan /streamlit/app/
# install some NLTK and spaCy data
RUN python -m nltk.downloader stopwords
RUN python -m nltk.downloader wordnet
RUN python -m nltk.downloader punkt
RUN python -m spacy download en_core_web_sm
USER jovyan
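The image downloads the NLTK corpora and the spaCy model at build time. A minimal sanity check from inside the built container (a sketch, assuming the downloads above completed):

from nltk.corpus import stopwords
import spacy

print(stopwords.words("english")[:5])           # stopwords corpus baked into the image
nlp = spacy.load("en_core_web_sm")              # spaCy model baked into the image
print([tok.lemma_ for tok in nlp("The apps now run behind JupyterLab.")])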
import sys
sys.path.append("/home/jovyan/")
import os
import streamlit as st
import argparse
import logging
from llm_utils.client import get_openai_client
from urllib.parse import urlparse, urljoin
from dash import Dash
from jupyter_server.serverapp import list_running_servers
from layout import layout
from callbacks import register_callbacks
logging.basicConfig(level=logging.INFO)
# weird trick to find base_url for the jupyterlab
def find_jupyterlab_base_url():
servers = list_running_servers()
for server in servers:
if server["port"] == 8888:
return server['url']
return None
# get the correct port from proxy
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
MODEL = "gpt-4o"
client = get_openai_client(
model=MODEL,
config_path=os.environ.get("CONFIG_PATH")
)
if not port:
raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
logging.debug(f"Dash app running on port {port}.")
base_url = find_jupyterlab_base_url()
if base_url is None:
raise ValueError("Base URL of Jupyterlab could not be detected.")
logging.debug(f"Base URL: {base_url}")
# STREAMLIT APP
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")
st.title("ChatGPT in Streamlit")
# define Dash app
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path
)
# define layout
app.layout = layout
# register all callback functions
register_callbacks(app=app)
# Run Dash app in the notebook
app.run(
jupyter_mode="jupyterlab",
port=port,
host="0.0.0.0",
debug=True
)
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-4o"
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("What is up?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
stream = client.chat.completions.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
)
response = st.write_stream(stream)
st.session_state.messages.append({"role": "assistant", "content": response})
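For reference, the stream object that st.write_stream consumes here is the openai v1 streaming response; iterated manually it looks roughly like this (a sketch using the same client helper, with chunk handling as in the openai Python SDK):

import os
from llm_utils.client import get_openai_client

client = get_openai_client(model="gpt-4o", config_path=os.environ.get("CONFIG_PATH"))

stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)
for chunk in stream:
    # each chunk carries an incremental delta of the assistant message
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)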
import os
from datetime import datetime
from dash import (
html,
Dash
)
from dash.dependencies import (
Input,
Output,
State
)
from llm_utils.client import ChatGPT, get_openai_client
def format_chat_messages(chat_history):
chat_messages = []
for message in chat_history:
chat_messages.append(html.Div([
html.P(f'{message["sender"]}: {message["message"]}'),
html.P(f'Sent at: {message["timestamp"]}')
]))
return chat_messages
def register_callbacks(app: Dash):
model="gpt4"
client = get_openai_client(
model=model,
config_path=os.environ.get("CONFIG_PATH")
)
chat_gpt = ChatGPT(
client=client,
model="gpt4"
)
@app.callback(
[Output('chat-container', 'children'),
Output('chat-history', 'data')],
[Input('send-button', 'n_clicks')],
[State('user-input', 'value'),
State('chat-history', 'data')]
)
def update_chat(n_clicks, input_value, chat_history):
if chat_history is None:
chat_history = []
if n_clicks > 0 and input_value:
chat_history.append({
'sender': 'User',
'message': input_value,
'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
})
response = chat_gpt.chat_with_gpt(input_value)
# Add response to chat history
chat_history.append({
'sender': 'Language Model',
'message': response,
'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
})
return format_chat_messages(chat_history), chat_history
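The ChatGPT helper used above comes from the repository's own llm_utils package, whose ChatGPT class is not shown in this diff. A hypothetical sketch of what chat_with_gpt plausibly does (names and behaviour are assumptions, not the actual implementation):

class ChatGPT:
    # hypothetical stand-in for llm_utils.client.ChatGPT
    def __init__(self, client, model):
        self.client = client
        self.model = model
        self.history = []

    def chat_with_gpt(self, prompt: str) -> str:
        # keep a running conversation and return only the assistant's reply text
        self.history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model=self.model,
            messages=self.history,
        )
        answer = response.choices[0].message.content
        self.history.append({"role": "assistant", "content": answer})
        return answer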
from dash import (
html,
dcc
)
layout = html.Div([
dcc.Store(
id='chat-history',
data=[]
),
html.H1(
"Simple Chat App",
style={'text-align': 'center'}
),
html.Div(
id='chat-container',
style={'overflowY': 'scroll', 'height': '70vh', 'padding': '10px'}
),
html.Div([
dcc.Input(
id='user-input',
type='text',
placeholder='Type your message...',
debounce=True
),
html.Button(
'Send',
id='send-button',
n_clicks=0
)
], style={
'display': 'flex',
'alignItems': 'center',
'justifyContent': 'center',
'position': 'fixed',
'bottom': 0,
'width': '100%',
'padding': '10px'
})
], style={'position': 'relative'})
import os
import sys
sys.path.append("/home/jovyan/")
import argparse
import logging
HOME_PATH = os.environ.get("HOME_PATH")
sys.path.append(HOME_PATH)
from urllib.parse import urlparse, urljoin
from dash import Dash
from jupyter_server.serverapp import list_running_servers
print(HOME_PATH)
try:
from my_layout import layout
from my_callbacks import register_callbacks
except ModuleNotFoundError:
# do not let Dash start
exit()
logging.basicConfig(level=logging.INFO)
# weird trick to find base_url for the jupyterlab
def find_jupyterlab_base_url():
servers = list_running_servers()
for server in servers:
if server["port"] == 8888:
return server['url']
return None
# get the correct port from proxy
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
if not port:
raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
logging.debug(f"Dash app running on port {port}.")
base_url = find_jupyterlab_base_url()
if base_url is None:
raise ValueError("Base URL of Jupyterlab could not be detected.")
logging.debug(f"Base URL: {base_url}")
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")
# define Dash app
app = Dash(
name=__name__,
requests_pathname_prefix=proxy_base_path
)
# define layout
app.layout = layout
# register all callback functions
register_callbacks(app=app)
# Run Dash app in the notebook
app.run(
jupyter_mode="jupyterlab",
port=port,
host="0.0.0.0",
debug=True
)
try:
    import streamlit_app  # this runs the app
except ModuleNotFoundError as e:
    # do not let the app start
    print(e)
    exit()
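The proxy path computed above is what makes the app reachable through JupyterLab's own URL space. A worked example with plain stdlib calls, assuming the single-user server reports a base URL of /user/jovyan and the app was assigned port 8501 (both values are illustrative):

from urllib.parse import urlparse, urljoin

base_url = "http://127.0.0.1:8888/user/jovyan"   # as returned by list_running_servers()
port = 8501

proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
print(proxy_base_path)  # -> /user/jovyan/proxy/8501/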
<svg width="512" height="512" viewBox="0 0 512 512" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M255.968 288.494L166.211 241.067L10.4062 158.753C10.2639 158.611 9.97951 158.611 9.83728 158.611C4.14838 155.909 -1.68275 161.596 0.450591 167.283L79.8393 369.685L79.8535 369.728C79.9388 369.927 80.0099 370.126 80.0953 370.325C83.3522 377.874 90.4633 382.537 98.2002 384.371C98.8544 384.513 99.3225 384.643 100.108 384.799C100.89 384.973 101.983 385.21 102.922 385.281C103.078 385.295 103.221 385.295 103.377 385.31H103.491C103.605 385.324 103.718 385.324 103.832 385.338H103.989C104.088 385.352 104.202 385.352 104.302 385.352H104.486C104.6 385.366 104.714 385.366 104.828 385.366L167.175 392.161C226.276 398.602 285.901 398.602 345.002 392.161L407.35 385.366C408.558 385.366 409.739 385.31 410.877 385.196C411.246 385.153 411.602 385.111 411.958 385.068C412 385.054 412.057 385.054 412.1 385.039C412.342 385.011 412.583 384.968 412.825 384.926C413.181 384.883 413.536 384.812 413.892 384.741C414.603 384.585 414.926 384.471 415.891 384.139C416.856 383.808 418.458 383.228 419.461 382.745C420.464 382.261 421.159 381.798 421.999 381.272C423.037 380.618 424.024 379.948 425.025 379.198C425.457 378.868 425.753 378.656 426.066 378.358L425.895 378.258L255.968 288.494Z" fill="#FF2B2B"/>
<path d="M501.789 158.755H501.647L345.784 241.07L432.426 370.058L511.616 167.285V167.001C513.607 161.03 507.492 155.627 501.789 158.755" fill="#7D353B"/>
<path d="M264.274 119.615C260.292 113.8 251.616 113.8 247.776 119.615L166.211 241.068L255.968 288.495L426.067 378.357C427.135 377.312 427.991 376.293 428.897 375.217C430.177 373.638 431.372 371.947 432.424 370.056L345.782 241.068L264.274 119.615Z" fill="#BD4043"/>
</svg>
def setup_dash_proxy():
command = [
'python',
'/dash/app/app.py',
'--port',
'{port}'
]
return {
"command": command,
"new_browser_tab": False,
"launcher_entry": {
"enabled": True,
'title': 'Dash'
}
}
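jupyter-server-proxy consumes the dictionary returned by such a setup function: it allocates a free port, substitutes it into the {port} placeholder of every command token, launches the command, and adds a launcher tile based on launcher_entry. Roughly (a simplified illustration, not the library's actual code):

def launch(server_process: dict, port: int) -> list:
    # every string in "command" is formatted with the allocated port
    return [part.format(port=port) for part in server_process["command"]]

print(launch(setup_dash_proxy(), 8050))
# -> ['python', '/dash/app/app.py', '--port', '8050']

The streamlit_proxy and my_app_proxy modules below are handled the same way.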
name: "base"
channels:
- defaults
# dependencies:
# - add packages here
# - one per line
prefix: "/opt/conda"
@@ -8,6 +8,7 @@ from enum import Enum
class OpenAIModels(Enum):
GPT_3 = "gpt3"
GPT_4 = "gpt4"
GPT_4o = "gpt-4o"
EMBED = "embed"
@classmethod
@@ -43,7 +44,7 @@ def get_openai_client(
client = AzureOpenAI(
api_key=AZURE_OPENAI_API_KEY,
azure_endpoint=AZURE_OPENAI_ENDPOINT,
-api_version=OPENAI_API_VERSION,
+# api_version=OPENAI_API_VERSION,
azure_deployment=model
)
return client
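With the new enum value, client creation for gpt-4o looks like this (a sketch; it assumes OpenAIModels sits next to get_openai_client in llm_utils.client, as the hunks above suggest, and that a matching Azure deployment named "gpt-4o" exists):

import os
from llm_utils.client import get_openai_client, OpenAIModels

client = get_openai_client(
    model=OpenAIModels.GPT_4o.value,   # "gpt-4o"
    config_path=os.environ.get("CONFIG_PATH"),
)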
......
from dash.dependencies import (
Input,
Output
)
from dash import html
def register_callbacks(app):
@app.callback(
Output('output-container-button', 'children'),
[Input('submit-btn', 'n_clicks')],
[Input('input-text', 'value')]
)
def update_output(n_clicks, input_value):
if n_clicks > 0:
return html.Div([
html.Label("You entered:"),
html.P(input_value)
])
else:
return ''
from dash import html
from dash import dcc
layout = html.Div([
html.H1("Yeay, my app!"),
html.Div([
html.Label("Enter your text:"),
dcc.Input(id='input-text', type='text', value=''),
html.Button('Submit', id='submit-btn', n_clicks=0),
]),
html.Div(id='output-container-button')
])
import os
from dotenv import load_dotenv
import sqlalchemy
import pandas as pd
from urllib.parse import quote_plus
load_dotenv("/usr/src/app/.env")
class Database:
def __init__(self, db_name):
self.conn = db_connect(db_name)
def __getattr__(self, table_name):
return pd.read_sql_table(table_name, self.conn)
def list_tables(self):
inspector = sqlalchemy.inspect(self.conn)
table_names = inspector.get_table_names()
return table_names
def db_connect(db_name):
hostname=os.getenv("DB_HOST")
user=os.getenv("DB_USER")
password=quote_plus(os.getenv("DB_PASSWORD"))
conn = sqlalchemy.create_engine(f'postgresql+psycopg2://{user}:{password}@{hostname}/{db_name}')
return conn
def get_table(db_name, table_name):
conn = db_connect(db_name)
dat = pd.read_sql_table(table_name, conn)
return dat
def get_all_tables(db_name):
db_obj = Database(db_name)
return db_obj
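A short usage sketch for the database helpers above, assuming a reachable Postgres instance whose credentials come from the .env file (DB_HOST, DB_USER, DB_PASSWORD); the database and table names here are purely illustrative:

db = Database("shop")
print(db.list_tables())      # e.g. ['customers', 'orders']
customers = db.customers     # __getattr__ reads the "customers" table into a DataFrame
print(customers.head())

# the module-level helpers do the same without keeping a Database object around
orders = get_table("shop", "orders")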
import streamlit as st
st.title("This is my app!")
def setup_my_app_proxy():
    command = [
-       'python',
-       '/dash/app/my_app.py',
-       '--port',
-       '{port}'
+       "/streamlit/app/run_my_app.sh",
+       "--browser.gatherUsageStats", "false",
+       "--browser.serverAddress", "0.0.0.0",
+       "--server.port", "{port}",
+       "--server.headless", "true",
+       "--server.enableCORS", "false",
+       "--server.enableXsrfProtection", "false",
    ]
    return {
        "command": command,
        "timeout": 20,
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
-           'title': 'MyApp'
+           "title": "MyApp",
+           "icon_path": "/streamlit/app/streamlit-favicon.svg"
        }
    }
@@ -3,15 +3,14 @@ import setuptools
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
-   description="A small module to run Dash inside a dockerized Jupyterlab.",
-   name="jupyter-dash-proxy",
-   py_modules=["dash_proxy", "my_app_proxy"],
+   description="A small module to run Streamlit inside a dockerized JupyterLab.",
+   name="jupyter-streamlit-proxy",
+   py_modules=["streamlit_proxy", "my_app_proxy"],
    entry_points={
        "jupyter_serverproxy_servers": [
            # name = packagename:function_name
-           "Dash = dash_proxy:setup_dash_proxy",
+           "Streamlit = streamlit_proxy:setup_streamlit_proxy",
            "MyApp = my_app_proxy:setup_my_app_proxy"
        ]
    },
    install_requires=["jupyter-server-proxy==4.0.0"],
)
import os
def setup_streamlit_proxy():
command = [
"/streamlit/app/run_streamlit_app.sh",
"--browser.gatherUsageStats", "false",
"--browser.serverAddress", "0.0.0.0",
"--server.port", "{port}",
"--server.headless", "true",
"--server.enableCORS", "false",
"--server.enableXsrfProtection", "false",
]
return {
"command": command,
"timeout": 20,
"new_browser_tab": False,
"launcher_entry": {
"enabled": True,
"title": "Streamlit App",
"icon_path": "/streamlit/app/streamlit-favicon.svg"
}
}
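After pip install, the launcher entries can be verified from any Python session in the container by listing the registered jupyter-server-proxy servers (Python 3.10+ entry-points API):

from importlib.metadata import entry_points

for ep in entry_points(group="jupyter_serverproxy_servers"):
    print(ep.name, "->", ep.value)
# expected to include:
#   Streamlit -> streamlit_proxy:setup_streamlit_proxy
#   MyApp -> my_app_proxy:setup_my_app_proxy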