Skip to content
Snippets Groups Projects

switched app back to dash

Merged Julian Rasch requested to merge switch_back_to_dash into main
14 files
+ 306
98
Compare changes
  • Side-by-side
  • Inline
Files
14
+ 55
39
# NOTE(review): this span interleaved imports from two different versions of the
# app (old Streamlit file and new Dash file) from a pasted diff; reconstructed
# here as the Dash version's header, which is the merged result of the MR.
# The OpenAI client setup (get_openai_client / MODEL) belonged to the removed
# Streamlit file and presumably now lives in the callbacks module — TODO confirm.
import argparse
import logging
from urllib.parse import urljoin, urlparse

from dash import Dash
from jupyter_server.serverapp import list_running_servers

from callbacks import register_callbacks
from layout import layout

logging.basicConfig(level=logging.INFO)
# weird trick to find base_url for the jupyterlab
def find_jupyterlab_base_url(port: int = 8888, servers=None):
    """Return the base URL of the running JupyterLab server, or ``None``.

    Scans the running Jupyter servers for the one listening on *port*
    (default 8888, the conventional JupyterLab port) and returns its URL.

    Args:
        port: Port number identifying the JupyterLab server to look for.
        servers: Optional iterable of server-info mappings (each with
            ``"port"`` and ``"url"`` keys). Defaults to the live list from
            ``jupyter_server``; injectable for testing.

    Returns:
        The matching server's ``url`` string, or ``None`` if no server on
        *port* is found.
    """
    if servers is None:
        servers = list_running_servers()
    for server in servers:
        if server["port"] == port:
            return server["url"]
    return None
 
# get the correct port from proxy
 
# The app is launched behind a jupyter-server proxy; the proxy passes the
# port it allocated via --port, which the Dash server must bind to.
parser = argparse.ArgumentParser()
 
parser.add_argument("--port", type=int)
 
args = parser.parse_args()
 
# NOTE(review): annotation says int, but argparse leaves this None when
# --port is omitted; the falsy check below handles that case.
port: int = args.port
# NOTE(review): this span interleaved the removed Streamlit chat app with the
# new Dash startup code (pasted diff); reconstructed here as the Dash version,
# which is the merged result of the MR.

# Fail fast if the proxy did not hand us a port — the app is unreachable
# without it.
if not port:
    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
    logging.debug(f"Dash app running on port {port}.")

base_url = find_jupyterlab_base_url()
if base_url is None:
    raise ValueError("Base URL of Jupyterlab could not be detected.")
logging.debug(f"Base URL: {base_url}")

# All requests are routed through jupyter-server-proxy, so Dash must prefix
# its routes with /proxy/<port>/ relative to the JupyterLab base URL.
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")

# define Dash app
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path,
)

# define layout
app.layout = layout

# register all callback functions
register_callbacks(app=app)

# Run Dash app in the notebook
app.run(
    jupyter_mode="jupyterlab",
    port=port,
    host="0.0.0.0",
    debug=True,
)
\ No newline at end of file
Loading