
Switch jupyterlab and app

Merged: Julian Rasch requested to merge switch_jupyterlab_and_app into main

19 files changed: +131 −348
The file shown below: +39 −55

The hunk swaps the entry point: the old Dash app, served through the JupyterLab proxy, is removed, and a standalone Streamlit chat app is added in its place.
# --- removed: the old entry point, a Dash app served through the JupyterLab proxy ---
import sys

sys.path.append("/home/jovyan/")

import argparse
import logging
from urllib.parse import urljoin, urlparse

from dash import Dash
from jupyter_server.serverapp import list_running_servers

from callbacks import register_callbacks
from layout import layout

# DEBUG level so that the logging.debug calls below are actually emitted
logging.basicConfig(level=logging.DEBUG)
# weird trick to find the base URL of the running JupyterLab
def find_jupyterlab_base_url():
    servers = list_running_servers()
    for server in servers:
        # assume JupyterLab itself is the server listening on port 8888
        if server["port"] == 8888:
            return server["url"]
    return None
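# (Each entry yielded by list_running_servers() is a plain dict read from the
# server's runtime info file, e.g. {"url": "http://localhost:8888/",
# "port": 8888, ...} -- which is what the port lookup above relies on.)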
# get the correct port from the proxy
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
MODEL = "gpt-4o"
client = get_openai_client(
model=MODEL,
config_path=os.environ.get("CONFIG_PATH")
)
if not port:
    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
    logging.debug(f"Dash app running on port {port}.")

base_url = find_jupyterlab_base_url()
if base_url is None:
    raise ValueError("Base URL of JupyterLab could not be detected.")
logging.debug(f"Base URL: {base_url}")
# path under which jupyter-server-proxy exposes the app, e.g. /proxy/<port>/
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")

# define Dash app
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path,
)
# define layout
app.layout = layout

# register all callback functions
register_callbacks(app=app)

# run the Dash app inside JupyterLab
app.run(
    jupyter_mode="jupyterlab",
    port=port,
    host="0.0.0.0",
    debug=True,
)
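For context: proxy/{port} is the generic URL scheme of jupyter-server-proxy, which forwards <jupyterlab-base-url>/proxy/<port>/ to localhost:<port>. One plausible way the script receives its --port argument is a command entry in the proxy configuration; that configuration lives in other files of this MR, so the entry name and script path below are illustrative assumptions:

# jupyter_server_config.py -- hypothetical jupyter-server-proxy registration
c.ServerProxy.servers = {
    "dash-app": {  # entry name is an assumption, not taken from this MR
        # jupyter-server-proxy chooses a free port and substitutes it for
        # {port}; the argparse code above then picks it up as --port
        "command": ["python", "/home/jovyan/app.py", "--port", "{port}"],  # script path assumed
    }
}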
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-4o"
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# accept new input, echo it, then stream the assistant's reply
if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        )
        # st.write_stream renders tokens as they arrive and returns the full text
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})
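get_openai_client comes from the in-house llm_utils package and is not part of this hunk; the app only relies on it returning an OpenAI-SDK-compatible client. A minimal sketch of what it might look like, assuming an Azure-hosted deployment and a YAML config file (both the format and the field names are guesses):

# llm_utils/client.py -- hypothetical sketch, not the actual implementation
import yaml
from openai import AzureOpenAI  # assumption: Azure-hosted deployment

def get_openai_client(model: str, config_path: str) -> AzureOpenAI:
    # look up credentials for the requested model in a YAML config file
    with open(config_path) as f:
        cfg = yaml.safe_load(f)[model]
    return AzureOpenAI(
        api_key=cfg["api_key"],
        azure_endpoint=cfg["endpoint"],
        api_version=cfg["api_version"],
    )

Unlike the Dash version, nothing here touches the JupyterLab proxy: the app would presumably be started directly, e.g. streamlit run <script> --server.port <port>.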