Michael Bücker / jupyterhub-ai · Merge requests · !7
Switch jupyterlab and app
Merged · Julian Rasch requested to merge switch_jupyterlab_and_app into main · 4 weeks ago
Overview 0 · Commits 3 · Pipelines 1 · Changes 19
Merge request reports
Comparing main (base) and latest version (599dbc0b) · 3 commits, 4 weeks ago · 19 files · +131 −348 · Inline view
app/app.py · +39 −55
 import sys
 sys.path.append("/home/jovyan/")
 import os
+import streamlit as st
 import argparse
 import logging
 from llm_utils.client import get_openai_client
-from urllib.parse import urlparse, urljoin
-from dash import Dash
-from jupyter_server.serverapp import list_running_servers
-from layout import layout
-from callbacks import register_callbacks

 logging.basicConfig(level=logging.INFO)

-# weird trick to find base_url for the jupyterlab
-def find_jupyterlab_base_url():
-    servers = list_running_servers()
-    for server in servers:
-        if server["port"] == 8888:
-            return server['url']
-    return None

-# get the correct port from proxy
-parser = argparse.ArgumentParser()
-parser.add_argument("--port", type=int)
-args = parser.parse_args()
-port: int = args.port
 MODEL = "gpt-4o"
-client = get_openai_client(model=MODEL, config_path=os.environ.get("CONFIG_PATH"))

-if not port:
-    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
-else:
-    logging.debug(f"Dash app running on port {port}.")

-base_url = find_jupyterlab_base_url()
-if base_url is None:
-    raise ValueError("Base URL of Jupyterlab could not be detected.")
-logging.debug(f"Base URL: {base_url}")
+# STREAMLIT APP

-proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
-logging.debug(f"Proxy base path: {proxy_base_path}")
+st.title("ChatGPT in Streamlit")

-# define Dash app
-app = Dash(
-    name=__name__,
-    requests_pathname_prefix=proxy_base_path
+client = get_openai_client(
+    model=MODEL,
+    config_path=os.environ.get("CONFIG_PATH")
 )

-# define layout
-app.layout = layout

-# register all callback functions
-register_callbacks(app=app)

-# Run Dash app in the notebook
-app.run(jupyter_mode="jupyterlab", port=port, host="0.0.0.0", debug=True)

+if "openai_model" not in st.session_state:
+    st.session_state["openai_model"] = "gpt-4o"

+if "messages" not in st.session_state:
+    st.session_state.messages = []

+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])

+if prompt := st.chat_input("What is up?"):
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)

+    with st.chat_message("assistant"):
+        stream = client.chat.completions.create(
+            model=st.session_state["openai_model"],
+            messages=[
+                {"role": m["role"], "content": m["content"]}
+                for m in st.session_state.messages
+            ],
+            stream=True,
+        )
+        response = st.write_stream(stream)
+    st.session_state.messages.append({"role": "assistant", "content": response})
\ No newline at end of file
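For reference, the proxy base path that the removed Dash code derives from the running JupyterLab server can be reproduced in isolation. A minimal sketch with assumed example values (in the removed code they come from list_running_servers() and the --port argument):

from urllib.parse import urljoin, urlparse

# Assumed example values for illustration only.
base_url = "http://127.0.0.1:8888/user/jovyan"  # what list_running_servers() might report
port = 8050                                     # what the proxy would pass via --port

# Same construction as the removed code: join "proxy/<port>/" onto the
# JupyterLab base URL and keep only the path component.
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
print(proxy_base_path)  # -> /user/jovyan/proxy/8050/

Dash needed this value as requests_pathname_prefix so its generated URLs resolve behind jupyter-server-proxy; in this merge request the new Streamlit code no longer computes it in app.py.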
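How the new Streamlit app is launched is defined in the other 18 changed files of this merge request, which are not shown here. As an illustration only, a jupyter-server-proxy registration for a Streamlit app typically looks roughly like the following; the server name, script path, and launcher title are assumptions, not taken from this repository:

# jupyter_server_config.py -- illustrative sketch, not this repository's actual configuration
c.ServerProxy.servers = {
    "app": {
        # jupyter-server-proxy picks a free port and substitutes it for {port}
        "command": [
            "streamlit", "run", "/home/jovyan/app/app.py",
            "--server.port", "{port}",
            "--server.headless", "true",
        ],
        "absolute_url": False,
        "launcher_entry": {"title": "ChatGPT App"},
    },
}

With an entry like this the app is reachable under the Jupyter base URL at /app/, and the script itself no longer needs to parse a --port argument.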