Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
J
jupyterhub-ai
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Requirements
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Locked files
Build
Pipelines
Jobs
Pipeline schedules
Test cases
Artifacts
Deploy
Releases
Package registry
Container Registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Service Desk
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Code review analytics
Issue analytics
Insights
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Michael Bücker
jupyterhub-ai
Merge requests
!9
switched app back to dash
Code
Review changes
Check out branch
Download
Patches
Plain diff
Merged
switched app back to dash
switch_back_to_dash
into
main
Overview
0
Commits
1
Pipelines
1
Changes
14
Merged
Julian Rasch
requested to merge
switch_back_to_dash
into
main
4 weeks ago
Overview
0
Commits
1
Pipelines
1
Changes
14
Expand
0
0
Merge request reports
Compare
main
main (base)
and
latest version
latest version
20a83e28
1 commit,
4 weeks ago
14 files
+
306
−
98
Side-by-side
Compare changes
Side-by-side
Inline
Show whitespace changes
Show one file at a time
Files
14
Search (e.g. *.vue) (Ctrl+P)
app/app.py
+
55
−
39
Options
import
o
s
import
s
ys
import
streamlit
as
st
sys
.
path
.
append
(
"
/home/jovyan/
"
)
from
llm_utils.client
import
get_openai_client
import
argparse
import
logging
from
urllib.parse
import
urlparse
,
urljoin
MODEL
=
"
gpt-4o
"
from
dash
import
Dash
client
=
get_openai_client
(
model
=
MODEL
,
from
jupyter_server.serverapp
import
list_running_servers
config_path
=
os
.
environ
.
get
(
"
CONFIG_PATH
"
)
)
from
layout
import
layout
from
callbacks
import
register_callbacks
logging
.
basicConfig
(
level
=
logging
.
INFO
)
# weird trick to find base_url for the jupyterlab
def find_jupyterlab_base_url(servers=None, jupyter_port=8888):
    """Return the base URL of the running JupyterLab server.

    Scans the known running Jupyter servers and returns the ``url`` entry
    of the first one listening on *jupyter_port*.

    Args:
        servers: Optional iterable of server-info mappings (shaped like the
            dicts returned by ``list_running_servers``, i.e. with ``"port"``
            and ``"url"`` keys). When ``None``, the running servers are
            discovered via ``list_running_servers()``.
        jupyter_port: Port the JupyterLab server is expected to listen on.
            Defaults to 8888, the conventional JupyterLab port.

    Returns:
        The matching server's base URL string, or ``None`` when no server
        on *jupyter_port* is found.
    """
    if servers is None:
        # Discover live servers only when none were injected (keeps the
        # original behaviour while making the function testable).
        servers = list_running_servers()
    for server in servers:
        if server["port"] == jupyter_port:
            return server["url"]
    return None
# Determine the port assigned by the proxy from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int)
args = parser.parse_args()
port: int = args.port
# STREAMLIT APP
# Fail fast when the proxy did not hand us a port to serve on.
if not port:
    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
else:
    logging.debug(f"Dash app running on port {port}.")
st
.
title
(
"
ChatGPT in Streamlit
"
)
client
=
get_openai_client
(
base_url
=
find_jupyterlab_base_url
()
model
=
MODEL
,
if
base_url
is
None
:
config_path
=
os
.
environ
.
get
(
"
CONFIG_PATH
"
)
raise
ValueError
(
"
Base URL of Jupyterlab could not be detected.
"
)
logging.debug(f"Base URL: {base_url}")
# Build the path prefix under which jupyter-server-proxy exposes this app;
# urljoin needs the trailing slash on base_url to append rather than replace.
proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
logging.debug(f"Proxy base path: {proxy_base_path}")
# Create the Dash app, rooted at the proxy path so assets resolve correctly.
app = Dash(
    name=__name__,
    requests_pathname_prefix=proxy_base_path,
)
)
if
"
openai_model
"
not
in
st
.
session_state
:
# define layout
st
.
session_state
[
"
openai_model
"
]
=
"
gpt-4o
"
# Attach the page layout imported from the layout module.
app.layout = layout
if
"
messages
"
not
in
st
.
session_state
:
# register all callback functions
st
.
session_state
.
messages
=
[]
# Wire up all interaction callbacks on the app.
register_callbacks(app=app)
for
message
in
st
.
session_state
.
messages
:
# Run Dash app in the notebook
with
st
.
chat_message
(
message
[
"
role
"
]):
app
.
run
(
st
.
markdown
(
message
[
"
content
"
])
jupyter_mode
=
"
jupyterlab
"
,
port
=
port
,
if
prompt
:
=
st
.
chat_input
(
"
What is up?
"
):
host
=
"
0.0.0.0
"
,
st
.
session_state
.
messages
.
append
({
"
role
"
:
"
user
"
,
"
content
"
:
prompt
})
debug
=
True
with
st
.
chat_message
(
"
user
"
):
)
st
.
markdown
(
prompt
)
with
st
.
chat_message
(
"
assistant
"
):
stream
=
client
.
chat
.
completions
.
create
(
model
=
st
.
session_state
[
"
openai_model
"
],
messages
=
[
{
"
role
"
:
m
[
"
role
"
],
"
content
"
:
m
[
"
content
"
]}
for
m
in
st
.
session_state
.
messages
],
stream
=
True
,
)
response
=
st
.
write_stream
(
stream
)
st
.
session_state
.
messages
.
append
({
"
role
"
:
"
assistant
"
,
"
content
"
:
response
})
\ No newline at end of file
Loading