Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • jr662933/jupyterhub-ai
  • buecker/jupyterhub-ai
  • buecker/jupyterhub
  • sr151511/vennemann
4 results
Show changes
Commits on Source (43)
# Virtual environments and local env files
*env/
.env
# Packaging artifacts and bytecode
*.egg-*
*.pyc
# NOTE(review): *.txt also matches requirements.txt, which the Dockerfile
# COPYs — confirm requirements.txt is tracked (force-added) despite this rule.
*.txt
config.txt
# Local run/build helper scripts
run.sh
build.sh
run_my_app.sh
run_streamlit_app.sh
\ No newline at end of file
# GitLab CI: build the project Docker image and push it to the registry.
variables:
  # Disable TLS cert generation for the docker:dind service.
  DOCKER_TLS_CERTDIR: ""
  TIMEOUT: 3600

docker-build-master:
  # Official docker image.
  image: docker:latest
# NOTE(review): diff-hunk marker left over from the revision-compare scrape;
# not valid YAML — remove when restoring the file.
......@@ -10,5 +11,5 @@ docker-build-master:
  # Authenticate against the project's container registry.
  - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    # Inject the CI-provided env file before the image build.
    - cp $ENV_FILE .env
    # NOTE(review): both the old ':prod' and the new ':prodcc4' tag commands
    # appear because this is a revision diff — keep only one pair in the real file.
    - docker build --pull -t "$CI_REGISTRY_IMAGE":prod .
    - docker push "$CI_REGISTRY_IMAGE":prod
    - docker build --pull -t "$CI_REGISTRY_IMAGE":prodcc4 .
    - docker push "$CI_REGISTRY_IMAGE":prodcc4
# NOTE(review): two FROM lines appear back-to-back (revision-diff residue);
# in a single-stage build only the second takes effect — confirm the first is stale.
FROM jupyter/scipy-notebook:hub-1.5.0
FROM quay.io/jupyter/datascience-notebook:hub-5.2.0

# Install from APT repository
USER root
RUN apt-get update -y
# git for source checkouts; libpq-dev + gcc to build PostgreSQL client bindings.
RUN apt-get install -y git libpq-dev gcc

# Set working directory
WORKDIR /usr/src/app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt && rm requirements.txt

# Install basics
USER jovyan
RUN pip3 install --upgrade pip
COPY .env .env
# Used by the IPython startup script to extend sys.path.
ENV HOME_PATH=/home/jovyan/

# Install python packages
COPY requirements.txt requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Scripts in this profile's startup/ directory run in every kernel session.
ENV IPYTHONDIR /usr/src/app/ipython/
WORKDIR /usr/src/app/ipython/profile_default/startup/
COPY python_startup/ ./
WORKDIR /home/

# add the proxy for streamlit
COPY streamlit_proxy /tmp/streamlit_proxy/
RUN pip install /tmp/streamlit_proxy/

# install some utilities for GPT
COPY llm_utils /llm_utils/
RUN pip install /llm_utils/
# Read by llm_utils.client.get_openai_client at runtime.
ENV CONFIG_PATH=/home/jovyan/config.txt

# copy the apps into the container
COPY app /streamlit/app/
RUN chown -R jovyan /streamlit/app/

# install some NLTK and spaCy data
RUN python -m nltk.downloader stopwords
RUN python -m nltk.downloader wordnet
RUN python -m nltk.downloader punkt

# Drop back to the unprivileged notebook user.
USER jovyan
\ No newline at end of file
# bücker
# A Jupyterlab for LLM
In order to run Dash or use the client, the variables `AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_ENDPOINT`, and `OPENAI_API_VERSION` need to be stored in a `config.txt` file in the home directory.
"""Streamlit chat front-end for an Azure OpenAI chat deployment."""

import os

import streamlit as st

from llm_utils.client import get_openai_client

# Deployment name used for every request in this app.
MODEL = "gpt-4o"

# BUG FIX: the original script created the client twice with identical
# arguments; a single configured client is sufficient.
client = get_openai_client(
    model=MODEL,
    config_path=os.environ.get("CONFIG_PATH")
)

# STREAMLIT APP
st.title("ChatGPT in Streamlit")

# Session state survives Streamlit's script re-runs; initialize once.
if "openai_model" not in st.session_state:
    # Reuse the single MODEL constant instead of repeating the literal.
    st.session_state["openai_model"] = MODEL
if "messages" not in st.session_state:
    st.session_state.messages = []

# Re-render the conversation so far on every re-run.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle a new user prompt: echo it, stream the assistant reply, persist both.
if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        )
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})
\ No newline at end of file
"""IPython startup hook: make the user's home importable and launch the app."""

import os
import sys

# HOME_PATH is set in the Dockerfile (ENV HOME_PATH=/home/jovyan/); extend
# sys.path so modules placed in the home directory can be imported.
HOME_PATH = os.environ.get("HOME_PATH")
sys.path.append(HOME_PATH)
print(HOME_PATH)

try:
    import streamlit_app  # this runs the app
except ModuleNotFoundError as e:
    # App module is missing: report and stop. BUG FIX: use sys.exit() —
    # the bare exit() helper is injected by the site module and is not
    # guaranteed to exist in every embedded interpreter.
    print(e)
    sys.exit()
<svg width="512" height="512" viewBox="0 0 512 512" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M255.968 288.494L166.211 241.067L10.4062 158.753C10.2639 158.611 9.97951 158.611 9.83728 158.611C4.14838 155.909 -1.68275 161.596 0.450591 167.283L79.8393 369.685L79.8535 369.728C79.9388 369.927 80.0099 370.126 80.0953 370.325C83.3522 377.874 90.4633 382.537 98.2002 384.371C98.8544 384.513 99.3225 384.643 100.108 384.799C100.89 384.973 101.983 385.21 102.922 385.281C103.078 385.295 103.221 385.295 103.377 385.31H103.491C103.605 385.324 103.718 385.324 103.832 385.338H103.989C104.088 385.352 104.202 385.352 104.302 385.352H104.486C104.6 385.366 104.714 385.366 104.828 385.366L167.175 392.161C226.276 398.602 285.901 398.602 345.002 392.161L407.35 385.366C408.558 385.366 409.739 385.31 410.877 385.196C411.246 385.153 411.602 385.111 411.958 385.068C412 385.054 412.057 385.054 412.1 385.039C412.342 385.011 412.583 384.968 412.825 384.926C413.181 384.883 413.536 384.812 413.892 384.741C414.603 384.585 414.926 384.471 415.891 384.139C416.856 383.808 418.458 383.228 419.461 382.745C420.464 382.261 421.159 381.798 421.999 381.272C423.037 380.618 424.024 379.948 425.025 379.198C425.457 378.868 425.753 378.656 426.066 378.358L425.895 378.258L255.968 288.494Z" fill="#FF2B2B"/>
<path d="M501.789 158.755H501.647L345.784 241.07L432.426 370.058L511.616 167.285V167.001C513.607 161.03 507.492 155.627 501.789 158.755" fill="#7D353B"/>
<path d="M264.274 119.615C260.292 113.8 251.616 113.8 247.776 119.615L166.211 241.068L255.968 288.495L426.067 378.357C427.135 377.312 427.991 376.293 428.897 375.217C430.177 373.638 431.372 371.947 432.424 370.056L345.782 241.068L264.274 119.615Z" fill="#BD4043"/>
</svg>
import setuptools

# Packaging metadata for the llm_utils helper library (installed into the
# notebook image via `pip install /llm_utils/` in the Dockerfile).
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="Helper modules to work with LLMs.",
    name="llm_utils",
    # src-layout: package sources live under src/.
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    # Runtime dependencies: Azure OpenAI client and dotenv config loading.
    install_requires=[
        "openai",
        "python-dotenv"
    ]
)
File moved
import os
from openai import AzureOpenAI
from dotenv import load_dotenv
from enum import Enum
class OpenAIModels(Enum):
    """Closed set of Azure OpenAI deployment names supported by this package."""

    GPT_3 = "gpt3"
    GPT_4 = "gpt4"
    GPT_4o = "gpt-4o"
    EMBED = "embed"

    @classmethod
    def get_all_values(cls):
        """Return the string value of every member, in declaration order."""
        values = []
        for entry in cls:
            values.append(entry.value)
        return values
def get_openai_client(
    model: str,
    config_path: str
) -> AzureOpenAI:
    """Create an AzureOpenAI client for one of the supported deployments.

    Parameters
    ----------
    model : str
        Deployment name; must be one of ``OpenAIModels.get_all_values()``.
    config_path : str
        Path to a dotenv-style config file providing AZURE_OPENAI_API_KEY,
        AZURE_OPENAI_ENDPOINT and OPENAI_API_VERSION.

    Raises
    ------
    ValueError
        If *model* is unknown or any required variable is missing.
    """
    if model not in OpenAIModels.get_all_values():
        raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")

    # Load the config file into the process environment; override=True so a
    # stale value from a previous call cannot leak through.
    load_dotenv(
        dotenv_path=config_path,
        override=True
    )

    AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
    AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
    OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")

    # BUG FIX: the original checked AZURE_OPENAI_API_KEY twice and never
    # validated AZURE_OPENAI_ENDPOINT, so a missing endpoint slipped through
    # (the error message repeated the key line as well).
    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
        raise ValueError(
            f"""None of the following parameters can be None:
            AZURE_OPENAI_API_KEY: {AZURE_OPENAI_API_KEY},
            AZURE_OPENAI_ENDPOINT: {AZURE_OPENAI_ENDPOINT},
            OPENAI_API_VERSION: {OPENAI_API_VERSION}
            """
        )

    client = AzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        # api_version=OPENAI_API_VERSION,  # NOTE(review): left disabled in the original — confirm intent
        azure_deployment=model
    )
    return client
class ChatGPT:
    """Minimal stateful chat wrapper around an Azure OpenAI chat deployment.

    The full conversation is kept in ``self.messages`` so every request
    carries the prior turns as context.
    """

    def __init__(self, client: AzureOpenAI, model: str):
        self.model = model
        self.client = client
        self.messages = []

    def chat_with_gpt(self, user_input: str):
        """Record *user_input* as a user turn and return the assistant's reply text."""
        self.messages.append({
            "role": "user",
            "content": user_input
        })
        return self._generate_response(self.messages)

    def _generate_response(self, messages):
        """Call the chat completion endpoint, store the assistant turn, return its text."""
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,
            max_tokens=150,
            top_p=1.0
        )
        reply = completion.choices[0].message
        self.messages.append({
            "role": reply.role,
            "content": reply.content
        })
        return reply.content
import os
from dotenv import load_dotenv
import sqlalchemy
import pandas as pd
from urllib.parse import quote_plus

# Populate the DB_* variables from the container's env file at import time.
load_dotenv("/usr/src/app/.env")
class Database:
    """Attribute-style accessor for a PostgreSQL database.

    ``Database("mydb").some_table`` loads ``some_table`` into a DataFrame.
    """

    def __init__(self, db_name):
        # One SQLAlchemy engine per Database instance.
        self.conn = db_connect(db_name)

    def __getattr__(self, table_name):
        # Any attribute not found on the instance is treated as a table name
        # and fetched eagerly from the database.
        return pd.read_sql_table(table_name, self.conn)

    def list_tables(self):
        """Return the names of all tables visible through this connection."""
        return sqlalchemy.inspect(self.conn).get_table_names()
def db_connect(db_name):
    """Create a SQLAlchemy engine for *db_name* from DB_* environment variables.

    Reads DB_HOST, DB_USER and DB_PASSWORD from the environment (populated by
    the module-level load_dotenv call). BUG FIX: the user is now URL-quoted
    like the password, so special characters (e.g. '@', ':') cannot corrupt
    the connection URL; quoting is a no-op for ordinary names.
    """
    hostname = os.getenv("DB_HOST")
    user = quote_plus(os.getenv("DB_USER"))
    password = quote_plus(os.getenv("DB_PASSWORD"))
    return sqlalchemy.create_engine(
        f'postgresql+psycopg2://{user}:{password}@{hostname}/{db_name}'
    )
def get_table(db_name, table_name):
    """Load a single table from *db_name* into a pandas DataFrame."""
    engine = db_connect(db_name)
    return pd.read_sql_table(table_name, engine)
def get_all_tables(db_name):
    """Return a Database whose attribute access yields tables of *db_name*."""
    return Database(db_name)
import streamlit as st

# Minimal placeholder Streamlit app, served through the "MyApp" proxy entry.
st.title("This is my app!")
def setup_my_app_proxy():
    """jupyter-server-proxy entry point for the 'MyApp' Streamlit app.

    Returns the server-process specification understood by
    jupyter-server-proxy: the launch command (the ``{port}`` placeholder is
    substituted by the proxy at start time), a startup timeout, and the
    JupyterLab launcher tile configuration.
    """
    launch_script = "/streamlit/app/run_my_app.sh"
    flags = {
        "--browser.gatherUsageStats": "false",
        "--browser.serverAddress": "0.0.0.0",
        "--server.port": "{port}",
        "--server.headless": "true",
        "--server.enableCORS": "false",
        "--server.enableXsrfProtection": "false",
    }
    command = [launch_script]
    for flag, value in flags.items():
        command.extend((flag, value))
    return {
        "command": command,
        "timeout": 20,
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            "title": "MyApp",
            "icon_path": "/streamlit/app/streamlit-favicon.svg",
        },
    }
\ No newline at end of file
import setuptools

# Packaging for the jupyter-server-proxy glue that exposes the Streamlit
# apps as launcher entries inside JupyterLab.
setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="A small module to run Streamlit inside a dockerized JupyterLab.",
    name="jupyter-streamlit-proxy",
    py_modules=["streamlit_proxy", "my_app_proxy"],
    # Register both proxies under jupyter-server-proxy's discovery entry point.
    entry_points={
        "jupyter_serverproxy_servers": [
            "Streamlit = streamlit_proxy:setup_streamlit_proxy",
            "MyApp = my_app_proxy:setup_my_app_proxy"
        ]
    },
    # Pinned proxy version.
    install_requires=["jupyter-server-proxy==4.0.0"],
)
\ No newline at end of file
import os
def setup_streamlit_proxy():
    """jupyter-server-proxy entry point for the main Streamlit app.

    Returns the server-process spec used by jupyter-server-proxy: launch
    command (``{port}`` is filled in by the proxy), startup timeout, and
    the JupyterLab launcher tile configuration.
    """
    launch_script = "/streamlit/app/run_streamlit_app.sh"
    flags = {
        "--browser.gatherUsageStats": "false",
        "--browser.serverAddress": "0.0.0.0",
        "--server.port": "{port}",
        "--server.headless": "true",
        "--server.enableCORS": "false",
        "--server.enableXsrfProtection": "false",
    }
    command = [launch_script]
    for flag, value in flags.items():
        command.extend((flag, value))
    return {
        "command": command,
        "timeout": 20,
        "new_browser_tab": False,
        "launcher_entry": {
            "enabled": True,
            "title": "Streamlit App",
            "icon_path": "/streamlit/app/streamlit-favicon.svg",
        },
    }