Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (113)
*env/
.env
*.egg-*
*.pyc
*.txt
config.txt
run.sh
build.sh
run_my_app.sh
run_streamlit_app.sh
\ No newline at end of file
 variables:
   DOCKER_TLS_CERTDIR: ""
+  TIMEOUT: 3600

 docker-build-master:
   # Official docker image.
   image: docker:latest
@@ -9,5 +10,6 @@ docker-build-master:
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
   script:
-    - docker build --pull -t "$CI_REGISTRY_IMAGE":prod .
-    - docker push "$CI_REGISTRY_IMAGE":prod
+    - cp $ENV_FILE .env
+    - docker build --pull -t "$CI_REGISTRY_IMAGE":prodcc4 .
+    - docker push "$CI_REGISTRY_IMAGE":prodcc4
-FROM jupyter/scipy-notebook:hub-1.5.0
+FROM quay.io/jupyter/datascience-notebook:hub-5.2.0

+# Install from APT repository
 USER root
+RUN apt-get update
+RUN apt-get install -y git

-# Install basics
-USER jovyan
-RUN pip3 install --upgrade pip
-# Install 'nice to have lab extensions'
-RUN pip install --upgrade jupyterlab
-RUN pip install jupyterlab-git==0.34.0
-RUN pip install jupyterlab-gitlab==3.0.0
-# Install for -> ewirtsch
-RUN pip install pandaSDMX==1.6.0
-RUN pip install plotly==5.4.0
-# Install for -> artifint
-RUN pip install python-constraint==1.4.0
-RUN pip install ortools==9.1.9490
-RUN pip install tensorflow==2.7.0
-RUN pip install keras==2.7.0
-# Install for -> windkanl
-RUN pip install paho-mqtt==1.6.1
-# Install for -> stroemmt
-RUN pip install openpiv
-# vennemann
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt && rm requirements.txt
+# install some utilities for GPT
+COPY llm_utils /llm_utils/
+RUN pip install /llm_utils/
+ENV CONFIG_PATH=/home/jovyan/config.txt
+USER jovyan
\ No newline at end of file

# A Jupyterlab for LLM
import setuptools

setuptools.setup(
    author="Julian Rasch",
    author_email="julian.rasch@fh-muenster.de",
    description="Helper modules to work with LLMs.",
    name="llm_utils",
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    install_requires=[
        "openai",
        "python-dotenv"
    ]
)
import os
from enum import Enum

from dotenv import load_dotenv
from openai import AzureOpenAI


class OpenAIModels(Enum):
    GPT_3 = "gpt3"
    GPT_4 = "gpt4"
    GPT_4o = "gpt-4o"
    EMBED = "embed"

    @classmethod
    def get_all_values(cls):
        return [member.value for member in cls]


def get_openai_client(
    model: str,
    config_path: str
) -> AzureOpenAI:
    if model not in OpenAIModels.get_all_values():
        raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")

    load_dotenv(
        dotenv_path=config_path,
        override=True
    )

    AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
    AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
    OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")

    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
        raise ValueError(
            f"""None of the following parameters can be None:
            AZURE_OPENAI_API_KEY: {AZURE_OPENAI_API_KEY},
            AZURE_OPENAI_ENDPOINT: {AZURE_OPENAI_ENDPOINT},
            OPENAI_API_VERSION: {OPENAI_API_VERSION}
            """
        )

    client = AzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        api_version=OPENAI_API_VERSION,
        azure_deployment=model
    )
    return client
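For orientation, the config file that load_dotenv reads here (e.g. the config.txt that CONFIG_PATH points to in the Dockerfile) has to define the three variables checked above. A minimal sketch with placeholder values, not the project's actual settings:

AZURE_OPENAI_API_KEY=<your-azure-openai-key>
AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
OPENAI_API_VERSION=<api-version>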
class ChatGPT:
    def __init__(self, client: AzureOpenAI, model: str):
        self.model = model
        self.client = client
        self.messages = []

    def chat_with_gpt(self, user_input: str):
        self.messages.append({
            "role": "user",
            "content": user_input
        })
        response = self._generate_response(self.messages)
        return response

    def _generate_response(self, messages):
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,
            max_tokens=150,
            top_p=1.0
        )
        response_message = response.choices[0].message
        self.messages.append({
            "role": response_message.role,
            "content": response_message.content
        })
        return response_message.content
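A minimal usage sketch tying the two helpers together. The module path llm_utils.client and the chosen deployment are assumptions (only the package name and the enum values are shown above); CONFIG_PATH is the environment variable set in the Dockerfile.

import os

# NOTE: the module path `llm_utils.client` is assumed; only the package name appears above.
from llm_utils.client import get_openai_client, ChatGPT, OpenAIModels

# CONFIG_PATH is set in the image (ENV CONFIG_PATH=/home/jovyan/config.txt).
config_path = os.environ.get("CONFIG_PATH", "config.txt")

# Build an Azure OpenAI client for the "gpt-4o" deployment and start a chat.
client = get_openai_client(model=OpenAIModels.GPT_4o.value, config_path=config_path)
chat = ChatGPT(client=client, model=OpenAIModels.GPT_4o.value)
print(chat.chat_with_gpt("Hello, what can you do?"))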
# jupyter-server-proxy==4.4.0
# dash
# dash-bootstrap-components
flake8
openai
rapidfuzz
nltk
plotly
scikit-learn
pdfplumber
python-dotenv