diff --git a/Dockerfile b/Dockerfile
index b2dd42d08c62277e5c5f2789ea89bb129f112e3c..2bea9fad660aec3a744beb0ce3377765ed5e31d8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -9,13 +9,12 @@ RUN conda env update -q -f /tmp/environment.yml && \
     conda env export -n "root" && \
     jupyter lab build 
 
-RUN pip3 install --upgrade pip
-
 COPY dash_proxy /tmp/dash_proxy/
 RUN pip install /tmp/dash_proxy/
 
 COPY llm_utils /llm_utils/
 RUN pip install /llm_utils/
+ENV CONFIG_PATH=/home/jovyan/config.txt
 
 COPY app /dash/app/
 RUN chown -R jovyan /dash/app/
diff --git a/app/callbacks.py b/app/callbacks.py
index 6076465849aca4147f3158bc3fe3d50db435cbfa..9a61c5cb2d483203df434aae8482552bac8dfc0c 100644
--- a/app/callbacks.py
+++ b/app/callbacks.py
@@ -1,3 +1,4 @@
+import os
 from datetime import datetime
 
 from dash import (
@@ -10,7 +11,7 @@ from dash.dependencies import (
     State
 )
 
-from llm_utils.client import ChatGPT
+from llm_utils.client import ChatGPT, get_openai_client
 
 
 def format_chat_messages(chat_history):
@@ -24,8 +25,15 @@ def format_chat_messages(chat_history):
 
 
 def register_callbacks(app: Dash):
-    
-    chat_gpt = ChatGPT(model="gpt4")
+    model="gpt4"
+    client = get_openai_client(
+        model=model,
+        config_path=os.environ.get("CONFIG_PATH")
+    )
+    chat_gpt = ChatGPT(
+        client=client,
+        model="gpt4"
+    )
     
     @app.callback(
         [Output('chat-container', 'children'),
diff --git a/app/my_app.py b/app/my_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..db083be592f45c76bd6c05d3f2783614b84cfcb0
--- /dev/null
+++ b/app/my_app.py
@@ -0,0 +1,70 @@
+import sys
+sys.path.append("/home/jovyan/")
+
+import argparse
+import logging
+
+from urllib.parse import urlparse, urljoin
+
+from dash import Dash
+
+from jupyter_server.serverapp import list_running_servers
+
+# DEBUG (not INFO) so the logging.debug diagnostics below are actually emitted.
+logging.basicConfig(level=logging.DEBUG)
+
+try:
+    from my_layout import layout
+    from my_callbacks import register_callbacks
+except ModuleNotFoundError:
+    # Do not let Dash start without its layout/callbacks, but say why we bail.
+    logging.error("my_layout/my_callbacks could not be imported; not starting Dash.")
+    sys.exit(1)
+
+
+# weird trick to find base_url for the jupyterlab
+def find_jupyterlab_base_url():
+    """Return the URL of the local Jupyterlab server (port 8888), or None."""
+    servers = list_running_servers()
+    for server in servers:
+        if server["port"] == 8888:
+            return server["url"]
+    return None
+
+
+# get the correct port from proxy
+parser = argparse.ArgumentParser()
+parser.add_argument("--port", type=int)
+args = parser.parse_args()
+port: int = args.port
+
+if not port:
+    raise ValueError(f"Port of proxy server for Dash not found in {args}.")
+logging.debug(f"Dash app running on port {port}.")
+
+base_url = find_jupyterlab_base_url()
+if base_url is None:
+    raise ValueError("Base URL of Jupyterlab could not be detected.")
+logging.debug(f"Base URL: {base_url}")
+
+proxy_base_path = urlparse(urljoin(base_url + "/", f"proxy/{port}/")).path
+logging.debug(f"Proxy base path: {proxy_base_path}")
+
+# define Dash app
+app = Dash(
+    name=__name__,
+    requests_pathname_prefix=proxy_base_path
+)
+
+# define layout
+app.layout = layout
+
+# register all callback functions
+register_callbacks(app=app)
+
+# Run Dash app in the notebook
+app.run(
+    jupyter_mode="jupyterlab",
+    port=port,
+    host="0.0.0.0",
+    debug=True
+)
diff --git a/dash_proxy/my_app_proxy.py b/dash_proxy/my_app_proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..e91a8649001749f630ee85b4e6706f99049fb634
--- /dev/null
+++ b/dash_proxy/my_app_proxy.py
@@ -0,0 +1,16 @@
+def setup_my_app_proxy():
+    """jupyter-server-proxy config: launch the Dash app on the assigned port."""
+    command = [
+        "python",
+        "/dash/app/my_app.py",
+        "--port",
+        "{port}"
+    ]
+    return {
+        "command": command,
+        "new_browser_tab": False,
+        "launcher_entry": {
+            "enabled": True,
+            "title": "MyApp"
+        }
+    }
diff --git a/dash_proxy/setup.py b/dash_proxy/setup.py
index ccd874ca87acf8f35bea40e0eb6e883150528e3f..8e1ebf5761c3de7f783ce204b25474e10f2b0d8d 100644
--- a/dash_proxy/setup.py
+++ b/dash_proxy/setup.py
@@ -5,11 +5,12 @@ setuptools.setup(
     author_email="julian.rasch@fh-muenster.de",
     description="A small module to run Dash inside a dockerized Jupyterlab.",
     name="jupyter-dash-proxy",
-    py_modules=["dash_proxy"],
+    py_modules=["dash_proxy", "my_app_proxy"],
     entry_points={
         "jupyter_serverproxy_servers": [
             # name = packagename:function_name
             "Dash = dash_proxy:setup_dash_proxy",
+            "MyApp = my_app_proxy:setup_my_app_proxy"
         ]
     },
     install_requires=["jupyter-server-proxy==4.0.0"],
diff --git a/llm_utils/src/llm_utils/client.py b/llm_utils/src/llm_utils/client.py
index 82ceeac15722b63c195615bcb18a5563f36d349d..1bca863281a30f6cca7798da46b09adbb125d0a7 100644
--- a/llm_utils/src/llm_utils/client.py
+++ b/llm_utils/src/llm_utils/client.py
@@ -1,27 +1,9 @@
 import os
-import logging
 from openai import AzureOpenAI
 from dotenv import load_dotenv
 
 from enum import Enum
 
-try:
-    found_dotenv = load_dotenv(
-        "/home/jovyan/config.txt",
-        override=True
-    )
-except ValueError:
-    logging.warn("Could not detect config.txt in /home/jovyan/. Searching in current folder ...")
-    found_dotenv = load_dotenv(
-        "config.txt",
-        override=True)
-
-if not found_dotenv: 
-    raise ValueError("Could not detect config.txt in /home/jovyan/.")
-
-AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
-AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
-OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")
 
 class OpenAIModels(Enum):
     GPT_3 = "gpt3"
@@ -33,13 +15,25 @@ class OpenAIModels(Enum):
         return [member.value for member in cls]
 
 
-def get_openai_client(model: str) -> AzureOpenAI:
+def get_openai_client(
+    model: str,
+    config_path: str
+    ) -> AzureOpenAI:
     if not model in OpenAIModels.get_all_values():
         raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")
     
+    load_dotenv(
+        dotenv_path=config_path,
+        override=True
+    )
+
+    AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
+    AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
+    OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")
+    
-    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_API_KEY, OPENAI_API_VERSION)):
+    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
         raise ValueError(
-            f"""None of the following parameters can be none: 
+            f"""None of the following parameters can be None: 
             AZURE_OPENAI_API_KEY: {AZURE_OPENAI_API_KEY},
-            AZURE_OPENAI_API_KEY: {AZURE_OPENAI_API_KEY},
+            AZURE_OPENAI_ENDPOINT: {AZURE_OPENAI_ENDPOINT},
             OPENAI_API_VERSION: {OPENAI_API_VERSION}
@@ -56,9 +50,9 @@ def get_openai_client(model: str) -> AzureOpenAI:
 
 
 class ChatGPT:
-    def __init__(self, model="gpt4"):
+    def __init__(self, client: AzureOpenAI, model: str):
         self.model = model
-        self.client = get_openai_client(model=model)
+        self.client = client
         self.messages = []
 
     def chat_with_gpt(self, user_input: str):
diff --git a/requirements.txt b/requirements.txt
index cf2d38745b7fdd77160de57b069714e8d73411e7..53389159789ff0c98b77b710e70ef4f99eba4dcc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,11 +2,14 @@ jupyter-server-proxy==4.0.0
 jupyterlab-git==0.42.0
 jupyter_server>=2.0
 
+flake8
+
 dash
 dash-bootstrap-components
 plotly
 
 openai
 rapidfuzz
+nltk
 
 python-dotenv