diff --git a/.gitignore b/.gitignore
index f75c4cd83e14ed926c87ed58ca89a88fc6d13776..6609e2b52a8167d7633d53087d93804b72ffd227 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,7 @@
 *env/
 .env
+
+*.egg-*
+*.pyc
+*.txt
+config.txt
diff --git a/Dockerfile b/Dockerfile
index e5fa5617104e60caab17506036ff1b0982cdd33a..b2dd42d08c62277e5c5f2789ea89bb129f112e3c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -9,9 +9,16 @@ RUN conda env update -q -f /tmp/environment.yml && \
     conda env export -n "root" && \
     jupyter lab build 
 
-COPY jupyter_notebook_config.py ${HOME}/.jupyter/
+RUN pip3 install --upgrade pip
 
-# copy dash app
-COPY app ${HOME}/app/
-RUN chown -R jovyan ${HOME}/app/
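+# jupyter-server-proxy hook that launches the Dash app (see dash_proxy/)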
+COPY dash_proxy /tmp/dash_proxy/
+RUN pip install /tmp/dash_proxy/
 
+# helper package wrapping the Azure OpenAI client
+COPY llm_utils /llm_utils/
+RUN pip install /llm_utils/
+
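+# copy the Dash app to the path referenced in dash_proxy.py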
+COPY app /dash/app/
+RUN chown -R jovyan /dash/app/
diff --git a/README.md b/README.md
index 0afc659489002fca20af391308f6f1756f456bac..ef4faa61b32056f2b8678950612ad916b456bf0e 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,11 @@
 # A Jupyterlab for LLM
 
+To run the Dash app or use the client, the variables AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and OPENAI_API_VERSION need to be stored in a config.txt file in the home directory (/home/jovyan).
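+
+config.txt is a plain KEY=value env file read with python-dotenv. A minimal example (placeholder values only, not real credentials):
+
+```
+AZURE_OPENAI_API_KEY=<your-api-key>
+AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
+OPENAI_API_VERSION=<api-version>
+```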
diff --git a/app/app.py b/app/app.py
index 4975d674ba0a1f11b0bc9f79d0ec5f40b9855e3b..604849e09dcb66adf48675d9192e00f9ea6828e1 100644
--- a/app/app.py
+++ b/app/app.py
@@ -1,5 +1,5 @@
 import sys 
-sys.path.append("/home/jovyan")
+sys.path.append("/home/jovyan/")
 
 import argparse
 import logging
@@ -26,7 +26,7 @@ else:
 # define Dash app
 app = Dash(
     name=__name__, 
-    requests_pathname_prefix="/dash/"
+    requests_pathname_prefix="/Dash/"
 )
 
 # define layout
diff --git a/app/callbacks.py b/app/callbacks.py
index 9d22226c2194586ef572f89df93d984cdbb45b46..6076465849aca4147f3158bc3fe3d50db435cbfa 100644
--- a/app/callbacks.py
+++ b/app/callbacks.py
@@ -1,3 +1,5 @@
+from datetime import datetime
+
 from dash import (
     html, 
     Dash
@@ -8,16 +10,49 @@ from dash.dependencies import (
     State
 )
 
+from llm_utils.client import ChatGPT
+
+
+def format_chat_messages(chat_history):
+    chat_messages = []
+    for message in chat_history:
+        chat_messages.append(html.Div([
+            html.P(f'{message["sender"]}: {message["message"]}'),
+            html.P(f'Sent at: {message["timestamp"]}')
+        ]))
+    return chat_messages
+
 
 def register_callbacks(app: Dash):
+    
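+    # note: this single ChatGPT instance (and its message history) is shared across all sessions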
+    chat_gpt = ChatGPT(model="gpt4")
+    
     @app.callback(
-        Output("output-container", "children"),
-        [Input("send-button", "n_clicks")],
-        [State("input-text", "value")]
+        [Output('chat-container', 'children'),
+        Output('chat-history', 'data')],
+        [Input('send-button', 'n_clicks')],
+        [State('user-input', 'value'),
+        State('chat-history', 'data')]
     )
-    def generate_response(n_clicks, input_text):
-        if n_clicks > 0:
-            response = "You said: " + input_text
-            return html.Div(response)
-        else:
-            return ""
+    def update_chat(n_clicks, input_value, chat_history):
+        if chat_history is None:
+            chat_history = []
+        
+        if n_clicks > 0 and input_value:
+            chat_history.append({
+                'sender': 'User',
+                'message': input_value,
+                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            })
+            
+            response = chat_gpt.chat_with_gpt(input_value)
+            
+            # Add response to chat history
+            chat_history.append({
+                'sender': 'Language Model',
+                'message': response,
+                'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            })
+        
+        return format_chat_messages(chat_history), chat_history
diff --git a/app/layout.py b/app/layout.py
index fbc93fabc4da27a70e5de4e5a87cd4d8e7a7de0b..4d82a24710224074e9123450133daf5eefd3aac2 100644
--- a/app/layout.py
+++ b/app/layout.py
@@ -3,12 +3,39 @@ from dash import (
     dcc
 )
 
-layout = html.Div(
-    className="container",
-    children=[
-        html.H1("GPT Chat", className="mt-5 mb-4"),
-        dcc.Textarea(id="input-text", placeholder="Enter your message:", className="form-control mb-3"),
-        html.Button("Send", id="send-button", n_clicks=0, className="btn btn-primary mb-3"),
-        html.Div(id="output-container")
-    ]
-)
+layout = html.Div([
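+    # client-side store for the chat history; resets when the page reloads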
+    dcc.Store(
+        id='chat-history', 
+        data=[]
+    ),
+    html.H1(
+        "Simple Chat App", 
+        style={'text-align': 'center'}
+    ),
+    html.Div(
+        id='chat-container', 
+        style={'overflowY': 'scroll', 'height': '70vh', 'padding': '10px'}
+    ),
+    html.Div([
+        dcc.Input(
+            id='user-input', 
+            type='text', 
+            placeholder='Type your message...', 
+            debounce=True
+        ),
+        html.Button(
+            'Send', 
+            id='send-button', 
+            n_clicks=0
+        )
+    ], style={
+        'display': 'flex', 
+        'alignItems': 'center', 
+        'justifyContent': 'center', 
+        'position': 'fixed', 
+        'bottom': 0, 
+        'width': '100%', 
+        'padding': '10px'
+    })
+], style={'position': 'relative'})
diff --git a/dash_proxy/dash_proxy.py b/dash_proxy/dash_proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..309e977e30e4ae08b01c6f3c3734e673b94fb444
--- /dev/null
+++ b/dash_proxy/dash_proxy.py
@@ -0,0 +1,21 @@
+def setup_dash_proxy():
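+    """Configuration hook for jupyter-server-proxy.
+
+    jupyter-server-proxy substitutes a free port for the "{port}"
+    placeholder when it launches the command.
+    """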
+    command = [
+        'python',
+        '/dash/app/app.py',
+        '--port',
+        '{port}'
+    ]
+    
+    return {
+        "command": command,
+        "new_browser_tab": False,
+        "launcher_entry": {
+            "enabled": True,
+            'title': 'Dash'
+        }
+    }
diff --git a/dash_proxy/setup.py b/dash_proxy/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccd874ca87acf8f35bea40e0eb6e883150528e3f
--- /dev/null
+++ b/dash_proxy/setup.py
@@ -0,0 +1,17 @@
+import setuptools
+
+setuptools.setup(
+    author="Julian Rasch",
+    author_email="julian.rasch@fh-muenster.de",
+    description="A small module to run Dash inside a dockerized Jupyterlab.",
+    name="jupyter-dash-proxy",
+    py_modules=["dash_proxy"],
+    entry_points={
+        "jupyter_serverproxy_servers": [
+            # name = packagename:function_name
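+            # the entry point name ("Dash") also becomes the proxy URL prefix /Dash/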
+            "Dash = dash_proxy:setup_dash_proxy",
+        ]
+    },
+    install_requires=["jupyter-server-proxy==4.0.0"],
+)
diff --git a/jupyter_notebook_config.py b/jupyter_notebook_config.py
deleted file mode 100644
index 22208d38066f4dd7b1271c4df093530370bf3943..0000000000000000000000000000000000000000
--- a/jupyter_notebook_config.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Configuration file for jupyter-notebook.
-c.ServerProxy.servers = {
-    'dash': {
-        'command': [
-            'python',
-            'app/app.py',
-            '--port',
-            '{port}'
-        ],
-        'absolute_url': False,
-        'new_browser_tab': False
-    }
-}
diff --git a/llm_utils/setup.py b/llm_utils/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..737729b7716520f1c36d7351fea156faee49258f
--- /dev/null
+++ b/llm_utils/setup.py
@@ -0,0 +1,14 @@
+import setuptools
+
+setuptools.setup(
+    author="Julian Rasch",
+    author_email="julian.rasch@fh-muenster.de",
+    description="Helper modules to work with LLMs.",
+    name="llm_utils",
+    package_dir={"": "src"},
+    packages=setuptools.find_packages(where="src"),
+    install_requires=[
+        "openai", 
+        "python-dotenv"
+    ]
+)
diff --git a/llm_utils/src/__init__.py b/llm_utils/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_utils/src/llm_utils/__init__.py b/llm_utils/src/llm_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_utils/src/llm_utils/client.py b/llm_utils/src/llm_utils/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..82ceeac15722b63c195615bcb18a5563f36d349d
--- /dev/null
+++ b/llm_utils/src/llm_utils/client.py
@@ -0,0 +1,93 @@
+import os
+import logging
+from openai import AzureOpenAI
+from dotenv import load_dotenv
+
+from enum import Enum
+
+# load_dotenv returns False instead of raising when the file is missing,
+# so check the return value rather than wrapping the call in try/except.
+found_dotenv = load_dotenv(
+    "/home/jovyan/config.txt",
+    override=True
+)
+if not found_dotenv:
+    logging.warning("Could not find config.txt in /home/jovyan/. Searching in current folder ...")
+    found_dotenv = load_dotenv(
+        "config.txt",
+        override=True
+    )
+
+if not found_dotenv:
+    raise ValueError("Could not find config.txt in /home/jovyan/ or the current folder.")
+
+AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
+AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
+OPENAI_API_VERSION = os.environ.get("OPENAI_API_VERSION")
+
+class OpenAIModels(Enum):
+    GPT_3 = "gpt3"
+    GPT_4 = "gpt4"
+    EMBED = "embed"
+
+    @classmethod
+    def get_all_values(cls):
+        return [member.value for member in cls]
+
+
+def get_openai_client(model: str) -> AzureOpenAI:
+    if model not in OpenAIModels.get_all_values():
+        raise ValueError(f"<model> needs to be one of {OpenAIModels.get_all_values()}.")
+    
+    if any(p is None for p in (AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION)):
+        raise ValueError(
+            f"""None of the following parameters can be None:
+            AZURE_OPENAI_API_KEY: {'<set>' if AZURE_OPENAI_API_KEY else None},
+            AZURE_OPENAI_ENDPOINT: {AZURE_OPENAI_ENDPOINT},
+            OPENAI_API_VERSION: {OPENAI_API_VERSION}
+            """
+        )
+    
+    client = AzureOpenAI(
+        api_key=AZURE_OPENAI_API_KEY,
+        azure_endpoint=AZURE_OPENAI_ENDPOINT,
+        api_version=OPENAI_API_VERSION, 
+        azure_deployment=model
+    )
+    return client
+
+
+class ChatGPT:
+    def __init__(self, model="gpt4"):
+        self.model = model
+        self.client = get_openai_client(model=model)
+        self.messages = []
+
+    def chat_with_gpt(self, user_input: str):
+        self.messages.append({
+            "role": "user",
+            "content": user_input
+        })
+        response = self._generate_response(self.messages)
+        return response
+
+    def _generate_response(self, messages):
+        response = self.client.chat.completions.create(
+            model=self.model,
+            messages=messages,        
+            temperature=0.2, 
+            max_tokens=150,
+            top_p=1.0
+        )
+        response_message = response.choices[0].message
+        self.messages.append({
+            "role": response_message.role,
+            "content": response_message.content
+        })
+
+        return response_message.content
+
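+
+# Example usage (assumes an Azure OpenAI deployment named "gpt4"):
+#   chat = ChatGPT(model="gpt4")
+#   print(chat.chat_with_gpt("Hello!"))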