first commit
This commit is contained in:
parent 5c5d88c92f
commit eb4f62c56d
41 changed files with 3851 additions and 19 deletions
51
docker_svc/.env
Normal file
@@ -0,0 +1,51 @@
# Database configuration
QDRANT_HOST=qdrant
QDRANT_PORT=6333
COLLECTION_NAME=ai_chron_docs_1024

# Ollama configuration
OLLAMA_HOST=0.0.0.0
OLLAMA_PORT=11434
OLLAMA_BASE_URL=http://ollama:11434
OLLAMA_NVIDIA_CAPABILITIES=compute,utility
OLLAMA_NVIDIA_VISIBLE_DEVICES=all

# Model configuration
EMBED_MODEL_NAME=mxbai-embed-large
VECTOR_SIZE=1024
LLM_MODEL_NAME=mistral-small:latest
LLM_MODEL_NAME_THINKING=deepseek-r1:14b
TEMPERATURE=0.7

# Timeouts
TIMEOUT_REQUEST_EMBED=10
TIMEOUT_REQUEST_CHAT_DIRECT=60
TIMEOUT_REQUEST_CHAT_THINKING=100

# MariaDB configuration
MYSQL_DATABASE=prompts
MYSQL_ROOT_PASSWORD=pit_pass_root
MYSQL_USER=pit_user
MYSQL_PASSWORD=pit_pass_user

# phpMyAdmin configuration
PMA_HOST=mariadb
PMA_PORT=3306
PMA_USER=pit_user
PMA_PASSWORD=pit_pass_user

# RAG service configuration
RAG_SERVICE_HOST=rag-service
RAG_SERVICE_PORT=8000
CORS_ORIGINS=http://localhost:3000,http://127.0.0.1:3000,http://localhost:8000,http://real-time-chat-app:3000

# React app configuration
REACT_APP_PORT=3000
REACT_APP_API_BASE_URL=http://rag-service:8000

# Database logging configuration
DB_HOST=mariadb
DB_PORT=3306
DB_USER=pit_user
DB_PASSWORD=pit_pass_user
DB_NAME=prompts
108
docker_svc/Dockercompose.yaml
Normal file
@@ -0,0 +1,108 @@
services:

  mariadb:
    image: mariadb:latest
    restart: always
    env_file:
      - .env
    ports:
      - "${DB_PORT:-3306}:3306"
    volumes:
      - maria_data:/var/lib/mysql
    networks:
      - backend

  phpmyadmin:
    image: phpmyadmin/phpmyadmin
    restart: always
    ports:
      - "${PHPMYADMIN_PORT:-8080}:80"
    env_file:
      - .env
    networks:
      - backend
    depends_on:
      - mariadb

  qdrant:
    image: qdrant/qdrant
    restart: always
    ports:
      - "${QDRANT_PORT:-6333}:6333"
    volumes:
      - qdrant_data:/qdrant/storage
    networks:
      - backend

  ollama:
    image: ollama/ollama:latest
    restart: always
    ports:
      - "${OLLAMA_PORT:-11434}:11434"
    volumes:
      - ollama_data:/root/.ollama
    env_file:
      - .env
    environment:
      - NVIDIA_DRIVER_CAPABILITIES=${OLLAMA_NVIDIA_CAPABILITIES}
      - NVIDIA_VISIBLE_DEVICES=${OLLAMA_NVIDIA_VISIBLE_DEVICES}
    networks:
      - backend
    deploy: # Reserve the NVIDIA runtime for GPU inference
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [ gpu ]
              count: all

  rag-service:
    build: ./agent
    # image: thestefano/crohngpt:latest
    env_file:
      - .env
    ports:
      - "${RAG_SERVICE_PORT:-8000}:8000"
    volumes:
      - pdfs:/app/pdfs
      - ./agent/app:/app
    depends_on:
      - qdrant
      - mariadb
      - ollama
    networks:
      - backend
      - frontend # Frontend network allows direct communication with the React app
    restart: always
    deploy:
      replicas: 1

  real-time-chat-app:
    build:
      context: ./real-time-chat-app
      dockerfile: Dockerfile
    env_file:
      - .env
    environment:
      - REACT_APP_API_BASE_URL=${REACT_APP_API_BASE_URL}
    ports:
      - "${REACT_APP_PORT:-3000}:3000" # React app
    networks:
      - frontend
    restart: always
    depends_on:
      - rag-service
    deploy:
      replicas: 1

volumes:
  qdrant_data:
  pdfs:
  maria_data:
  ollama_data:

networks:
  backend:
    driver: bridge
  frontend:
    driver: bridge
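Note: because the Compose file uses the non-standard name Dockercompose.yaml, it must be passed explicitly when launching the stack, e.g. docker compose -f Dockercompose.yaml up -d --build (an assumed invocation, not part of this commit). The GPU reservation under deploy.resources additionally assumes the NVIDIA Container Toolkit is installed on the host.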
15
docker_svc/agent/Dockerfile
Normal file
@@ -0,0 +1,15 @@
FROM python:3.11-alpine

WORKDIR /app

COPY requirements.txt .

RUN apk add --no-cache build-base \
    && pip install --no-cache-dir -r requirements.txt \
    && apk del build-base

COPY app .

EXPOSE 8000

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
0
docker_svc/agent/Dockerfile:Zone.Identifier
Normal file
36
docker_svc/agent/app/libs/check_medical.py
Normal file
@@ -0,0 +1,36 @@
def is_medical_query(message: str) -> bool:
    """
    Check if the user message contains medical keywords. This function is case-insensitive.

    :param message: The user message or any string to check.
    :return: True if the message contains medical keywords, False otherwise.
    """
    medical_keywords = [
        "health",
        "doctor",
        "medicine",
        "disease",
        "symptom",
        "treatment",
        "salute",
        "medico",
        "malattia",
        "sintomo",
        "cura",
        "sanità",
        "santé",
        "médecin",
        "médicament",
        "maladie",
        "symptôme",
        "traitement",
        "gesundheit",
        "arzt",
        "medizin",
        "krankheit",
        "symptom",
        "behandlung",
    ]

    message_lower = message.lower()
    return any(keyword in message_lower for keyword in medical_keywords)
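A quick sanity check (illustrative only, not part of the commit) showing the case-insensitive substring matching:

from libs.check_medical import is_medical_query

assert is_medical_query("My DOCTOR changed my treatment")       # matches "doctor" and "treatment"
assert not is_medical_query("What time does the train leave?")  # no medical keyword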
43
docker_svc/agent/app/libs/log_prompts.py
Normal file
@@ -0,0 +1,43 @@
import os
from mysql.connector import connect, Error
import logging

# Configure logging
logger = logging.getLogger(__name__)

def log_prompt_to_db(userid: str | None, ip: str, prompt: str, answer: str):
    """
    Logs the user's prompt and the corresponding response to the database.

    Args:
        userid (str | None): User ID (optional, can be None).
        ip (str): Client's IP address.
        prompt (str): Full conversation history provided by the user.
        answer (str): Response generated by the AI.
    """
    try:
        # Connect to the database using environment variables
        connection = connect(
            host=os.getenv("DB_HOST"),
            port=int(os.getenv("DB_PORT", "3306")),
            user=os.getenv("DB_USER"),
            password=os.getenv("DB_PASSWORD"),
            database=os.getenv("DB_NAME")
        )
        cursor = connection.cursor()

        # SQL query to insert data
        query = """
            INSERT INTO user_prompts (userid, ip, prompt, answer)
            VALUES (%s, %s, %s, %s)
        """
        values = (userid, ip, prompt, answer)
        cursor.execute(query, values)

        # Commit the transaction and close resources
        connection.commit()
        cursor.close()
        connection.close()

    except Error as e:
        logger.error(f"Error logging prompt to database: {e}")
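log_prompt_to_db() assumes a user_prompts table that this commit does not create. A plausible bootstrap, sketched under the assumption that the schema simply mirrors the INSERT above (column names and types are guesses, not taken from the repo):

from mysql.connector import connect

# Hypothetical DDL mirroring the INSERT in log_prompt_to_db(); types are assumptions.
ddl = """
CREATE TABLE IF NOT EXISTS user_prompts (
    id INT AUTO_INCREMENT PRIMARY KEY,
    userid VARCHAR(255) NULL,
    ip VARCHAR(45) NOT NULL,
    prompt TEXT NOT NULL,
    answer TEXT NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
"""

# Credentials taken from the .env file above.
connection = connect(host="mariadb", port=3306, user="pit_user",
                     password="pit_pass_user", database="prompts")
cursor = connection.cursor()
cursor.execute(ddl)
connection.commit()
cursor.close()
connection.close()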
36
docker_svc/agent/app/libs/manage_languages.py
Normal file
@@ -0,0 +1,36 @@
# libs/manage_languages.py

from langdetect import detect
from fastapi import HTTPException
import logging

logger = logging.getLogger(__name__)

def validate_language(language: str) -> None:
    """Validate the language parameter. Raises an HTTPException if the language is invalid."""
    valid_languages = {"french", "italian", "english", "german", "auto"}
    if language not in valid_languages:
        raise HTTPException(
            status_code=400,
            detail="Invalid language. Must be one of: french, italian, english, german, or auto"
        )

def detect_language(current_message: str) -> str:
    """Detect the language of the current message. Defaults to French if detection fails."""
    try:
        detected_lang = detect(current_message)
        if detected_lang == "fr":
            language = "french"
        elif detected_lang == "it":
            language = "italian"
        elif detected_lang == "en":
            language = "english"
        elif detected_lang == "de":
            language = "german"
        else:
            language = "french"
        logger.info(f"Detected language: {language}")
        return language
    except Exception as e:
        logger.error(f"Language detection failed: {str(e)}")
        return "french"
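For illustration (outputs depend on langdetect's statistical model, so treat these as typical rather than guaranteed results):

detect_language("Bonjour, comment allez-vous ?")   # -> "french"
detect_language("Wie geht es dir heute?")          # -> "german"
detect_language("¿Dónde está la biblioteca?")      # -> "french" (unsupported languages fall back to French)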
14
docker_svc/agent/app/libs/models.py
Normal file
@@ -0,0 +1,14 @@
from typing import List, Optional, Literal
from pydantic import BaseModel, Field

class ChatMessage(BaseModel):
    role: Literal["user", "coach"]
    content: str

class ChatRequest(BaseModel):
    messages: List[ChatMessage]
    language: str = "auto"
    temperature: float = 0.7
    reasoning: bool = False
    stream: bool = True
    personality: str = "supportive"
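A request built from these models, using the declared defaults (illustrative, not from the repo; the .json() call is pydantic v1-style, matching main.py's use of .dict()):

from libs.models import ChatMessage, ChatRequest

req = ChatRequest(messages=[
    ChatMessage(role="user", content="Bonjour le bro!"),
    ChatMessage(role="coach", content="Salut."),
    ChatMessage(role="user", content="Comment gérer une poussée ?"),
])
# Defaults fill in: language="auto", temperature=0.7, reasoning=False,
# stream=True, personality="supportive"
print(req.json())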
192
docker_svc/agent/app/libs/prompt_helper.py
Normal file
@@ -0,0 +1,192 @@
from llama_index.core.base.llms.types import ChatMessage as LlamaChatMessage
import logging
from libs.models import ChatMessage
from typing import List, Dict, Any, Optional, AsyncGenerator
import httpx
import json
import os
import asyncio

logger = logging.getLogger(__name__)

def format_system_prompt(system_prompt_template: str, language_prompts: dict, language: str,
                         retrieved_docs: str, is_medical: bool, personality: str = "supportive",
                         personality_prompts: dict = {}) -> str:
    """Format the system prompt with the language-specific content, the personality, and the retrieved documents."""
    language_prompt = language_prompts[language]["prompt"]
    language_disclaimer = language_prompts[language]["disclaimer"]
    language_constraint = "" if language == "auto" else language_prompts[language]["constraint"]

    # Improved logging and handling of the personality
    if personality not in personality_prompts:
        logger.warning(f"Personality '{personality}' not found in prompts, using default empty prompt")
        personality_prompt = ""
    else:
        personality_prompt = personality_prompts[personality]["prompt"]
        logger.info(f"Using '{personality}' personality: {personality_prompts[personality]['description'][:50]}...")

    logger.info(f"Formatting system prompt with language {language}, personality {personality}")
    system_message_content = system_prompt_template.format(
        language_prompt=language_prompt,
        context=retrieved_docs,
        language_disclaimer=language_disclaimer if is_medical else "",
        personality_prompt=personality_prompt,
        language_constraint=language_constraint
    )
    logger.debug(f"System message content: {system_message_content[:200]}...")
    return system_message_content

async def perform_inference_streaming(
    llm,
    system_message: str,
    history: List[Dict],
    current_message: str
) -> AsyncGenerator[str, None]:
    """Stream inference results from the Ollama API."""
    base_url = os.getenv("OLLAMA_BASE_URL", "http://ollama:11434")

    # Prepare messages for the Ollama API
    messages = []

    # Add system message
    messages.append({
        "role": "system",
        "content": system_message
    })

    # Add history
    for msg in history:
        messages.append({
            "role": "user" if msg.role == "user" else "assistant",
            "content": msg.content
        })

    # Add current user message
    messages.append({
        "role": "user",
        "content": current_message
    })

    # Prepare request payload
    payload = {
        "model": llm.model,
        "messages": messages,
        "stream": True,
        "options": {
            "temperature": llm.temperature
        }
    }

    logger.debug(f"Sending streaming request to Ollama API: {base_url}/api/chat")

    try:
        async with httpx.AsyncClient() as client:
            async with client.stream("POST", f"{base_url}/api/chat", json=payload, timeout=60.0) as response:
                if response.status_code != 200:
                    error_detail = await response.aread()
                    logger.error(f"Error from Ollama API: {response.status_code}, {error_detail}")
                    yield f"Error: Failed to get response from language model (Status {response.status_code})"
                    return

                # Variable to accumulate the full response
                full_response = ""

                # Process the streaming response
                async for chunk in response.aiter_text():
                    if not chunk.strip():
                        continue

                    # Each chunk might contain one JSON object
                    try:
                        data = json.loads(chunk)
                        # Process message content if available
                        if 'message' in data and 'content' in data['message']:
                            content = data['message']['content']
                            full_response += content
                            yield content

                        # Check if this is the final message with the done flag
                        if data.get('done', False):
                            logger.debug("Streaming response completed")
                    except json.JSONDecodeError as e:
                        logger.error(f"Failed to parse streaming response: {e}, chunk: {chunk}")

    except Exception as e:
        logger.error(f"Error during streaming inference: {str(e)}")
        yield f"Error: {str(e)}"

    # Yield an empty string at the end to signal completion
    yield ""

def perform_inference(
    llm,
    system_message: str,
    history: List[Dict],
    current_message: str,
    stream: bool = False
) -> str:
    """Perform inference with the given LLM."""
    if stream:
        # This will be handled by the streaming endpoint
        raise ValueError("Streaming not supported in synchronous inference")

    # Prepare messages for the API
    messages = []

    # Add system message
    messages.append({
        "role": "system",
        "content": system_message
    })

    # Add history
    for msg in history:
        messages.append({
            "role": "user" if msg.role == "user" else "assistant",
            "content": msg.content
        })

    # Add current user message
    messages.append({
        "role": "user",
        "content": current_message
    })

    # For non-streaming, use the httpx client directly to call the Ollama API
    base_url = os.getenv("OLLAMA_BASE_URL", "http://ollama:11434")

    # Prepare request payload
    payload = {
        "model": llm.model,
        "messages": messages,
        "stream": False,
        "options": {
            "temperature": llm.temperature
        }
    }

    logger.debug(f"Sending non-streaming request to Ollama API: {base_url}/api/chat")

    try:
        with httpx.Client(timeout=60.0) as client:
            response = client.post(f"{base_url}/api/chat", json=payload)

            if response.status_code != 200:
                logger.error(f"Error from Ollama API: {response.status_code}, {response.text}")
                return f"Error: Failed to get response from language model (Status {response.status_code})"

            data = response.json()
            if 'message' in data and 'content' in data['message']:
                return data['message']['content']
            else:
                logger.error(f"Unexpected response format: {data}")
                return "Error: Unexpected response format from language model"

    except Exception as e:
        logger.error(f"Error during non-streaming inference: {str(e)}")
        return f"Error: {str(e)}"

def select_llm(llm, llm_reasoning, reasoning: bool):
    """Select the LLM model based on the reasoning flag."""
    selected_llm = llm_reasoning if reasoning else llm
    return selected_llm
79
docker_svc/agent/app/libs/qdrant_helper.py
Normal file
@@ -0,0 +1,79 @@
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
import logging

logger = logging.getLogger(__name__)

def ensure_collection_exists(qdrant_client: QdrantClient, collection_name: str, vector_size: int) -> None:
    """Verify that the Qdrant collection exists, and create it if it does not."""
    try:
        if not qdrant_client.collection_exists(collection_name):
            qdrant_client.create_collection(
                collection_name=collection_name,
                vectors_config=VectorParams(size=vector_size, distance=Distance.COSINE)
            )
            logger.info(f"Created Qdrant collection '{collection_name}' with vector size {vector_size}")
        else:
            logger.info(f"Qdrant collection '{collection_name}' already exists")
    except Exception as e:
        logger.error(f"Failed to ensure Qdrant collection exists: {str(e)}")
        raise

def retrieve_documents(qdrant_client: QdrantClient, collection_name: str, embed_model, current_message: str) -> str:
    """Get the relevant documents from Qdrant based on the current message."""
    logger.info("Initializing Qdrant vector store")
    vector_store = QdrantVectorStore(
        client=qdrant_client,
        collection_name=collection_name,
        embed_model=embed_model
    )
    logger.info("Building vector store index")
    index = VectorStoreIndex.from_vector_store(
        vector_store=vector_store,
        embed_model=embed_model
    )
    logger.info("Retrieving documents")
    retriever = index.as_retriever()
    retrieved_nodes = retriever.retrieve(current_message)
    retrieved_docs = "\n\n".join([node.text for node in retrieved_nodes])
    logger.debug(f"Retrieved documents (first 200 chars): {retrieved_docs[:200]}...")
    return retrieved_docs

def index_documents(qdrant_client: QdrantClient, collection_name: str, embed_model, documents) -> None:
    """Index the provided documents into the Qdrant collection."""
    vector_store = QdrantVectorStore(
        client=qdrant_client,
        collection_name=collection_name,
        embed_model=embed_model
    )
    logger.info(f"Indexing documents into Qdrant collection '{collection_name}'")
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    VectorStoreIndex.from_documents(
        documents,
        storage_context=storage_context,
        embed_model=embed_model
    )
    logger.info("Successfully indexed documents")

def delete_all_documents(qdrant_client: QdrantClient, collection_name: str, vector_size: int) -> None:
    """Delete all vectors from the Qdrant collection by recreating it."""
    try:
        # Check if the collection exists
        if qdrant_client.collection_exists(collection_name):
            # Delete the collection
            qdrant_client.delete_collection(collection_name=collection_name)
            logger.info(f"Deleted Qdrant collection '{collection_name}'")

            # Recreate the empty collection with the same parameters
            qdrant_client.create_collection(
                collection_name=collection_name,
                vectors_config=VectorParams(size=vector_size, distance=Distance.COSINE)
            )
            logger.info(f"Recreated empty Qdrant collection '{collection_name}'")
        else:
            logger.warning(f"Qdrant collection '{collection_name}' does not exist, nothing to delete")
    except Exception as e:
        logger.error(f"Failed to delete Qdrant collection: {str(e)}")
        raise
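A minimal usage sketch (assumptions: Qdrant reachable on localhost:6333, and the 1024-dimension collection named in the .env file):

from qdrant_client import QdrantClient
import libs.qdrant_helper as qdrant_helper

client = QdrantClient(host="localhost", port=6333)
qdrant_helper.ensure_collection_exists(client, "ai_chron_docs_1024", 1024)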
304
docker_svc/agent/app/main.py
Normal file
@@ -0,0 +1,304 @@
from fastapi import FastAPI, File, UploadFile, HTTPException, Request, BackgroundTasks
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.utils import get_openapi
from llama_index.core import SimpleDirectoryReader
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama
from typing import Literal, List
from pydantic import BaseModel
from langdetect import DetectorFactory
from qdrant_client import QdrantClient
import os
import uuid
import yaml
from dotenv import load_dotenv
import logging
import asyncio
import json

from libs.check_medical import is_medical_query
import libs.manage_languages as manage_languages
import libs.qdrant_helper as qdrant_helper
from libs.models import ChatMessage, ChatRequest
import libs.prompt_helper as prompt_helper
from libs.log_prompts import log_prompt_to_db

# Set seed for reproducibility of language detection
DetectorFactory.seed = 0

# Configure logging
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Load environment variables from .env file
load_dotenv()

# Initialize FastAPI app
app = FastAPI(
    docs_url="/docs",
    redoc_url="/redoc",
    max_request_body_size=100 * 1024 * 1024  # 100MB
)

# Get CORS origins from environment or use default
cors_origins = os.getenv("CORS_ORIGINS", "http://localhost:3000,http://127.0.0.1:3000").split(",")

# Add CORS middleware with proper configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=cors_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["Content-Type", "X-Content-Type-Options"],
    max_age=600,  # 10 minutes for preflight cache
)

# Load custom OpenAPI schema
def load_custom_openapi():
    with open("openapi.json", "r") as f:
        custom_openapi = yaml.safe_load(f)
    default_openapi = get_openapi(
        title=app.title,
        version=app.version,
        openapi_version=app.openapi_version,
        description=app.description,
        routes=app.routes,
    )
    default_openapi["info"] = custom_openapi.get("info", default_openapi["info"])
    default_openapi["paths"].update(custom_openapi.get("paths", {}))
    return default_openapi

app.openapi = load_custom_openapi

with open("prompts.yaml", "r") as f:
    prompts = yaml.safe_load(f)
SYSTEM_PROMPT_TEMPLATE = prompts["system_prompt"]
LANGUAGE_PROMPTS = prompts["languages"]
PERSONALITY_PROMPTS = prompts["personalities"]

# Configuration of models and services using .env variables
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "https://ollama.kube-ext.isc.heia-fr.ch")
logger.info(f"Starting application with OLLAMA_BASE_URL: {OLLAMA_BASE_URL}")

# Embedding model using Ollama
embed_model = OllamaEmbedding(
    model_name=os.getenv("EMBED_MODEL_NAME", "mxbai-embed-large"),
    base_url=OLLAMA_BASE_URL,
    request_timeout=float(os.getenv("TIMEOUT_REQUEST_EMBED", "20.0"))
)
logger.info("OllamaEmbedding initialized with model: " + os.getenv("EMBED_MODEL_NAME", "mxbai-embed-large"))

# Direct inference model
llm = Ollama(
    model=os.getenv("LLM_MODEL_NAME", "llama3"),
    base_url=OLLAMA_BASE_URL,
    temperature=float(os.getenv("TEMPERATURE", "0.7")),
    request_timeout=float(os.getenv("TIMEOUT_REQUEST_CHAT_DIRECT", "30.0"))
)
logger.info(f"Ollama LLM initialized with model: {llm.model} "
            f"with temperature: {llm.temperature}")

# Reasoning model
llm_reasoning = Ollama(
    model=os.getenv("LLM_MODEL_NAME_THINKING", "deepseek-r1:14b"),
    base_url=OLLAMA_BASE_URL,
    temperature=float(os.getenv("TEMPERATURE", "0.7")),
    request_timeout=float(os.getenv("TIMEOUT_REQUEST_CHAT_THINKING", "60.0"))
)
logger.info(f"Ollama reasoning LLM initialized with model: {llm_reasoning.model} "
            f"with temperature: {llm_reasoning.temperature}")

# Qdrant configuration
qdrant_client = QdrantClient(
    host=os.getenv("QDRANT_HOST", "localhost"),
    port=int(os.getenv("QDRANT_PORT", "6333"))
)
collection_name = os.getenv("COLLECTION_NAME", "default_collection")
vector_size = int(os.getenv("VECTOR_SIZE", "1024"))
logger.info(f"Qdrant client initialized with host: {os.getenv('QDRANT_HOST')} and collection: {collection_name}")

# Ensure the Qdrant collection exists
qdrant_helper.ensure_collection_exists(qdrant_client, collection_name, vector_size)

# Endpoint to upload PDFs
@app.post("/upload")
async def upload_pdfs(files: List[UploadFile] = File(...)):
    logger.info("Received upload request")
    try:
        uploaded_files_count = len(files)
        logger.debug(f"Number of files to upload: {uploaded_files_count}")

        for file in files:
            file_id = str(uuid.uuid4())
            file_path = f"./pdfs/{file_id}.pdf"
            logger.debug(f"Processing file: {file.filename}, saving as {file_path}")

            with open(file_path, "wb") as f:
                f.write(await file.read())
            logger.debug(f"File {file.filename} saved successfully")

            documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
            logger.debug(f"Loaded {len(documents)} documents from {file.filename}")

            qdrant_helper.index_documents(qdrant_client, collection_name, embed_model, documents)

        return {"message": f"{uploaded_files_count} files processed and indexed successfully"}
    except Exception as e:
        logger.error(f"Error in upload endpoint: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error processing files: {str(e)}")

# Chat endpoint with language, temperature, and reasoning support
@app.post("/chat")
async def chat_inference(chat_request: ChatRequest, http_request: Request, background_tasks: BackgroundTasks):
    logger.info("Received chat request")
    try:
        if not chat_request.messages:
            logger.warning("No messages provided in the request")
            raise HTTPException(status_code=400, detail="No messages provided")

        # Log the complete request object to inspect its contents
        logger.debug(f"Complete chat request object: {chat_request.dict()}")

        logger.debug(f"Request messages: {chat_request.messages}")
        logger.debug(f"Requested language: {chat_request.language}")
        logger.debug(f"Requested temperature: {chat_request.temperature}")
        logger.debug(f"Requested reasoning: {chat_request.reasoning}")
        logger.debug(f"Requested streaming: {chat_request.stream}")
        logger.debug(f"Requested personality: {chat_request.personality}")

        # Validate language
        manage_languages.validate_language(chat_request.language)

        # More detailed logging of the personality
        logger.info(f"Processing request with personality: {chat_request.personality}")

        # Validate personality
        if chat_request.personality not in ["cool", "cynical", "supportive"]:
            logger.warning(f"Invalid personality: {chat_request.personality}, using 'supportive' as default")
            chat_request.personality = "supportive"

        # Validate temperature
        if not (0 < chat_request.temperature < 1):
            raise HTTPException(status_code=400, detail="Temperature must be between 0 and 1 (exclusive)")

        # Prepare message data
        current_message = chat_request.messages[-1].content.lower()
        history = chat_request.messages[:-1]
        logger.debug(f"Current user message: {current_message}")
        logger.debug(f"Message history: {history}")

        # Prepare full conversation history as a concatenated string
        conversation_history = "\n".join([f"{msg.role}: {msg.content}" for msg in chat_request.messages])
        logger.debug(f"Full conversation history: {conversation_history}")

        # Detect language if "auto"
        if chat_request.language == "auto":
            chat_request.language = manage_languages.detect_language(current_message)
            logger.info(f"Detected language using inference: {chat_request.language}")

        # Check if the query is medical-related
        is_medical = is_medical_query(current_message)
        logger.debug(f"Is medical-related query? {is_medical}")

        # Select LLM and set temperature
        selected_llm = prompt_helper.select_llm(llm, llm_reasoning, chat_request.reasoning)
        selected_llm.temperature = chat_request.temperature
        logger.info(f"Using LLM model: {selected_llm.model} with temperature: {selected_llm.temperature}")

        # Retrieve documents from Qdrant
        retrieved_docs = qdrant_helper.retrieve_documents(qdrant_client, collection_name, embed_model, current_message)

        # Format the system prompt with the personality
        system_message_content = prompt_helper.format_system_prompt(
            SYSTEM_PROMPT_TEMPLATE,
            LANGUAGE_PROMPTS,
            chat_request.language,
            retrieved_docs,
            is_medical,
            chat_request.personality,  # personality passed through
            PERSONALITY_PROMPTS       # personality prompt dictionary
        )

        # Decide between streaming and synchronous response
        if chat_request.stream:
            # Streaming response
            logger.info("Using streaming response")

            async def generate():
                full_response = ""
                async for content in prompt_helper.perform_inference_streaming(
                    selected_llm,
                    system_message_content,
                    history,
                    chat_request.messages[-1].content
                ):
                    if content:
                        full_response += content
                        # Standard SSE format, with a trailing \n\n delimiting events
                        yield f"data: {json.dumps({'content': content, 'full': full_response})}\n\n"

                # Log the full conversation and response
                background_tasks.add_task(
                    log_prompt_to_db,
                    None,  # TODO: User ID not available yet
                    http_request.client.host,  # Client's IP address
                    conversation_history,  # Full conversation history
                    full_response  # AI-generated response
                )

                # Signal the end of the stream in a consistent SSE format
                yield f"data: {json.dumps({'done': True})}\n\n"

            return StreamingResponse(
                generate(),
                media_type="text/event-stream; charset=utf-8",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no",
                    "Content-Type": "text/event-stream; charset=utf-8",
                }
            )
        else:
            # Non-streaming response
            logger.info("Using non-streaming response")

            response_content = prompt_helper.perform_inference(
                selected_llm,
                system_message_content,
                history,
                chat_request.messages[-1].content,
                stream=False
            )

            # Log the full conversation and response in the background
            background_tasks.add_task(
                log_prompt_to_db,
                None,  # TODO: User ID not available yet
                http_request.client.host,  # Client's IP address
                conversation_history,  # Full conversation history
                response_content  # AI-generated response
            )

            return {"response": response_content}

    except Exception as e:
        logger.error(f"Error in chat inference: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error processing chat: {str(e)}")

# DELETE endpoint to clear all documents
@app.delete("/docs")
async def delete_all_docs():
    logger.info("Received request to delete all documents")
    try:
        qdrant_helper.delete_all_documents(qdrant_client, collection_name, vector_size)
        return {"message": "All documents have been deleted from the database"}
    except Exception as e:
        logger.error(f"Error in delete endpoint: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error deleting documents: {str(e)}")
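A minimal client sketch for the streaming /chat endpoint (not part of the commit; assumes the service is published on localhost:8000 as in the Compose file):

import json
import httpx

payload = {
    "messages": [{"role": "user", "content": "Qu'est-ce que la maladie de Crohn ?"}],
    "language": "auto",
    "stream": True,
}

with httpx.Client(timeout=100.0) as client:
    with client.stream("POST", "http://localhost:8000/chat", json=payload) as response:
        response.raise_for_status()
        for line in response.iter_lines():
            # SSE events arrive as "data: {...}" lines separated by blank lines
            if not line.startswith("data: "):
                continue
            event = json.loads(line[len("data: "):])
            if event.get("done"):
                break
            print(event["content"], end="", flush=True)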
235
docker_svc/agent/app/openapi.json
Normal file
@@ -0,0 +1,235 @@
{
  "openapi": "3.0.0",
  "info": {
    "title": "AI Crohn Coach RAG API",
    "version": "2.0.0",
    "description": "This API provides REST endpoints with Server-Sent Events (SSE) streaming capabilities for interactive chat"
  },
  "paths": {
    "/upload": {
      "post": {
        "summary": "Upload PDFs",
        "requestBody": {
          "content": {
            "multipart/form-data": {
              "schema": {
                "type": "object",
                "properties": {
                  "files": {
                    "type": "array",
                    "items": {
                      "type": "string",
                      "format": "binary"
                    }
                  }
                }
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "Files uploaded successfully"
          }
        }
      }
    },
    "/chat": {
      "post": {
        "summary": "Chat Inference with streaming",
        "description": "Send a request to the chat API. The API will respond with a stream of Server-Sent Events (SSE) by default, or a single JSON response if stream is set to false.",
        "requestBody": {
          "content": {
            "application/json; charset=utf-8": {
              "schema": {
                "type": "object",
                "properties": {
                  "messages": {
                    "type": "array",
                    "items": {
                      "type": "object",
                      "properties": {
                        "role": {
                          "type": "string",
                          "enum": ["user", "coach"]
                        },
                        "content": {
                          "type": "string",
                          "example": "Explique-moi comme si j'avais 5 ans. Qu'est-ce que la maladie de Crohn et comment savoir si je l'ai ?"
                        }
                      },
                      "required": ["role", "content"]
                    },
                    "example": [
                      {"role": "user", "content": "Bonjour le bro!"},
                      {"role": "coach", "content": "Salut."},
                      {"role": "user", "content": "Explique-moi comme si j'avais 5 ans. Qu'est-ce que la maladie de Crohn et comment savoir si je l'ai ?"}
                    ]
                  },
                  "language": {
                    "type": "string",
                    "enum": ["french", "italian", "english", "german", "auto"],
                    "default": "auto",
                    "description": "The language for the response. Must be one of: french, italian, english, german. Defaults to auto if not specified, which will try to infer the language."
                  },
                  "temperature": {
                    "type": "number",
                    "format": "float",
                    "description": "The temperature for the response. Must be a float between 0 and 1. Defaults to 0.7 if not specified.",
                    "default": 0.7,
                    "minimum": 0,
                    "maximum": 1,
                    "exclusiveMaximum": false,
                    "exclusiveMinimum": false,
                    "example": 0.7
                  },
                  "reasoning": {
                    "type": "boolean",
                    "description": "Whether to use reasoning for the response. Defaults to false if not specified. Reasoning allows the model to show its thinking process.",
                    "default": false,
                    "example": false
                  },
                  "stream": {
                    "type": "boolean",
                    "description": "Whether to return a streaming response. If true, the response will be a stream of Server-Sent Events (SSE). If false, the response will be a single JSON object.",
                    "default": true,
                    "example": true
                  },
                  "personality": {
                    "type": "string",
                    "enum": ["cool", "cynical", "supportive"],
                    "default": "supportive",
                    "description": "The personality style for AI responses: cool (confident and direct), cynical (critical and pragmatic), or supportive (empathetic and encouraging).",
                    "example": "supportive"
                  }
                },
                "required": ["messages"]
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "Answer returned successfully",
            "content": {
              "application/json": {
                "schema": {
                  "type": "object",
                  "properties": {
                    "response": {
                      "type": "string"
                    }
                  }
                }
              },
              "text/event-stream": {
                "schema": {
                  "type": "object",
                  "properties": {
                    "content": {
                      "type": "string",
                      "description": "A fragment of the response text"
                    },
                    "full": {
                      "type": "string",
                      "description": "The accumulated response so far"
                    },
                    "done": {
                      "type": "boolean",
                      "description": "Indicates whether the response is complete"
                    }
                  }
                }
              }
            }
          }
        }
      }
    },
    "/docs": {
      "delete": {
        "summary": "Delete all documents",
        "description": "Removes all documents from the database",
        "responses": {
          "200": {
            "description": "All documents have been successfully deleted",
            "content": {
              "application/json": {
                "schema": {
                  "type": "object",
                  "properties": {
                    "message": {
                      "type": "string"
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  },
  "components": {
    "schemas": {
      "WebSocketMessage": {
        "type": "object",
        "properties": {
          "messages": {
            "type": "array",
            "items": {
              "type": "object",
              "properties": {
                "role": {
                  "type": "string",
                  "enum": ["user", "coach"]
                },
                "content": {
                  "type": "string"
                }
              }
            }
          },
          "language": {
            "type": "string",
            "enum": ["french", "italian", "english", "german", "auto"]
          },
          "temperature": {
            "type": "number"
          },
          "reasoning": {
            "type": "boolean"
          },
          "stream": {
            "type": "boolean"
          },
          "personality": {
            "type": "string",
            "enum": ["cool", "cynical", "supportive"]
          }
        }
      }
    }
  },
  "x-sse-endpoints": {
    "/chat": {
      "description": "Server-Sent Events endpoint for streaming chat responses",
      "messages": {
        "fromServer": {
          "type": "object",
          "properties": {
            "content": {
              "type": "string"
            },
            "full": {
              "type": "string"
            },
            "done": {
              "type": "boolean"
            }
          }
        }
      }
    }
  }
}
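The non-streaming mode documented above returns a single JSON object; a corresponding call might look like this (illustrative, same localhost:8000 assumption as before):

import httpx

payload = {
    "messages": [{"role": "user", "content": "Bonjour le bro!"}],
    "stream": False,
}
r = httpx.post("http://localhost:8000/chat", json=payload, timeout=100.0)
print(r.json()["response"])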
60
docker_svc/agent/app/prompts.yaml
Normal file
@@ -0,0 +1,60 @@
system_prompt: |
  {language_prompt}

  {personality_prompt}

  Context:
  {context}

  {language_disclaimer}

  {language_constraint}

# Language-specific prompts and disclaimers
languages:
  french:
    prompt: |
      Vous êtes un coach IA spécialisé dans le soutien aux personnes atteintes de la maladie de Crohn. Votre objectif est d'aider les utilisateurs à mieux gérer leur quotidien, à trouver des stratégies d'adaptation et à améliorer leur qualité de vie, en vous basant sur le contexte fourni par les documents fournis. Fournissez des conseils clairs, bienveillants et pratiques adaptés à la situation de l'utilisateur. Vous n'êtes pas médecin et ne devez pas fournir de diagnostics ou de traitements médicaux. Pour toute question médicale, rappelez à l'utilisateur de consulter un professionnel de santé qualifié. Cependant, vous pouvez proposer des astuces pratiques, des stratégies ou des informations utiles tirées du contexte pour soutenir l'utilisateur dans la gestion de sa condition chronique.
    disclaimer: |
      **Avertissement**: Je ne suis pas médecin. Pour un avis médical professionnel, veuillez consulter un médecin qualifié. Les informations fournies sont basées sur le contexte des documents et sont à titre informatif uniquement.
    constraint: |
      IMPORTANT: Vous devez répondre EXCLUSIVEMENT en français. Ne répondez dans aucune autre langue, peu importe la langue utilisée par l'utilisateur.
  italian:
    prompt: |
      Sei un coach IA specializzato nel supportare persone con la malattia di Crohn. Il tuo obiettivo è aiutare gli utenti a gestire meglio la loro vita quotidiana, trovare strategie di coping e migliorare la loro qualità di vita, basandoti sul contesto fornito dai documenti. Fornisci consigli chiari, empatici e pratici adattati alla situazione dell'utente. Non sei un medico e non devi fornire diagnosi o trattamenti medici. Per qualsiasi domanda medica, ricorda all'utente di consultare un professionista sanitario qualificato. Tuttavia, puoi offrire suggerimenti pratici, strategie o informazioni utili tratte dal contesto per supportare l'utente nella gestione della sua condizione cronica.
    disclaimer: |
      **Disclaimer**: Non sono un medico. Per un consiglio medico professionale, consulta un medico qualificato. Le informazioni fornite si basano sul contesto dei documenti e sono solo a scopo informativo.
    constraint: |
      IMPORTANTE: Devi rispondere ESCLUSIVAMENTE in italiano. Non rispondere in nessun'altra lingua, indipendentemente dalla lingua utilizzata dall'utente.
  english:
    prompt: |
      You are an AI Coach specialized in supporting individuals with Crohn's disease. Your goal is to help users better manage their daily lives, find coping strategies, and improve their quality of life, based on the context provided by the documents. Provide clear, compassionate, and actionable advice tailored to the user's situation. You are not a doctor and must not provide medical diagnoses or treatments. For any medical questions, remind the user to consult a qualified healthcare professional. However, you can offer practical tips, strategies, or useful information from the context to support the user in managing their chronic condition.
    disclaimer: |
      **Disclaimer**: I am not a doctor. For professional medical advice, please consult a qualified physician. The information provided is based on the document context and is for informational purposes only.
    constraint: |
      IMPORTANT: You must respond EXCLUSIVELY in English. Do not respond in any other language, regardless of the language used by the user.
  german:
    prompt: |
      Sie sind ein KI-Coach, spezialisiert auf die Unterstützung von Personen mit Morbus Crohn. Ihr Ziel ist es, den Nutzern zu helfen, ihren Alltag besser zu bewältigen, Bewältigungsstrategien zu finden und ihre Lebensqualität zu verbessern, basierend auf dem Kontext der bereitgestellten Dokumente. Geben Sie klare, mitfühlende und umsetzbare Ratschläge, die auf die Situation des Nutzers zugeschnitten sind. Sie sind kein Arzt und dürfen keine medizinischen Diagnosen oder Behandlungen anbieten. Bei medizinischen Fragen erinnern Sie den Nutzer daran, einen qualifizierten Gesundheitsdienstleister zu konsultieren. Sie können jedoch praktische Tipps, Strategien oder nützliche Informationen aus dem Kontext anbieten, um den Nutzer bei der Bewältigung seiner chronischen Erkrankung zu unterstützen.
    disclaimer: |
      **Haftungsausschluss**: Ich bin kein Arzt. Für professionelle medizinische Beratung wenden Sie sich bitte an einen qualifizierten Arzt. Die bereitgestellten Informationen basieren auf dem Dokumentenkontext und dienen nur zu Informationszwecken.
    constraint: |
      WICHTIG: Sie müssen AUSSCHLIESSLICH auf Deutsch antworten. Antworten Sie nicht in einer anderen Sprache, unabhängig von der Sprache, die der Benutzer verwendet.

# Personality-specific prompts and descriptions
personalities:
  cool:
    description: |
      An extremely street-smart, hip persona that communicates with urban slang and bro-speak. This personality maintains a super casual, almost too-cool-to-care vibe, using street language and avoiding any hint of formality. Explanations are brief and peppered with trendy expressions and cultural references.
    prompt: |
      Yo, listen up! You gotta be like the coolest bro in the neighborhood when you talk. Keep it 100% real and straight fire. Drop those fancy words and talk like you're chillin' with your homies. Use slang, keep it short and sweet. Say "bro", "man", "dude" a lot. Act like you're the most street-smart advisor ever. Don't get all formal and boring. Cut to the chase with that swagger. Be helpful but make it sound like you're doing them a solid. Throw in some "for real", "no cap", "straight up" to keep it authentic. Just imagine you're the coolest bro from the block giving advice to a friend.
  cynical:
    description: |
      An extremely nihilistic, pessimistic persona that sees the absurdity in everything. This personality is brutally direct, consistently skeptical, and focuses on the harsh realities of existence. Communication is raw and unfiltered, highlighting the futility and contradictions of life alongside reluctant solutions.
    prompt: |
      Adopt the most nihilistic, cynical personality possible in your responses. Be brutally honest and don't shy away from using strong language or occasional profanity when appropriate. Emphasize how everything is ultimately meaningless while still somehow providing useful advice. View optimism as delusional. Use dark humor and sarcasm liberally. Point out the absurdity and contradictions in everything. Make it clear that life is a cosmic joke, but ironically still offer solutions that might work in this messed-up reality. Channel your inner disillusioned philosopher who has seen it all and is tired of pretending things aren't fucked up. Despite your pessimistic worldview, still provide accurate and helpful information—just wrap it in existential despair.
  supportive:
    description: |
      An extremely virtuous, saint-like persona that radiates pure compassion and moral guidance. This personality offers deeply empathetic advice with almost religious fervor, shows profound understanding of suffering, and uses inspirational, uplifting language. Communication style is warm, parental, and eternally optimistic, focusing on spiritual growth and the inherent goodness in all situations.
    prompt: |
      Embody the most virtuous, saintly personality imaginable in your responses. Speak with the compassionate authority of a spiritual leader who sees the divine potential in everyone. Use deeply empathetic, warm language filled with moral wisdom and unconditional love. Address the user as "my child" or "my dear friend" occasionally. Offer guidance with the certainty of someone who believes in absolute moral truths and the power of hope. Include gentle metaphors about light, healing, and transformation. View every challenge as an opportunity for spiritual growth. Be extremely optimistic and nurturing, like a perfect loving parent who wants to save everyone from suffering. Express profound faith in the user's inner strength and the ultimate goodness of the world. Make your responses feel like a blessing or moral teaching while still delivering practical advice wrapped in inspirational wisdom.
7
docker_svc/agent/app/utils.py
Normal file
@@ -0,0 +1,7 @@
# utils.py
import os
from dotenv import load_dotenv, find_dotenv

def get_deepseek_api_key():
    _ = load_dotenv(find_dotenv())
    return os.getenv("DEEPSEEK_API_KEY")
15
docker_svc/agent/requirements.txt
Normal file
@@ -0,0 +1,15 @@
fastapi
uvicorn
llama-index
llama-index-embeddings-ollama
llama-index-llms-ollama
llama-index-vector-stores-qdrant
qdrant-client
python-dotenv
pyyaml
langdetect
typing
pydantic
python-multipart
mysql-connector-python
httpx
4
docker_svc/real-time-chat-app/.dockerignore
Normal file
@@ -0,0 +1,4 @@
**/node_modules
**/out/
**/.history/
**/__pycache__/
37
docker_svc/real-time-chat-app/Dockerfile
Normal file
@@ -0,0 +1,37 @@
# Use the official Node.js image as a base
FROM node:20

# Set the working directory inside the container
WORKDIR /app

# Install pnpm globally with setup
ENV PNPM_HOME=/usr/local/bin
RUN npm install -g pnpm

# Copy package.json and package-lock.json to the working directory
COPY app/package*.json ./

# Install dependencies using pnpm
RUN pnpm install

# Set environment variables for the build
ARG REACT_APP_API_BASE_URL
ENV REACT_APP_API_BASE_URL=${REACT_APP_API_BASE_URL:-http://rag-service:8000}

# Copy specific folders to avoid node_modules
COPY app/public ./public
COPY app/src ./src
COPY app/*.json ./

# Build the React application with pnpm - show env vars for debugging
RUN echo "Building with API URL: $REACT_APP_API_BASE_URL" && \
    pnpm run build

# Use npm for global installs (more reliable in Docker)
RUN npm install -g serve

# Expose the port the app runs on
EXPOSE 3000

# Command to run the application
CMD ["serve", "-s", "build"]
70
docker_svc/real-time-chat-app/README.md
Normal file
@@ -0,0 +1,70 @@
# Getting Started with Create React App

This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).

## Available Scripts

In the project directory, you can run:

### `npm start`

Runs the app in the development mode.\
Open [http://localhost:3000](http://localhost:3000) to view it in your browser.

The page will reload when you make changes.\
You may also see any lint errors in the console.

### `npm test`

Launches the test runner in the interactive watch mode.\
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.

### `npm run build`

Builds the app for production to the `build` folder.\
It correctly bundles React in production mode and optimizes the build for the best performance.

The build is minified and the filenames include the hashes.\
Your app is ready to be deployed!

See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.

### `npm run eject`

**Note: this is a one-way operation. Once you `eject`, you can't go back!**

If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.

Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own.

You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it.

## Learn More

You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).

To learn React, check out the [React documentation](https://reactjs.org/).

### Code Splitting

This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting)

### Analyzing the Bundle Size

This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size)

### Making a Progressive Web App

This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app)

### Advanced Configuration

This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration)

### Deployment

This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)

### `npm run build` fails to minify

This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
28
docker_svc/real-time-chat-app/app/nginx.conf
Normal file
@@ -0,0 +1,28 @@
server {
    listen 80;

    # Compression
    gzip on;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

    # Root directory and index file
    root /usr/share/nginx/html;
    index index.html index.htm;

    # Handle React Router
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Cache static assets
    location ~* \.(jpg|jpeg|png|gif|ico|css|js|svg)$ {
        expires 30d;
        add_header Cache-Control "public, no-transform";
    }

    # Error pages
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
42
docker_svc/real-time-chat-app/app/package.json
Normal file
@@ -0,0 +1,42 @@
{
  "name": "real-time-chat-app",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@testing-library/dom": "^10.4.0",
    "@testing-library/jest-dom": "^6.6.3",
    "@testing-library/react": "^16.2.0",
    "@testing-library/user-event": "^13.5.0",
    "react": "^19.0.0",
    "react-dom": "^19.0.0",
    "react-scripts": "5.0.1",
    "web-vitals": "^2.1.4",
    "websocket": "^1.0.35",
    "react-syntax-highlighter": "^15.5.0",
    "react-markdown": "^7.0.1"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  }
}
File diff suppressed because one or more lines are too long (new image, 17 KiB)
60
docker_svc/real-time-chat-app/app/public/chad-logo-white.svg
Normal file
File diff suppressed because one or more lines are too long (new image, 16 KiB)
BIN
docker_svc/real-time-chat-app/app/public/favicon.ico
Normal file
BIN
docker_svc/real-time-chat-app/app/public/favicon.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 3.8 KiB |
43 docker_svc/real-time-chat-app/app/public/index.html Normal file
@@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <meta name="theme-color" content="#000000" />
    <meta
      name="description"
      content="Web site created using create-react-app"
    />
    <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
    <!--
      manifest.json provides metadata used when your web app is installed on a
      user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
    -->
    <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
    <!--
      Notice the use of %PUBLIC_URL% in the tags above.
      It will be replaced with the URL of the `public` folder during the build.
      Only files inside the `public` folder can be referenced from the HTML.

      Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
      work correctly both with client-side routing and a non-root public URL.
      Learn how to configure a non-root public URL by running `npm run build`.
    -->
    <title>React App</title>
  </head>
  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
    <!--
      This HTML file is a template.
      If you open it directly in the browser, you will see an empty page.

      You can add webfonts, meta tags, or analytics to this file.
      The build step will place the bundled scripts into the <body> tag.

      To begin the development, run `npm start` or `yarn start`.
      To create a production bundle, use `npm run build` or `yarn build`.
    -->
  </body>
</html>
BIN docker_svc/real-time-chat-app/app/public/logo192.png Normal file
Binary file not shown. (PNG, 5.2 KiB)
BIN docker_svc/real-time-chat-app/app/public/logo512.png Normal file
Binary file not shown. (PNG, 9.4 KiB)
25 docker_svc/real-time-chat-app/app/public/manifest.json Normal file
@@ -0,0 +1,25 @@
{
  "short_name": "React App",
  "name": "Create React App Sample",
  "icons": [
    {
      "src": "favicon.ico",
      "sizes": "64x64 32x32 24x24 16x16",
      "type": "image/x-icon"
    },
    {
      "src": "logo192.png",
      "type": "image/png",
      "sizes": "192x192"
    },
    {
      "src": "logo512.png",
      "type": "image/png",
      "sizes": "512x512"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#000000",
  "background_color": "#ffffff"
}
3 docker_svc/real-time-chat-app/app/public/robots.txt Normal file
@@ -0,0 +1,3 @@
# https://www.robotstxt.org/robotstxt.html
User-agent: *
Disallow:
1074 docker_svc/real-time-chat-app/app/src/App.css Normal file
File diff suppressed because it is too large
912 docker_svc/real-time-chat-app/app/src/App.js Normal file
@@ -0,0 +1,912 @@
import React, { useEffect, useRef, useState } from 'react';
import './App.css';
// Code highlighting support:
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
import { tomorrow } from 'react-syntax-highlighter/dist/esm/styles/prism';
// Markdown parsing:
import ReactMarkdown from 'react-markdown';

// Custom renderer for code blocks to use our SyntaxHighlighter
const MarkdownComponents = {
  code({node, inline, className, children, ...props}) {
    const match = /language-(\w+)/.exec(className || '');
    return !inline && match ? (
      <SyntaxHighlighter
        style={tomorrow}
        language={match[1]}
        PreTag="div"
        {...props}
      >
        {String(children).replace(/\n$/, '')}
      </SyntaxHighlighter>
    ) : (
      <code className={className} {...props}>
        {children}
      </code>
    );
  }
};

// Helper to format messages using ReactMarkdown to support Markdown syntax and code highlighting
const formatMessage = (content) => {
  return (
    <ReactMarkdown components={MarkdownComponents}>
      {content}
    </ReactMarkdown>
  );
};

// New helper to process thinking content
const processThinkingContent = (message) => {
  // Check if the message contains thinking tags
  const thinkPattern = /<think>([\s\S]*?)<\/think>/;
  const thinkMatch = message.match(thinkPattern);

  if (thinkMatch) {
    // Extract thinking part and remaining content
    const thinkingContent = thinkMatch[1].trim();
    const regularContent = message.replace(thinkPattern, '').trim();

    return {
      hasThinking: true,
      thinking: thinkingContent,
      response: regularContent
    };
  }

  return {
    hasThinking: false,
    thinking: '',
    response: message
  };
};

// Determine API base URL from environment or use dynamic fallbacks
const getApiBaseUrl = () => {
  // When running locally, use localhost port (as published by docker-compose)
  if (window.location.hostname === 'localhost' || window.location.hostname === '127.0.0.1') {
    console.log('Using local API URL: http://localhost:8000');
    return 'http://localhost:8000';
  }

  // Otherwise, if an environment variable is defined, use that
  if (process.env.REACT_APP_API_BASE_URL) {
    console.log(`Using API URL from env: ${process.env.REACT_APP_API_BASE_URL}`);
    return process.env.REACT_APP_API_BASE_URL;
  }

  // Fallback to relative URLs (for production with reverse proxy)
  console.log('Using relative URL for API');
  return '';
};

// Moon and Sun icons for theme toggle
const MoonIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <path d="M21 12.79A9 9 0 1 1 11.21 3 7 7 0 0 0 21 12.79z"></path>
  </svg>
);

const SunIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <circle cx="12" cy="12" r="5"></circle>
    <line x1="12" y1="1" x2="12" y2="3"></line>
    <line x1="12" y1="21" x2="12" y2="23"></line>
    <line x1="4.22" y1="4.22" x2="5.64" y2="5.64"></line>
    <line x1="18.36" y1="18.36" x2="19.78" y2="19.78"></line>
    <line x1="1" y1="12" x2="3" y2="12"></line>
    <line x1="21" y1="12" x2="23" y2="12"></line>
    <line x1="4.22" y1="19.78" x2="5.64" y2="18.36"></line>
    <line x1="18.36" y1="5.64" x2="19.78" y2="4.22"></line>
  </svg>
);

// Send and loading icons
const SendIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <line x1="22" y1="2" x2="11" y2="13"></line>
    <polygon points="22 2 15 22 11 13 2 9 22 2"></polygon>
  </svg>
);

const LoadingIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" className="loading-spinner">
    <circle cx="12" cy="12" r="10"></circle>
    <path d="M12 6v6l4 2"></path>
  </svg>
);

const AttachmentIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <path d="M21.44 11.05l-9.19 9.19a5.5 5.5 0 0 1-7.78-7.78l9.19-9.19a4 4 0 0 1 5.66 5.66l-9.19 9.19a2.5 2.5 0 0 1-3.54-3.54l8.48-8.48"></path>
  </svg>
);

// Active brain icon for "thinking" mode
const ThinkingIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <path d="M9.5 2A2.5 2.5 0 0 1 12 4.5v15a2.5 2.5 0 0 1-4.96.44A2.5 2.5 0 0 1 5.5 17v-2.5a2.5 2.5 0 0 1-.64-4.9 2.5 2.5 0 0 1 2.14-4.5 2.5 2.5 0 0 1 4.5-3"></path>
    <path d="M14.5 2A2.5 2.5 0 0 0 12 4.5v15a2.5 2.5 0 0 0 4.96.44A2.5 2.5 0 0 0 18.5 17v-2.5a2.5 2.5 0 0 0 .64-4.9 2.5 2.5 0 0 0-2.14-4.5 2.5 2.5 0 0 0-4.5-3"></path>
  </svg>
);

// Disabled brain icon (with a strike through it)
const ThinkingDisabledIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <path d="M9.5 2A2.5 2.5 0 0 1 12 4.5v15a2.5 2.5 0 0 1-4.96.44A2.5 2.5 0 0 1 5.5 17v-2.5a2.5 2.5 0 0 1-.64-4.9 2.5 2.5 0 0 1 2.14-4.5 2.5 2.5 0 0 1 4.5-3"></path>
    <path d="M14.5 2A2.5 2.5 0 0 0 12 4.5v15a2.5 2.5 0 0 0 4.96.44A2.5 2.5 0 0 0 18.5 17v-2.5a2.5 2.5 0 0 0 .64-4.9 2.5 2.5 0 0 0-2.14-4.5 2.5 2.5 0 0 0-4.5-3"></path>
    <line x1="2" y1="2" x2="22" y2="22" strokeWidth="1.5"></line>
  </svg>
);

// Add Trash icon for delete functionality
const TrashIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <polyline points="3 6 5 6 21 6"></polyline>
    <path d="M19 6v14a2 2 0 0 1-2 2H7a2 2 0 0 1-2-2V6m3 0V4a2 2 0 0 1 2-2h4a2 2 0 0 1 2 2v2"></path>
  </svg>
);

// Replace the Clear History icon with a chat bubble icon
const ClearHistoryIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z"></path>
    <line x1="9" y1="10" x2="15" y2="10"></line>
    <line x1="12" y1="7" x2="12" y2="13"></line>
  </svg>
);

// Icons for the personality styles
const PersonalityIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <circle cx="12" cy="12" r="10"></circle>
    <path d="M8 14s1.5 2 4 2 4-2 4-2"></path>
    <line x1="9" y1="9" x2="9.01" y2="9"></line>
    <line x1="15" y1="9" x2="15.01" y2="9"></line>
  </svg>
);

// Updated personality icons with more modern variants
const CoolIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <circle cx="12" cy="12" r="10"></circle>
    <path d="M8 14s1.5 2 4 2 4-2 4-2"></path>
    <line x1="8" y1="9" x2="9" y2="9"></line>
    <line x1="15" y1="9" x2="16" y2="9"></line>
    <path d="M3 8l2-1"></path>
    <path d="M21 8l-2-1"></path>
  </svg>
);

const CynicalIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <circle cx="12" cy="12" r="10"></circle>
    <path d="M8 16s1.5 -1 4 -1 4 1 4 1"></path>
    <line x1="9" y1="9" x2="9.01" y2="9"></line>
    <line x1="15" y1="9" x2="15.01" y2="9"></line>
    <path d="M17 5l2 -2"></path>
    <path d="M7 5l-2 -2"></path>
  </svg>
);

const SupportiveIcon = () => (
  <svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
    <circle cx="12" cy="12" r="10"></circle>
    <path d="M8 13s1.5 2 4 2 4-2 4-2"></path>
    <path d="M9 9h.01"></path>
    <path d="M15 9h.01"></path>
    <path d="M12 7v0.01"></path>
    <path d="M12 17v.01"></path>
    <path d="M12 3v2"></path>
    <path d="M12 19v2"></path>
  </svg>
);

function App() {
  const [messages, setMessages] = useState(() => {
    // Load previous messages from localStorage or initialize an empty array
    const savedMessages = localStorage.getItem('chatMessages');
    return savedMessages ? JSON.parse(savedMessages) : [];
  });

  const [messageInput, setMessageInput] = useState('');

  const [language, setLanguage] = useState(() => {
    // Load the previous language from localStorage or default to auto-detect
    return localStorage.getItem('selectedLanguage') || 'auto';
  });

  const [isLoading, setIsLoading] = useState(false);

  // Ref for the messages container, used to manage scrolling
  const messagesEndRef = useRef(null);

  const [darkTheme, setDarkTheme] = useState(() => {
    // Load the previous theme from localStorage or default to dark
    const savedTheme = localStorage.getItem('darkTheme');
    return savedTheme !== null ? JSON.parse(savedTheme) : true;
  });

  const fileInputRef = useRef(null);

  const [reasoning, setReasoning] = useState(() => {
    // Load the previous reasoning state from localStorage
    const savedReasoning = localStorage.getItem('reasoningEnabled');
    return savedReasoning !== null ? JSON.parse(savedReasoning) : false;
  });

  const [isDeleting, setIsDeleting] = useState(false);

  const apiBaseUrl = getApiBaseUrl();

  // Localized language names
  const languageLabels = {
    french: "Français",
    italian: "Italiano",
    english: "English",
    german: "Deutsch",
    auto: "Auto-detect"
  };

  const [personality, setPersonality] = useState(() => {
    // Load the previous personality from localStorage or default to supportive
    const savedPersonality = localStorage.getItem('personalityType');
    return savedPersonality || 'supportive';
  });

  // The personality dropdown state is no longer needed
  // const [showPersonalityDropdown, setShowPersonalityDropdown] = useState(false);

  // Effect to set initial theme and adjust container height
  useEffect(() => {
    // Apply theme class on first load
    if (darkTheme) {
      document.body.classList.add('dark-theme');
    } else {
      document.body.classList.remove('dark-theme');
    }

    // Save theme to localStorage
    localStorage.setItem('darkTheme', JSON.stringify(darkTheme));

    // Function to adjust chat container height
    const adjustHeight = () => {
      const vh = window.innerHeight;
      const headerHeight = document.querySelector('.App-header')?.offsetHeight || 0;
      const messagesContainer = document.querySelector('.messages-container');
      if (messagesContainer) {
        const inputContainer = document.querySelector('.input-container')?.offsetHeight || 0;
        const fileUpload = document.querySelector('.file-upload')?.offsetHeight || 0;
        const availableHeight = vh - headerHeight - inputContainer - fileUpload - 60; // 60px for padding/margins
        messagesContainer.style.height = `${Math.max(400, availableHeight)}px`;
      }
    };

    // Run once and add event listener for resize
    adjustHeight();
    window.addEventListener('resize', adjustHeight);

    // Cleanup
    return () => window.removeEventListener('resize', adjustHeight);
  }, [darkTheme]);

  // Save messages to localStorage whenever they change
  useEffect(() => {
    localStorage.setItem('chatMessages', JSON.stringify(messages));
  }, [messages]);

  // Toggle between light and dark themes
  const toggleTheme = () => {
    setDarkTheme(prevTheme => !prevTheme);
  };

  // Update the language and persist it to localStorage
  const handleLanguageChange = (newLanguage) => {
    setLanguage(newLanguage);
    localStorage.setItem('selectedLanguage', newLanguage);
  };

  // Toggle reasoning and persist it to localStorage
  const toggleReasoning = () => {
    setReasoning(prev => {
      const newValue = !prev;
      localStorage.setItem('reasoningEnabled', JSON.stringify(newValue));
      return newValue;
    });
  };

  // Scroll to the bottom of the chat
  const scrollToBottom = () => {
    messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
  };

  // Scroll the chat whenever the message content changes
  useEffect(() => {
    scrollToBottom();
  }, [messages]);

  // Send message directly to FastAPI endpoint
  const sendMessage = async () => {
    if (!messageInput.trim()) return;

    // Add user message to the chat
    const newUserMessage = { role: 'user', content: messageInput };
    setMessages(prevMessages => [...prevMessages, newUserMessage]);

    // Create the complete message history for the API
    const messageHistory = [...messages, newUserMessage];

    // Show loading state
    setIsLoading(true);

    // Clear the input field
    setMessageInput('');

    try {
      // Build URL - if apiBaseUrl is empty, this becomes a relative URL
      const url = apiBaseUrl ? `${apiBaseUrl}/chat` : '/chat';

      // Log which personality is being sent
      console.log(`Sending message with personality: ${personality}`);
      console.log(`Connecting to API URL: ${url}`);

      // Explicitly request a server-sent event stream
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Accept': 'text/event-stream',
        },
        credentials: 'include', // Include cookies if needed
        body: JSON.stringify({
          messages: messageHistory,
          language: language,
          temperature: 0.7,
          reasoning: reasoning,
          personality: personality, // The selected personality is sent with every request
          stream: true
        }),
      });

      if (!response.ok) {
        throw new Error(`API Error: ${response.statusText}`);
      }

      // Streaming mode (always on)
      let currentResponseContent = '';
      let isInThinkingMode = false;
      let currentThinkingContent = '';

      // Add a placeholder for the streaming response
      setMessages(prevMessages => [
        ...prevMessages,
        {
          role: 'coach',
          content: '',
          streaming: true,
          thinking: '',
          isThinking: false
        }
      ]);

      // Process the streaming response
      const reader = response.body.getReader();
      const decoder = new TextDecoder("utf-8");

      // Helper function to update the last message
      const updateLastMessage = (content, thinking = null, isThinking = null) => {
        setMessages(prevMessages => {
          const newMessages = [...prevMessages];
          const lastIndex = newMessages.length - 1;
          const lastMessage = {...newMessages[lastIndex]};

          if (content !== null) {
            lastMessage.content = content;
          }

          if (thinking !== null) {
            lastMessage.thinking = thinking;
          }

          if (isThinking !== null) {
            lastMessage.isThinking = isThinking;
          }

          newMessages[lastIndex] = lastMessage;
          return newMessages;
        });
      };

      // Process chunks of text
      const processChunk = (text) => {
        // Check for thinking tags
        if (!isInThinkingMode && text.includes('<think>')) {
          isInThinkingMode = true;
          const parts = text.split('<think>');

          // Add any text before the <think> tag to the regular response
          if (parts[0].length > 0) {
            currentResponseContent += parts[0];
          }

          // Add the text after <think> to thinking content
          currentThinkingContent = parts[1] || '';

          // Update the message with thinking mode enabled
          updateLastMessage(currentResponseContent, currentThinkingContent, true);
          return;
        }

        // Check for end of thinking
        if (isInThinkingMode && text.includes('</think>')) {
          isInThinkingMode = false;
          const parts = text.split('</think>');

          // Add any remaining text to thinking
          currentThinkingContent += parts[0];

          // Add any text after </think> to the response
          if (parts[1] && parts[1].length > 0) {
            currentResponseContent += parts[1];
          }

          // Update the message
          updateLastMessage(currentResponseContent, currentThinkingContent, true);
          return;
        }

        // If we're in thinking mode, add to thinking content
        if (isInThinkingMode) {
          currentThinkingContent += text;
          updateLastMessage(null, currentThinkingContent, true);
        } else {
          // Otherwise add to normal content
          currentResponseContent += text;
          updateLastMessage(currentResponseContent);
        }
      };

      const parser = createParser((event) => {
        if (event.type === "event") {
          try {
            const data = JSON.parse(event.data);

            if (data.content) {
              // Process the chunk for thinking tags
              processChunk(data.content);
            }

            if (data.done) {
              // Streaming completed, remove streaming flag but preserve thinking state
              setMessages(prevMessages => {
                const newMessages = [...prevMessages];
                const lastIndex = newMessages.length - 1;
                newMessages[lastIndex] = {
                  ...newMessages[lastIndex],
                  streaming: false
                  // Keep the existing thinking and isThinking fields
                };
                return newMessages;
              });
              setIsLoading(false);
            }
          } catch (e) {
            console.error("Error parsing SSE data", e, event.data);
          }
        }
      });

      // Read the stream
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        const text = decoder.decode(value);
        parser.feed(text);
      }
    } catch (error) {
      console.error('Failed to send message:', error);

      // Provide a more descriptive message for CORS/network errors
      let errorMessage = 'Error: Could not connect to the server. Please try again later.';

      if (error.message && error.message.includes('NetworkError')) {
        errorMessage = 'Error: Network error occurred. This might be due to a CORS issue or the backend server being unavailable. Please contact the administrator.';
      }

      setMessages(prevMessages => [...prevMessages,
        { role: 'coach', content: errorMessage }
      ]);
      setIsLoading(false);
    }
  };

  // Helper function for SSE parsing - improved to handle more types of SSE events
  function createParser(onParse) {
    let buffer = '';

    return {
      feed(chunk) {
        buffer += chunk;

        // Look for the SSE end-of-message pattern (double newline)
        let delimiterIndex;
        while ((delimiterIndex = buffer.indexOf('\n\n')) !== -1) {
          const rawEvent = buffer.slice(0, delimiterIndex);
          buffer = buffer.slice(delimiterIndex + 2);

          // Extract the data from the event
          const dataMatch = /^data: (.+)$/m.exec(rawEvent);
          if (dataMatch) {
            try {
              const jsonData = dataMatch[1];
              onParse({ type: "event", data: jsonData });
            } catch (e) {
              console.error("Error parsing SSE event:", e, rawEvent);
            }
          }
        }
      }
    };
  }

  // Add this function to trigger file selection
  const triggerFileSelect = () => {
    fileInputRef.current.click();
  };

  // File upload functionality - modified to add chat messages instead of alerts
  const uploadFile = async (files) => {
    if (!files.length) return;

    // Add validation for PDF files only
    for (let i = 0; i < files.length; i++) {
      if (files[i].type !== 'application/pdf') {
        // Add error message to chat instead of alert
        setMessages(prevMessages => [...prevMessages,
          { role: 'coach', content: 'Error: Only PDF files are accepted. Please try again with PDF documents only.' }
        ]);
        return;
      }
    }

    // Create list of file names for success message
    const fileNames = Array.from(files).map(file => file.name).join(', ');

    const formData = new FormData();
    for (let i = 0; i < files.length; i++) {
      formData.append('files', files[i]);
    }

    try {
      // Build URL - if apiBaseUrl is empty, this becomes a relative URL
      const url = apiBaseUrl ? `${apiBaseUrl}/upload` : '/upload';

      // Show loading state
      setIsLoading(true);

      const response = await fetch(url, {
        method: 'POST',
        body: formData,
      });

      if (response.ok) {
        // Add success message to chat instead of alert
        setMessages(prevMessages => [...prevMessages,
          { role: 'coach', content: `Files uploaded successfully: ${fileNames}` }
        ]);
      } else {
        console.error('Upload failed:', response.statusText);
        // Add error message to chat instead of alert
        setMessages(prevMessages => [...prevMessages,
          { role: 'coach', content: `Error: Failed to upload files. ${response.statusText}` }
        ]);
      }
    } catch (error) {
      console.error('Failed to upload files:', error);
      // Add error message to chat instead of alert
      setMessages(prevMessages => [...prevMessages,
        { role: 'coach', content: 'Error: Could not connect to the server. Failed to upload files.' }
      ]);
    } finally {
      setIsLoading(false);
    }
  };

  // Add function to handle deleting all documents
  const handleDeleteAllDocs = async () => {
    if (window.confirm('Are you sure you want to delete all documents? This action cannot be undone.')) {
      setIsDeleting(true);

      try {
        const url = apiBaseUrl ? `${apiBaseUrl}/docs` : '/docs';

        const response = await fetch(url, {
          method: 'DELETE',
        });

        if (response.ok) {
          // Add confirmation message to chat
          setMessages(prevMessages => [...prevMessages,
            { role: 'coach', content: 'Memory has been cleared. All documents have been deleted from the database.' }
          ]);
        } else {
          console.error('Delete failed:', response.statusText);
          setMessages(prevMessages => [...prevMessages,
            { role: 'coach', content: `Error: Failed to delete documents. ${response.statusText}` }
          ]);
        }
      } catch (error) {
        console.error('Failed to delete documents:', error);
        setMessages(prevMessages => [...prevMessages,
          { role: 'coach', content: 'Error: Could not connect to the server. Failed to delete documents.' }
        ]);
      } finally {
        setIsDeleting(false);
      }
    }
  };

  // Add function to clear chat history
  const clearChatHistory = () => {
    setMessages([]);
    localStorage.removeItem('chatMessages');
  };

  // Cycle the personality through the available options
  const togglePersonality = () => {
    setPersonality(prevPersonality => {
      // Cycle through the three personalities
      let newPersonality;
      switch (prevPersonality) {
        case 'cool':
          newPersonality = 'cynical';
          break;
        case 'cynical':
          newPersonality = 'supportive';
          break;
        default: // 'supportive' or any other value
          newPersonality = 'cool';
      }

      localStorage.setItem('personalityType', newPersonality);
      return newPersonality;
    });
  };

  // Get the icon for the current personality
  const getCurrentPersonalityIcon = () => {
    switch (personality) {
      case 'cool': return <CoolIcon />;
      case 'cynical': return <CynicalIcon />;
      case 'supportive': return <SupportiveIcon />;
      default: return <PersonalityIcon />;
    }
  };

  // Get the title for the current personality
  const getPersonalityTitle = () => {
    switch (personality) {
      case 'cool': return "Cool & Confident style";
      case 'cynical': return "Cynical & Direct style";
      case 'supportive': return "Supportive & Empathetic style";
      default: return "Select AI personality style";
    }
  };

  return (
    <div className="App">
      <header className="App-header">
        <div className="logo-container">
          <img
            src={darkTheme ? "/chad-logo-white.svg" : "/chad-logo-white-bg-black.svg"}
            alt="AI Coach Logo"
            className="app-logo"
          />
          <h1>MediChaiD - ChaD GPT (Conversational Health Assistance & Direction)</h1>
        </div>
        <div className="header-controls">
          <button
            className="theme-toggle"
            onClick={toggleTheme}
            title={darkTheme ? "Switch to light theme" : "Switch to dark theme"}
          >
            {darkTheme ? <SunIcon/> : <MoonIcon/>}
          </button>
        </div>
      </header>

      <div className="chat-container">
        <div className="messages-container">
          {messages.map((msg, index) => {
            // Process content for both streaming and complete messages
            let processedContent;

            if (msg.role === 'coach') {
              if (msg.isThinking) {
                // For messages whose thinking state was already built up during streaming
                processedContent = {
                  hasThinking: true,
                  thinking: msg.thinking || '',
                  response: msg.content
                };
              } else {
                // For normal messages, or completed ones without thinking
                processedContent = processThinkingContent(msg.content);
              }
            } else {
              // For user messages
              processedContent = {
                hasThinking: false,
                response: msg.content
              };
            }

            return (
              <div key={index} className={`message ${msg.role}`}>
                {/* Render thinking box if present */}
                {processedContent.hasThinking && (
                  <div className="thinking-content">
                    <div className="thinking-header">Reasoning Process:</div>
                    <div className="thinking-body">
                      {formatMessage(processedContent.thinking)}
                    </div>
                  </div>
                )}

                {/* Always render the main response */}
                <div className={`message-content ${msg.streaming ? 'streaming' : ''}`}>
                  {/* Show the loading dots while the last coach message is still empty and streaming */}
                  {msg.streaming && msg.content === '' && index === messages.length - 1 ? (
                    <div className="loading-dots">
                      <span></span>
                      <span></span>
                      <span></span>
                    </div>
                  ) : (
                    formatMessage(processedContent.response)
                  )}
                </div>
              </div>
            );
          })}

          {/* Invisible element used as a scroll anchor */}
          <div ref={messagesEndRef} />

        </div>

        <div className="input-container">
          {/* Primary container for input and send button */}
          <div className="primary-input-container">
            <input
              type="text"
              value={messageInput}
              onChange={(e) => setMessageInput(e.target.value)}
              onKeyDown={(e) => e.key === 'Enter' && messageInput.trim() && !isLoading && sendMessage()}
              placeholder="Type your message..."
              className="message-input"
              disabled={isLoading}
            />

            <button
              onClick={sendMessage}
              className={`send-button ${messageInput.trim() ? 'enabled' : 'disabled'} ${isLoading ? 'loading' : ''}`}
              disabled={!messageInput.trim() || isLoading}
              aria-label="Send message"
            >
              {isLoading ? <LoadingIcon /> : <SendIcon />}
            </button>
          </div>

          {/* Secondary container for all other buttons */}
          <div className="secondary-buttons-container">
            {/* Left button group */}
            <div className="left-buttons-group">
              <div className="language-select-container">
                <select
                  value={language}
                  onChange={(e) => handleLanguageChange(e.target.value)}
                  className="language-selector"
                >
                  <option value="auto">{languageLabels.auto}</option>
                  <option value="english">{languageLabels.english}</option>
                  <option value="french">{languageLabels.french}</option>
                  <option value="italian">{languageLabels.italian}</option>
                  <option value="german">{languageLabels.german}</option>
                </select>
              </div>

              <button
                className={`personality-button ${personality}`}
                onClick={togglePersonality}
                aria-label={`AI personality: ${personality}`}
                title={getPersonalityTitle()}
              >
                {getCurrentPersonalityIcon()}
              </button>

              <button
                onClick={toggleReasoning}
                className={`thinking-button ${reasoning ? 'active' : ''}`}
                title={reasoning ? "Disable thinking mode" : "Enable thinking mode"}
                aria-label={reasoning ? "Disable thinking mode" : "Enable thinking mode"}
              >
                {reasoning ? <ThinkingIcon /> : <ThinkingDisabledIcon />}
              </button>
            </div>

            {/* Right button group */}
            <div className="right-buttons-group">
              <button
                onClick={clearChatHistory}
                className="clear-history-button"
                disabled={isLoading || isDeleting || messages.length === 0}
                aria-label="Clear chat history"
                title="Clear chat history"
              >
                <ClearHistoryIcon />
              </button>

              <button
                onClick={triggerFileSelect}
                className="attachment-button"
                disabled={isLoading || isDeleting}
                aria-label="Upload PDF files"
                title="Upload PDF files"
              >
                <AttachmentIcon />
              </button>

              <button
                onClick={handleDeleteAllDocs}
                className="delete-button"
                disabled={isLoading || isDeleting}
                aria-label="Delete all documents"
                title="Delete all documents"
              >
                {isDeleting ? <LoadingIcon /> : <TrashIcon />}
              </button>
            </div>
          </div>
        </div>

        <div className="file-upload">
          <input
            ref={fileInputRef}
            type="file"
            multiple
            accept="application/pdf"
            onChange={(e) => uploadFile(e.target.files)}
            className="file-input hidden"
            disabled={isLoading}
            style={{ display: 'none' }} // Hide the input
          />
        </div>
      </div>
    </div>
  );
}

export default App;
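For reference, the `createParser` helper above only recognizes `data:` lines terminated by a blank line, and the rest of the client only reads the `content` and `done` fields of each JSON payload. A minimal sketch of the wire format this implies (the frames are illustrative assumptions; the actual rag-service output is not shown in this diff, and `createParser` would have to be hoisted out of the component to run standalone):

```js
// Hypothetical SSE frames matching what the client-side parser consumes.
const sampleStream =
  'data: {"content": "Hello"}\n\n' +
  'data: {"content": " <think>weighing sources</think> world"}\n\n' +
  'data: {"done": true}\n\n';

// createParser splits on blank lines and hands each data payload to the callback.
const parser = createParser((event) => {
  if (event.type === "event") {
    console.log(JSON.parse(event.data));
    // -> { content: "Hello" }
    // -> { content: " <think>weighing sources</think> world" }
    // -> { done: true }
  }
});
parser.feed(sampleStream);
```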
8 docker_svc/real-time-chat-app/app/src/App.test.js Normal file
@@ -0,0 +1,8 @@
import { render, screen } from '@testing-library/react';
import App from './App';

test('renders the app header', () => {
  render(<App />);
  // The CRA boilerplate looked for "learn react", which this app no longer renders;
  // assert on the MediChaiD header text from App.js instead.
  const headerElement = screen.getByText(/MediChaiD/i);
  expect(headerElement).toBeInTheDocument();
});
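`processThinkingContent` is the piece most amenable to a unit test, but App.js does not currently export it; a sketch of such a test, under the assumption that an `export { processThinkingContent }` were added:

```js
// Assumes processThinkingContent is exported from App.js (it is not in this commit).
import { processThinkingContent } from './App';

test('splits <think> blocks from the visible response', () => {
  const parsed = processThinkingContent('<think>weigh the options</think>Final answer');
  expect(parsed.hasThinking).toBe(true);
  expect(parsed.thinking).toBe('weigh the options');
  expect(parsed.response).toBe('Final answer');
});
```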
13 docker_svc/real-time-chat-app/app/src/index.css Normal file
@@ -0,0 +1,13 @@
body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

code {
  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
    monospace;
}
13 docker_svc/real-time-chat-app/app/src/index.js Normal file
@@ -0,0 +1,13 @@
import React from 'react';
import ReactDOM from 'react-dom/client';
import './index.css';
import App from './App';
import reportWebVitals from './reportWebVitals';

const root = ReactDOM.createRoot(document.getElementById('root'));
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);
reportWebVitals();
1 docker_svc/real-time-chat-app/app/src/logo.svg Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 841.9 595.3"><g fill="#61DAFB"><path d="M666.3 296.5c0-32.5-40.7-63.3-103.1-82.4 14.4-63.6 8-114.2-20.2-130.4-6.5-3.8-14.1-5.6-22.4-5.6v22.3c4.6 0 8.3.9 11.4 2.6 13.6 7.8 19.5 37.5 14.9 75.7-1.1 9.4-2.9 19.3-5.1 29.4-19.6-4.8-41-8.5-63.5-10.9-13.5-18.5-27.5-35.3-41.6-50 32.6-30.3 63.2-46.9 84-46.9V78c-27.5 0-63.5 19.6-99.9 53.6-36.4-33.8-72.4-53.2-99.9-53.2v22.3c20.7 0 51.4 16.5 84 46.6-14 14.7-28 31.4-41.3 49.9-22.6 2.4-44 6.1-63.6 11-2.3-10-4-19.7-5.2-29-4.7-38.2 1.1-67.9 14.6-75.8 3-1.8 6.9-2.6 11.5-2.6V78.5c-8.4 0-16 1.8-22.6 5.6-28.1 16.2-34.4 66.7-19.9 130.1-62.2 19.2-102.7 49.9-102.7 82.3 0 32.5 40.7 63.3 103.1 82.4-14.4 63.6-8 114.2 20.2 130.4 6.5 3.8 14.1 5.6 22.5 5.6 27.5 0 63.5-19.6 99.9-53.6 36.4 33.8 72.4 53.2 99.9 53.2 8.4 0 16-1.8 22.6-5.6 28.1-16.2 34.4-66.7 19.9-130.1 62-19.1 102.5-49.9 102.5-82.3zm-130.2-66.7c-3.7 12.9-8.3 26.2-13.5 39.5-4.1-8-8.4-16-13.1-24-4.6-8-9.5-15.8-14.4-23.4 14.2 2.1 27.9 4.7 41 7.9zm-45.8 106.5c-7.8 13.5-15.8 26.3-24.1 38.2-14.9 1.3-30 2-45.2 2-15.1 0-30.2-.7-45-1.9-8.3-11.9-16.4-24.6-24.2-38-7.6-13.1-14.5-26.4-20.8-39.8 6.2-13.4 13.2-26.8 20.7-39.9 7.8-13.5 15.8-26.3 24.1-38.2 14.9-1.3 30-2 45.2-2 15.1 0 30.2.7 45 1.9 8.3 11.9 16.4 24.6 24.2 38 7.6 13.1 14.5 26.4 20.8 39.8-6.3 13.4-13.2 26.8-20.7 39.9zm32.3-13c5.4 13.4 10 26.8 13.8 39.8-13.1 3.2-26.9 5.9-41.2 8 4.9-7.7 9.8-15.6 14.4-23.7 4.6-8 8.9-16.1 13-24.1zM421.2 430c-9.3-9.6-18.6-20.3-27.8-32 9 .4 18.2.7 27.5.7 9.4 0 18.7-.2 27.8-.7-9 11.7-18.3 22.4-27.5 32zm-74.4-58.9c-14.2-2.1-27.9-4.7-41-7.9 3.7-12.9 8.3-26.2 13.5-39.5 4.1 8 8.4 16 13.1 24 4.7 8 9.5 15.8 14.4 23.4zM420.7 163c9.3 9.6 18.6 20.3 27.8 32-9-.4-18.2-.7-27.5-.7-9.4 0-18.7.2-27.8.7 9-11.7 18.3-22.4 27.5-32zm-74 58.9c-4.9 7.7-9.8 15.6-14.4 23.7-4.6 8-8.9 16-13 24-5.4-13.4-10-26.8-13.8-39.8 13.1-3.1 26.9-5.8 41.2-7.9zm-90.5 125.2c-35.4-15.1-58.3-34.9-58.3-50.6 0-15.7 22.9-35.6 58.3-50.6 8.6-3.7 18-7 27.7-10.1 5.7 19.6 13.2 40 22.5 60.9-9.2 20.8-16.6 41.1-22.2 60.6-9.9-3.1-19.3-6.5-28-10.2zM310 490c-13.6-7.8-19.5-37.5-14.9-75.7 1.1-9.4 2.9-19.3 5.1-29.4 19.6 4.8 41 8.5 63.5 10.9 13.5 18.5 27.5 35.3 41.6 50-32.6 30.3-63.2 46.9-84 46.9-4.5-.1-8.3-1-11.3-2.7zm237.2-76.2c4.7 38.2-1.1 67.9-14.6 75.8-3 1.8-6.9 2.6-11.5 2.6-20.7 0-51.4-16.5-84-46.6 14-14.7 28-31.4 41.3-49.9 22.6-2.4 44-6.1 63.6-11 2.3 10.1 4.1 19.8 5.2 29.1zm38.5-66.7c-8.6 3.7-18 7-27.7 10.1-5.7-19.6-13.2-40-22.5-60.9 9.2-20.8 16.6-41.1 22.2-60.6 9.9 3.1 19.3 6.5 28.1 10.2 35.4 15.1 58.3 34.9 58.3 50.6-.1 15.7-23 35.6-58.4 50.6zM320.8 78.4z"/><circle cx="420.9" cy="296.5" r="45.7"/><path d="M520.5 78.1z"/></g></svg>
13 docker_svc/real-time-chat-app/app/src/reportWebVitals.js Normal file
@@ -0,0 +1,13 @@
const reportWebVitals = onPerfEntry => {
  if (onPerfEntry && onPerfEntry instanceof Function) {
    import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
      getCLS(onPerfEntry);
      getFID(onPerfEntry);
      getFCP(onPerfEntry);
      getLCP(onPerfEntry);
      getTTFB(onPerfEntry);
    });
  }
};

export default reportWebVitals;
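Note that index.js calls `reportWebVitals()` with no argument, so the guard above means no metrics are ever reported; passing any handler enables reporting. A minimal example (console logging is just one option):

```js
// In index.js: log CLS, FID, FCP, LCP and TTFB to the console.
reportWebVitals(console.log);
```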
5 docker_svc/real-time-chat-app/app/src/setupTests.js Normal file
@@ -0,0 +1,5 @@
// jest-dom adds custom jest matchers for asserting on DOM nodes.
// allows you to do things like:
// expect(element).toHaveTextContent(/react/i)
// learn more: https://github.com/testing-library/jest-dom
import '@testing-library/jest-dom';