File size: 2,107 Bytes
47e4aa2
 
080146c
 
 
 
 
47e4aa2
 
 
 
 
 
e6b7117
47e4aa2
080146c
e6b7117
 
080146c
 
 
 
 
e6b7117
080146c
 
 
 
 
03e1062
080146c
 
 
03e1062
080146c
03e1062
080146c
 
03e1062
080146c
03e1062
e6b7117
 
 
 
 
080146c
 
 
 
 
 
 
 
 
 
e6b7117
 
 
 
 
080146c
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import os
from dotenv import load_dotenv
from enum import Enum
from openai import OpenAI
from pathlib import Path



# Load environment variables from the .env file.
load_dotenv()

# API keys for the hosted providers; both must be present.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")

# Fail fast at import time when a required key is missing.
for _key_name, _key_value in (
    ("OPENAI_API_KEY", OPENAI_API_KEY),
    ("DEEPSEEK_API_KEY", DEEPSEEK_API_KEY),
):
    if not _key_value:
        raise ValueError(f"{_key_name} non trovata. Verifica il file .env")

class LLMType(Enum):
    """Enumeration of the selectable LLM backends.

    Each value is a human-readable label of the form "<provider> - <model>".
    Members are used as keys into LLM_CONFIGS below.
    """

    OPENAI_GPT_4O_MINI = "openai - GPT-4o-mini"  # hosted OpenAI model
    LOCAL_QWEN = "local - Qwen 7B"  # served by the local OpenAI-compatible endpoint
    LOCAL_PHI = "local - Phi-3 Mini"  # served by the local OpenAI-compatible endpoint
    DEEPSEEK = "deepseek - DeepSeek Chat"  # hosted DeepSeek model

# Model configuration: one entry per LLMType with a client factory (lazy, so
# no network client is created at import time), the model identifier, and the
# base URL (None means the default OpenAI endpoint).
#
# The endpoint URLs were previously repeated inline for every entry; they are
# now single-sourced in the two private constants below.
_LOCAL_LLM_BASE_URL = "http://192.168.43.199:1234/v1"  # local OpenAI-compatible server
_DEEPSEEK_BASE_URL = "https://api.deepseek.com/v1"

LLM_CONFIGS = {
    LLMType.OPENAI_GPT_4O_MINI: {
        "client": lambda: OpenAI(api_key=OPENAI_API_KEY),
        "model": "gpt-4o-mini",
        "base_url": None,  # default OpenAI endpoint
    },
    LLMType.LOCAL_QWEN: {
        "client": lambda: OpenAI(base_url=_LOCAL_LLM_BASE_URL, api_key="not-needed"),
        "model": "qwen2.5-coder-7b-instruct",
        "base_url": _LOCAL_LLM_BASE_URL,
    },
    LLMType.LOCAL_PHI: {
        "client": lambda: OpenAI(base_url=_LOCAL_LLM_BASE_URL, api_key="not-needed"),
        "model": "phi-3.5-mini-ita",
        "base_url": _LOCAL_LLM_BASE_URL,
    },
    LLMType.DEEPSEEK: {
        "client": lambda: OpenAI(api_key=DEEPSEEK_API_KEY, base_url=_DEEPSEEK_BASE_URL),
        "model": "deepseek-chat",
        "base_url": _DEEPSEEK_BASE_URL,
    },
}

# Embedding / retrieval settings (chunking parameters and similarity cutoff).
EMBEDDING_CONFIG = dict(
    model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1",
    chunk_size=2000,
    chunk_overlap=100,
    min_similarity=0.7,  # minimum score for a retrieved chunk to be kept
)

# Extra generation parameters (sampling temperature and output token limit).
LLM_CONFIGS_EXTENDED = dict(temperature=0.7, max_tokens=2048)

# Embedding model identifier. Derived from EMBEDDING_CONFIG instead of
# repeating the string literal, so the two can never drift apart.
EMBEDDING_MODEL = EMBEDDING_CONFIG["model_name"]

# Base directory for the databases.
BASE_DB_PATH = "db"

# Italian edge-tts voices.
VOICE_USER = "it-IT-DiegoNeural"      # male voice, user side
VOICE_ASSISTANT = "it-IT-ElsaNeural"   # female voice, assistant side