Nugh75 commited on
Commit
24083bf
·
1 Parent(s): c87954b

llm locale funzionante

Browse files
Files changed (2) hide show
  1. app/llm_handling.py +38 -18
  2. ui/chatbot_tab.py +47 -33
app/llm_handling.py CHANGED
@@ -1,6 +1,7 @@
1
  import logging
2
  import os
3
  import shutil
 
4
 
5
  from openai import OpenAI
6
  from langchain_community.vectorstores import FAISS
@@ -13,11 +14,24 @@ from app.configs.prompts import SYSTEM_PROMPTS
13
 
14
  logging.basicConfig(level=logging.INFO)
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  def get_system_prompt(prompt_type="tutor"):
17
  """Seleziona il prompt di sistema appropriato"""
18
  return SYSTEM_PROMPTS.get(prompt_type, SYSTEM_PROMPTS["tutor"])
19
 
20
- def answer_question(question, db_name, prompt_type="tutor", chat_history=None):
21
  """
22
  Risponde alla domanda 'question' usando i documenti del database 'db_name'.
23
  Restituisce una lista di 2 messaggi in formato:
@@ -60,24 +74,30 @@ def answer_question(question, db_name, prompt_type="tutor", chat_history=None):
60
 
61
  # Prepara il contesto dai documenti
62
  context = "\n".join([doc.page_content for doc in relevant_docs])
 
63
 
64
- client = OpenAI(api_key=OPENAI_API_KEY)
65
-
66
- messages = [
67
- {"role": "system", "content": SYSTEM_PROMPTS[prompt_type].format(context=context)},
68
- {"role": "user", "content": question}
69
- ]
70
-
71
- # Esegui la chiamata a OpenAI
72
- response = client.chat.completions.create(
73
- model="gpt-4o-mini",
74
- messages=messages,
75
- temperature=0.5,
76
- max_tokens=3048
77
- )
78
-
79
- answer = response.choices[0].message.content
80
-
 
 
 
 
 
81
  return [
82
  {"role": "user", "content": question},
83
  {"role": "assistant", "content": answer}
 
1
  import logging
2
  import os
3
  import shutil
4
+ from enum import Enum
5
 
6
  from openai import OpenAI
7
  from langchain_community.vectorstores import FAISS
 
14
 
15
  logging.basicConfig(level=logging.INFO)
16
 
17
class LLMType(Enum):
    """Identifies which LLM backend should serve a chat request."""

    OPENAI = "openai"  # hosted OpenAI API
    LOCAL = "local"    # LM Studio server on the local network
20
+
21
# Client for the hosted OpenAI API.
openai_client = OpenAI(api_key=OPENAI_API_KEY)

# Client for a local LM Studio server exposing an OpenAI-compatible API.
# The endpoint can be overridden with the LOCAL_LLM_BASE_URL environment
# variable; the hard-coded LAN address remains only the fallback default.
local_client = OpenAI(
    base_url=os.getenv("LOCAL_LLM_BASE_URL", "http://192.168.140.5:1234/v1"),
    api_key="not-needed",  # LM Studio ignores the key, but the client requires one
)
29
+
30
def get_system_prompt(prompt_type="tutor"):
    """Return the system prompt for *prompt_type*, falling back to 'tutor'."""
    try:
        return SYSTEM_PROMPTS[prompt_type]
    except KeyError:
        return SYSTEM_PROMPTS["tutor"]
33
 
34
+ def answer_question(question, db_name, prompt_type="tutor", chat_history=None, llm_type=LLMType.OPENAI):
35
  """
36
  Risponde alla domanda 'question' usando i documenti del database 'db_name'.
37
  Restituisce una lista di 2 messaggi in formato:
 
74
 
75
  # Prepara il contesto dai documenti
76
  context = "\n".join([doc.page_content for doc in relevant_docs])
77
+ prompt = SYSTEM_PROMPTS[prompt_type].format(context=context)
78
 
79
+ if llm_type == LLMType.OPENAI:
80
+ response = openai_client.chat.completions.create(
81
+ model="gpt-4o-mini",
82
+ messages=[
83
+ {"role": "system", "content": prompt},
84
+ {"role": "user", "content": question}
85
+ ],
86
+ temperature=0.7
87
+ )
88
+ answer = response.choices[0].message.content
89
+
90
+ else: # LOCAL
91
+ response = local_client.chat.completions.create(
92
+ model="qwen2.5-coder-7b-instruct",
93
+ messages=[
94
+ {"role": "system", "content": prompt},
95
+ {"role": "user", "content": question}
96
+ ],
97
+ temperature=0.7
98
+ )
99
+ answer = response.choices[0].message.content
100
+
101
  return [
102
  {"role": "user", "content": question},
103
  {"role": "assistant", "content": answer}
ui/chatbot_tab.py CHANGED
@@ -3,10 +3,10 @@
3
  import gradio as gr
4
  from app.functions.database_handling import list_databases
5
  from app.configs.prompts import SYSTEM_PROMPTS # Aggiunta importazione
6
- from app.llm_handling import answer_question
7
  from utils.helpers import extract_text_from_files
8
 
9
- def create_chatbot_tab(create_interface=True):
10
  """Crea il tab 'Chatbot' dell'interfaccia Gradio."""
11
 
12
  def chat_upload_and_respond(files, chat_history, db_name):
@@ -23,12 +23,20 @@ def create_chatbot_tab(create_interface=True):
23
 
24
  return chat_history
25
 
26
- def respond(message, chat_history, db_name, prompt_type):
27
  """Genera una risposta alla domanda dell'utente e aggiorna la chat."""
28
  if chat_history is None:
29
  chat_history = []
30
 
31
- new_messages = answer_question(message, db_name, prompt_type)
 
 
 
 
 
 
 
 
32
  chat_history.extend(new_messages)
33
 
34
  return "", chat_history
@@ -41,46 +49,52 @@ def create_chatbot_tab(create_interface=True):
41
  databases = list_databases()
42
 
43
  with gr.Tab("Chatbot"):
 
44
  with gr.Row():
45
- with gr.Column(scale=2):
46
- # Singolo dropdown per il database
47
  db_name_chat = gr.Dropdown(
48
  choices=databases,
49
  label="Seleziona Database",
50
  value="default_db"
51
  )
52
-
 
53
  prompt_selector = gr.Dropdown(
54
  choices=list(SYSTEM_PROMPTS.keys()),
55
  label="Seleziona Stile Risposta",
56
  value="tutor"
57
  )
58
-
59
- # Componente Chatbot
60
- chatbot = gr.Chatbot(label="Conversazione", type="messages")
61
-
62
- # Input per la domanda
63
- question_input = gr.Textbox(
64
- label="Fai una domanda",
65
- placeholder="Scrivi qui la tua domanda...",
66
- lines=2
67
  )
68
-
69
- # Bottoni per azioni
70
- with gr.Row():
71
- ask_button = gr.Button("Invia")
72
- clear_button = gr.Button("Pulisci Chat")
73
-
74
- # Upload file con dimensioni ridotte
75
- with gr.Row():
76
- file_input = gr.File(
77
- label="Carica PDF/Docx/TXT per la conversazione",
78
- file_types=[".pdf", ".docx", ".txt"],
79
- file_count="multiple",
80
- height="100px",
81
- scale=3
82
- )
83
- upload_button = gr.Button("Carica Documenti", scale=1)
 
 
 
 
 
 
 
 
84
 
85
  # Stato della chat
86
  chat_state = gr.State([])
@@ -94,7 +108,7 @@ def create_chatbot_tab(create_interface=True):
94
 
95
  ask_button.click(
96
  fn=respond,
97
- inputs=[question_input, chat_state, db_name_chat, prompt_selector],
98
  outputs=[question_input, chatbot]
99
  )
100
 
 
3
  import gradio as gr
4
  from app.functions.database_handling import list_databases
5
  from app.configs.prompts import SYSTEM_PROMPTS # Aggiunta importazione
6
+ from app.llm_handling import answer_question, LLMType # Aggiungi LLMType
7
  from utils.helpers import extract_text_from_files
8
 
9
+ def create_chatbot_tab():
10
  """Crea il tab 'Chatbot' dell'interfaccia Gradio."""
11
 
12
  def chat_upload_and_respond(files, chat_history, db_name):
 
23
 
24
  return chat_history
25
 
26
def respond(message, chat_history, db_name, prompt_type, llm_type):
    """Answer the user's question and append the new exchange to the chat."""
    history = chat_history if chat_history is not None else []

    # The UI dropdown hands us a plain string; translate it into the enum.
    if llm_type == "local":
        backend = LLMType.LOCAL
    else:
        backend = LLMType.OPENAI

    history.extend(
        answer_question(message, db_name, prompt_type, llm_type=backend)
    )
    return "", history
 
49
  databases = list_databases()
50
 
51
  with gr.Tab("Chatbot"):
52
+ # Prima riga: Dropdown selettori
53
  with gr.Row():
54
+ with gr.Column(scale=1):
 
55
  db_name_chat = gr.Dropdown(
56
  choices=databases,
57
  label="Seleziona Database",
58
  value="default_db"
59
  )
60
+
61
+ with gr.Column(scale=1):
62
  prompt_selector = gr.Dropdown(
63
  choices=list(SYSTEM_PROMPTS.keys()),
64
  label="Seleziona Stile Risposta",
65
  value="tutor"
66
  )
67
+
68
+ with gr.Column(scale=1):
69
+ llm_selector = gr.Dropdown(
70
+ choices=["openai", "local"],
71
+ label="Seleziona Modello",
72
+ value="openai"
 
 
 
73
  )
74
+
75
+ # Chatbot e input
76
+ chatbot = gr.Chatbot(label="Conversazione", type="messages")
77
+ question_input = gr.Textbox(
78
+ label="Fai una domanda",
79
+ placeholder="Scrivi qui la tua domanda...",
80
+ lines=2
81
+ )
82
+
83
+ # Bottoni per azioni
84
+ with gr.Row():
85
+ ask_button = gr.Button("Invia")
86
+ clear_button = gr.Button("Pulisci Chat")
87
+
88
+ # Upload file con dimensioni ridotte
89
+ with gr.Row():
90
+ file_input = gr.File(
91
+ label="Carica PDF/Docx/TXT per la conversazione",
92
+ file_types=[".pdf", ".docx", ".txt"],
93
+ file_count="multiple",
94
+ height="100px",
95
+ scale=3
96
+ )
97
+ upload_button = gr.Button("Carica Documenti", scale=1)
98
 
99
  # Stato della chat
100
  chat_state = gr.State([])
 
108
 
109
  ask_button.click(
110
  fn=respond,
111
+ inputs=[question_input, chat_state, db_name_chat, prompt_selector, llm_selector], # Aggiungi il selettore del modello
112
  outputs=[question_input, chatbot]
113
  )
114