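# RAG chatbot "Jacek": retrieves context from a local Chroma collection built with Polish
# sentence embeddings and answers via the OpenAI chat completions API, served through a Gradio UI.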
import logging
import os
from datetime import datetime

import gradio as gr
from openai import OpenAI
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings

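# OpenAI client; by default it reads the API key from the OPENAI_API_KEY environment variable.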
client = OpenAI()

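# Polish sentence-embedding model (radlab/polish-sts-v2) used to embed queries against the knowledge base.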
embeddings = HuggingFaceEmbeddings(model_name='radlab/polish-sts-v2')

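# Local Chroma vector store (collection 'baza', persisted on disk) holding the knowledge-base fragments.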
vector_store = Chroma(
    collection_name='baza',
    embedding_function=embeddings,
    persist_directory='baza'
)

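# One log file per day; create the logs/ directory first so opening the file does not fail on first run.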
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    filename=f'logs/chat_log_{datetime.now().strftime("%Y%m%d")}.log',
    level=logging.INFO,
    format='%(asctime)s - %(message)s'
)


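# Retrieval step: the question plus the recent conversation is embedded and matched against
# the Chroma collection; the top-3 fragments become the context passed to the model.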
def szukaj(query, konwersacja):
    """Search the knowledge base for fragments similar to the question and recent conversation."""
    # Join the question and the conversation with a newline so the two texts are not glued together.
    query = f"{query}\n{konwersacja}"
    context_objects = vector_store.similarity_search(query=query, k=3)
    context = "\n".join(obj.page_content for obj in context_objects)
    return context


def formatuj_historie_dla_promptu(history):
    """Format the conversation history (message contents only, without roles) for the prompt."""
    return "\n".join(message["content"] for message in history)


def odp(message, history):
    """Main handler: builds the RAG prompt and returns the chatbot's reply."""
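    # Keep only the last six messages (three user/assistant exchanges) to bound the prompt size.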
    history = history[-6:] if len(history) > 6 else history

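    # Build the prompt: recent conversation, retrieved knowledge-base fragments, then the new question.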
    kontekst_konwersacji = formatuj_historie_dla_promptu(history)
    kontekst = szukaj(message, kontekst_konwersacji)
    prompt = f"Konwersacja:\n{kontekst_konwersacji}\nKontekst z bazy wiedzy:\n{kontekst}\nPytanie użytkownika: {message}"

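    # Single chat-completion call; the low temperature (0.2) reduces randomness in the answers.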
    response = client.chat.completions.create(
        model='gpt-4o-mini',
        temperature=0.2,
        messages=[
            {
                'role': 'system',
                'content': 'Jesteś ekspertem dostępności cyfrowej i masz na imię Jacek. Odpowiadaj krótko na pytania korzystając z kontekstu i historii konwersacji.'
            },
            {
                'role': 'user',
                'content': prompt
            }
        ]
    )

    answer = response.choices[0].message.content

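    # Log the question, context size, token usage and the answer for later inspection.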
    logging.info(
        f"User message: {message}\n"
        f"Context length: {len(kontekst)}\n"
        f"Prompt tokens: {response.usage.prompt_tokens}\n"
        f"Completion tokens: {response.usage.completion_tokens}\n"
        f"Total tokens: {response.usage.total_tokens}\n"
        f"Response: {answer}"
    )

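    # Append the new exchange in the {'role', 'content'} format expected by gr.Chatbot(type='messages').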
    history.append({'role': 'user', 'content': message})
    history.append({'role': 'assistant', 'content': answer})

    # Return an empty string to clear the textbox, plus the updated chat history.
    return '', history


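# Minimal Gradio UI: a message-style chat window and a textbox that submits to odp().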
with gr.Blocks(title='Jacek AI') as demo:
    chatbot = gr.Chatbot(type='messages', label='Jacek AI')
    msg = gr.Textbox(autofocus=True, label='Pytaj', show_label=False)
    msg.submit(odp, [msg, chatbot], [msg, chatbot])

demo.launch(inbrowser=True)