Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -38,22 +38,29 @@ def extract_text_from_pdf(pdf_path):
|
|
38 |
text_chunks.extend(chunks)
|
39 |
return text_chunks
|
40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
# Suche und Bewertung
|
42 |
def search_and_rank_answers(query, index, documents, k=10):
|
43 |
query_embedding = model.encode([query])[0].astype('float32')
|
44 |
D, I = index.search(np.array([query_embedding]), k=k)
|
45 |
|
|
|
|
|
|
|
46 |
ranked_answers = []
|
47 |
-
for
|
48 |
-
context = documents[doc_index]
|
49 |
try:
|
50 |
result = qa_model(question=query, context=context)
|
51 |
-
ranked_answers.append((result['answer'],
|
52 |
except Exception as e:
|
53 |
logging.warning(f"Fehler bei der Antwortgenerierung: {e}")
|
54 |
|
55 |
-
|
56 |
-
ranked_answers = sorted(ranked_answers, key=lambda x: x[1])
|
57 |
return [answer for answer, _ in ranked_answers]
|
58 |
|
59 |
# Antworten kombinieren
|
@@ -76,6 +83,7 @@ def chatbot_response(pdf_path, question):
|
|
76 |
# Antworten kombinieren
|
77 |
detailed_answer = combine_answers(answers)
|
78 |
|
|
|
79 |
return detailed_answer
|
80 |
|
81 |
# Gradio-Interface
|
|
|
38 |
text_chunks.extend(chunks)
|
39 |
return text_chunks
|
40 |
|
41 |
# Rank contexts by relevance to the query
def rank_contexts_by_relevance(query, contexts, top_k=5):
    """Rank candidate contexts by embedding similarity to the query.

    Args:
        query: The user question (str).
        contexts: List of candidate context strings.
        top_k: Number of best-matching contexts to return. Defaults to 5,
            preserving the original hard-coded behavior.

    Returns:
        The ``top_k`` contexts ordered from most to least similar.
    """
    # Guard: encoding an empty batch is pointless and may fail downstream.
    if not contexts:
        return []
    # NOTE(review): a raw dot product equals cosine similarity only if
    # `model` emits normalized embeddings — confirm for the model in use.
    scores = model.encode([query]) @ model.encode(contexts).T
    ranked = sorted(zip(scores[0], contexts), key=lambda pair: pair[0], reverse=True)
    return [context for _, context in ranked[:top_k]]
|
47 |
# Search and rank answers
def search_and_rank_answers(query, index, documents, k=10):
    """Retrieve candidate contexts via FAISS and extract ranked answers.

    Embeds the query, fetches the ``k`` nearest document chunks, re-ranks
    them by semantic relevance, runs the QA model on the top contexts,
    and returns the extracted answers sorted by confidence.

    Args:
        query: The user question (str).
        index: FAISS index built over the document embeddings.
        documents: List of text chunks aligned positionally with the index.
        k: Number of nearest neighbours to retrieve (default 10).

    Returns:
        List of answer strings, highest QA confidence first. Contexts
        where answer extraction fails are logged and skipped.
    """
    query_embedding = model.encode([query])[0].astype('float32')
    # Distances are unused here; only the neighbour indices matter.
    _, I = index.search(np.array([query_embedding]), k=k)

    # FAISS pads results with -1 when fewer than k vectors exist; without
    # this filter, documents[-1] would silently pick the LAST chunk.
    candidate_contexts = [documents[i] for i in I[0] if i >= 0]
    top_contexts = rank_contexts_by_relevance(query, candidate_contexts)

    ranked_answers = []
    for context in top_contexts:
        try:
            result = qa_model(question=query, context=context)
            ranked_answers.append((result['answer'], result['score']))
        except Exception as e:
            logging.warning(f"Fehler bei der Antwortgenerierung: {e}")

    # Highest-confidence answers first.
    ranked_answers.sort(key=lambda pair: pair[1], reverse=True)
    return [answer for answer, _ in ranked_answers]
|
65 |
|
66 |
# Antworten kombinieren
|
|
|
83 |
# Antworten kombinieren
|
84 |
detailed_answer = combine_answers(answers)
|
85 |
|
86 |
+
logging.info(f"Antwort: {detailed_answer}")
|
87 |
return detailed_answer
|
88 |
|
89 |
# Gradio-Interface
|