File size: 3,256 Bytes
2702698
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import openai
from openai import OpenAI

from Query_Agent import extract_action


class Answering_Agent:
    """Answers user queries grounded in retrieved documents.

    Builds a prompt from the top-k retrieved document snippets (plus optional
    conversation history) and sends it to the OpenAI chat-completions API
    (gpt-3.5-turbo).
    """

    def __init__(self, openai_api_key) -> None:
        """Store the API key and construct a reusable OpenAI client.

        Args:
            openai_api_key: Secret key for the OpenAI API. Pass this in from
                the environment or a secrets manager — never commit a key to
                source control (a hard-coded key previously in this file has
                been removed and must be revoked).
        """
        # Kept for backward compatibility with any caller that reaches into
        # these attributes; the per-instance client below is what we use.
        self.openai_client = openai
        openai.api_key = openai_api_key
        # Build the client once here rather than on every generate_response
        # call (the original re-instantiated OpenAI(...) per request).
        self._client = OpenAI(api_key=openai_api_key)

    def get_document_content(self, doc_id):
        """Placeholder: return the text content for a document ID.

        Replace with a real lookup (database / vector store / API call) once
        the document storage layer exists.
        """
        return "Document content for ID " + doc_id

    def _build_prompt(self, query, context_texts, conv_history, mode):
        """Assemble the chat prompt from context, optional history, and query.

        Args:
            query: The user's question.
            context_texts: Pre-joined "Context N: ..." document snippets.
            conv_history: Optional iterable of {"role": ..., "content": ...}
                turns; included in the prompt when truthy.
            mode: "chatty" appends an instruction asking for a detailed,
                step-by-step answer; any other value leaves the prompt as-is.

        Returns:
            The full prompt string to send as a single user message.
        """
        if conv_history:
            history_str = "\n".join(
                f"{turn['role']}: {turn['content']}" for turn in conv_history
            )
            prompt = f"""Based on the following documents and conversation history, answer the query:
            Documents:
            {context_texts}
            Conversation:
            {history_str}
            Query: {query}
            ONLY If neither Documents nor Conversation has anything to do with query, you must reply directly: No relevant documents found in the documents. Please ask a relevant question to the book on Machine Learning. 
            Answer: """
        else:
            prompt = f"Based on the following documents, answer the query:\nDocuments:\n{context_texts}\nQuery: {query}\n ONLY If Documents has nothing to do with query,  you must reply directly: No relevant documents found in the documents.Please ask a relevant question to the book on Machine Learning. \nAnswer: "

        if mode == "chatty":
            prompt = prompt + "Please provide a detailed and comprehensive response that includes background information, relevant examples, and any important distinctions or perspectives related to the topic. Where possible, include step-by-step explanations or descriptions to ensure clarity and depth in your answer"
        return prompt

    def generate_response(self, query, docs, conv_history, k=5, mode="chatty"):
        """Answer `query` using the top-k retrieved documents.

        Args:
            query: The user's question.
            docs: Ranked retrieval results; each result's index [2] is assumed
                to hold the document text — TODO confirm against the retriever.
            conv_history: Optional list of {"role", "content"} turns.
            k: Number of top documents to include in the prompt (previously
                accepted but ignored; now honored).
            mode: "chatty" → long, detailed answer at high temperature;
                anything else → short answer at lower temperature.

        Returns:
            The model's answer as a string.
        """
        # Fix: honor k — the original enumerated ALL of docs and left k unused.
        top_docs = docs[:k]
        context_texts = "\n\n".join(
            f"Context {idx + 1}: {result[2]}" for idx, result in enumerate(top_docs)
        )
        print(f"context_texts is : {context_texts} \n\n\n")

        prompt = self._build_prompt(query, context_texts, conv_history, mode)

        # NOTE(review): gpt-3.5-turbo's context window is 4096 tokens TOTAL
        # (prompt + completion), so max_tokens=4000 will raise an API error
        # whenever the prompt is non-trivial — consider lowering this.
        max_tokens = 4000 if mode == "chatty" else 100
        temperature = 0.9 if mode == "chatty" else 0.5

        response = self._client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
            # Fix: the original also stopped on "\n", which truncated the
            # answer at the first newline — contradicting the chatty-mode
            # request for a multi-line, step-by-step response.
            stop=["Query:"],
        )
        return response.choices[0].message.content