import os
import uuid
import json
import chromadb

import gradio as gr

from dotenv import load_dotenv
from openai import OpenAI

from langchain_community.embeddings import SentenceTransformerEmbeddings

from langchain_community.vectorstores import Chroma

from huggingface_hub import CommitScheduler
from pathlib import Path

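# Sentence-transformer used to embed user queries; it must match the model
# that produced the vectors already stored in the Chroma collection.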
embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-small')

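# Load secrets (e.g. ANYSCALE_API_KEY, PASSWD) from a local .env file.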
load_dotenv()

tesla_10k_collection = 'tesla-10k-2019-to-2023'

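# Anyscale Endpoints is OpenAI-API-compatible, so the standard OpenAI client
# works when pointed at the Anyscale base URL.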
anyscale_api_key = os.environ['ANYSCALE_API_KEY']

client = OpenAI(
    base_url="https://api.endpoints.anyscale.com/v1",
    api_key=anyscale_api_key
)
 
qna_model = 'meta-llama/Meta-Llama-3-8B-Instruct'

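# Reopen the Chroma vector store persisted on disk under ./tesla_db.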
chromadb_client = chromadb.PersistentClient(path='./tesla_db')

vectorstore_persisted = Chroma(
    client=chromadb_client,
    collection_name=tesla_10k_collection,
    embedding_function=embedding_model
)

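# Expose the vector store as a retriever returning the 5 most similar chunks.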
retriever = vectorstore_persisted.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 5}
)

# Prepare the logging functionality

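# Each app instance appends JSON records (one per line) to its own uniquely
# named file, so concurrent instances never write to the same log.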
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

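# CommitScheduler pushes the log folder to a Hugging Face dataset repo in the
# background (every=2 means every 2 minutes).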
scheduler = CommitScheduler(
    repo_id="document-qna-chroma-anyscale-logs",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)

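# System prompt: constrain the model to answer strictly from the retrieved context.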
qna_system_message = """
You are an assistant to a financial services firm, and you answer user questions about annual reports.
User questions are delimited by triple backticks, that is, ```.
The user input also contains the context required to answer the question.
This context begins with the token: ###Context
The context contains references to specific portions of a document relevant to the user question.
Answer only using the context provided in the input, but do not mention anything about the context in your answer.
If the answer is not found in the context, respond "I don't know".
"""

qna_user_message_template = """
###Context
Here are some documents that are relevant to the question.
{context}
```
{question}
```
"""

def predict(user_input: str, history):

    """
    Generate the chatbot's response to user_input, grounded in retrieved
    10-K context; history is the running list of (user, assistant) turns
    supplied by gr.ChatInterface.
    """

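    # Retrieve the most relevant document chunks and collapse them into a
    # single context string for the prompt.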
    relevant_document_chunks = retriever.invoke(user_input)
    context_list = [d.page_content for d in relevant_document_chunks]
    context_for_query = "\n".join(context_list)

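    # Fill the user message template with the retrieved context and the question.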
    user_message = [{
        'role': 'user', 
        'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )
    }]

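    # Start from the system message and replay the prior (user, assistant)
    # turns so the model sees the full conversation before the new question.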
    prompt = [{'role':'system', 'content': qna_system_message}]

    for entry in history:
        prompt += (
            [{'role': 'user', 'content': entry[0]}] +
            [{'role': 'assistant', 'content': entry[1]}]
        )
    
    final_prompt = prompt + user_message

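    # Query the hosted Llama 3 model; temperature=0 keeps answers
    # deterministic and close to the source text.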
    try:
        response = client.chat.completions.create(
            model=qna_model,
            messages=final_prompt,
            temperature=0
        )

        prediction = response.choices[0].message.content.strip()
    except Exception as e:
        prediction = f"Sorry, I cannot answer your question at this point. {e}"

    # Log the query, retrieved context, and model response to the local
    # log file. Hold the scheduler lock while writing so that a background
    # commit does not upload a partially written file.
    
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction

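# Wire predict() into a chat UI; gr.ChatInterface supplies the history argument.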
demo = gr.ChatInterface(
    fn=predict,
    title="AMA on Tesla 10-K statements",
    description="Ask questions about the contents of Tesla's 10-K reports for the period 2019-2023.",
    examples=[["What was the total revenue of the company in 2022?"],
              ["Summarize the Management Discussion and Analysis section of the 2021 report in 50 words."],
              ["What was the company's debt level in 2020?"],
              ["Identify 5 key risks identified in the 2019 10k report?"],
              ["What is the view of the management on the future of electric vehicle batteries?"]
             ],
    cache_examples=False,
    theme=gr.themes.Base(),
    concurrency_limit=8,
    show_progress="full"
)

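# Launch behind basic auth; PASSWD must be set in the environment (or .env)
# for login to work.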
demo.launch(auth=("demouser", os.getenv('PASSWD')))