Keytaro commited on
Commit
1c329f8
·
1 Parent(s): 0490c5a

1st commit

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.pdf filter=lfs diff=lfs merge=lfs -text
37
+ *.sqlite3 filter=lfs diff=lfs merge=lfs -text
10k-reports_db/a3d72286-dd89-4f49-96b6-ab75d8096c72/data_level0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edbdd6d03ab6da46431caeb4b1bfb570ee671e225431565ae7fc17e7a4579371
3
+ size 4236000
10k-reports_db/a3d72286-dd89-4f49-96b6-ab75d8096c72/header.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcc596bc1909f7cc610d5839236c90513b4fbad06776c253fa1b21bfd712e940
3
+ size 100
10k-reports_db/a3d72286-dd89-4f49-96b6-ab75d8096c72/length.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc19b1997119425765295aeab72d76faa6927d4f83985d328c26f20468d6cc76
3
+ size 4000
10k-reports_db/a3d72286-dd89-4f49-96b6-ab75d8096c72/link_lists.bin ADDED
File without changes
10k-reports_db/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ceabfa8fae681616b9e031f165526d03aab676c3d7efb72ef7056883388b13a0
3
+ size 16334848
Dataset-10k/IBM-10-k-2023.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfa7312edab9311c67e4f84354eaac39c49c52fca579981d725201f7acdf6662
3
+ size 574740
Dataset-10k/Meta-10-k-2023.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8fadc2448e4f99ad0ec2dc2e41d13b864204955238cf1f7cd9c96839f274a6c
3
+ size 2481466
Dataset-10k/aws-10-k-2023.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:084ef3d6031c4b57169b32ae6a878a86ebbcbc7bb868030e3170d04392b4096e
3
+ size 712683
Dataset-10k/google-10-k-2023.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1950a82ce1d443a9c8fc10e051acbed517bf5c0bbbab584be1812f5eb79c06e4
3
+ size 926655
Dataset-10k/msft-10-k-2023.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c318819680126f6817c2fbb3b8ffa7c01eae31dcfea7e174406e4fa70a5cf29
3
+ size 2434632
app.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
## Setup
# Import the necessary Libraries
import os
import uuid
import joblib
import json
import tiktoken
import pandas as pd
import gradio as gr
from openai import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings
)
from langchain_community.vectorstores import Chroma
from huggingface_hub import CommitScheduler
from pathlib import Path

# Create the OpenAI client.
# SECURITY NOTE(review): this API key is hard-coded and now lives in the repo
# history — rotate it and supply it via Space secrets / environment variables.
# setdefault (instead of plain assignment) at least lets an externally
# configured key take precedence over the committed fallback.
os.environ.setdefault('OPENAI_API_KEY', "gl-U2FsdGVkX1+0bNWD6YsVLZUYsn0m1WfLxUzrP0xUFbtWFAfk9Z1Cz+mD8u1yqKtV")
os.environ.setdefault("OPENAI_BASE_URL", "https://aibe.mygreatlearning.com/openai/v1")
client = OpenAI()

# Chat model used for answering, and the embedding model — the latter must be
# the same model that was used when the vector store was originally built.
model_name = 'gpt-4o-mini'
embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-large')

# Load the persisted Chroma vector DB holding the 10-K report chunks.
persisted_vectordb_location = '10k-reports_db'
collection_name = '10k-reports'
reports_db = Chroma(
    collection_name=collection_name,
    persist_directory=persisted_vectordb_location,
    embedding_function=embedding_model
)
reports_db.get()  # touch the collection so loading problems surface at startup

# Prepare the logging functionality: one uniquely named JSON-lines file per
# process, pushed to a dataset repo by the CommitScheduler every 2 minutes.
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

scheduler = CommitScheduler(
    repo_id="Keytaro/10K-reports-mlops-logs",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)
# Define the Q&A system message.
# FIX(review): the off-topic rule previously said "irrelevant to streamlit" —
# a copy-paste leftover from another project; this app is about 10-K reports.
qna_system_message = """
You are an assistant to a Gen AI Data Scientist. Your task is to automate the extraction, summarization, and analysis of information from the 10-K reports.

User input will include the necessary context for you to answer their questions. This context will begin with the token: ###Context.
The context contains references to specific portions of documents relevant to the user's query, along with source links.
The source for a context will begin with the token ###Source

When crafting your response:
1. Select only context relevant to answer the question.
2. Include the source links in your response.
3. User questions will begin with the token: ###Question.
4. If the question is irrelevant to the 10-K reports respond with - "I am an assistant for Gen AI Data Scientist. I can only help you with questions related to 10-K reports."

Please adhere to the following guidelines:
- Your response should only be about the question asked and nothing else.
- Answer only using the context provided.
- Do not mention anything about the context in your final answer.
- If the answer is not found in the context, it is very very important for you to respond with "I don't know. Please check the 10-K reports"
- Always quote the source when you use the context. Cite the relevant source at the end of your response under the section - Source:
- Do not make up sources. Use the links provided in the sources section of the context and nothing else. You are prohibited from providing other links/sources.

Here is an example of how to structure your response:

Answer:
[Answer]

Source:
[Source]
"""

# Define the user message template: the retrieved context is injected first,
# followed by the user's question, using the tokens announced in the system
# message above.
qna_user_message_template = """
###Context
Here are some documents and their source links that are relevant to the question mentioned below.
{context}

###Question
{question}
"""
# Define the predict function that runs when 'Submit' is clicked or when an
# API request is made.
def predict(user_input, company):
    """Answer a question about the selected company using its 10-K report.

    Retrieves the top-5 most relevant chunks from the vector store (filtered
    to the chosen company's report), builds a RAG prompt, queries the LLM,
    and appends one JSON line of input/context/response to the log file.

    Parameters:
        user_input: the question text (may contain the phrase "the company",
            which is substituted with the selected company name).
        company: one of "Amazon", "Google", "Microsoft", "Meta", "IBM".

    Returns:
        The model's answer string, or an error/guard message.
    """
    # Map the UI company name to the file-name stem used at ingestion time.
    # BUG FIX(review): "Microsoft" previously mapped to "mtfs", which matches
    # no ingested file (the PDF is msft-10-k-2023.pdf), so the metadata
    # filter could never return results for Microsoft.
    companyfile = {
        "Amazon": "aws",
        "Google": "google",
        "Microsoft": "msft",
        "Meta": "meta",
        "IBM": "IBM"
    }.get(company, None)
    if companyfile is None:
        # Guard: an unknown company previously crashed on string concatenation.
        return "I can only answer questions about Amazon, Google, Microsoft, Meta or IBM."

    user_input = user_input.replace("the company", company)

    # NOTE(review): assumes chunk metadata 'source' values start with
    # "dataset/" — confirm this matches the path used by the ingestion script.
    source_filter = "dataset/" + companyfile + "-10-k-2023.pdf"
    # BUG FIX(review): this previously called the undefined name
    # `vectorstore_persisted` (NameError at runtime); the loaded store is
    # `reports_db`.
    relevant_document_chunks = reports_db.similarity_search(
        user_input, k=5, filter={"source": source_filter}
    )

    # Build the context block, citing source file and page for each chunk.
    context_list = [
        d.page_content
        + f"\n ###Source: \'{d.metadata['source']}\', p.{d.metadata['page']}\n\n "
        for d in relevant_document_chunks
    ]
    context_for_query = ". ".join(context_list)

    # Create the chat messages: fixed system prompt + templated user turn.
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]

    # Get the response from the LLM; surface failures in the answer box
    # instead of crashing the UI.
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content.strip()
    except Exception as e:
        prediction = f'Sorry, I encountered the following error: \n {e}'

    # Log both the inputs and outputs to the local log file, holding the
    # commit-scheduler lock so a background push never sees a half-written
    # line.
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction
# Set up the Gradio UI.
# Two radio buttons: one to pick among the 5 supported questions, one to pick
# the company whose 10-K report the context is retrieved from.
# FIX(review): removed the dead `textbox = gr.Textbox()` / `company =
# gr.Radio()` components — they were never attached to the interface and the
# comment claiming they were used was misleading.

inputs = [
    gr.Radio(label="user_input", choices=["Has the company made any significant acquisitions in the AI space, and how are these acquisitions being integrated into the company's strategy?",
                                          "How much capital has been allocated towards AI research and development by the company?",
                                          "What initiatives has the company implemented to address ethical concerns surrounding AI, such as fairness, accountability, and privacy?",
                                          "How does the company plan to differentiate itself in the AI space relative to competitors?"]),
    gr.Radio(label="Company", choices=["Amazon", "Google", "Microsoft", "Meta", "IBM"]),
]

output = gr.Textbox(label="Answer")

# Create the interface wired to predict(user_input, company).
demo = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=output,
    title="10-K reports RAG system",
    description="This API allows you to answer one of the 5 questions based on 10-K reports.",
    allow_flagging="auto",   # auto-log every prediction
    concurrency_limit=8      # cap simultaneous predict calls
)

demo.queue()
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ openai==1.23.2
2
+ tiktoken==0.6.0
3
+ pypdf==4.0.1
4
+ langchain==0.1.1
5
+ langchain-community==0.0.13
6
+ chromadb==0.4.22
7
+ sentence-transformers==2.3.1