aidevhund committed

Commit 7e6b008 • Parent: af18afb

Copied files from source repository
Files changed (9)
  1. Images/ai-logo.png +0 -0
  2. Images/github-logo.png +0 -0
  3. Images/linkedin-logo.png +0 -0
  4. README.md +6 -6
  5. app.py +177 -0
  6. bot.py +41 -0
  7. data/abc.pdf +1 -0
  8. markdowm.py +155 -0
  9. requirements.txt +7 -0
Images/ai-logo.png ADDED
Images/github-logo.png ADDED
Images/linkedin-logo.png ADDED
README.md CHANGED
@@ -1,13 +1,13 @@
 ---
-title: Chatbot
-emoji: 🔥
-colorFrom: purple
-colorTo: pink
+title: Document QA Bot
+emoji: 📚
+colorFrom: pink
+colorTo: purple
 sdk: gradio
-sdk_version: 5.9.1
+sdk_version: 4.44.0
 app_file: app.py
 pinned: false
-license: apache-2.0
+license: mit
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,177 @@
+from datetime import datetime
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_parse import LlamaParse
+from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+import os
+from dotenv import load_dotenv
+import gradio as gr
+import markdowm as md
+import base64
+
+# Load environment variables
+load_dotenv()
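+# Required environment variables (read below via os.getenv; on a Space they
+# come from the repo secrets, locally from a .env file):
+#   LLAMA_INDEX_API - LlamaParse API key
+#   TOKEN           - Hugging Face inference token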
+
+llm_models = [
+    "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "meta-llama/Meta-Llama-3-8B-Instruct",
+    "mistralai/Mistral-7B-Instruct-v0.2",
+    "tiiuae/falcon-7b-instruct",
+    # Models excluded for exceeding the 10GB inference limit, erroring, or responding too slowly:
+    # "mistralai/Mixtral-8x22B-Instruct-v0.1",  ## 281GB > 10GB
+    # "NousResearch/Yarn-Mistral-7b-64k",       ## 14GB > 10GB
+    # "impira/layoutlm-document-qa",            ## ERR
+    # "Qwen/Qwen1.5-7B",                        ## 15GB
+    # "Qwen/Qwen2.5-3B",                        ## high response time
+    # "google/gemma-2-2b-jpn-it",               ## high response time
+    # "impira/layoutlm-invoices",               ## bad req
+    # "google/pix2struct-docvqa-large",         ## bad req
+    # "google/gemma-7b-it",                     ## 17GB > 10GB
+    # "google/gemma-2b-it",                     ## high response time
+    # "HuggingFaceH4/zephyr-7b-beta",           ## high response time
+    # "HuggingFaceH4/zephyr-7b-gemma-v0.1",     ## bad req
+    # "microsoft/phi-2",                        ## high response time
+    # "TinyLlama/TinyLlama-1.1B-Chat-v1.0",     ## high response time
+    # "mosaicml/mpt-7b-instruct",               ## 13GB > 10GB
+    # "google/flan-t5-xxl",                     ## high response time
+    # "NousResearch/Yarn-Mistral-7b-128k",      ## 14GB > 10GB
+    # "Qwen/Qwen2.5-7B-Instruct",               ## 15GB > 10GB
+]
+
+embed_models = [
+    "BAAI/bge-small-en-v1.5",  # 33.4M
+    "NeuML/pubmedbert-base-embeddings",
+    "BAAI/llm-embedder",  # 109M
+    "BAAI/bge-large-en",  # 335M
+]
+
+# Global state: currently selected models and the vector index
+selected_llm_model_name = llm_models[0]  # Default to the first model in the list
+selected_embed_model_name = embed_models[0]  # Default to the first model in the list
+vector_index = None
+
+# Initialize the parser
+parser = LlamaParse(api_key=os.getenv("LLAMA_INDEX_API"), result_type='markdown')
+# Define file extractor with various common extensions
+file_extractor = {
+    '.pdf': parser,   # PDF documents
+    '.docx': parser,  # Microsoft Word documents
+    '.doc': parser,   # Older Microsoft Word documents
+    '.txt': parser,   # Plain text files
+    '.csv': parser,   # Comma-separated values files
+    '.xlsx': parser,  # Microsoft Excel files (requires additional processing for tables)
+    '.pptx': parser,  # Microsoft PowerPoint files (for slides)
+    '.html': parser,  # HTML files (web pages)
+    # '.rtf': parser,   # Rich Text Format files
+    # '.odt': parser,   # OpenDocument Text files
+    # '.epub': parser,  # ePub files (e-books)
+
+    # Image files for OCR processing
+    '.jpg': parser,   # JPEG images
+    '.jpeg': parser,  # JPEG images
+    '.png': parser,   # PNG images
+    # '.bmp': parser,   # Bitmap images
+    # '.tiff': parser,  # TIFF images
+    # '.tif': parser,   # TIFF images (alternative extension)
+    # '.gif': parser,   # GIF images (can contain text)
+
+    # Scanned documents in image formats
+    '.webp': parser,  # WebP images
+    '.svg': parser,   # SVG files (vector format, may contain embedded text)
+}
+
+
+# Parse and index the uploaded file with the chosen embedding model
+def load_files(file_path: str, embed_model_name: str):
+    try:
+        global vector_index
+        document = SimpleDirectoryReader(input_files=[file_path], file_extractor=file_extractor).load_data()
+        embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
+        vector_index = VectorStoreIndex.from_documents(document, embed_model=embed_model)
+        print(f"Parsing done for {file_path}")
+        filename = os.path.basename(file_path)
+        return f"Ready to give response on {filename}"
+    except Exception as e:
+        return f"An error occurred: {e}"
+
+
+# Handle the LLM selected from the dropdown
+def set_llm_model(selected_model):
+    global selected_llm_model_name
+    selected_llm_model_name = selected_model  # Update the global variable
+
+
+# Respond function that uses the globally set selected model
+def respond(message, history):
+    if vector_index is None:
+        return "Please upload a file."
+    try:
+        # Initialize the LLM with the selected model
+        llm = HuggingFaceInferenceAPI(
+            model_name=selected_llm_model_name,
+            contextWindow=8192,    # Context window size (typically the model's maximum length)
+            maxTokens=1024,        # Tokens per response generation (512-1024 works well for detailed answers)
+            temperature=0.3,       # Lower temperature for more focused answers (0.2-0.4 for factual info)
+            topP=0.9,              # Top-p sampling to control diversity while retaining quality
+            frequencyPenalty=0.5,  # Slight penalty to avoid repetition
+            presencePenalty=0.5,   # Encourages exploration without digressing too much
+            token=os.getenv("TOKEN")
+        )
+
+        # Set up the query engine with the selected LLM
+        query_engine = vector_index.as_query_engine(llm=llm)
+        bot_message = query_engine.query(message)
+
+        print(f"\n{datetime.now()}:{selected_llm_model_name}:: {message} --> {str(bot_message)}\n")
+        return f"{selected_llm_model_name}:\n{str(bot_message)}"
+    except Exception as e:
+        return f"An error occurred: {e}"
+
+
+# Read an image and return it as a base64 string for inline HTML embedding
+def encode_image(image_path):
+    with open(image_path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode('utf-8')
+
+# Encode the logo images
+github_logo_encoded = encode_image("Images/github-logo.png")
+linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
+website_logo_encoded = encode_image("Images/ai-logo.png")
+
+# UI Setup
+with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as demo:
+    gr.Markdown("# DocBot📄🤖")
+    with gr.Tabs():
+        with gr.TabItem("Intro"):
+            gr.Markdown(md.description)
+
+        with gr.TabItem("DocBot"):
+            with gr.Accordion("=== IMPORTANT: READ ME FIRST ===", open=False):
+                guid = gr.Markdown(md.guide)
+            with gr.Row():
+                with gr.Column(scale=1):
+                    file_input = gr.File(file_count="single", type='filepath', label="Step-1: Upload document")
+                    embed_model_dropdown = gr.Dropdown(embed_models, label="Step-2: Select Embedding", interactive=True)
+                    with gr.Row():
+                        btn = gr.Button("Submit", variant='primary')
+                        clear = gr.ClearButton()
+                    output = gr.Text(label='Vector Index')
+                    llm_model_dropdown = gr.Dropdown(llm_models, label="Step-3: Select LLM", interactive=True)
+                with gr.Column(scale=3):
+                    gr.ChatInterface(
+                        fn=respond,
+                        chatbot=gr.Chatbot(height=500),
+                        theme="soft",
+                        show_progress='full',
+                        textbox=gr.Textbox(placeholder="Step-4: Ask me questions on the uploaded document!", container=False)
+                    )
+    gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))
+    # Set up Gradio interactions
+    llm_model_dropdown.change(fn=set_llm_model, inputs=llm_model_dropdown)
+    btn.click(fn=load_files, inputs=[file_input, embed_model_dropdown], outputs=output)
+    clear.click(lambda: [None] * 3, outputs=[file_input, embed_model_dropdown, output])
+
+# Launch the demo
+if __name__ == "__main__":
+    demo.launch()
bot.py ADDED
@@ -0,0 +1,41 @@
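+# Standalone command-line smoke test of the document-QA pipeline,
+# independent of the Gradio UI in app.py (not imported by the app).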
+import os
+import time
+from datetime import datetime
+
+import httpx
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_parse import LlamaParse
+
+llm = HuggingFaceInferenceAPI(
+    model_name="meta-llama/Llama-3.2-1B"
+)
+
+# Read the LlamaParse key from the environment rather than hardcoding it
+parser = LlamaParse(api_key=os.getenv("LLAMA_INDEX_API"), result_type='markdown')
+file_extractor = {'.pdf': parser}
+documents = SimpleDirectoryReader('data/', file_extractor=file_extractor).load_data()
+
+embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+vector_index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
+query_engine = vector_index.as_query_engine(llm=llm)
+
+# Run a query, retrying on read timeouts from the inference API
+def query_with_retry(query, max_retries=3, wait_time=5):
+    for attempt in range(max_retries):
+        try:
+            start_time = datetime.now()
+            response = query_engine.query(query)
+            end_time = datetime.now()
+            duration = (end_time - start_time).total_seconds()
+            print(f"Query completed in {duration:.2f} seconds.\n {response}")
+            return response
+        except httpx.ReadTimeout:
+            if attempt < max_retries - 1:
+                print(f"Timeout occurred. Retrying in {wait_time} seconds...")
+                time.sleep(wait_time)
+            else:
+                raise
+        except Exception as e:
+            print(f"An error occurred: {e}")
+            break
+
+if __name__ == "__main__":
+    q3 = ('Your task is to act as my personal [UHV] professor. Provide a detailed, '
+          'well-structured explanation on the topic of [What are the programs needed to '
+          'achieve the comprehensive human goal?]. Begin with an engaging introduction, '
+          'followed by a comprehensive description, and break down key concepts under '
+          'relevant subheadings. The content should be thorough and professionally written, '
+          'similar to educational resources found on sites like GeeksforGeeks, JavaTpoint, '
+          'and other learning platforms.')
+    print(query_with_retry(q3))
data/abc.pdf ADDED
@@ -0,0 +1 @@
+kjrkga h3woifh'2p3
markdowm.py ADDED
@@ -0,0 +1,155 @@
+description = '''
+# 📄 **Document QA Bot: A RAG-Based Application for Interactive Document Querying**
+
+Welcome to the Document QA Bot, a sophisticated Retrieval-Augmented Generation (RAG) application that utilizes **LlamaIndex** and **Hugging Face** models to answer questions based on documents you upload. This bot is designed to empower you with rapid, insightful responses, providing a choice of language models (LLMs) and embedding models that cater to various requirements, including performance, accuracy, and response time.
+
+## ✨ **Application Overview**
+With Document QA Bot, you can interactively query your document, receive contextual answers, and dynamically switch between LLMs as needed for optimal results. The bot supports various file formats, allowing you to upload and analyze different types of documents and even some image formats.
+
+### **Key Features**
+- **Choice of Models:** Access a list of powerful LLMs and embedding models for optimal results.
+- **Flexible Document Support:** Multiple file types supported, including images.
+- **Real-time Interaction:** Easily switch between models for experimentation and fine-tuning answers.
+- **User-Friendly:** Seamless experience powered by Gradio's intuitive interface.
+
+---
+
+## 🚀 **Steps to Use the Document QA Bot**
+
+1. **Upload Your File**
+   Begin by uploading a document. Supported formats include `.pdf`, `.docx`, `.txt`, `.csv`, `.xlsx`, `.pptx`, `.html`, `.jpg`, `.png`, and more.
+
+2. **Select Embedding Model**
+   Choose an embedding model to parse and index the document’s contents, then submit. Wait for the confirmation message that the document has been successfully indexed.
+
+3. **Choose a Language Model (LLM)**
+   Pick an LLM from the dropdown to tailor the bot’s response style and accuracy.
+
+4. **Start Chatting**
+   Ask questions about your document! You can switch between LLMs as needed for different insights or to test model behavior on the same question.
+
+---
+
+## ⚙️ **How the Application Works**
+
+Upon uploading a document, the bot utilizes **LlamaParse** to parse its content. The parsed data is then indexed with the selected embedding model, generating a vector representation that enables quick and relevant responses. When you ask questions, the chosen LLM interprets the document context to generate responses specific to the content uploaded.
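+
+A minimal sketch of that pipeline in LlamaIndex (the file name, API key, and question below are placeholders; the models shown are the app's defaults):
+
+```python
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_parse import LlamaParse
+
+parser = LlamaParse(api_key="<your-llamaparse-key>", result_type="markdown")  # placeholder key
+documents = SimpleDirectoryReader(input_files=["your_file.pdf"], file_extractor={".pdf": parser}).load_data()
+embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
+llm = HuggingFaceInferenceAPI(model_name="mistralai/Mixtral-8x7B-Instruct-v0.1", token="<your-hf-token>")  # placeholder token
+print(index.as_query_engine(llm=llm).query("What is this document about?"))  # placeholder question
+```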
+
+---
+
+## 🔍 **Available LLMs and Embedding Models**
+
+### **Embedding Models** (For indexing document content)
+1. **`BAAI/bge-large-en`**
+   - **Size**: 335M parameters
+   - **Best For**: Complex, detailed embeddings; slower but yields high accuracy.
+2. **`BAAI/bge-small-en-v1.5`**
+   - **Size**: 33.4M parameters
+   - **Best For**: Faster embeddings, ideal for lighter workloads and quick responses.
+3. **`NeuML/pubmedbert-base-embeddings`**
+   - **Size**: 768-dimensional dense vector space
+   - **Best For**: Biomedical or medical-related text; highly specialized.
+4. **`BAAI/llm-embedder`**
+   - **Size**: 109M parameters
+   - **Best For**: Basic embeddings for straightforward use cases.
+
+### **LLMs** (For generating answers)
+1. **`mistralai/Mixtral-8x7B-Instruct-v0.1`**
+   - **Size**: 46.7B parameters
+   - **Purpose**: Demonstrates compelling performance with minimal fine-tuning. Suited for unmoderated or exploratory use.
+2. **`meta-llama/Meta-Llama-3-8B-Instruct`**
+   - **Size**: 8.03B parameters
+   - **Purpose**: Optimized for dialogue, emphasizing safety and helpfulness. Excellent for structured, instructive responses.
+3. **`mistralai/Mistral-7B-Instruct-v0.2`**
+   - **Size**: 7.24B parameters
+   - **Purpose**: Fine-tuned for effectiveness; lacks moderation, useful for quick demonstration purposes.
+4. **`tiiuae/falcon-7b-instruct`**
+   - **Size**: 7.22B parameters
+   - **Purpose**: Robust open-source model for inference, leveraging large-scale data for highly contextual responses.
+
+---
+
+## 🔗 **Best Embedding Model Combinations for Optimal Performance in RAG**
+
+The choice of embedding model plays a crucial role in determining the speed and accuracy of document responses. Since you can dynamically switch LLMs during the chat, choosing an appropriate embedding model at the outset will significantly influence response quality and efficiency. Below is a guide to the best embedding models for various scenarios, based on the trade-off between time efficiency and answer accuracy.
+
+| **Scenario** | **Embedding Model** | **Strengths** | **Trade-Offs** |
+|:---:|:---:|:---:|:---:|
+| **Fastest Response** | `BAAI/bge-small-en-v1.5` | Speed-oriented, ideal for high-frequency querying | May miss nuanced details |
+| **High Accuracy for Large Texts** | `BAAI/bge-large-en` | High accuracy, captures complex document structure | Slower response time |
+| **Balanced General Purpose** | `BAAI/llm-embedder` | Reliable, quick response, adaptable across topics | Moderate accuracy, general use case |
+| **Biomedical & Specialized Text** | `NeuML/pubmedbert-base-embeddings` | Optimized for medical and scientific text | Specialized, slightly slower |
+
+---
+
+## 📂 **Supported File Formats**
+
+The bot supports a range of document formats, making it versatile for various data sources. Below are the currently supported formats:
+- **Documents**: `.pdf`, `.docx`, `.doc`, `.txt`, `.csv`, `.xlsx`, `.pptx`, `.html`
+- **Images**: `.jpg`, `.jpeg`, `.png`, `.webp`, `.svg`
+
+---
+
+## 🎯 **Use Cases**
+
+1. **Educational Research**
+   Upload research papers or study materials and get precise answers for revision or note-taking.
+
+2. **Corporate Data Analysis**
+   Interrogate reports, presentations, or financial data for quick insights without reading extensive documents.
+
+3. **Legal Document Analysis**
+   Analyze lengthy legal documents by querying clauses, terms, and specific details.
+
+4. **Healthcare and Scientific Research**
+   Access detailed insights into medical or scientific documents with models trained on domain-specific data.
+
+---
+
+### 🌟 **Get Started Today and Experience Document-Centric Question Answering**
+Whether you're a student, researcher, or professional, the Document QA Bot is your go-to tool for interactive, accurate document analysis. Upload your file, select your model, and dive into a seamless question-answering experience tailored to your document's unique content.
+'''
+
+guide = '''
+### Embedding Models and Trade-Offs
+
+| **Embedding Model** | **Speed (Vector Index)** | **Advantages** | **Trade-Offs** |
+|---|---|---|---|
+| `BAAI/bge-small-en-v1.5` | **Fastest** | Ideal for quick indexing | May miss nuanced details |
+| `BAAI/llm-embedder` | **Fast** | Balanced performance and detail | Slightly less precise than large models |
+| `BAAI/bge-large-en` | **Slow** | Best overall precision and detail | Slower due to complexity |
+
+### Language Models (LLMs) and Use Cases
+
+| **LLM** | **Best Use Case** |
+|---|---|
+| `mistralai/Mixtral-8x7B-Instruct-v0.1` | Works well for **both short and long answers** |
+| `meta-llama/Meta-Llama-3-8B-Instruct` | Ideal for **long-length answers** |
+| `tiiuae/falcon-7b-instruct` | Best suited for **short-length answers** |
+'''
+
+footer = """
+<div style="background-color: #1d2938; color: white; padding: .2rem 35px; width: 100%; bottom: 0; left: 0; display: flex; justify-content: space-between; align-items: center; box-sizing: border-box; font-size: 16px;">
+    <div style="text-align: left;">
+        <p style="margin: 0;">&copy; 2024</p>
+    </div>
+    <div style="text-align: center; flex-grow: 1;">
+        <p style="margin: 0;">This website is made with ❤ by SARATH CHANDRA</p>
+    </div>
+    <div class="social-links" style="display: flex; gap: 20px; justify-content: flex-end; align-items: center;">
+        <a href="https://github.com/21bq1a4210" target="_blank" style="text-align: center;">
+            <img src="data:image/png;base64,{}" alt="GitHub" width="40" height="40" style="display: block; margin: 0 auto;">
+            <span style="font-size: 14px;">GitHub</span>
+        </a>
+        <a href="https://www.linkedin.com/in/sarath-chandra-bandreddi-07393b1aa/" target="_blank" style="text-align: center;">
+            <img src="data:image/png;base64,{}" alt="LinkedIn" width="40" height="40" style="display: block; margin: 0 auto;">
+            <span style="font-size: 14px;">LinkedIn</span>
+        </a>
+        <a href="https://21bq1a4210.github.io/MyPortfolio-/" target="_blank" style="text-align: center;">
+            <img src="data:image/png;base64,{}" alt="Portfolio" width="40" height="40" style="display: block; margin-right: 40px;">
+            <span style="font-size: 14px;">Portfolio</span>
+        </a>
+    </div>
+</div>
+"""
requirements.txt ADDED
@@ -0,0 +1,7 @@
+huggingface-hub==0.23.5
+llama-index==0.11.17
+llama-index-llms-huggingface==0.3.5
+llama-index-embeddings-huggingface==0.3.1
+llama-index-embeddings-huggingface-api==0.2.1
+llama-index-llms-huggingface-api==0.2.0
+python-dotenv