Update app.py
app.py
CHANGED
@@ -1,20 +1,19 @@
-
-
-
-
 import gradio as gr
 import weaviate
 from weaviate.embedded import EmbeddedOptions
 import os
-from openai import OpenAI
+from openai import AsyncOpenAI
 from dotenv import load_dotenv
 import textwrap
+import asyncio
+import aiohttp
+from functools import wraps
 
 # Load environment variables
 load_dotenv()
 
-# Set up OpenAI client
-openai_client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
+# Set up AsyncOpenAI client
+openai_client = AsyncOpenAI(api_key=os.getenv('OPENAI_API_KEY'))
 
 # Connect to Weaviate
 client = weaviate.Client(
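Note: AsyncOpenAI mirrors the synchronous OpenAI client's surface, but every call returns an awaitable, which is why the call sites below gain await. The newly added aiohttp import is not referenced anywhere in this diff and could likely be dropped. A minimal standalone check of the async client, assuming OPENAI_API_KEY is set (hypothetical, not part of app.py):

import asyncio
import os

from openai import AsyncOpenAI

async def main():
    client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    # Same call path as the sync client, but the call must be awaited
    response = await client.embeddings.create(input="ping", model="text-embedding-3-large")
    print(len(response.data[0].embedding))  # text-embedding-3-large returns 3072 dims by default

asyncio.run(main())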
@@ -28,35 +27,47 @@ client = weaviate.Client(
 # Get the collection name from environment variable
 COLLECTION_NAME = os.getenv('WEAVIATE_COLLECTION_NAME')
 
-def get_embedding(text):
-    response = openai_client.embeddings.create(
+# Async-compatible caching decorator
+def async_lru_cache(maxsize=128):
+    cache = {}
+
+    def decorator(func):
+        @wraps(func)
+        async def wrapper(*args, **kwargs):
+            key = str(args) + str(kwargs)
+            if key not in cache:
+                if len(cache) >= maxsize:
+                    cache.pop(next(iter(cache)))
+                cache[key] = await func(*args, **kwargs)
+            return cache[key]
+        return wrapper
+    return decorator
+
+@async_lru_cache(maxsize=1000)
+async def get_embedding(text):
+    response = await openai_client.embeddings.create(
         input=text,
         model="text-embedding-3-large"
     )
     return response.data[0].embedding
 
-def search_multimodal(query: str, limit: int = 30, alpha: float = 0.6):
-    query_vector = get_embedding(query)
+async def search_multimodal(query: str, limit: int = 30, alpha: float = 0.6):
+    query_vector = await get_embedding(query)
 
     try:
-        response = (
-            client.query
-            .get(COLLECTION_NAME, ["content_type", "url", "source_document", "page_number",
-                                   "paragraph_number", "text", "image_path", "description", "table_content"])
-            .with_hybrid(
-                query=query,
-                vector=query_vector,
-                alpha=alpha
-            )
+        response = await asyncio.to_thread(
+            client.query.get(COLLECTION_NAME, ["content_type", "url", "source_document", "page_number",
+                                               "paragraph_number", "text", "image_path", "description", "table_content"])
+            .with_hybrid(query=query, vector=query_vector, alpha=alpha)
             .with_limit(limit)
-            .do()
+            .do
         )
         return response['data']['Get'][COLLECTION_NAME]
     except Exception as e:
         print(f"An error occurred during the search: {str(e)}")
         return []
 
-def generate_response(query: str, context: str) -> str:
+async def generate_response(query: str, context: str) -> str:
     prompt = f"""
 You are an AI assistant with extensive expertise in the semiconductor industry. Your knowledge spans a wide range of companies, technologies, and products, including but not limited to: System-on-Chip (SoC) designs, Field-Programmable Gate Arrays (FPGAs), Microcontrollers, Integrated Circuits (ICs), semiconductor manufacturing processes, and emerging technologies like quantum computing and neuromorphic chips.
 Use the following context, your vast knowledge, and the user's question to generate an accurate, comprehensive, and insightful answer. While formulating your response, follow these steps internally:
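Note: functools.lru_cache cannot wrap a coroutine function directly, since it would cache the coroutine object from the first call, and a coroutine can only be awaited once. The async_lru_cache decorator above awaits the wrapped function and stores its result instead. Eviction pops the oldest insertion, so it behaves as a FIFO cache rather than a true LRU, and two concurrent misses on the same key will both call the wrapped function. Also note that passing the bound .do method without parentheses to asyncio.to_thread is deliberate: to_thread receives the callable and runs the blocking Weaviate request in a worker thread, keeping the event loop free. A runnable sketch of the caching pattern, with the decorator copied from the diff and slow_double as an illustrative stand-in:

import asyncio
from functools import wraps

def async_lru_cache(maxsize=128):
    cache = {}
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            key = str(args) + str(kwargs)
            if key not in cache:
                if len(cache) >= maxsize:
                    cache.pop(next(iter(cache)))  # evict the oldest entry (FIFO)
                cache[key] = await func(*args, **kwargs)  # cache the result, not the coroutine
            return cache[key]
        return wrapper
    return decorator

@async_lru_cache(maxsize=2)
async def slow_double(x):
    await asyncio.sleep(0.1)  # stand-in for a network round trip
    return 2 * x

async def main():
    print(await slow_double(21))  # miss: sleeps, then caches 42
    print(await slow_double(21))  # hit: returns 42 immediately

asyncio.run(main())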
@@ -83,7 +94,7 @@ User Question: {query}
 Based on the above context and your extensive knowledge of the semiconductor industry, provide your detailed, accurate, and grounded response below. Remember, only include information you're confident is correct, and clearly state any uncertainties:
 """
 
-    response = openai_client.chat.completions.create(
+    response = await openai_client.chat.completions.create(
         model="gpt-4o",
         messages=[
             {"role": "system", "content": "You are an expert Semi Conductor industry analyst"},
@@ -94,19 +105,22 @@ Based on the above context and your extensive knowledge of the semiconductor industry
 
     return response.choices[0].message.content
 
-def esg_analysis(user_query: str):
-    search_results = search_multimodal(user_query)
+def process_search_result(item):
+    if item['content_type'] == 'text':
+        return f"Text from {item['source_document']} (Page {item['page_number']}, Paragraph {item['paragraph_number']}): {item['text']}\n\n"
+    elif item['content_type'] == 'image':
+        return f"Image Description from {item['source_document']} (Page {item['page_number']}, Path: {item['image_path']}): {item['description']}\n\n"
+    elif item['content_type'] == 'table':
+        return f"Table Description from {item['source_document']} (Page {item['page_number']}): {item['description']}\n\n"
+    return ""
+
+async def esg_analysis(user_query: str):
+    search_results = await search_multimodal(user_query)
 
-    context = ""
-    for item in search_results:
-        if item['content_type'] == 'text':
-            context += f"Text from {item['source_document']} (Page {item['page_number']}, Paragraph {item['paragraph_number']}): {item['text']}\n\n"
-        elif item['content_type'] == 'image':
-            context += f"Image Description from {item['source_document']} (Page {item['page_number']}, Path: {item['image_path']}): {item['description']}\n\n"
-        elif item['content_type'] == 'table':
-            context += f"Table Description from {item['source_document']} (Page {item['page_number']}): {item['description']}\n\n"
-
-    response = generate_response(user_query, context)
+    context_parts = await asyncio.gather(*[asyncio.to_thread(process_search_result, item) for item in search_results])
+    context = "".join(context_parts)
+
+    response = await generate_response(user_query, context)
 
     sources = []
     for item in search_results[:5]:  # Limit to top 5 sources
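Note: the asyncio.gather over asyncio.to_thread calls fans the formatting work out to worker threads, but process_search_result is pure string formatting and never blocks, so a plain "".join(process_search_result(item) for item in search_results) would build the same context with less overhead; to_thread earns its keep on blocking calls like the Weaviate .do above. A self-contained sketch of the pattern as committed (the sample item is illustrative):

import asyncio

def process_search_result(item):
    # trimmed copy of the helper above (text branch only)
    if item['content_type'] == 'text':
        return f"Text from {item['source_document']} (Page {item['page_number']}, Paragraph {item['paragraph_number']}): {item['text']}\n\n"
    return ""

async def build_context(search_results):
    # same pattern as esg_analysis: format each hit in a worker thread, then join
    parts = await asyncio.gather(*[asyncio.to_thread(process_search_result, item) for item in search_results])
    return "".join(parts)

sample = [{"content_type": "text", "source_document": "report.pdf",
           "page_number": 1, "paragraph_number": 2, "text": "Sample paragraph."}]
print(asyncio.run(build_context(sample)))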
@@ -126,8 +140,8 @@ def esg_analysis(user_query: str):
 def wrap_text(text, width=120):
     return textwrap.fill(text, width=width)
 
-def gradio_interface(user_question):
-    ai_response, sources = esg_analysis(user_question)
+async def gradio_interface(user_question):
+    ai_response, sources = await esg_analysis(user_question)
 
     # Format AI response
     formatted_response = f"""
@@ -152,7 +166,7 @@ def gradio_interface(user_question):
     return formatted_response, source_text
 
 iface = gr.Interface(
-    fn=gradio_interface,
+    fn=lambda user_question: asyncio.run(gradio_interface(user_question)),
     inputs=gr.Textbox(lines=2, placeholder="Enter your question about the semiconductor industry..."),
     outputs=[
         gr.Markdown(label="AI Response"),
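Note: the lambda wraps the coroutine in asyncio.run, which starts a fresh event loop on every request and raises RuntimeError if called from a thread that already has a running loop. Gradio also accepts coroutine functions directly as fn, so a simpler wiring is likely possible (a sketch assuming a Gradio version with native async support; the second output label is assumed):

iface = gr.Interface(
    fn=gradio_interface,  # the coroutine itself; Gradio awaits it, no asyncio.run wrapper
    inputs=gr.Textbox(lines=2, placeholder="Enter your question about the semiconductor industry..."),
    outputs=[gr.Markdown(label="AI Response"),
             gr.Markdown(label="Sources")],  # second output label assumed for illustration
)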
@@ -164,4 +178,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    iface.launch(server_name="0.0.0.0", server_port=7860)
+    iface.launch(server_name="0.0.0.0", server_port=7860, share=True)
|
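Note: share=True in the new launch call additionally requests a temporary public gradio.live link alongside the local server on port 7860. For a quick end-to-end check without the UI, a hypothetical smoke test appended to app.py (not part of the commit; requires OPENAI_API_KEY and WEAVIATE_COLLECTION_NAME in .env):

import asyncio

async def smoke_test():
    # drives the full retrieve-then-generate pipeline without the Gradio UI
    answer, sources = await esg_analysis("What are the leading FPGA vendors?")
    print(wrap_text(answer))
    for source in sources:
        print(source)

if __name__ == "__main__":
    asyncio.run(smoke_test())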