import gradio as gr
import weaviate
from weaviate.embedded import EmbeddedOptions
import os
from openai import AsyncOpenAI
from dotenv import load_dotenv
import textwrap
import asyncio
from functools import wraps

# Load environment variables
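# Expects OPENAI_API_KEY, WCS_URL, WCS_API_KEY, and WEAVIATE_COLLECTION_NAME in .env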
load_dotenv()

# Set up AsyncOpenAI client
openai_client = AsyncOpenAI(api_key=os.getenv('OPENAI_API_KEY'))

# Connect to Weaviate
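# The X-OpenAI-Api-Key header lets Weaviate's OpenAI modules call the
# OpenAI API server-side (e.g. for its own vectorization), if configured.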
client = weaviate.Client(
    url=os.getenv('WCS_URL'),
    auth_client_secret=weaviate.auth.AuthApiKey(os.getenv('WCS_API_KEY')),
    additional_headers={
        "X-OpenAI-Api-Key": os.getenv('OPENAI_API_KEY')
    }
)

# Get the collection name from environment variable
COLLECTION_NAME = os.getenv('WEAVIATE_COLLECTION_NAME')

# Async-compatible caching decorator
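# functools.lru_cache doesn't support coroutines, so this dict-backed wrapper
# awaits the wrapped coroutine once per key and stores the result. Eviction
# pops the oldest inserted key (FIFO, an approximation of LRU), and there is
# no locking, so concurrent calls with the same key may both miss the cache.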
def async_lru_cache(maxsize=128):
    cache = {}

    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            key = str(args) + str(kwargs)
            if key not in cache:
                if len(cache) >= maxsize:
                    cache.pop(next(iter(cache)))
                cache[key] = await func(*args, **kwargs)
            return cache[key]
        return wrapper
    return decorator

@async_lru_cache(maxsize=1000)
async def get_embedding(text):
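    # Caching means identical query strings are embedded only once per process.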
    response = await openai_client.embeddings.create(
        input=text,
        model="text-embedding-3-large"
    )
    return response.data[0].embedding

async def search_multimodal(query: str, limit: int = 30, alpha: float = 0.6):
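    # Weaviate hybrid search blends BM25 keyword matching with vector similarity;
    # alpha=0.6 weights the vector score at 60% and the keyword score at 40%.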
    query_vector = await get_embedding(query)
    
    try:
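        # The v3 Weaviate client is synchronous, so the blocking .do() call runs
        # in a worker thread to keep the event loop responsive.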
        response = await asyncio.to_thread(
            client.query.get(COLLECTION_NAME, ["content_type", "url", "source_document", "page_number",
                                               "paragraph_number", "text", "image_path", "description", "table_content"])
            .with_hybrid(query=query, vector=query_vector, alpha=alpha)
            .with_limit(limit)
            .do
        )
        return response['data']['Get'][COLLECTION_NAME]
    except Exception as e:
        print(f"An error occurred during the search: {str(e)}")
        return []

async def generate_response(query: str, context: str) -> str:
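    # Inline the retrieved context into the prompt and ask gpt-4o for a grounded
    # answer; temperature=0 keeps the output as deterministic as the API allows.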
    prompt = f"""
You are an AI assistant with extensive expertise in the semiconductor industry. Your knowledge spans a wide range of companies, technologies, and products, including but not limited to: System-on-Chip (SoC) designs, Field-Programmable Gate Arrays (FPGAs), Microcontrollers, Integrated Circuits (ICs), semiconductor manufacturing processes, and emerging technologies like quantum computing and neuromorphic chips.
Use the following context, your vast knowledge, and the user's question to generate an accurate, comprehensive, and insightful answer. While formulating your response, follow these steps internally:

Analyze the question to identify the main topic and specific information requested.
Evaluate the provided context and identify relevant information.
Retrieve additional relevant knowledge from your semiconductor industry expertise.
Reason and formulate a response by combining context and knowledge.
Generate a detailed response that covers all aspects of the query.
Review and refine your answer for coherence and accuracy.

In your output, provide only the final, polished response. Do not include your step-by-step reasoning or mention the process you followed.
IMPORTANT: Ensure your response is grounded in factual information. Do not hallucinate or invent information. If you're unsure about any aspect of the answer or if the necessary information is not available in the provided context or your knowledge base, clearly state this uncertainty. It's better to admit lack of information than to provide inaccurate details.
Your response should:

Thoroughly and directly address all aspects of the user's question
Rely solely on factual information from the provided context and your reliable knowledge
Include specific examples, data points, or case studies only when you're certain of their accuracy
Explain technical concepts clearly, considering the user may have varying levels of expertise
Clearly indicate any areas where information is limited or uncertain

Context: {context}
User Question: {query}
Based on the above context and your extensive knowledge of the semiconductor industry, provide your detailed, accurate, and grounded response below. Remember, only include information you're confident is correct, and clearly state any uncertainties: 
    """

    response = await openai_client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are an expert Semi Conductor industry analyst"},
            {"role": "user", "content": prompt}
        ],
        temperature=0
    )

    return response.choices[0].message.content

def process_search_result(item):
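    # Flatten one retrieved object into a citation-style snippet for the prompt;
    # unrecognized content types contribute nothing to the context.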
    if item['content_type'] == 'text':
        return f"Text from {item['source_document']} (Page {item['page_number']}, Paragraph {item['paragraph_number']}): {item['text']}\n\n"
    elif item['content_type'] == 'image':
        return f"Image Description from {item['source_document']} (Page {item['page_number']}, Path: {item['image_path']}): {item['description']}\n\n"
    elif item['content_type'] == 'table':
        return f"Table Description from {item['source_document']} (Page {item['page_number']}): {item['description']}\n\n"
    return ""

async def esg_analysis(user_query: str):
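    # End-to-end RAG flow: retrieve multimodal chunks, flatten them into text
    # context, generate a grounded answer, then surface the top five sources.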
    search_results = await search_multimodal(user_query)
    
    # process_search_result is pure string formatting, so no worker threads are needed
    context = "".join(process_search_result(item) for item in search_results)
    
    response = await generate_response(user_query, context)

    sources = []
    for item in search_results[:5]:  # Limit to top 5 sources
        source = {
            "type": item.get("content_type", "Unknown"),
            "document": item.get("source_document", "N/A"),
            "page": item.get("page_number", "N/A"),
        }
        if item.get("content_type") == 'text':
            source["paragraph"] = item.get("paragraph_number", "N/A")
        elif item.get("content_type") == 'image':
            source["image_path"] = item.get("image_path", "N/A")
        sources.append(source)

    return response, sources

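# Fixed-width helper for plain-text output; currently unused by the Gradio flow.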
def wrap_text(text, width=120):
    return textwrap.fill(text, width=width)

async def gradio_interface(user_question):
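    # Gradio can await async handlers natively, so this coroutine is passed to
    # gr.Interface directly below.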
    ai_response, sources = await esg_analysis(user_question)
    
    # Format AI response
    formatted_response = f"""
## AI Response

{ai_response}
    """
    
    # Format sources
    source_text = "## Top 5 Sources\n\n"
    for i, source in enumerate(sources, 1):
        source_text += f"### Source {i}\n"
        source_text += f"- **Type:** {source['type']}\n"
        source_text += f"- **Document:** {source['document']}\n"
        source_text += f"- **Page:** {source['page']}\n"
        if 'paragraph' in source:
            source_text += f"- **Paragraph:** {source['paragraph']}\n"
        if 'image_path' in source:
            source_text += f"- **Image Path:** {source['image_path']}\n"
        source_text += "\n"
    
    return formatted_response, source_text

iface = gr.Interface(
    fn=gradio_interface,  # Gradio awaits async handlers itself; no per-call asyncio.run needed
    inputs=gr.Textbox(lines=2, placeholder="Enter your question about the semiconductor industry..."),
    outputs=[
        gr.Markdown(label="AI Response"),
        gr.Markdown(label="Sources")
    ],
    title="Semiconductor Industry ESG Analysis",
    description="Ask questions about the semiconductor industry and get AI-powered answers with sources.",
    flagging_dir="/app/flagged"  # Specify the flagging directory
)

if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860, share=True)