import fitz  # PyMuPDF
import gradio as gr
import requests
from bs4 import BeautifulSoup
import urllib.parse
import random
import os
from dotenv import load_dotenv
import shutil
import tempfile
import re
import unicodedata
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.probability import FreqDist
import nltk

# Download necessary NLTK data
nltk.download('punkt')
nltk.download('stopwords')

load_dotenv()  # Load environment variables from .env file

# Read the Hugging Face API token from the environment (.env should define HUGGINGFACE_TOKEN)
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

def clear_cache():
    try:
        # Clear Gradio cache
        cache_dir = tempfile.gettempdir()
        shutil.rmtree(os.path.join(cache_dir, "gradio"), ignore_errors=True)
        
        # Clear any custom cache you might have
        # For example, if you're caching PDF files or search results:
        if os.path.exists("output_summary.pdf"):
            os.remove("output_summary.pdf")
        
        # Add any other cache clearing operations here
        
        print("Cache cleared successfully.")
        return "Cache cleared successfully."
    except Exception as e:
        print(f"Error clearing cache: {e}")
        return f"Error clearing cache: {e}"

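# Dashboard query templates; "{company}" is replaced with the user-supplied company name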
PREDEFINED_QUERIES = {
    "Recent Earnings": {
        "query": "{company} recent quarterly earnings",
        "instructions": "Provide the most recent quarterly earnings data for {company}. Include revenue, net income, loan growth, deposit growth if any, EPS and asset quality. Specify the exact quarter and year."
    },
    "Recent News": {
        "query": "{company} recent news",
        "instructions": "Summarize the most recent significant news about {company}. Focus on events that could impact the company's financial performance or stock price."
    },
    "Credit Rating": {
        "query": "{company} current credit rating",
        "instructions": "Provide the most recent credit rating for {company}. Include the rating agency, the exact rating, and the date it was issued or last confirmed."
    },
    "Earnings Call Transcript": {
        "query": "{company} most recent earnings call transcript",
        "instructions": "Summarize key points from {company}'s most recent earnings call. Include date of the call, major financial highlights, and any significant forward-looking statements."
    }
}
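
# Pool of desktop User-Agent strings; one is chosen at random per request to reduce
# the chance of Google blocking repeated automated queries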
_useragent_list = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36",
]

# Function to extract visible text from HTML content of a webpage
def extract_text_from_webpage(html):
    print("Extracting text from webpage...")
    soup = BeautifulSoup(html, 'html.parser')
    for script in soup(["script", "style"]):
        script.extract()  # Remove scripts and styles
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    text = '\n'.join(chunk for chunk in chunks if chunk)
    print(f"Extracted text length: {len(text)}")
    return text

# Function to perform a Google search and retrieve results
def google_search(term, num_results=5, lang="en", timeout=5, safe="active", ssl_verify=None):
    """Performs a Google search and returns the results."""
    print(f"Searching for term: {term}")
    escaped_term = urllib.parse.quote_plus(term)
    start = 0
    all_results = []
    max_chars_per_page = 8000  # Limit the number of characters from each webpage to stay under the token limit

    with requests.Session() as session:
        while start < num_results:
            print(f"Fetching search results starting from: {start}")
            try:
                # Choose a random user agent
                user_agent = random.choice(_useragent_list)
                headers = {
                    'User-Agent': user_agent
                }
                print(f"Using User-Agent: {headers['User-Agent']}")

                resp = session.get(
                    url="https://www.google.com/search",
                    headers=headers,
                    params={
                        "q": term,
                        "num": num_results - start,
                        "hl": lang,
                        "start": start,
                        "safe": safe,
                    },
                    timeout=timeout,
                    verify=ssl_verify,
                )
                resp.raise_for_status()
            except requests.exceptions.RequestException as e:
                print(f"Error fetching search results: {e}")
                break

            soup = BeautifulSoup(resp.text, "html.parser")
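            # Organic results are wrapped in <div class="g">; this selector may break if Google changes its markup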
            result_block = soup.find_all("div", attrs={"class": "g"})
            if not result_block:
                print("No more results found.")
                break
            keywords = term.split()  # Use the search term as keywords for filtering

            for result in result_block:
                link = result.find("a", href=True)
                if link:
                    link = link["href"]
                    print(f"Found link: {link}")
                    try:
                        webpage = session.get(link, headers=headers, timeout=timeout)
                        webpage.raise_for_status()
                        visible_text = extract_text_from_webpage(webpage.text)
                        
                        # Apply preprocessing to the visible text
                        preprocessed_text = preprocess_web_content(visible_text, keywords)
                        
                        if len(preprocessed_text) > max_chars_per_page:
                            preprocessed_text = preprocessed_text[:max_chars_per_page] + "..."
                        all_results.append({"link": link, "text": preprocessed_text})
                    except requests.exceptions.RequestException as e:
                        print(f"Error fetching or processing {link}: {e}")
                        all_results.append({"link": link, "text": None})
                else:
                    print("No link found in result.")
                    all_results.append({"link": None, "text": None})
            start += len(result_block)
    print(f"Total results fetched: {len(all_results)}")
    return all_results

def preprocess_text(text):
    # Remove HTML tags
    text = BeautifulSoup(text, "html.parser").get_text()
    
    # Remove URLs
    text = re.sub(r'http\S+|www\.\S+', '', text)
    
    # Remove special characters, but keep digits and basic sentence punctuation so that
    # downstream sentence tokenization and numeric facts (revenue, EPS, dates) survive
    text = re.sub(r'[^a-zA-Z0-9\s.,!?%$-]', '', text)
    
    # Remove extra whitespace
    text = ' '.join(text.split())
    
    # Convert to lowercase
    text = text.lower()
    
    return text

def remove_boilerplate(text):
    # List of common boilerplate phrases to remove
    boilerplate = [
        "all rights reserved",
        "terms of service",
        "privacy policy",
        "cookie policy",
        "copyright ©",
        "follow us on social media"
    ]
    
    for phrase in boilerplate:
        text = text.replace(phrase, '')
    
    return text

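# Keep only the sentences that mention at least one of the search keywords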
def keyword_filter(text, keywords):
    sentences = sent_tokenize(text)
    filtered_sentences = [sentence for sentence in sentences if any(keyword.lower() in sentence.lower() for keyword in keywords)]
    return ' '.join(filtered_sentences)

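# Frequency-based extractive summary: score each sentence by the summed frequency of its
# non-stopword words and return the top N sentences in their original order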
def summarize_text(text, num_sentences=3):
    # Tokenize the text into words
    words = word_tokenize(text)
    
    # Remove stopwords
    stop_words = set(stopwords.words('english'))
    words = [word for word in words if word.lower() not in stop_words]
    
    # Calculate word frequencies
    freq_dist = FreqDist(words)
    
    # Score sentences based on word frequencies
    sentences = sent_tokenize(text)
    sentence_scores = {}
    for sentence in sentences:
        for word in word_tokenize(sentence.lower()):
            if word in freq_dist:
                if sentence not in sentence_scores:
                    sentence_scores[sentence] = freq_dist[word]
                else:
                    sentence_scores[sentence] += freq_dist[word]
    
    # Get the top N sentences with highest scores
    summary_sentences = sorted(sentence_scores, key=sentence_scores.get, reverse=True)[:num_sentences]
    
    # Sort the selected sentences in the order they appear in the original text
    summary_sentences = sorted(summary_sentences, key=text.index)
    
    return ' '.join(summary_sentences)

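# Preprocessing pipeline for scraped pages: clean -> strip boilerplate -> keyword-filter -> summarize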
def preprocess_web_content(content, keywords):
    # Apply basic preprocessing
    preprocessed_text = preprocess_text(content)
    
    # Remove boilerplate
    preprocessed_text = remove_boilerplate(preprocessed_text)
    
    # Apply keyword filtering
    filtered_text = keyword_filter(preprocessed_text, keywords)
    
    # Summarize the text
    summarized_text = summarize_text(filtered_text)
    
    return summarized_text


# Function to format the prompt for the Hugging Face API
def format_prompt(query, search_results, instructions):
    formatted_results = ""
    for result in search_results:
        link = result["link"]
        text = result["text"]
        if link:
            formatted_results += f"URL: {link}\nContent: {text or 'No content extracted.'}\n{'-' * 80}\n"
        else:
            formatted_results += "No link found.\n" + '-' * 80 + '\n'

    prompt = f"""Instructions: {instructions}
User Query: {query}

Web Search Results:
{formatted_results}

Important: Provide a precise and factual response based solely on the information given above. Include specific dates, numbers, and sources where available. If exact information is not provided in the search results, clearly state that the information is not available in the given context. Do not make assumptions or provide information that is not directly supported by the search results.

Assistant:"""
    return prompt

# Function to generate text using Hugging Face API
def generate_text(input_text, temperature=0.3, repetition_penalty=1.2, top_p=0.9):
    print("Generating text using Hugging Face API...")
    endpoint = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
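    # The Inference API typically returns the prompt followed by the completion
    # (return_full_text defaults to true), which is why scrape_and_display later
    # slices the output from "Assistant:"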
    headers = {
        "Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}",
        "Content-Type": "application/json"
    }
    data = {
        "inputs": input_text,
        "parameters": {
            "max_new_tokens": 1000,  # Reduced to focus on more concise answers
            "temperature": temperature,
            "repetition_penalty": repetition_penalty,
            "top_p": top_p,
            "do_sample": True
        }
    }

    try:
        response = requests.post(endpoint, headers=headers, json=data)
        response.raise_for_status()

        # Check if response is JSON
        try:
            json_data = response.json()
        except ValueError:
            print("Response is not JSON.")
            return None

        # Extract generated text from response JSON
        if isinstance(json_data, list):
            # Handle list response (if applicable for your use case)
            generated_text = json_data[0].get("generated_text") if json_data else None
        elif isinstance(json_data, dict):
            # Handle dictionary response
            generated_text = json_data.get("generated_text")
        else:
            print("Unexpected response format.")
            return None

        if generated_text is not None:
            print("Text generation complete using Hugging Face API.")
            print(f"Generated text: {generated_text}")  # Debugging line
            return generated_text
        else:
            print("Generated text not found in response.")
            return None

    except requests.exceptions.RequestException as e:
        print(f"Error generating text using Hugging Face API: {e}")
        return None

# Function to read and extract text from a PDF
def read_pdf(file_obj):
    # Gradio may pass either a filepath string or a tempfile wrapper with a .name attribute
    pdf_path = file_obj if isinstance(file_obj, str) else file_obj.name
    with fitz.open(pdf_path) as document:
        text = ""
        for page_num in range(document.page_count):
            page = document.load_page(page_num)
            text += page.get_text()
        return text

# Function to format the prompt with instructions for text generation
def format_prompt_with_instructions(text, instructions):
    prompt = f"{instructions}\n\n{text}\n\nAssistant:"
    return prompt

# Function to save text to a PDF
def save_text_to_pdf(text, output_path):
    print(f"Saving text to PDF at {output_path}...")
    doc = fitz.open()  # Create a new PDF document
    page = doc.new_page()  # Create a new page

    # Set the page margins
    margin = 50  # 50 points margin
    page_width = page.rect.width
    page_height = page.rect.height
    text_width = page_width - 2 * margin
    text_height = page_height - 2 * margin

    # Define font size and line spacing
    font_size = 9
    line_spacing = font_size  # single spacing
    fontname = "times-roman"  # Use a supported font name

    # Process the text to handle line breaks and paragraphs
    paragraphs = text.split("\n")  # Split text into paragraphs
    y_position = margin

    for paragraph in paragraphs:
        words = paragraph.split()
        current_line = ""

        for word in words:
            word = str(word)  # Ensure word is treated as string
            # Calculate the length of the current line plus the new word
            current_line_length = fitz.get_text_length(current_line + " " + word, fontsize=font_size, fontname=fontname)
            if current_line_length <= text_width:
                current_line += " " + word
            else:
                page.insert_text(fitz.Point(margin, y_position), current_line.strip(), fontsize=font_size, fontname=fontname)
                y_position += line_spacing
                if y_position + line_spacing > page_height - margin:
                    page = doc.new_page()  # Add a new page if text exceeds page height
                    y_position = margin
                current_line = word

        # Add the last line of the paragraph
        page.insert_text(fitz.Point(margin, y_position), current_line.strip(), fontsize=font_size, fontname=fontname)
        y_position += line_spacing

        # Add extra space for new paragraph
        y_position += line_spacing
        if y_position + line_spacing > page_height - margin:
            page = doc.new_page()  # Add a new page if text exceeds page height
            y_position = margin

    doc.save(output_path)  # Save the PDF to the specified path
    print("PDF saved successfully.")

# Integrated function to perform web scraping, formatting, and text generation
def scrape_and_display(query, num_results, instructions, web_search=True, temperature=0.7, repetition_penalty=1.0, top_p=0.9):
    print(f"Scraping and displaying results for query: {query} with num_results: {num_results}")
    if web_search:
        search_results = google_search(query, num_results)
        formatted_prompt = format_prompt(query, search_results, instructions)
        generated_summary = generate_text(formatted_prompt, temperature=temperature, repetition_penalty=repetition_penalty, top_p=top_p)
    else:
        formatted_prompt = format_prompt_with_instructions(query, instructions)
        generated_summary = generate_text(formatted_prompt, temperature=temperature, repetition_penalty=repetition_penalty, top_p=top_p)
    print("Scraping and display complete.")
    if generated_summary:
        # Extract and return text starting from "Assistant:"
        assistant_index = generated_summary.find("Assistant:")
        if assistant_index != -1:
            generated_summary = generated_summary[assistant_index:]
        else:
            generated_summary = "Assistant: No response generated."
    print(f"Generated summary: {generated_summary}")  # Debugging line
    return generated_summary

# Main Gradio interface function
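# Three modes: (1) a dashboard of predefined queries for a company, (2) analysis of an uploaded PDF, (3) a free-form web-search query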
def gradio_interface(query, use_dashboard, use_pdf, pdf, num_results, custom_instructions, temperature, repetition_penalty, top_p, clear_cache_flag):
    if clear_cache_flag:
        # The interface has two outputs (text, file), so return a placeholder for the PDF
        return clear_cache(), None
    
    if use_dashboard:
        results = []
        for query_type, query_info in PREDEFINED_QUERIES.items():
            formatted_query = query_info['query'].format(company=query)
            formatted_instructions = query_info['instructions'].format(company=query)
            result = scrape_and_display(formatted_query, num_results=num_results, instructions=formatted_instructions, web_search=True, temperature=temperature, repetition_penalty=repetition_penalty, top_p=top_p)
            results.append(f"**{query_type}**\n\n{result}\n\n")
        generated_summary = "\n".join(results)
    elif use_pdf and pdf is not None:
        pdf_text = read_pdf(pdf)
        generated_summary = scrape_and_display(pdf_text, num_results=0, instructions=custom_instructions, web_search=False, temperature=temperature, repetition_penalty=repetition_penalty, top_p=top_p)
    else:
        generated_summary = scrape_and_display(query, num_results=num_results, instructions=custom_instructions, web_search=True, temperature=temperature, repetition_penalty=repetition_penalty, top_p=top_p)
    
    output_pdf_path = "output_summary.pdf"
    save_text_to_pdf(generated_summary, output_pdf_path)
    
    return generated_summary, output_pdf_path

# Deploy Gradio Interface
gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Company Name or Query"),
        gr.Checkbox(label="Use Dashboard"),
        gr.Checkbox(label="Use PDF"),
        gr.File(label="Upload PDF"),
        gr.Slider(minimum=1, maximum=20, value=5, step=1, label="Number of Results"),
        gr.Textbox(label="Custom Instructions"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Repetition Penalty"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top p"),
        gr.Checkbox(label="Clear Cache", visible=False)
    ],
    outputs=["text", gr.File(label="Generated PDF")],
    title="Financial Analyst AI Assistant",
    description="Enter a company name to get a financial dashboard, or enter a custom query. Optionally, upload a PDF for analysis. Adjust parameters as needed for optimal results.",
    allow_flagging="never"
).launch(share=True)