import gradio as gr
import os
import json
from bs4 import BeautifulSoup
import requests
from huggingface_hub import InferenceClient
# Define global variables
BOT_AVATAR = 'https://automatedstockmining.org/wp-content/uploads/2024/08/south-west-value-mining-logo.webp'
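# HF_TOKEN is read from the environment (on Spaces, typically a repository secret) and is used to
# authenticate the InferenceClient calls below.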
hf_token = os.getenv("HF_TOKEN")
client = InferenceClient(token=hf_token)
custom_css = '''
.gradio-container {
    font-family: 'Roboto', sans-serif;
}
.main-header {
    text-align: center;
    color: #4a4a4a;
    margin-bottom: 2rem;
}
.tab-header {
    font-size: 1.2rem;
    font-weight: bold;
    margin-bottom: 1rem;
}
.custom-chatbot {
    border-radius: 10px;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.custom-button {
    background-color: #3498db;
    color: white;
    border: none;
    padding: 10px 20px;
    border-radius: 5px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}
.custom-button:hover {
    background-color: #2980b9;
}
'''
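# Strip <script> and <style> elements from a page and return only its visible text.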
def extract_text_from_webpage(html):
    soup = BeautifulSoup(html, "html.parser")
    for script in soup(["script", "style"]):
        script.decompose()
    visible_text = soup.get_text(separator=" ", strip=True)
    return visible_text
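# Scrape Google's HTML results page for the query and collect the visible text of each linked page,
# capped at 8,000 characters per page. Parsing the "g" result class is fragile and may break if
# Google changes its markup.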
def search(query):
    term = query
    max_chars_per_page = 8000
    all_results = []
    with requests.Session() as session:
        try:
            resp = session.get(
                url="https://www.google.com/search",
                headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"},
                params={"q": term, "num": 7},
                timeout=5
            )
            resp.raise_for_status()
            soup = BeautifulSoup(resp.text, "html.parser")
            result_block = soup.find_all("div", attrs={"class": "g"})
            for result in result_block:
                link = result.find("a", href=True)
                if link:
                    link = link["href"]
                    try:
                        webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0"}, timeout=5)
                        webpage.raise_for_status()
                        visible_text = extract_text_from_webpage(webpage.text)
                        if len(visible_text) > max_chars_per_page:
                            visible_text = visible_text[:max_chars_per_page]
                        all_results.append({"link": link, "text": visible_text})
                    except requests.exceptions.RequestException as e:
                        print(f"Failed to retrieve {link}: {e}")
                        all_results.append({"link": link, "text": None})
        except requests.exceptions.RequestException as e:
            print(f"Google search failed: {e}")
    return all_results
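# Chat handler for gr.ChatInterface: a generator that yields short progress messages, then streams
# the model's answer. Each search result passed to the model is a dict of the form
# {"link": <url>, "text": <visible page text or None>}.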
def process_query(user_input, history):
    yield 'Preparing your request 🛠️'
    # Ask the model to turn the chat history and latest message into a Google search term
    stream_search = client.chat_completion(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=[{"role": "user", "content": f"Based on this chat history {history} and the user's request '{user_input}', suggest a Google search term in a single line. Do not assume any specific dates; use 'this year', 'this month', etc. Make sure you generate a good search based on the query. For example, if the user asks 'build a DFCF model for Apple', search for something like 'Apple DFCF model', and if they ask for something like a market cap, just search for the company's current market cap. Always provide a good Google search; for example, if they ask for the price of Pulsar Helium stock, just search for it."}],
        max_tokens=400,
        stream=True
    )
    yield 'Generating search term based on your input 🔍'
    # Accumulate content from the streamed response
    streamed_search_query = ""
    for chunk in stream_search:
        content = chunk.choices[0].delta.content or ''
        streamed_search_query += content
    yield 'Searching the web for relevant information 🌐'
    # Perform the web search based on the accumulated query
    search_results = search(streamed_search_query)
    search_results_str = json.dumps(search_results)
    # Ask the model to answer the request using the search results as context
    response = client.chat_completion(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=[{"role": "user", "content": f"YOU ARE AN INVESTMENT CHATBOT. Answer the user's request '{user_input}' using the following information: {search_results_str} and the chat history {history}. Provide a concise, direct answer in no more than 2-3 sentences, with appropriate emojis. If the user asks for a smart sheet, generate up to 3000 tokens analyzing all trends and patterns as though you are a stock analyst; look for every pattern and form conclusions. IF YOU ARE ASKED TO BUILD MODELS OR LIST STOCKS, USE THE SEARCH DATA TO HELP YOU BUILD THEM AND MAKE SURE YOU ACTUALLY GIVE THE VALUES AND WHAT THE ARTICLE SAYS RATHER THAN JUST SAYING THAT THEY ARE AVAILABLE FROM THE WEBSITE. Unless the user asks for a smart sheet, keep responses short, quick and logical, using the data provided. AT THE END OF EACH RESPONSE, SAY THAT THIS IS NOT FINANCIAL ADVICE, THAT QUANTINEURON.COM IS NOT RESPONSIBLE FOR ANYTHING THAT GOES WRONG BECAUSE OF WHAT YOU SAY, AND THAT BY USING YOU, THE USER AGREES TO OUR DISCLAIMER STATEMENT, TERMS OF SERVICE, TERMS AND CONDITIONS, AND DATA COLLECTION AND PRIVACY POLICY."}],
        max_tokens=3000,
        stream=True
    )
    yield "Analyzing the search results and crafting a response 📊"
    # Accumulate the final response and stream it as it grows
    final_response = ""
    for chunk in response:
        content = chunk.choices[0].delta.content or ''
        final_response += content
        yield final_response  # yield the accumulated response for real-time streaming
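# Citrus is one of Gradio's built-in themes (assumes a Gradio release that ships it, e.g. 5.x);
# the hue arguments override its accent and neutral colors.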
theme = gr.themes.Citrus(
    primary_hue="blue",
    neutral_hue="slate",
)
examples = [
    ["What's the trending social sentiment like for Nvidia"],
    ["What's the latest news on Cisco Systems stock"],
    ["Analyze technical indicators for Adobe, are they presenting buy or sell signals"],
    ["Write me a smart sheet on the trending social sentiment and technical indicators for Nvidia"],
    ["What are the best stocks to buy this month"],
    ["What companies report earnings this week"],
    ["What's Apple's current market cap"],
    ["Analyze the technical indicators for Apple"],
    ["Build a DFCF model for Apple"],
    ["Make a table of Apple's stock price for the last 3 days"],
    ["What is Apple's PE ratio and how does it compare to other companies in consumer electronics"],
    ["How did Salesforce do on its last earnings?"],
    ["What is the average analyst price target for Nvidia"],
    ["What's the outlook for the stock market in 2025"],
    ["When does Nvidia next report earnings"],
    ["What are the latest products from Apple"],
    ["What is Tesla's current price-to-earnings ratio and how does it compare to other car manufacturers?"],
    ["List the top 5 performing stocks in the S&P 500 this month"],
    ["What is the dividend yield for Coca-Cola?"],
    ["Which companies in the tech sector are announcing dividends this month?"],
    ["Analyze the latest moving averages for Microsoft; are they indicating a trend reversal?"],
    ["What is the latest guidance on revenue for Meta?"],
    ["What is the current beta of Amazon stock and how does it compare to the industry average?"],
    ["What are the top-rated ETFs for technology exposure this quarter?"]
]
chatbot = gr.Chatbot(
    label="IM.S",
    avatar_images=[None, BOT_AVATAR],
    show_copy_button=True,
    layout="panel",
    height=700
)
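# Assemble the chat UI from the streaming handler, the Chatbot component, the example prompts,
# and the theme.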
chat_interface = gr.ChatInterface(
    theme=theme,
    fn=process_query,
    chatbot=chatbot,
    examples=examples,
    css=custom_css,  # apply the custom styles defined above (assumes a Gradio version where ChatInterface accepts css)
)
chat_interface.launch()