Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,83 +1,19 @@
|
|
1 |
import datetime
|
2 |
import pytz
|
3 |
import yaml
|
4 |
-
import
|
5 |
-
|
6 |
-
from smolagents import CodeAgent, HfApiModel, load_tool, tool
|
7 |
from tools.final_answer import FinalAnswerTool
|
8 |
from Gradio_UI import GradioUI
|
9 |
|
10 |
-
# Tool to fetch the current stock price for a given symbol using yfinance.
# NOTE(review): the diff shows this comment truncated and no @tool decorator on
# this function, while the sibling get_stock_historical_data has one. Without
# @tool, smolagents cannot register this function as an agent tool — restored.
@tool
def get_current_stock_price(symbol: str) -> str:
    """Fetches the current stock price for the given stock symbol.

    Args:
        symbol: The stock ticker symbol (e.g., 'AAPL').

    Returns:
        A string reporting the current stock price.
    """
    try:
        ticker = yf.Ticker(symbol)
        # 'regularMarketPrice' may be missing from ticker.info (e.g. unknown
        # symbol), so fall back to an explicit "could not fetch" message.
        price = ticker.info.get("regularMarketPrice", None)
        if price is None:
            return f"Could not fetch current price for {symbol}."
        return f"The current price of {symbol} is {price:.2f} USD."
    except Exception as e:
        # yfinance raises on network/API failures; report rather than crash the agent.
        return f"Error fetching current price for {symbol}: {str(e)}"
|
29 |
-
|
30 |
-
# Tool to fetch historical stock data for a given timeline using yfinance.
@tool
def get_stock_historical_data(symbol: str, timeline: str) -> str:
    """Fetches historical stock data for a given symbol and timeline.

    Args:
        symbol: The stock ticker symbol (e.g., 'TSLA').
        timeline: The timeframe for data. Acceptable values: '1h', '1day', '1week', '1year'.

    Returns:
        A summary string of the historical price data.
    """
    # Each supported timeline maps to a (period, interval, tail) yfinance query.
    # tail=60 for '1h': fetch 1 day at 1-minute intervals, keep the last 60 minutes.
    queries = {
        "1h": ("1d", "1m", 60),
        "1day": ("1d", "5m", None),
        "1week": ("7d", "30m", None),
        "1year": ("1y", "1d", None),
    }
    try:
        ticker = yf.Ticker(symbol)
        try:
            period, interval, tail = queries[timeline]
        except KeyError:
            return f"Timeline '{timeline}' is not supported. Use '1h', '1day', '1week', or '1year'."

        df = ticker.history(period=period, interval=interval)
        if tail is not None:
            df = df.tail(tail)

        if df.empty:
            return f"No historical data available for {symbol} over timeline {timeline}."

        closes = df['Close']
        return (
            f"For {symbol} over {timeline}:\n"
            f"Start Price: {closes.iloc[0]:.2f} USD\n"
            f"End Price: {closes.iloc[-1]:.2f} USD\n"
            f"Min Price: {closes.min():.2f} USD\n"
            f"Max Price: {closes.max():.2f} USD"
        )
    except Exception as e:
        return f"Error fetching historical data for {symbol} over {timeline}: {str(e)}"
|
73 |
|
74 |
# Final answer tool (must be included)
|
75 |
final_answer = FinalAnswerTool()
|
76 |
|
77 |
# Define the model configuration.
|
78 |
-
# Here we reduce max_tokens to help keep the total token count below the limit.
|
79 |
model = HfApiModel(
|
80 |
-
max_tokens=1000, # Reduced
|
81 |
temperature=0.5,
|
82 |
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
|
83 |
custom_role_conversions=None,
|
@@ -90,16 +26,16 @@ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_co
|
|
90 |
with open("prompts.yaml", 'r') as stream:
|
91 |
prompt_templates = yaml.safe_load(stream)
|
92 |
|
93 |
-
# Initialize the CodeAgent with the
|
94 |
agent = CodeAgent(
|
95 |
model=model,
|
96 |
-
tools=[final_answer,
|
97 |
-
max_steps=3, # Reduced steps to limit the chain-of-thought length
|
98 |
verbosity_level=1,
|
99 |
grammar=None,
|
100 |
planning_interval=None,
|
101 |
-
name="
|
102 |
-
description="An AI agent that
|
103 |
prompt_templates=prompt_templates
|
104 |
)
|
105 |
|
|
|
1 |
import datetime
|
2 |
import pytz
|
3 |
import yaml
|
4 |
+
from smolagents import CodeAgent, HfApiModel, load_tool, tool, DuckDuckGoSearchTool
|
|
|
|
|
5 |
from tools.final_answer import FinalAnswerTool
|
6 |
from Gradio_UI import GradioUI
|
7 |
|
8 |
+
# Instantiate the DuckDuckGo search tool provided by smolagents.
|
9 |
+
duckduckgo_tool = DuckDuckGoSearchTool()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
# Final answer tool (must be included)
|
12 |
final_answer = FinalAnswerTool()
|
13 |
|
14 |
# Define the model configuration.
|
|
|
15 |
model = HfApiModel(
|
16 |
+
max_tokens=1000, # Reduced max_tokens to help prevent token limit issues.
|
17 |
temperature=0.5,
|
18 |
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
|
19 |
custom_role_conversions=None,
|
|
|
# Load the system prompt templates from the YAML configuration file.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Initialize the CodeAgent with the DuckDuckGo search tool.
agent = CodeAgent(
    model=model,
    tools=[final_answer, duckduckgo_tool],
    max_steps=3,  # Reduced steps to limit the chain-of-thought length.
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name="SearchAgent",
    description="An AI agent that uses DuckDuckGo search to fetch information from the web.",
    prompt_templates=prompt_templates
)
|
41 |
|