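"""Gradio app that fetches recent Google News articles for a trading asset and
scores each one with a financial-news sentiment model (DistilRoBERTa)."""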
import logging

import gradio as gr
import pandas as pd
import torch
from GoogleNews import GoogleNews
from transformers import pipeline

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

SENTIMENT_ANALYSIS_MODEL = (
    "mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis"
)

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
logging.info(f"Using device: {DEVICE}")

logging.info("Initializing sentiment analysis model...")
sentiment_analyzer = pipeline(
    "sentiment-analysis", model=SENTIMENT_ANALYSIS_MODEL, device=DEVICE
)
logging.info("Model initialized successfully")

def fetch_articles(query):
    """Search Google News for the query and return the raw result dicts."""
    try:
        logging.info(f"Fetching articles for query: '{query}'")
        googlenews = GoogleNews(lang="en")
        googlenews.search(query)
        articles = googlenews.result()
        logging.info(f"Fetched {len(articles)} articles")
        return articles
    except Exception as e:
        logging.error(
            f"Error while searching articles for query: '{query}'. Error: {e}"
        )
        raise gr.Error(
            f"Unable to search articles for query: '{query}'. Try again later...",
            duration=5,
        )

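# sentiment_analyzer() returns a list with one {"label": ..., "score": ...} dict per
# input; the badge colors in convert_to_dataframe key on the lowercase labels
# ("negative", "neutral", "positive").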
def analyze_article_sentiment(article):
    """Attach the model's sentiment prediction to a single article dict."""
    logging.info(f"Analyzing sentiment for article: {article['title']}")
    sentiment = sentiment_analyzer(article["desc"])[0]
    article["sentiment"] = sentiment
    return article

def analyze_asset_sentiment(asset_name):
    """Fetch recent articles for an asset and score each one's sentiment."""
    logging.info(f"Starting sentiment analysis for asset: {asset_name}")

    logging.info("Fetching articles")
    articles = fetch_articles(asset_name)

    logging.info("Analyzing sentiment of each article")
    analyzed_articles = [analyze_article_sentiment(article) for article in articles]

    logging.info("Sentiment analysis completed")

    return convert_to_dataframe(analyzed_articles)

def convert_to_dataframe(analyzed_articles):
    """Build a display-ready DataFrame with an HTML sentiment badge and linked title."""
    df = pd.DataFrame(analyzed_articles)
    df["Title"] = df.apply(
        lambda row: f'<a href="{row["link"]}" target="_blank">{row["title"]}</a>',
        axis=1,
    )
    df["Description"] = df["desc"]
    df["Date"] = df["date"]

    def sentiment_badge(sentiment):
        colors = {
            "negative": "red",
            "neutral": "gray",
            "positive": "green",
        }
        color = colors.get(sentiment, "gray")
        return (
            f'<span style="background-color: {color}; color: white; '
            f'padding: 2px 6px; border-radius: 4px;">{sentiment}</span>'
        )

    df["Sentiment"] = df["sentiment"].apply(lambda x: sentiment_badge(x["label"]))
    return df[["Sentiment", "Title", "Description", "Date"]]

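# Gradio UI: asset name input, example queries, and a results table wired to the Analyze button.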
with gr.Blocks() as iface:
    gr.Markdown("# Trading Asset Sentiment Analysis")
    gr.Markdown("Analyze the sentiment of recent articles related to a trading asset.")
    gr.Markdown("### 👨‍💻 Author: [**Nanda Safiq Alfiansyah**](https://ndav.my.id)")
    gr.Markdown("### NIM: 21533401 | Kelas: TI 7A")
    gr.Markdown(
        """
        **How it works:**
        Enter the name of a trading asset below, and I'll fetch the latest articles
        and provide a detailed sentiment analysis. Let's dive in!
        """
    )

    with gr.Row():
        input_asset = gr.Textbox(
            label="Asset Name",
            lines=1,
            placeholder="Enter the name of the trading asset...",
        )

    with gr.Row():
        analyze_button = gr.Button("Analyze Sentiment", size="sm")

    gr.Examples(
        examples=[
            # Cryptocurrencies
            "Bitcoin",
            "Ethereum",
            "Ripple",
            "Litecoin",
            "Binance Coin",
            "Cardano",
            "Polkadot",
            "Solana",
            # US tech stocks
            "Tesla",
            "Apple",
            "Amazon",
            "Microsoft",
            "Meta",
            "Google",
            "Netflix",
            "NVIDIA",
            # Commodities
            "Gold",
            "Silver",
            "Platinum",
            "Crude Oil",
            "Natural Gas",
            "Copper",
            # Stock indices
            "S&P 500",
            "Dow Jones",
            "Nasdaq 100",
            "FTSE 100",
            "DAX 30",
            "Nikkei 225",
            "Hang Seng",
            # Currency pairs
            "USD/EUR",
            "USD/JPY",
            "GBP/USD",
            "AUD/USD",
            "USD/CAD",
            "USD/CHF",
            # International stocks
            "Alibaba",
            "Samsung",
            "Toyota",
            "Sony",
            "Roche",
            "Volkswagen",
            "Tencent",
            "HSBC",
            # Consumer and blue-chip stocks
            "Coca-Cola",
            "PepsiCo",
            "McDonald's",
            "Procter & Gamble",
            "Johnson & Johnson",
            "Intel",
            "IBM",
        ],
        inputs=input_asset,
    )

    with gr.Row():
        with gr.Column():
            gr.Markdown("## Articles and Sentiment Analysis")
            articles_output = gr.Dataframe(
                headers=["Sentiment", "Title", "Description", "Date"],
                # "html" lets the sentiment badge and linked title render as markup
                datatype=["html", "html", "markdown", "markdown"],
                wrap=False,
            )

    analyze_button.click(
        analyze_asset_sentiment,
        inputs=[input_asset],
        outputs=[articles_output],
    )

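# queue() enables Gradio's request queue so long-running analyses don't block the UI;
# launch() starts the local server (by default at http://127.0.0.1:7860).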
logging.info("Launching Gradio interface")
iface.queue().launch()