import streamlit as st
import torch
import torch.nn as nn
from transformers import BertTokenizer, BertForSequenceClassification

# Configuration (model path, tokenizer, and training-time hyperparameters)
class Config:
    BERT_PATH = "ahmedrachid/FinancialBERT"
    MODEL_PATH = "model.bin"      # fine-tuned state dict expected next to this script
    TRAIN_BATCH_SIZE = 32         # training-time settings, unused during inference
    VALID_BATCH_SIZE = 32
    EPOCHS = 10
    MAX_LEN = 512
    TOKENIZER = BertTokenizer.from_pretrained(BERT_PATH)

# FinancialBERT model class
class FinancialBERT(nn.Module):
    def __init__(self):
        super(FinancialBERT, self).__init__()
        # 3 labels: negative, neutral, positive (see predict_sentiment below)
        self.bert = BertForSequenceClassification.from_pretrained(
            Config.BERT_PATH, num_labels=3, hidden_dropout_prob=0.5
        )

    def forward(self, input_ids, attention_mask, labels=None):
        # Returns (loss, logits); loss is None when no labels are passed (inference)
        output = self.bert(input_ids, attention_mask=attention_mask, labels=labels)
        return output.loss, output.logits

# Load model
model = FinancialBERT()
model.load_state_dict(torch.load(Config.MODEL_PATH, map_location=torch.device('cpu')))
model.eval()
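# Note: Streamlit re-runs this script on every user interaction, so the weights above are
# reloaded each time. Wrapping the loading code in a function decorated with
# @st.cache_resource is one way to avoid that (left as-is here to keep the original structure).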

# Tokenizer
tokenizer = Config.TOKENIZER

def predict_sentiment(sentences):
    # Drop token_type_ids: the model's forward() only accepts input_ids and attention_mask
    inputs = tokenizer(sentences, return_tensors="pt", truncation=True, padding=True,
                       max_length=Config.MAX_LEN, return_token_type_ids=False)
    with torch.no_grad():
        _, logits = model(**inputs)
    probs = torch.nn.functional.softmax(logits, dim=-1)
    predictions = torch.argmax(probs, dim=-1)
    # Return the label for the first sentence only; label order is assumed to match
    # the order used when the checkpoint in model.bin was trained
    return ['negative', 'neutral', 'positive'][predictions[0].item()]

# Streamlit app
st.title("Financial Sentiment Analysis")
sentence = st.text_area("Enter a financial sentence:", "")
if st.button("Predict"):
    if sentence.strip():
        sentiment = predict_sentiment([sentence])
        st.write(f"The predicted sentiment is: {sentiment}")
    else:
        st.warning("Please enter a sentence first.")
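# To run locally (assuming Streamlit is installed, this file is saved as e.g. app.py, and a
# fine-tuned checkpoint is available as model.bin in the working directory):
#   streamlit run app.py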