manasagangotri committed on
Commit 4edbcc8 · verified · 1 Parent(s): be732bd

Update app.py

Files changed (1)
  1. app.py +126 -113
app.py CHANGED
@@ -1,19 +1,19 @@
 import streamlit as st
 import torch
-from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
-from deep_translator import GoogleTranslator
+from transformers import AutoTokenizer, AutoModelForSequenceClassification,pipeline
+
 import requests
-import plotly.graph_objects as go
+import json
+

-# Load models
+st.set_page_config(page_title="News Prediction", page_icon=":earth_africa:")
+
+# Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("hamzab/roberta-fake-news-classification")
 model = AutoModelForSequenceClassification.from_pretrained("hamzab/roberta-fake-news-classification")

-summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
-
-st.set_page_config(page_title="Fake News Detector", page_icon="📰")

-# --- Functions ---
+from deep_translator import GoogleTranslator

 def translate_to_english(text):
     try:
@@ -21,11 +21,7 @@ def translate_to_english(text):
     except Exception as e:
         return f"Error in translation: {e}"

-def summarize_text(text):
-    if len(text) > 100:
-        summary = summarizer(text[:1024], max_length=80, min_length=30, do_sample=False)
-        return summary[0]['summary_text']
-    return text
+

 def predict_fake(title, text):
     input_str = "<title>" + title + "<content>" + text + "<end>"
@@ -34,109 +30,126 @@ def predict_fake(title, text):
     model.to(device)
     with torch.no_grad():
         output = model(input_ids["input_ids"].to(device), attention_mask=input_ids["attention_mask"].to(device))
-    probs = torch.nn.Softmax(dim=1)(output.logits)[0]
-    return {"Fake": probs[0].item(), "Real": probs[1].item()}
-
-def render_confidence_chart(confidence):
-    fig = go.Figure(data=[go.Pie(
-        values=[confidence, 100 - confidence],
-        labels=['Confidence', 'Uncertainty'],
-        hole=0.6,
-        marker_colors=[
-            f'hsl({confidence * 1.2}, 70%, 50%)',
-            'rgb(240, 240, 240)'
-        ],
-        textinfo='label+percent',
-        hoverinfo='label+value'
-    )])
-    fig.update_layout(
-        showlegend=False,
-        margin=dict(t=10, b=10, l=10, r=10),
-        annotations=[dict(text=f'{confidence}%', x=0.5, y=0.5, font_size=20, showarrow=False)],
-        height=300
-    )
-    st.plotly_chart(fig, use_container_width=True)
-
-def simulate_detected_patterns(text):
-    patterns = []
-    if "breaking" in text.lower():
-        patterns.append({"phrase": "breaking", "category": "clickbait", "impact": -5})
-    if "confirmed" in text.lower():
-        patterns.append({"word": "confirmed", "category": "assertive", "impact": 5})
-    if "shocking" in text.lower():
-        patterns.append({"word": "shocking", "category": "exaggeration", "impact": -10})
-    return patterns
+    return dict(zip(["Fake", "Real"], [x.item() for x in list(torch.nn.Softmax()(output.logits)[0])]))

 def fact_check_with_google(api_key, query):
     url = f"https://factchecktools.googleapis.com/v1alpha1/claims:search"
-    params = {"query": query, "key": api_key}
+    params = {
+        "query": query,
+        "key": api_key
+    }
     response = requests.get(url, params=params)
-    return response.json() if response.status_code == 200 else {"error": f"Error: {response.status_code}"}
-
-# --- App UI ---
-
-st.title("📰 Fake News Detection App")
-st.markdown("Enter a news article to predict its credibility and view confidence metrics.")
-
-with st.form("news_form"):
-    title = st.text_input("📝 Title")
-    text = st.text_area("📜 Content")
-    language = st.selectbox("🌐 Language of Input", ["English", "Other"])
-    summarize_option = st.checkbox("🧠 Summarize before fact check")
-    check_fact = st.checkbox("🔍 Check with Google Fact Check")
-    GOOGLE_API_KEY = st.text_input("🔑 Google API Key (optional)", type="password")
-    submit_button = st.form_submit_button("🚀 Predict")
-
-if submit_button:
-    if language != "English":
-        title = translate_to_english(title)
-        text = translate_to_english(text)
-
-    if summarize_option:
-        summary = summarize_text(title + " " + text)
-        st.markdown("### ✂️ Summary Used for Fact Check")
-        st.info(summary)
+    if response.status_code == 200:
+        return response.json()
     else:
-        summary = title + " " + text
-
-    prediction = predict_fake(title, text)
-    confidence = round(prediction["Real"] * 100)
-    verdict = "Real" if confidence > 60 else "Fake" if confidence < 40 else "Uncertain"
-    color = "green" if verdict == "Real" else "red" if verdict == "Fake" else "orange"
-
-    # Output
-    st.subheader("✅ Prediction Result")
-    st.markdown(f"""
-    <div style='background-color:#f7f9fc;padding:1rem;border-radius:10px'>
-        <b>Verdict:</b> <span style='color:{color}; font-weight:600'>{verdict}</span><br>
-        <b>Confidence:</b> {confidence}%
-    </div>
-    """, unsafe_allow_html=True)
-
-    render_confidence_chart(confidence)
-
-    # Detected Patterns
-    patterns = simulate_detected_patterns(title + " " + text)
-    if patterns:
-        st.subheader("🔎 Detected Language Patterns")
-        for p in patterns:
-            word = p.get("word") or p.get("phrase")
-            st.markdown(f"- **{word}** ({p['category']}) — "
-                        f"<span style='color:{'green' if p['impact']>0 else 'red'};'>{p['impact']:+}</span>",
-                        unsafe_allow_html=True)
-
-    GOOGLE_API_KEY = "AIzaSyAf5v5380xkpo0Rk3kBiSxpxYVBQwcDi2A"
-    # Google Fact Check
-    if check_fact and GOOGLE_API_KEY:
-        st.subheader("🌐 Google Fact Check Results")
-        facts = fact_check_with_google(GOOGLE_API_KEY, summary)
-        if "claims" in facts:
-            for claim in facts["claims"]:
-                st.markdown(f"**Claim:** {claim.get('text', 'N/A')}")
-                for review in claim.get("claimReview", []):
-                    st.write(f"- **Publisher**: {review.get('publisher', {}).get('name', 'N/A')}")
-                    st.write(f"- **Rating**: {review.get('textualRating', 'N/A')}")
-                    st.write(f"- **URL**: {review.get('url', 'N/A')}")
-                    st.write("---")
+        return {"error": f"Unable to fetch results from Google Fact Check API. HTTP {response.status_code}: {response.text}"}
+
+'''def main():
+    st.title("Fake News Prediction")
+
+    # Load Google API key from a secure location or environment variable
+
+
+    # Create the form for user input
+    with st.form("news_form"):
+        st.subheader("Enter News Details")
+        title = st.text_input("Title")
+        text = st.text_area("Text")
+        language = st.selectbox("Select Language", options=["English", "Other"])
+        submit_button = st.form_submit_button("Submit")
+
+    # Process form submission and make prediction
+    if submit_button:
+        if language == "Other":
+            title = translate_to_english(title)
+            text = translate_to_english(text)
+
+        prediction = predict_fake(title, text)
+
+        st.subheader("Prediction:")
+        st.write("Prediction: ", prediction)
+
+        if prediction.get("Real") > 0.5:
+            st.write("This news is predicted to be **real** :muscle:")
         else:
-            st.warning("No fact-check results found.")
+            st.write("This news is predicted to be **fake** :shit:")
+
+'''
+# Load summarizer
+@st.cache_resource
+def load_summarizer():
+    return pipeline("summarization", model="facebook/bart-large-cnn")
+
+summarizer = load_summarizer()
+
+
+
+def summarize_text(text):
+    try:
+        summary = summarizer(text, max_length=30, min_length=5, do_sample=False)
+        return summary[0]['summary_text']
+    except Exception as e:
+        return f"Error in summarization: {e}"
+
+
+def main():
+    st.title("Fake News Prediction")
+
+    # Store your API key here or load from environment variable
+    GOOGLE_API_KEY = "AIzaSyAf5v5380xkpo0Rk3kBiSxpxYVBQwcDi2A"  # 🔐 Replace this!
+
+    with st.form("news_form"):
+        st.subheader("Enter News Details")
+        title = st.text_input("Title")
+        text = st.text_area("Text")
+        language = st.selectbox("Select Language", options=["English", "Other"])
+        check_fact = st.checkbox("Also check with Google Fact Check API")
+        submit_button = st.form_submit_button("Submit")
+
+    if submit_button:
+        if language == "Other":
+            title = translate_to_english(title)
+            text = translate_to_english(text)
+
+        prediction = predict_fake(title, text)
+
+        st.subheader("Prediction:")
+        st.write("Prediction: ", prediction)
+
+        if prediction.get("Real") > 0.5:
+            st.write("This news is predicted to be **real** :muscle:")
+        else:
+            st.write("This news is predicted to be **fake** :shit:")
+
+
+        if check_fact and GOOGLE_API_KEY:
+            st.subheader("Google Fact Check Results")
+
+            # Optional: user-provided claim input
+            # custom_claim = st.text_input("Optional: Enter a specific claim to fact-check", "")
+            # query = custom_claim if custom_claim else title  # Use custom claim if provided
+            summarized_claim = summarize_text(title)
+            st.info(f"🔍 Fact check query (summarized): **{summarized_claim}**")
+
+            fact_check_data = fact_check_with_google(GOOGLE_API_KEY, summarized_claim)
+
+            # Optional: show raw data for debugging
+            # st.json(fact_check_data)
+
+
+
+            if "claims" in fact_check_data and len(fact_check_data["claims"]) > 0:
+                for claim in fact_check_data["claims"]:
+                    st.markdown(f"**Claim:** {claim.get('text', 'N/A')}")
+                    for review in claim.get("claimReview", []):
+                        st.write(f"- **Publisher**: {review.get('publisher', {}).get('name', 'N/A')}")
+                        st.write(f"- **Rating**: {review.get('textualRating', 'N/A')}")
+                        st.write(f"- **URL**: {review.get('url', 'N/A')}")
+                        st.write("---")
+            else:
+                st.warning("No fact-check results found. Try changing the title or query.")
+
+
+if __name__ == "__main__":
+    main()
+