Vinay-M committed
Commit 5f1e84c · 1 Parent(s): 950d2fc
Files changed (1)
  1. app.py +6 -19
app.py CHANGED
@@ -1,23 +1,10 @@
  import streamlit as st
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ from transformers import pipeline
  import torch

- # Load pre-trained model and tokenizer
- model_name = "username/my_spam_detector" # replace 'username' with your Hugging Face account name
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSequenceClassification.from_pretrained(model_name)
-
- st.title("Spam Detector")
-
- text = st.text_input("Enter a text")
-
- if st.button('Predict'):
-     # Tokenize the input text
-     inputs = tokenizer(text, return_tensors='pt')
-
-     # Get model's prediction
-     outputs = model(**inputs)
-     probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
-
-     # Show prediction
-     st.write(f"The probability of the text being spam is {probs[0][1].item() * 100:.2f}%.")
+ pipe = pipeline('sentiment-analysis')
+ text = st.text_area("Enter text: ")
+ if text:
+     out = pipe(text)
+     st.json(out)
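
For reference, a minimal sketch of app.py as it stands after this commit (assuming the task-default sentiment-analysis model, which Transformers downloads on first use; the torch import is retained from the original file even though the pipeline manages the model itself):

import streamlit as st
from transformers import pipeline
import torch

# Task-default sentiment-analysis pipeline; the model is fetched on first run.
pipe = pipeline('sentiment-analysis')

# Minimal Streamlit UI: free-form text in, raw pipeline output out.
text = st.text_area("Enter text: ")
if text:
    out = pipe(text)  # list of {'label': ..., 'score': ...} dicts
    st.json(out)

Launched locally with: streamlit run app.py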