kylezhao101 committed on
Commit 0f51a7f · 1 Parent(s): 6759ca3

import knowledge_extraction model

Files changed (1)
  1. app.py +13 -14
app.py CHANGED
@@ -1,20 +1,19 @@
  import streamlit as st
- from transformers import BertTokenizer, BertForSequenceClassification
- import torch
+ from transformers import pipeline

- # Load the pre-trained BERT model and tokenizer
- model_name = "bert-base-uncased" # Replace with your specific model if needed
- tokenizer = BertTokenizer.from_pretrained(model_name)
- model = BertForSequenceClassification.from_pretrained(model_name)
+ # Load the token classification pipeline
+ model_name = "jjzha/jobbert_knowledge_extraction"
+ pipe = pipeline("token-classification", model=model_name)

  # Streamlit UI
- st.title("BERT Text Classification")
- text_input = st.text_input("Enter text for classification:")
+ st.title("Token Classification with Hugging Face")
+ text_input = st.text_area("Enter text for token classification:")

  if text_input:
-     # Tokenize input
-     inputs = tokenizer(text_input, return_tensors="pt", truncation=True, padding=True, max_length=512)
-     with torch.no_grad():
-         logits = model(**inputs).logits
-     predicted_class = torch.argmax(logits, dim=1).item()
-     st.write(f"Predicted Class: {predicted_class}")
+     # Perform token classification
+     results = pipe(text_input)
+
+     # Display the results
+     st.write("Token Classification Results:")
+     for result in results:
+         st.write(f"Entity: {result['entity']}, Word: {result['word']}, Score: {result['score']:.2f}")