import streamlit as st
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
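# This Streamlit app runs skill NER over user-supplied text with the
# Nucha/Nucha_SkillNER_BERT model and shows the detected entities in a table.
# Launch locally with: streamlit run app.py (assuming this file is saved as app.py).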

# Load the tokenizer and model
model_name = "Nucha/Nucha_SkillNER_BERT"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name)
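# Note: these loads run on every Streamlit rerun. If startup time matters, they
# could be wrapped in a function decorated with @st.cache_resource so the model
# is loaded only once per process (st.cache_resource is available in recent
# Streamlit releases).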

# Create the NER pipeline
ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer)
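# Note: the plain "ner" pipeline emits one entry per subword token. Passing
# aggregation_strategy="simple" to pipeline() would merge subwords into whole
# entities (the result key then becomes 'entity_group' instead of 'entity').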

# Build the UI with Streamlit
col1, col2 = st.columns(2)

with col1:
    st.header("Input")
    default_text = """To enhance my programming skills, I took online courses in Python, PHP and cloud computing technologies.
The workshop on machine learning taught me valuable skills in TensorFlow.
The developer utilized Python for backend development and JavaScript for frontend, ensuring a seamless user experience.
In my previous role, I collaborated with data scientists to implement machine learning models using R and TensorFlow.
I have strong communication skills."""
    text = st.text_area("Enter text for NER analysis:", value=default_text, height=400)
    analyze_button = st.button("Analyze")
    
with col2:
    st.header("Output")
    if analyze_button:
        ner_results = ner_pipeline(text)
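        # Each raw pipeline result is a dict with (at least) 'word', 'entity',
        # 'score', 'start' and 'end'; only the first three are shown below.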
        
        # Display results in a structured output block
        if ner_results:
            output_data = [{"Entity": entity['word'], "Label": entity['entity'], "Score": f"{entity['score']:.4f}"} for entity in ner_results]
            st.table(output_data)  # Display as a table
        else:
            st.write("No entities found.")