Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,8 @@
|
|
1 |
import streamlit as st
|
2 |
-
import joblib
|
|
|
3 |
import re
|
4 |
-
from transformers import pipeline
|
5 |
-
|
6 |
-
# Load the scikit-learn model
|
7 |
-
sklearn_model = joblib.load("arabic-msa-dialects-segmentation-v1.pkl")
|
8 |
-
|
9 |
-
# Wrap the scikit-learn model inside a Hugging Face pipeline
|
10 |
-
pipeline_model = pipeline(task="feature-extraction", model=sklearn_model)
|
11 |
|
12 |
# Define feature functions
|
13 |
def features(sentence, index):
|
@@ -58,10 +53,14 @@ if text_input:
|
|
58 |
# Extract features
|
59 |
features_list = [features(tokenized_text, i) for i in range(len(tokenized_text))]
|
60 |
|
61 |
-
#
|
62 |
-
|
|
|
|
|
|
|
|
|
63 |
|
64 |
-
# Display the
|
65 |
-
st.write("
|
66 |
else:
|
67 |
st.write("Please enter some text.")
|
|
|
1 |
import streamlit as st
|
2 |
+
import skops.hub_utils as hub_utils
|
3 |
+
import pandas as pd
|
4 |
import re
|
5 |
+
from nltk.tokenize import word_tokenize
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
# Define feature functions
|
8 |
def features(sentence, index):
|
|
|
53 |
# Extract features
|
54 |
features_list = [features(tokenized_text, i) for i in range(len(tokenized_text))]
|
55 |
|
56 |
+
# Create a DataFrame with the features
|
57 |
+
data = pd.DataFrame(features_list)
|
58 |
+
|
59 |
+
# Load the model from the Hub
|
60 |
+
model_id = "Alshargi/arabic-msa-dialects-segmentation"
|
61 |
+
res = hub_utils.get_model_output(model_id, data)
|
62 |
|
63 |
+
# Display the model output
|
64 |
+
st.write("Model Output:", res)
|
65 |
else:
|
66 |
st.write("Please enter some text.")
|