Update app.py
app.py
CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import torch
 import nltk
 import numpy as np
@@ -41,20 +41,21 @@ try:
 except FileNotFoundError:
     raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")
 
-# Build the model structure
-
-
-
-
+# Build the model structure
+net = tflearn.input_data(shape=[None, len(training[0])])
+net = tflearn.fully_connected(net, 8)
+net = tflearn.fully_connected(net, 8)
+net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
+net = tflearn.regression(net)
 
-model
-model
-model.add(Dense(8, activation='relu'))
-model.add(Dense(len(output[0]), activation='softmax'))
-model.compile(optimizer=Adam(), loss=CategoricalCrossentropy(), metrics=['accuracy'])
+# Create a new model instance
+model = tflearn.DNN(net)
 
-#
-
+# Create a checkpoint object
+checkpoint = tf.train.Checkpoint(model=model)
+
+# Load the model weights
+checkpoint.restore("path/to/save/MentalHealthChatBotmodel")
 
 # Function to process user input into a bag-of-words format
 def bag_of_words(s, words):
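Note: `tf.train.Checkpoint` restores TF2 trackable objects, and a `tflearn.DNN` wrapper is not trackable, so the `checkpoint.restore(...)` added above most likely loads nothing. tflearn ships its own persistence API. A minimal sketch, assuming the files under `path/to/save/` were produced by `model.save()` on this same architecture:

    # Rebuild the net exactly as in the hunk above, then use tflearn's own loader.
    # Assumes `training` and `output` are the arrays unpickled from data.pickle.
    model = tflearn.DNN(net)
    model.load("path/to/save/MentalHealthChatBotmodel")  # tflearn loader, not tf.train.Checkpoint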
@@ -74,7 +75,7 @@ def chat(message, history):
 
     try:
         # Predict the tag
-        results = model.predict(
+        results = model.predict([bag_of_words(message, words)])
         results_index = np.argmax(results)
         tag = labels[results_index]
 
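Note: the new argument is what makes this call work; the network was trained on bag-of-words vectors, not raw strings. A minimal sketch of the flow, assuming `words` and `labels` come from data.pickle as above (the example sentence is arbitrary):

    bow = bag_of_words("I feel anxious today", words)  # 0/1 vector over the training vocabulary
    results = model.predict([bow])                     # one row of per-label probabilities
    tag = labels[int(np.argmax(results))]              # highest-scoring intent tag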
@@ -162,7 +163,7 @@ def scrape_website_for_contact_info(website):
         response = requests.get(website, timeout=5)
         soup = BeautifulSoup(response.content, 'html.parser')
 
-        phone_match = re.search(r'
+        phone_match = re.search(r'\(?\+?[0-9]*\)?[0-9_\- \(\)]*', soup.get_text())
         if phone_match:
             phone_number = phone_match.group()
 
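Note: every quantifier in the added pattern is `*`, so it also matches the empty string; `phone_match` will then be truthy on nearly any page and may capture only whitespace. A stricter sketch (this pattern is an illustration, not code from the repo):

    # Require a leading digit, at least six middle characters, and a trailing digit.
    phone_re = re.compile(r'\(?\+?\d[\d\-\s\(\)]{6,}\d')
    match = phone_re.search(soup.get_text())
    phone_number = match.group().strip() if match else None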
@@ -243,7 +244,7 @@ def main():
 # Gradio UI setup
 with gr.Blocks() as demo:
     # Load pre-trained model and tokenizer
-    @gr.
+    @gr.cache_resource
     def load_model():
         tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
         model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
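Note: `cache_resource` is Streamlit's caching decorator (`st.cache_resource`); Gradio does not document a `gr.cache_resource`, so this line would likely fail with an `AttributeError` when the Space boots. Loading the model once at module scope achieves the same reuse; a sketch (the `emotion_*` names are placeholders chosen to avoid shadowing the tflearn `model`):

    # Loaded once at import time; Gradio reuses these objects across requests.
    emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
    emotion_model = AutoModelForSequenceClassification.from_pretrained(
        "j-hartmann/emotion-english-distilroberta-base"
    )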
@@ -278,7 +279,7 @@ with gr.Blocks() as demo:
 
     try:
         # Predict the tag
-        results = model.predict(
+        results = model.predict([bag_of_words(message, words)])
         results_index = np.argmax(results)
         tag = labels[results_index]
 