Create app.py
app.py
ADDED
import json
import random
import pickle

import numpy as np
import nltk
import gradio as gr
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import load_model

lemmatizer = WordNetLemmatizer()

# Tokenizer and lemmatizer data used by nltk.word_tokenize and WordNetLemmatizer
nltk.download('punkt')
nltk.download('wordnet')
+
# Load intents and model
|
21 |
+
new_intents = json.loads(open('intents.json').read())
|
22 |
+
words = pickle.load(open('words.pkl', 'rb'))
|
23 |
+
classes = pickle.load(open('classes.pkl', 'rb'))
|
24 |
+
model = load_model("chatbot_final.h5")
|
25 |
+
|
26 |
+
|
27 |
+
# Load intents and model
|
28 |
+
new_intents = json.loads(open('intents.json').read())
|
29 |
+
words = pickle.load(open('words.pkl', 'rb'))
|
30 |
+
classes = pickle.load(open('classes.pkl', 'rb'))
|
31 |
+
model = load_model("chatbot_final.h5")
|
32 |
+
|
# Function to clean up a sentence
def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words]
    return sentence_words
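# Hypothetical example: clean_up_sentence("dogs are running")
# returns ['dog', 'are', 'running'] (WordNet lemmatization, noun POS by default).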

# Function to convert a sentence to a bag of words
def bag_of_words(sentence):
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for w in sentence_words:
        for i, word in enumerate(words):
            if word == w:
                bag[i] = 1
    return np.array(bag)
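# Hypothetical example: with words == ['hi', 'how', 'are', 'you'],
# bag_of_words("how are you") returns array([0, 1, 1, 1]).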

# Function to predict the intent of a sentence
def predict_class(sentence):
    bow = bag_of_words(sentence)
    res = model.predict(np.array([bow]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]

    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({'intent': classes[r[0]], 'probability': str(r[1])})

    if not return_list:
        # No intents detected, return a default response
        return_list.append({'intent': 'default', 'probability': '1.0'})

    return return_list
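# Hypothetical example: predict_class("hello") might return
# [{'intent': 'greeting', 'probability': '0.93'}].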

# Function to get a response based on detected intent
def get_response(intents_list, intents_json):
    tag = intents_list[0]['intent']
    list_of_intents = intents_json['intents']
    result = "Sorry, I don't understand that yet."  # fallback if no tag matches
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result

# Gradio handler: classify the user's message and reply with a matching response
def chat(message):
    ints = predict_class(message)
    res = get_response(ints, new_intents)
    return res

iface = gr.Interface(fn=chat, inputs="text", outputs="text")
iface.launch()
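For reference, a minimal sketch of the intents.json schema this app reads. The tags, patterns, and responses below are illustrative placeholders; only 'tag' and 'responses' are consumed by get_response, and a 'default' tag backs the low-confidence fallback in predict_class:

# make_intents.py (hypothetical helper, not part of this commit)
import json

example_intents = {
    "intents": [
        {"tag": "greeting",
         "patterns": ["Hi", "Hello", "Hey there"],
         "responses": ["Hello!", "Hi, how can I help?"]},
        {"tag": "default",
         "patterns": [],
         "responses": ["Sorry, I didn't catch that. Could you rephrase?"]},
    ]
}

with open("intents.json", "w") as f:
    json.dump(example_intents, f, indent=2)

With intents.json, words.pkl, classes.pkl, and chatbot_final.h5 in place alongside app.py, running python app.py should start the Gradio interface on a local URL.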