Spaces:
Building
Building
Upload main.py
Browse files- src/main.py +15 -9
src/main.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import display_gloss as dg
|
|
|
2 |
from NLP_Spacy_base_translator import NlpSpacyBaseTranslator
|
3 |
from flask import Flask, render_template, Response, request
|
4 |
|
@@ -7,6 +8,12 @@ app = Flask(__name__)
|
|
7 |
|
8 |
@app.route('/')
|
9 |
def index():
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
return render_template('index.html')
|
11 |
|
12 |
@app.route('/translate/', methods=['POST'])
|
@@ -15,18 +22,17 @@ def result():
|
|
15 |
sentence = request.form['inputSentence']
|
16 |
eng_to_asl_translator = NlpSpacyBaseTranslator(sentence=sentence)
|
17 |
generated_gloss = eng_to_asl_translator.translate_to_gloss()
|
18 |
-
|
19 |
-
print(
|
20 |
-
|
|
|
|
|
|
|
21 |
|
22 |
@app.route('/video_feed')
|
23 |
def video_feed():
|
24 |
-
sentence = request.args.get('
|
25 |
-
|
26 |
-
generated_gloss = eng_to_asl_translator.translate_to_gloss()
|
27 |
-
gloss_list = [gloss.lower() for gloss in generated_gloss.split()]
|
28 |
-
print(f'video_feed gloss_list: {gloss_list}')
|
29 |
-
dataset, vocabulary_list = dg.load_data()
|
30 |
return Response(dg.generate_video(gloss_list, dataset, vocabulary_list), mimetype='multipart/x-mixed-replace; boundary=frame')
|
31 |
|
32 |
if __name__ == "__main__":
|
|
|
1 |
import display_gloss as dg
|
2 |
+
import synonyms_preprocess as sp
|
3 |
from NLP_Spacy_base_translator import NlpSpacyBaseTranslator
|
4 |
from flask import Flask, render_template, Response, request
|
5 |
|
|
|
8 |
|
9 |
@app.route('/')
def index():
    """Serve the landing page and load the shared translation resources.

    The dataset, gloss vocabulary, token dictionary and spaCy objects are
    published as module-level globals so the `/translate/` and `/video_feed`
    routes can reuse them without reloading.
    """
    global dataset, vocabulary_list, dict_2000_tokens, nlp, dict_docs_spacy

    # NOTE(review): these loads run on every visit to '/', not once at
    # startup — presumably acceptable for this Space; confirm if load is slow.
    dataset, vocabulary_list = dg.load_data()
    dict_2000_tokens = dataset["gloss"].unique()
    nlp, dict_docs_spacy = sp.load_spacy_values()

    return render_template('index.html')
18 |
|
19 |
@app.route('/translate/', methods=['POST'])
def result():
    """Translate the POSTed English sentence into an ASL gloss sequence.

    Reads `inputSentence` from the form, converts it to gloss tokens, maps
    each token onto the known gloss vocabulary via synonym lookup, and
    renders the translation page.
    """
    sentence = request.form['inputSentence']
    eng_to_asl_translator = NlpSpacyBaseTranslator(sentence=sentence)
    generated_gloss = eng_to_asl_translator.translate_to_gloss()

    # Keep only purely alphanumeric tokens, lower-cased (drops punctuation).
    gloss_list_lower = [gloss.lower() for gloss in generated_gloss.split() if gloss.isalnum()]
    print('gloss before synonym:', gloss_list_lower)

    # Replace each token with its closest match in the 2000-token gloss
    # dictionary loaded by index().
    gloss_list = [
        sp.find_synonyms(gloss, nlp, dict_docs_spacy, dict_2000_tokens)
        for gloss in gloss_list_lower
    ]
    print('synonym list:', gloss_list)

    gloss_sentence = " ".join(gloss_list)
    return render_template('translate.html', sentence=sentence, gloss_list=gloss_list, gloss_sentence=gloss_sentence)
|
31 |
|
32 |
@app.route('/video_feed')
def video_feed():
    """Stream sign-language video frames for the gloss sentence in the query string.

    Expects a `gloss_sentence` query parameter (space-separated glosses) and
    returns a multipart MJPEG-style response produced by dg.generate_video().
    """
    global dataset, vocabulary_list

    sentence = request.args.get('gloss_sentence', '')
    gloss_list = sentence.split()

    # Bug fix: `dataset`/`vocabulary_list` are normally assigned by index().
    # If this route is hit first (direct link, page refresh of the video URL),
    # the globals do not exist yet and the original code raised NameError.
    # Lazily load them here so the stream works regardless of request order.
    if 'dataset' not in globals() or 'vocabulary_list' not in globals():
        dataset, vocabulary_list = dg.load_data()

    return Response(dg.generate_video(gloss_list, dataset, vocabulary_list), mimetype='multipart/x-mixed-replace; boundary=frame')
|
37 |
|
38 |
if __name__ == "__main__":
|