# src/main-backup.py — Sign Language translation Flask app.
# (Hugging Face raw-page chrome removed from the top of this file; the
# scraped header lines were not valid Python and broke the module.)
import display_gloss as dg
import synonyms_preprocess as sp
from NLP_Spacy_base_translator import NlpSpacyBaseTranslator
from flask import Flask, render_template, Response, request, send_file
import io
import cv2
import numpy as np
import os
import requests
from urllib.parse import quote, unquote
import tempfile
import re
# Flask application; static assets are served from ./static.
app = Flask(__name__, static_folder='static')
app.config['TITLE'] = 'Sign Language Translate'

# Load the heavyweight spaCy / gloss-dataset resources once at import
# time so every request reuses the same in-memory objects.
nlp, dict_docs_spacy = sp.load_spacy_values()
dataset, list_2000_tokens = dg.load_data()
def clean_quotes(text):
    """Collapse quote runs and normalize whitespace.

    Runs of apostrophes become a single quote, interior whitespace runs
    become one space, and leading/trailing whitespace is stripped.
    """
    collapsed = re.sub(r"'+", "'", text)
    return re.sub(r'\s+', ' ', collapsed).strip()
def is_korean(text):
    """Return True if *text* contains at least one Hangul syllable."""
    return re.search('[가-힣]', text) is not None
def is_english(text):
    """Heuristically decide whether *text* is English-only.

    Single-quoted spans and all whitespace are stripped first; the
    remainder must consist solely of ASCII letters and basic punctuation.
    """
    remainder = re.sub(r"'[^']*'|\s", "", text)
    return re.fullmatch(r'[A-Za-z.,!?-]*', remainder) is not None
def normalize_quotes(text):
    """Normalize quoting and whitespace in *text*.

    Collapses runs of apostrophes to a single quote and squeezes
    whitespace runs to single spaces, stripping the ends.

    NOTE: the original implementation special-cased already-quoted words
    with an `if ...: return text` branch, but both that branch and the
    fall-through returned the same value — dead code, removed here.
    """
    text = re.sub(r"'+", "'", text)
    return re.sub(r'\s+', ' ', text).strip()
def find_quoted_words(text):
    """Return the contents of every single-quoted span in *text*, in order."""
    quoted = re.compile(r"'([^']*)'")
    return quoted.findall(text)
def spell_out_word(word):
    """Expand *word* into space-separated lowercase letters for fingerspelling."""
    return ' '.join(ch for ch in word.lower())
def translate_korean_text(text):
    """Translate Korean *text* to English, preserving one quoted proper noun.

    Only the FIRST '...'-quoted span is handled: it is masked with the
    placeholder XXXXX before translation, translated (or merely upper-
    cased if it is already Latin letters) separately, then re-inserted
    upper-cased inside quotes.  Returns *text* unchanged when there is no
    quoted span, when the translation endpoint does not answer HTTP 200,
    or on any exception (best-effort behavior).
    """
    try:
        quoted_match = re.search(r"'([^']*)'", text)
        if not quoted_match:
            return text
        quoted_word = quoted_match.group(1)
        # Unofficial Google Translate endpoint (no API key required).
        url = "https://translate.googleapis.com/translate_a/single"
        params = {
            "client": "gtx",
            "sl": "ko",
            "tl": "en",
            "dt": "t",
            # Mask the quoted word so it survives translation verbatim.
            "q": text.replace(f"'{quoted_word}'", "XXXXX")
        }
        response = requests.get(url, params=params)
        if response.status_code != 200:
            return text
        # Response body is a nested list; each segment's text is item[0].
        translated_text = ' '.join(item[0] for item in response.json()[0] if item[0])
        if re.match(r'^[A-Za-z]+$', quoted_word):
            # Already Latin letters: treat as a proper noun, just upper-case.
            proper_noun = quoted_word.upper()
        else:
            # Otherwise translate the quoted word on its own.
            params["q"] = quoted_word
            response = requests.get(url, params=params)
            if response.status_code == 200:
                proper_noun = response.json()[0][0][0].upper()
            else:
                proper_noun = quoted_word.upper()
        final_text = translated_text.replace("XXXXX", f"'{proper_noun}'")
        # NOTE(review): presumably the API sometimes renders the masked
        # context as the literal token NAME — confirm before removing.
        final_text = re.sub(r'\bNAME\b', 'name', final_text)
        # Re-attach a sentence-final period separated by the join above.
        final_text = final_text.replace(" .", ".")
        return final_text
    except Exception as e:
        # Best-effort: on any failure fall back to the untranslated input.
        print(f"Korean translation error: {e}")
        return text
def translate_korean_to_english(text):
    """Route *text* through the appropriate translation path.

    English input: the first single-quoted word (if any) is upper-cased
    in place.  Korean input: delegated to translate_korean_text().
    Anything else — and any exception — yields the input text back.
    """
    try:
        text = normalize_quotes(text)
        if is_english(text):
            match = re.search(r"'([^']*)'", text)
            if match is None:
                return text
            return re.sub(r"'[^']*'", f"'{match.group(1).upper()}'", text, 1)
        return translate_korean_text(text) if is_korean(text) else text
    except Exception as e:
        print(f"Translation error: {e}")
        return text
@app.route('/')
def index():
    """Render the main translation page."""
    return render_template('index.html', title=app.config['TITLE'])
@app.route('/translate/', methods=['POST'])
def result():
    """Handle a translation request: input text -> English -> ASL gloss.

    Pipeline: normalize quotes, translate to English, strip quote marks,
    convert to gloss with the spaCy-based translator, fingerspell any
    word the user quoted, then map the remaining tokens onto the known
    2000-token vocabulary via synonym lookup.  Renders result.html on
    success, error.html on empty input or any failure.
    """
    if request.method == 'POST':
        input_text = request.form['inputSentence'].strip()
        if not input_text:
            return render_template('error.html', error="Please enter text to translate")
        try:
            input_text = normalize_quotes(input_text)
            english_text = translate_korean_to_english(input_text)
            if not english_text:
                raise Exception("Translation failed")
            # Words the user quoted are fingerspelled rather than glossed.
            quoted_words = find_quoted_words(english_text)
            clean_english = re.sub(r"'([^']*)'", r"\1", english_text)
            eng_to_asl_translator = NlpSpacyBaseTranslator(sentence=clean_english)
            generated_gloss = eng_to_asl_translator.translate_to_gloss()
            # Pass 1: expand quoted words into FINGERSPELL-delimited letters;
            # everything else is lower-cased as a normal gloss token.
            processed_gloss = []
            words = generated_gloss.split()
            for word in words:
                word_upper = word.upper()
                if quoted_words and word_upper in [w.upper() for w in quoted_words]:
                    spelled_word = spell_out_word(word)
                    processed_gloss.extend(['FINGERSPELL-START'] + spelled_word.split() + ['FINGERSPELL-END'])
                else:
                    processed_gloss.append(word.lower())
            gloss_sentence_before_synonym = " ".join(processed_gloss)
            # Pass 2: synonym-map ordinary tokens onto the 2000-token
            # vocabulary, copying fingerspelled letter runs through unchanged.
            final_gloss = []
            i = 0
            while i < len(processed_gloss):
                if processed_gloss[i] == 'FINGERSPELL-START':
                    # Copy the START marker plus the first letter, then the
                    # rest of the letters up to and including END.
                    final_gloss.extend(processed_gloss[i:i+2])
                    i += 2
                    while i < len(processed_gloss) and processed_gloss[i] != 'FINGERSPELL-END':
                        final_gloss.append(processed_gloss[i])
                        i += 1
                    if i < len(processed_gloss):
                        final_gloss.append(processed_gloss[i])
                        i += 1
                else:
                    word = processed_gloss[i]
                    final_gloss.append(sp.find_synonyms(word, nlp, dict_docs_spacy, list_2000_tokens))
                    i += 1
            gloss_sentence_after_synonym = " ".join(final_gloss)
            return render_template('result.html',
                                   title=app.config['TITLE'],
                                   original_sentence=input_text,
                                   english_translation=english_text,
                                   gloss_sentence_before_synonym=gloss_sentence_before_synonym,
                                   gloss_sentence_after_synonym=gloss_sentence_after_synonym)
        except Exception as e:
            return render_template('error.html', error=f"Translation error: {str(e)}")
def generate_complete_video(gloss_list, dataset, list_2000_tokens):
    """Render the gloss sequence into a single MP4 and return its bytes.

    FINGERSPELL-START / FINGERSPELL-END are control markers, not signs,
    so they are skipped.  Frames arrive from display_gloss as multipart
    JPEG chunks; each is decoded, collected, and re-encoded at 25 fps.

    Raises (after logging) when no frames were produced or any decode /
    encode step fails.  Fix vs. original: the temp file and the
    VideoWriter are now cleaned up in a ``finally`` block, so an
    exception mid-way no longer leaks either resource.
    """
    temp_path = None
    writer = None
    try:
        frames = []
        for gloss in gloss_list:
            # Skip fingerspelling delimiters; they carry no video.
            if gloss in ('FINGERSPELL-START', 'FINGERSPELL-END'):
                continue
            for frame in dg.generate_video([gloss], dataset, list_2000_tokens):
                # Each chunk is a multipart part: headers, a blank line
                # (\r\n\r\n), then the JPEG payload.
                frame_data = frame.split(b'\r\n\r\n')[1]
                nparr = np.frombuffer(frame_data, np.uint8)
                img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                frames.append(img)
        if not frames:
            raise Exception("No frames generated")
        height, width = frames[0].shape[:2]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # Only reserve a unique path; the writer reopens it itself.
        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
            temp_path = temp_file.name
        writer = cv2.VideoWriter(temp_path, fourcc, 25, (width, height))
        for frame in frames:
            writer.write(frame)
        writer.release()
        writer = None
        with open(temp_path, 'rb') as f:
            video_bytes = f.read()
        return video_bytes
    except Exception as e:
        print(f"Error generating video: {str(e)}")
        raise
    finally:
        # Always release the writer and remove the temp file, even when an
        # exception propagates out of the block above.
        if writer is not None:
            writer.release()
        if temp_path is not None and os.path.exists(temp_path):
            os.remove(temp_path)
@app.route('/video_feed')
def video_feed():
    """Stream the gloss sentence as live multipart-JPEG video frames."""
    sentence = request.args.get('gloss_sentence_to_display', '')
    gloss_list = sentence.split()
    return Response(dg.generate_video(gloss_list, dataset, list_2000_tokens),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/download_video/<path:gloss_sentence>')
def download_video(gloss_sentence):
    """Build the complete MP4 for *gloss_sentence* and send it as a download.

    The URL path segment is percent-decoded and split into gloss tokens.
    Returns 400 on empty input and 500 on any generation failure.
    """
    try:
        glosses = unquote(gloss_sentence).split()
        if not glosses:
            return "No gloss provided", 400
        video_bytes = generate_complete_video(glosses, dataset, list_2000_tokens)
        if not video_bytes:
            return "Failed to generate video", 500
        buffer = io.BytesIO(video_bytes)
        return send_file(buffer,
                         mimetype='video/mp4',
                         as_attachment=True,
                         download_name='sign_language.mp4')
    except Exception as e:
        print(f"Download error: {str(e)}")
        return f"Error downloading video: {str(e)}", 500
if __name__ == "__main__":
    # Listen on all interfaces; port 7860 — presumably the Hugging Face
    # Spaces convention.  NOTE(review): debug=True should be disabled in
    # production (it enables the interactive debugger).
    app.run(host="0.0.0.0", port=7860, debug=True)