BilalSardar's picture
Create app.py
5b80433
raw
history blame
2.35 kB
import cv2
import os
from moviepy.editor import *
import gradio as gr
def parse_string(string, dataset):
    """Greedily tokenize *string* against the known names in *dataset*.

    At each position the longest *dataset* entry that matches (lowercase
    prefix comparison) is consumed.  Multi-character matches are emitted
    as-is; single-character matches are buffered and flushed as one word
    when an unmatched character is hit (or at end of input).

    Parameters
    ----------
    string : str
        Input text; expected lowercase, since entries are lowered before
        comparison.
    dataset : list[str]
        Known phrase names to match against.

    Returns
    -------
    list[str]
        Matched phrases plus accumulated unmatched/single-char runs (may
        contain empty strings; callers filter with remove_empty_values).
    """
    parsed_list = []
    otherword = ""
    start = 0
    end = len(string)
    while start < end:
        # Find the longest dataset entry matching at the current position.
        max_chunk = ""
        max_length = 0
        # BUG FIX: iterate the `dataset` parameter instead of the global
        # VideosNames the original silently depended on.
        for chunk in dataset:
            if string.startswith(chunk.lower(), start) and len(chunk) > max_length:
                max_chunk = chunk
                max_length = len(chunk)
        if max_chunk:
            if len(max_chunk) > 1:
                parsed_list.append(max_chunk)
                print(max_chunk)
            else:
                # Single-character match: accumulate into the pending word.
                otherword += max_chunk
            start += len(max_chunk)
        else:
            # Unmatched character: flush the pending buffer, skip the char.
            parsed_list.append(otherword)
            otherword = ""
            start += 1
    # BUG FIX: flush a trailing single-char run so it is not silently lost
    # when the string ends on buffered characters.
    if otherword:
        parsed_list.append(otherword)
    return parsed_list
def remove_empty_values(lst):
    """Return *lst* with all falsy entries (None, "", 0, [], {}, ...) removed.

    The original condition ``x and (not isinstance(x, (str, list, dict)) or x)``
    was redundant: once ``x`` is truthy the second clause is always truthy
    too, so the whole test collapses to plain truthiness.
    """
    return [x for x in lst if x]
def flatten_lists(lst):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Parameters
    ----------
    lst : list
        A list whose elements may themselves be (nested) lists.

    Returns
    -------
    list
        All non-list leaf elements, in depth-first order.
    """
    flat_list = []
    for item in lst:
        # Idiom fix: isinstance() instead of `type(i) == list`, which also
        # accepts list subclasses.
        if isinstance(item, list):
            flat_list.extend(flatten_lists(item))
        else:
            flat_list.append(item)
    return flat_list
# Build the lookup tables from the files shipped in ./Dataset at import time.
path = 'Dataset'
videos = []       # per-file results of cv2.imread; never read again in this file
VideosNames = []  # lowercase base names; parse_string/texttoSign match on these
myList = os.listdir(path)
print(myList)
for cu_video in myList:
    # NOTE(review): cv2.imread on a video file (e.g. .mp4) returns None —
    # presumably cv2.VideoCapture was intended; confirm whether `videos`
    # is needed at all, since nothing below uses it.
    current_Video = cv2.imread(f'{path}/{cu_video}')
    videos.append(current_Video)
    # "My-Sign.mp4" -> "my sign": strip extension, dashes -> spaces, lowercase.
    VideosNames.append((os.path.splitext(cu_video)[0]).replace("-"," ").lower())
print(VideosNames)
def texttoSign(text):
    """Render *text* as a sign-language video assembled from Dataset clips.

    Words present in the module-level ``VideosNames`` list keep their
    dedicated clip; unknown words are split into individual letters so
    per-letter clips can be used instead.  Each matching
    ``Dataset/<name>.mp4`` is trimmed to seconds 1-4 and all clips are
    concatenated into ``combined.mp4``.

    Parameters
    ----------
    text : str
        Input text (expected lowercase to match ``VideosNames`` entries).

    Returns
    -------
    str
        Path of the written output file, "combined.mp4".
    """
    listofwords = parse_string(text, VideosNames)
    listofwords = remove_empty_values(listofwords)
    # Replace any word without a dedicated clip by its individual letters.
    # (Idiom fix: enumerate instead of a hand-maintained index counter.)
    for index, word in enumerate(listofwords):
        if word not in VideosNames:
            listofwords[index] = list(word)
    listofwords = flatten_lists(listofwords)
    clips = []
    for i, word in enumerate(listofwords):
        clips.append(VideoFileClip("Dataset/" + word + ".mp4"))
        # Fixed trim window: keep seconds 1-4 of every clip.
        clips[i] = clips[i].subclip(1, 4)
    result_clip = concatenate_videoclips(clips)
    result_clip.write_videofile("combined.mp4")
    return "combined.mp4"
# Wire the converter into a simple Gradio text-in / video-out interface.
demo = gr.Interface(
    fn=texttoSign,
    inputs="text",
    outputs="video",
    title="Urdu Text To Sign",
    # Typo fix in the user-facing description: "langugae" -> "language".
    description="This is a small text to sign language model based on Urdu sign language standards",
    examples=[["good boy"]],
)
demo.launch(debug=True)