Update app.py
app.py
CHANGED
@@ -20,25 +20,25 @@ import textwrap
 import pandas as pd
 
 #Uncomment these for Huggingface
-nltk.download('maxent_ne_chunker') #Chunker
-nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
-nltk.download('words') #200 000+ Alphabetical order list
-nltk.download('punkt') #Tokenizer
-nltk.download('verbnet') #For Description of Verbs
-nltk.download('omw')
-nltk.download('omw-1.4') #Multilingual Wordnet
-nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
-nltk.download('shakespeare')
-nltk.download('dolch') #Sight words
-nltk.download('names') #People Names NER
-nltk.download('gazetteers') #Location NER
-nltk.download('opinion_lexicon') #Sentiment words
-nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
-
-spacy.cli.download("en_core_web_sm")
-spacy.cli.download('ko_core_news_sm')
-spacy.cli.download('ja_core_news_sm')
-spacy.cli.download('zh_core_web_sm')
+#nltk.download('maxent_ne_chunker') #Chunker
+#nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
+#nltk.download('words') #200 000+ Alphabetical order list
+#nltk.download('punkt') #Tokenizer
+#nltk.download('verbnet') #For Description of Verbs
+#nltk.download('omw')
+#nltk.download('omw-1.4') #Multilingual Wordnet
+#nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
+#nltk.download('shakespeare')
+#nltk.download('dolch') #Sight words
+#nltk.download('names') #People Names NER
+#nltk.download('gazetteers') #Location NER
+#nltk.download('opinion_lexicon') #Sentiment words
+#nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
+
+#spacy.cli.download("en_core_web_sm")
+#spacy.cli.download('ko_core_news_sm')
+#spacy.cli.download('ja_core_news_sm')
+#spacy.cli.download('zh_core_web_sm')
 
 nlp = spacy.load('en_core_web_sm')
 translator = Translator()
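This hunk comments out the one-off NLTK corpus and spaCy model downloads so they are not re-run on every Space restart. An alternative that avoids toggling comments by hand is to guard each download behind a check. A minimal sketch, assuming the same nltk and spacy imports app.py already uses (ensure_nltk and ensure_spacy are illustrative helper names, not functions from the file):

import nltk
import spacy
import spacy.cli

def ensure_nltk(package, subdir="corpora"):
    # Download an NLTK resource only if it is not already on disk.
    try:
        nltk.data.find(f"{subdir}/{package}")
    except LookupError:
        nltk.download(package)

def ensure_spacy(model):
    # Download a spaCy model only if spacy.load() cannot find it.
    try:
        return spacy.load(model)
    except OSError:
        spacy.cli.download(model)
        return spacy.load(model)

ensure_nltk("wordnet")
ensure_nltk("punkt", subdir="tokenizers")
nlp = ensure_spacy("en_core_web_sm")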
@@ -191,8 +191,8 @@ def merge_lines(roman_file, w4w_file, full_mean_file, macaronic_file):
 
 return "\n".join(merged_lines)
 
-TTSLangOptions = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
-TTSLangOptions2 = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
+TTSLangOptions = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
+TTSLangOptions2 = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
 
 def TTSforListeningPractice(text, language = "en"):
 speech = gTTS(text=text, lang=language, slow="False")
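Only the dropdown labels change here, but the unchanged context is worth a note: gTTS documents slow as a boolean, and the string "False" passed on line 198 is truthy, so it would be treated as a request for slow speech. A minimal sketch of the same helper with a boolean flag; the mp3 handling at the end is an assumption, since the rest of the function sits outside this hunk:

from gtts import gTTS

def TTSforListeningPractice(text, language="en"):
    # slow expects a bool; the string "False" evaluates as True and would slow the audio.
    speech = gTTS(text=text, lang=language, slow=False)
    speech.save("tts_output.mp3")  # assumption: the real function returns audio for a gr.Audio output
    return "tts_output.mp3"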
@@ -461,8 +461,8 @@ def create_collapsiblebutton(button_id, button_caption, div_content):
 #---------------
 
 def removeTonalMarks(string):
-tonalMarks = "
-nonTonalMarks = "
+tonalMarks = "āēīōūǖáéíóúǘǎěǐǒǔǚàèìòùǜɔɛ"
+nonTonalMarks = "aeiouuaeiouuaeiouuaeiouoe"
 noTonalMarksStr = ""
 for char in string:
 index = tonalMarks.find(char)
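The new strings drive a character-by-character substitution (pinyin vowels with tone marks mapped to plain vowels). As shown in the diff, tonalMarks has 26 characters while nonTonalMarks has 25, so the last entries may map incorrectly. A translation-table version makes such a mismatch fail loudly, since str.maketrans requires equal-length strings; this is an illustrative alternative, not the code in app.py:

def remove_tonal_marks(s):
    tonal = "āēīōūǖáéíóúǘǎěǐǒǔǚàèìòùǜɔɛ"
    plain = "aeiouuaeiouuaeiouuaeiouuoe"  # 26 characters, one plain vowel per tonal character
    # str.maketrans raises ValueError if the two strings differ in length.
    return s.translate(str.maketrans(tonal, plain))

print(remove_tonal_marks("nǐ hǎo"))  # -> "ni hao"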
@@ -474,6 +474,7 @@ def removeTonalMarks(string):
 
 
 def add_text_to_image(input_image, text, output_image_path="output.png", border_size=2):
+text = removeTonalMarks(text)
 imagearr = np.asarray(input_image) #Image.open(input_image_path)
 width, height = imagearr.shape[:2] #width, height = image.size
 img = Image.fromarray(imagearr)
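The added line strips tonal marks from the caption before it is drawn, which matches the UI note later in this diff that foreign letters have to be replaced with roman ones for the image annotator to work. The rest of add_text_to_image is not in the hunk; a minimal sketch of how such a helper typically continues with Pillow, where the font choice and text placement are assumptions:

import numpy as np
from PIL import Image, ImageDraw, ImageFont

def add_text_to_image(input_image, text, output_image_path="output.png", border_size=2):
    # A translate() call stands in for removeTonalMarks here.
    text = text.translate(str.maketrans("āēīōūǖáéíóúǘǎěǐǒǔǚàèìòùǜ", "aeiouuaeiouuaeiouuaeiouu"))
    imagearr = np.asarray(input_image)
    width, height = imagearr.shape[:2]  # note: numpy shape is (rows, cols), i.e. (height, width)
    img = Image.fromarray(imagearr)
    draw = ImageDraw.Draw(img)
    font = ImageFont.load_default()     # assumption: the real code may load a larger TTF font
    draw.text((border_size, border_size), text, fill="white", font=font)
    img.save(output_image_path)
    return img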
@@ -785,28 +786,31 @@ Translationchuncksize = gr.Number(value=4998)
 
 with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
 gr.HTML("<p>Timing Practice - Repitition: Run from it, Dread it, Repitition is inevitable - Thanos --> Repitition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repitition is a multitask activity like driving must be subconcious process to show mastery </p>")
-gr.HTML("""<a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos
+gr.HTML("""<a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | """)
 with gr.Row():
 with gr.Column(scale=1):
-with gr.Tab("
+with gr.Tab("Rep - Gradio"):
 gr.HTML("""Gradio Version Below <iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
-See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
-
+See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
+with gr.Tab("Rep - Gradio"):
+gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
 with gr.Tab("Navigation"):
-gr.HTML("Picture Annotation <br> Chorus Focused Word List <br> Merged Subtitles <br> Repetitive Audio (TTS) <br> Word and Sentence Jumbling <br>")
+gr.HTML("Picture Annotation <br> Chorus Focused Word List <br> Merged Subtitles <br> Repetitive Audio (TTS) <br> Word and Sentence Jumbling <br> Unkown: Wordnet <br> Unknown: Wikipeadia <br>")
+with gr.Tab("Vector Database = Memorisation"):
+gr.HTML("Open AI - 2500000 character text = <1$ (0.0004 per 1000 tokens), Cohere Multilingual = free for personal use / Commercial use = \n Vector Database query = Better than text search but not for logical relationships")
 with gr.Column(scale=3):
 with gr.Tab("Beginner - Listen + Read"):
-with gr.
-gr.
-
-
-
-
-
-gr.
-
-
-
+with gr.Row():
+with gr.Column(scale=1):
+gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <hr>")
+gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions], outputs="audio", description="Paste chorus lyrics from below here and use TTS or make notes to save here (Or paste anything)")
+gr.HTML("<p>Fastest way to learn words = is to have your own sound reference --> probably why babies learn fast as they make random noise</p> <p>If you know the flow of the song you can remember the spelling easier</p><p>Essentially if the sounds are repeated or long notes they are easy to remember</p>")
+gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
+gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repitition for tts practice")
+with gr.Column(scale=1):
+gr.HTML("""Reading - Caption images (SD/Dalle-E) <br> <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> --Huggingface CLIP-Interrogator Space-- </a><br> """)
+gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
+gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit <br> if theres an error you have to remove the foreign letters and place roman ones")
 #with gr.Tab("Transcribe - RASMUS Whisper"):
 #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
 with gr.Tab("Advanced - LingQ Addon Ideas"):
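This hunk adds the left-hand "Rep - Gradio", "Navigation" and "Vector Database = Memorisation" tabs and fills the "Beginner - Listen + Read" tab with a two-column layout. Because the diff rendering drops indentation, the nesting is hard to see; the sketch below shows the same Blocks/Row/Column/Tab structure with stub functions standing in for the app's own handlers (TTSforListeningPractice, add_text_to_image and friends), so the indentation shown is an assumption about the original file:

import gradio as gr

def tts_stub(text, lang):        # stand-in for TTSforListeningPractice
    return None

def caption_stub(image, text):   # stand-in for add_text_to_image
    return image

with gr.Blocks() as demo:
    gr.HTML("<p>header links</p>")
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Tab("Rep - Gradio"):
                gr.HTML("memorisation aid iframe")
            with gr.Tab("Navigation"):
                gr.HTML("Picture Annotation <br> Merged Subtitles")
        with gr.Column(scale=3):
            with gr.Tab("Beginner - Listen + Read"):
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Interface(fn=tts_stub, inputs=["text", "text"], outputs="audio")
                    with gr.Column(scale=1):
                        gr.Interface(fn=caption_stub, inputs=["image", "text"], outputs="image")

if __name__ == "__main__":
    demo.launch()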
@@ -853,7 +857,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
 with gr.Tab("Transition is the end goal"):
 with gr.Row():
 with gr.Column():
-gr.Textbox("A word is a list of letter as a fact is a list of words. Both are in a specific order. What is most important is practice the order so randomiser is the tool")
+gr.Textbox("A word is a list of letter as a fact is a list of words. Both are in a specific order. What is most important is practice the order so randomiser is the tool", lines=4)
 gr.Interface(fn=RandomiseTextbyType, inputs=["text", RandomiseTextType], outputs="text", description="Randomise order within words, sentences, paragrahs")
 with gr.Column():
 #with gr.Tab("Collocations (Markov)"):
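The change here only adds lines=4 so the explanatory textbox is taller. RandomiseTextbyType itself is defined elsewhere in app.py; as a rough illustration of the idea in the textbox (practising order by shuffling), a word- and sentence-level randomiser could look like the sketch below, where the mode names are assumptions:

import random

def randomise_text(text, mode="words in sentence"):
    # Shuffle at the granularity named by mode; app.py's RandomiseTextbyType may differ.
    if mode == "letters in word":
        return " ".join("".join(random.sample(word, len(word))) for word in text.split())
    if mode == "words in sentence":
        words = text.split()
        random.shuffle(words)
        return " ".join(words)
    return text

print(randomise_text("practice the order of the words"))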
@@ -862,7 +866,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
 gr.Interface(fn=markov_generate, inputs=["text", Markovlength], outputs="text", description="Generate Text based on the collocations in the text")
 with gr.Column():
 #with gr.Tab("Spelling + Chunks"):
-gr.
+gr.Textbox("Merged Spelling Practice Placeholder - Spell multiple words simultaneously for simultaneous access", lines=3)
 gr.HTML("<p> Spell multiple words simultaneously for simultaneous access </p> <p> Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
 gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
 gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Noun and Verbs only (Plus punctuation)")
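The placeholder textbox replaces a truncated line; the interesting context is create_dictionary, titled "Sort Text by first two letters". Its body is not part of this diff; one plausible reading is grouping words under a two-letter key, as in this illustrative sketch (group_by_first_two_letters is a hypothetical name, not the function in app.py):

from collections import defaultdict

def group_by_first_two_letters(text):
    # Group each word under its first two letters, then list groups in alphabetical order.
    groups = defaultdict(list)
    for word in text.split():
        groups[word[:2].lower()].append(word)
    return "\n".join(f"{key}: {', '.join(words)}" for key, words in sorted(groups.items()))

print(group_by_first_two_letters("spell spelling simultaneous access spaces"))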
@@ -894,9 +898,10 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
 gr.HTML("Tree and Branches approach to learning = familiarity with keywords/headings/summaries before reading the whole text <hr> Productivity/Work revolves around repitition which can be found looking for plurals and grouping terms eg. Headings and Hyper/Hyponyms Analysis")
 with gr.Tab("AR"):
 gr.Textbox("Alpha Test version = Real time Lablling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid")
+gr.HTML("Some Prompt ideas --> Prompt: Describe the place where these descriptions may be (You job is to be speculative for brainstorming purposes): A dog and a boy, the area is texas, the weather is sunny, the date is 01 May 2021 <hr> Prompt Content Ideas Ideas Clip Interrogator + Location Data aka tags for place, location and time + general news updates on the location + overview of the items in the location <br> Location based advise is most important but after that is information observed by appliances in the location eg. Times Computer turned on, times geyser inspected, amount of time keys havent been touched etc. <br> each location will have an ai personality that will relay more information ")
 gr.HTML("<a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a>")
 gr.Interface(fn=arrealtimetestidea, inputs='image', outputs="text", description="Vision Assistant - see and execute")
-gr.
+gr.Textbox("Placeholder for webcam stream")
 #gr.Interface(fn=arrealtimetestidea, inputs='webcam', outputs="text", description="Vision Assistant aka Free Observation llm judgement (GPT Vision API goes here when released). FPS is the difference between realtime app and static image")
 with gr.Tab("Random Ideas"):
 gr.HTML("""<p>Spaces Test - Still Undercontruction --> Next Milestone is Turning this interface handsfree | Knowledge is a Language but productive knowledge is find replace as well | LingQ is good option for per word state management</p> <p> Arrows app json creator for easy knowledge graphing and spacy POS graph? --> Questions? -->
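The webcam interface stays commented out, with a placeholder textbox added in its place. When it is re-enabled, the wiring would follow the commented line; the sketch below keeps the same 'webcam' input shortcut from that line, with a stub body standing in for the real arrealtimetestidea (which would run SAM / CLIP Interrogator on the frame):

import gradio as gr

def arrealtimetestidea(frame):
    # Stub body: the real function would label what is in the frame.
    return "labels for the current frame"

demo = gr.Interface(fn=arrealtimetestidea, inputs='webcam', outputs="text",
                    description="Vision Assistant - label what the camera sees")

if __name__ == "__main__":
    demo.launch()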
|