Update app.py

app.py CHANGED
@@ -16,6 +16,8 @@ from collections import Counter
 from PIL import Image, ImageDraw, ImageFont
 import numpy as np
 
+
+#Uncomment these for Huggingface
 nltk.download('maxent_ne_chunker') #Chunker
 nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
 nltk.download('words') #200 000+ Alphabetical order list
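Note on the download calls above: nltk.download() contacts the NLTK server on every startup, which slows Space restarts. A minimal guarded variant (sketch; ensure_nltk_data is a hypothetical helper, not part of this commit):

    import nltk

    def ensure_nltk_data(package, category="corpora"):
        # Hypothetical helper: download only if the package is missing locally
        try:
            nltk.data.find(f"{category}/{package}")
        except LookupError:
            nltk.download(package)

    ensure_nltk_data("stopwords")                               # corpora/stopwords
    ensure_nltk_data("maxent_ne_chunker", "chunkers")           # chunker model
    ensure_nltk_data("averaged_perceptron_tagger", "taggers")   # POS tagger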
@@ -32,6 +34,9 @@ nltk.download('opinion_lexicon') #Sentiment words
 nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
 
 spacy.cli.download("en_core_web_sm")
+spacy.cli.download('ko_core_news_sm')
+spacy.cli.download('ja_core_news_sm')
+spacy.cli.download('zh_core_web_sm')
 
 nlp = spacy.load('en_core_web_sm')
 translator = Translator()
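Note on the spaCy models: spacy.cli.download() pip-installs a model package on every boot. A load-or-download sketch, assuming the same model names the commit uses (load_spacy_model is a hypothetical helper):

    import spacy

    def load_spacy_model(name):
        # spacy.load raises OSError when the model package is not installed
        try:
            return spacy.load(name)
        except OSError:
            spacy.cli.download(name)
            return spacy.load(name)

    nlp = load_spacy_model("en_core_web_sm")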
@@ -184,8 +189,10 @@ def merge_lines(roman_file, w4w_file, full_mean_file, macaronic_file):
 
     return "\n".join(merged_lines)
 
-
-def TTSforListeningPractice(text, language = "en"):
+TTSLangOptions = gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
+TTSLangOptions2 = gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
+
+def TTSforListeningPractice(text, language = "en"):
     speech = gTTS(text=text, lang=language, slow="False")
     speech.save("CurrentTTSFile.mp3")
     #file = BytesIO()
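Note on TTSforListeningPractice: gTTS expects a boolean for slow, and slow="False" is a non-empty string, i.e. truthy, so every file is generated at the slowed-down speed. A sketch of the call with a real boolean; returning the saved path is an assumption about how the "audio" output is fed, since the rest of the function sits outside this hunk:

    from gtts import gTTS

    def TTSforListeningPractice(text, language="en"):
        speech = gTTS(text=text, lang=language, slow=False)  # boolean, not "False"
        speech.save("CurrentTTSFile.mp3")
        return "CurrentTTSFile.mp3"  # a filepath is a valid Gradio audio output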
@@ -281,14 +288,23 @@ def split_verbs_nouns(text):
 
     return pos_string_text, verbs_nouns_text, other_words_text
 
-def split_srt_file(text): #file_path):
+SRTLangOptions = gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
+
+def split_srt_file(text, lang): #file_path):
     # Open the SRT file and read its contents
     #with open(file_path, 'r') as f:
     #    srt_contents = f.read()
+
+    if lang == "en": nlp = spacy.load('en_core_web_sm')
+    if lang == "ja": nlp = spacy.load('ja_core_news_sm')
+    if lang == "ko": nlp = spacy.load('ko_core_news_sm')
+    if lang == "zn-cn": nlp = spacy.load('zn_core_web_sm')
+
     srt_contents = text
 
     # Split the SRT file by timestamp
     srt_sections = srt_contents.split('\n\n')
+    srt_sections_POSversion = []
 
     # Loop through each section of the SRT file
     for i in range(len(srt_sections)):
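Note on the language dispatch just added: the dropdowns offer "zh-cn", but the check reads lang == "zn-cn" and loads 'zn_core_web_sm', while the model downloaded at the top of the file is 'zh_core_web_sm'. Because nlp is assigned inside the function, an unmatched lang (such as the real "zh-cn" dropdown value) raises UnboundLocalError instead of falling back to the module-level model. A dictionary keeps codes and model names in one place (sketch, same models as the commit; get_nlp is hypothetical):

    SPACY_MODELS = {
        "en": "en_core_web_sm",
        "ja": "ja_core_news_sm",
        "ko": "ko_core_news_sm",
        "zh-cn": "zh_core_web_sm",  # matches the dropdown value and the download
    }

    def get_nlp(lang):
        return spacy.load(SPACY_MODELS[lang])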
@@ -296,15 +312,21 @@ def split_srt_file(text): #file_path):
         section_lines = srt_sections[i].split('\n')
         timestamp = section_lines[1]
         subtitle_text = ' | '.join(section_lines[2:])
+        sub_split_line = nlp(subtitle_text)
+        subtitle_textPOSversion = ""
 
         # Replace spaces in the subtitle text with " | "
-        subtitle_text = subtitle_text.replace(' ', ' | ')
+        #subtitle_text = subtitle_text.replace(' ', ' | ')
+        for token in sub_split_line:
+            subtitle_text += token.text + " | "
+            subtitle_textPOSversion += token.pos_ + " | "
 
         # Reconstruct the section with the updated subtitle text
         srt_sections[i] = f"{section_lines[0]}\n{timestamp}\n{subtitle_text[3:]}"
+        srt_sections_POSversion.append(f"{section_lines[0]}\n{timestamp}\n{subtitle_textPOSversion[3:]}\n\n")
 
     # Join the SRT sections back together into a single string
-    return '\n\n'.join(srt_sections)
+    return '\n\n'.join(srt_sections), ''.join(srt_sections_POSversion)
 
 def find_string_positions(s, string):
     positions = []
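Note on the token loop: subtitle_text is not reset before the loop, so the original ' | '-joined line stays as a prefix of the word-for-word output, and the [3:] slice (a leftover from the old leading-" | " replace format) now trims real characters from both outputs. A sketch of the loop body with fresh accumulators (w4w_text and pos_text are renamed locals, not names from the commit):

    sub_split_line = nlp(subtitle_text)
    w4w_text = ""   # build fresh instead of appending to the joined line
    pos_text = ""
    for token in sub_split_line:
        w4w_text += token.text + " | "
        pos_text += token.pos_ + " | "
    srt_sections[i] = f"{section_lines[0]}\n{timestamp}\n{w4w_text}"
    srt_sections_POSversion.append(f"{section_lines[0]}\n{timestamp}\n{pos_text}\n\n")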
@@ -318,7 +340,8 @@ def find_string_positions(s, string):
     return positions
 
 def splittext(string):
-    split_positions = find_string_positions(string, " --> ")
+    string_no_formaterror = string.replace(" -- > ", " --> ")
+    split_positions = find_string_positions(string_no_formaterror, " --> ")
     split_strings = []
    prepos = 0
     for pos in split_positions:
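For reference, find_string_positions appears to return every index at which the substring starts, which is what splittext uses to cut at the SRT timestamp arrows:

    print(find_string_positions("a --> b --> c", " --> "))   # [1, 7]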
@@ -329,7 +352,7 @@ def splittext(string):
     FinalOutput = ""
     stoutput = ""
     linenumber = 1
-    print(linenumber)
+    #print(linenumber)
     for item in split_strings[1:]:
         stoutput = item[0:29] + "\n" + item[30:]
         stspaces = find_string_positions(stoutput, " ")
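The new replace line in splittext guards against Google Translate splitting the arrow: translated SRT text can come back as " -- > ", which would make find_string_positions miss every timestamp. For example:

    "00:00:01,000 -- > 00:00:03,000".replace(" -- > ", " --> ")
    # -> '00:00:01,000 --> 00:00:03,000'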
@@ -388,7 +411,7 @@ def text_to_links(text): #TextCompFormat
     html = ""
     for line in lines:
         if line.startswith("http"):
-            html += f
+            html += f"<a href='{line}'>{line}</a><br> \n"
         else:
             html += line + "Not a link <br> \n"
     return html
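A quick usage sketch of text_to_links as patched (input lines invented for illustration, and assuming the function splits its input on newlines); note the non-link branch concatenates the line and "Not a link" with no separating space:

    print(text_to_links("https://example.com\nplain text"))
    # <a href='https://example.com'>https://example.com</a><br>
    # plain textNot a link <br>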
@@ -450,40 +473,31 @@ groupinput_text = gr.inputs.Textbox(lines=2, label="Enter a list of words")
 groupoutput_text = gr.outputs.Textbox(label="Grouped words")
 
 with gr.Blocks() as lliface:
-    gr.HTML("<p>Audio = best long form attention mechanism AS it is ANTICIPATION (Awareness of something before it happens like knowing song Lyrics) FOCUSED - Attention (Focused Repitition) + Exposure (Random Repitition) </p>")
+    gr.HTML("<p> Target 1: Dual audio at word Level while using repitition to train random recall --> Word level Time <br> Target 2: Video --> Split by sentence --> each word repeated (60) + each phrase (10) + each sentence (10) --> TTS file for practice --> State Management/Known word Tracker <hr> The trick is minimum one minute of focus on a new word --> Listening is hard because there are new word within seconds and you need repeated focus on each to learn </p> <p>Audio = best long form attention mechanism AS it is ANTICIPATION (Awareness of something before it happens like knowing song Lyrics) FOCUSED - Attention (Focused Repitition) + Exposure (Random Repitition) </p>")
+    gr.HTML("""<hr> <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a>""")
     with gr.Tab("Welcome"):
-        gr.HTML("""<
-        <p>
-        <p>
-
-        <
-
-
-        ""
+        gr.HTML("""<p>Spaces Test - Still Undercontruction | Knowledge is a Language but productive knowledge is find replace as well | LingQ is good option for per word state management</p> <p> Arrows app json creator for easy knowledge graphing and spacy POS graph? --> Questions? -->
+        <p> ChatGPT Turns Learning into a read only what you dont know ask only what you dont know feedback loop --> All you have to do is keep track of what prompts you have asked in the past</p> """)
+        gr.HTML("Timing Practice - Repitition <hr> <p>Run from it, Dread it, Repitition is inevitable - Thanos --> Next Milestone is Turning this interface handsfree</p>")
+        gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
+        gr.HTML("""HTML Version <hr> <iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/preview/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
+        See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
+    with gr.Tab("Unknown Tracker"):
+        gr.HTML("Repitition of things you know is a waste of time when theres stuff you dont know <p> In Language the goal is bigger vocab --> Knowledge equivalent = question answer pairs but to get to those you need related information pairs</p> <p> Vocab = Glossary + all non text wall(lists, diagrams, etc.)</p>")
+        gr.Textbox("Placeholder for a function that creates a set list and can takes a list for known words and auto find replaces the stuff you know out of the content")
     with gr.Tab("Unique word ID - use in Infranodus"):
-        gr.Interface(fn=unique_word_count, inputs="text", outputs="text",
-        gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"],
-        gr.Interface(fn=WikiSearch, inputs="text", outputs="text",
+        gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
+        gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
+        gr.Interface(fn=WikiSearch, inputs="text", outputs="text", description="Unique word suggestions(wiki articles)")
     with gr.Tab("Automating related information linking"):
         gr.HTML("Questions - Tacking and suggesting questions to ask = new education")
-    with gr.Tab("
-        gr.HTML("<p>Run from it, Dread it, Repitition is inevitable - Thanos</p> <p>Next Milestone is Turning this interface handsfree</p>")
-        with gr.Tab("Gradio Version"):
-            gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, title="Word Grouping and Rotation", description="Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
-        with gr.Tab("HTML Version"):
-            gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/preview/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
-            See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj">
-            Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>)
-            on <a href="https://codepen.io">CodePen</a>.
-            </iframe>""")
-    with gr.Tab("Beginner - Listening and Reading"):
+    with gr.Tab("Beginner - Listen + Read"):
         with gr.Tab("Listening - Songs - Chorus"):
             gr.HTML("Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <hr>")
             gr.HTML("<p>Fastest way to learn words = is to have your own sound reference --> probably why babies learn fast as they make random noise</p> <p>If you know the flow of the song you can remember the spelling easier</p><p>Essentially if the sounds are repeated or long notes they are easy to remember</p>")
-            gr.HTML("""<hr><a href="https://translate.google.com/?hl=en&tab=TT"> --Google Translate-- </a><br>""")
             gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
             gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repitition for tts practice")
-            gr.Interface(fn=TTSforListeningPractice, inputs="text", outputs="
+            gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions], outputs="audio", description="Placeholder - paste chorus here and use TTS or make notes to save here")
         with gr.Tab("Reading - Caption images (SD/Dalle-E)"):
             gr.HTML("Predictable to identify the parts of picture being described --> The description moves in one direction from one side of the image to the other side is easiest <hr>")
             gr.HTML("Image = instant comprehension like Stable Diffusion --> Audiovisual experience is the most optimal reading experience <br> Manga with summary descriptions for the chapters = Most aligned visual to audio experience")
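On TTSLangOptions vs TTSLangOptions2: a Gradio component instance renders in one place only, which appears to be why the commit creates two identical dropdowns for the two gr.Interface blocks that need one. A factory keeps the duplication in one spot (sketch; tts_lang_dropdown is hypothetical):

    def tts_lang_dropdown():
        # fresh instance per interface, since one instance cannot render twice
        return gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")

    gr.Interface(fn=TTSforListeningPractice, inputs=["text", tts_lang_dropdown()], outputs="audio")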
@@ -492,40 +506,44 @@ with gr.Blocks() as lliface:
             gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt)")
     #with gr.Tab("Transcribe - RASMUS Whisper"):
         #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
-    with gr.Tab("Advanced - LingQ
-        gr.
-
-
-        with gr.Tab("
-            gr.HTML("
-            gr.
-            gr.HTML("
-            gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    with gr.Tab("Advanced - LingQ Addon Ideas"):
+        with gr.Tab("Audio - Only English thoughts as practice"):
+            gr.HTML("For Audio Most productive is real time recall of native (where your full reasoning ability will always be) <br><hr> Find Replace new lines of the foreign text with full stops or | to get per word translation")
+            gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions2], outputs="audio", description="Paste only english words in foreign order and then keep removing the words from this to practice as effectively")
+        with gr.Tab("Visual - Multiline Custom Video Subtitles"):
+            gr.HTML("LingQ Companion Idea - i.e. Full Translation Read along, and eventually Videoplayer watch along like RAMUS whisper space <br><br>Extra functions needed - Persitent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
+            gr.HTML("""<hr> <p>For Transcripts to any video on youtube use the link below ⬇️</p> <a href="https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles">https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles</a> | <a href="https://huggingface.co/spaces/vumichien/whisper-speaker-diarization">https://huggingface.co/spaces/vumichien/whisper-speaker-diarization</a>""")
+            #gr.HTML("<p>If Space not loaded its because of offline devopment errors please message for edit</p> <hr>")
+        with gr.Tab("Merged Subtitles"):
+            gr.HTML("Step 1 - Word for Word Translation Creation in both Directions (Paste Google Translation here)")
+            gr.Interface(fn=split_srt_file, inputs=["text", SRTLangOptions] , outputs=["text", "text"], description="SRT Contents to W4W Split SRT for Google Translate")
+            gr.HTML("Step 2 - Pronounciation (Roman) to Subtitle Format --> GTranslate returns unformatted string")
+            gr.Interface(fn=splittext, inputs="text", outputs="text", description="Text for w4w creation in G Translate")
+            gr.HTML("Step 3 - Merge into one file")
+            with gr.Row():
+                RomanFile = gr.File(label="Paste Roman")
+                W4WFile = gr.File(label="Paste Word 4 Word")
+                FullMeanFile = gr.File(label="Paste Full Meaning")
+                MacaronicFile = gr.File(label="Paste Macaronic Text")
+                SentGramFormula = gr.File(label="Paste Sentence Grammar Formula Text")
+            with gr.Row():
+                MergeButton = gr.Button()
+            with gr.Row():
+                MergeOutput = gr.TextArea(label="Output")
+            MergeButton.click(merge_lines, inputs=[RomanFile, W4WFile, FullMeanFile, MacaronicFile], outputs=[MergeOutput])
+        with gr.Tab("Split video to segments"):
+            gr.HTML("<a href='https://www.vlchelp.com/automated-screenshots-interval/'>How to make screenshot in vlc - https://www.vlchelp.com/automated-screenshots-interval/</a><br>")
+            gr.Interface(VideotoSegment, inputs=[spvvideo_file_input, spvsubtitle_file_input], outputs=spvdownload_output)
             gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
-    with gr.Tab("Spelling
+    with gr.Tab("Spelling + Chunks"):
         gr.Text("Merged Spelling Practice Placeholder - Spell multiple words simultaneously for simultaneous access")
-        gr.HTML("<p> Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
-
-
-
-
-
+        gr.HTML("<p> Spell multiple words simultaneously for simultaneous access </p> <p> Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
+        gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
+        gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Noun and Verbs only (Plus punctuation)")
+        gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text", description="Chunks creator")
+    with gr.Tab("Thinking Practice"):
+        with gr.Tab("Sentence to Format"):
+            gr.Interface(fn=split_verbs_nouns , inputs="text", outputs=["text", "text", "text"], description="Comprehension reading and Sentence Format Creator")
     with gr.Tab("Knowledge Ideas - Notetaking"):
         gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
         <p>My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient</p>
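The Merged Subtitles tab above follows the standard Blocks event pattern: declare components, then bind Button.click(fn, inputs, outputs). Note that SentGramFormula is rendered but not passed to merge_lines, and gr.Button() is created without a label. A stripped-down sketch of the same wiring (two inputs and a hypothetical merge_two callback for brevity):

    with gr.Blocks() as demo:
        roman = gr.File(label="Paste Roman")
        w4w = gr.File(label="Paste Word 4 Word")
        out = gr.TextArea(label="Output")
        btn = gr.Button("Merge")                        # give the button a label
        btn.click(merge_two, inputs=[roman, w4w], outputs=[out])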
@@ -537,4 +555,4 @@ with gr.Blocks() as lliface:
     with gr.Tab("Automated Reading Assitant"):
         gr.HTML("Tree and Branches approach to learning = familiarity with keywords/headings/summaries before reading the whole text <hr> Productivity/Work revolves around repitition which can be found looking for plurals and grouping terms eg. Headings and Hyper/Hyponyms Analysis")
 
-lliface.queue().launch()
+lliface.queue().launch() #(inbrowser="true")