from googletrans import Translator
import spacy
import gradio as gr
import nltk
from nltk.corpus import wordnet
import wikipedia
import re
import time
import random
import os
import zipfile
import ffmpeg


nltk.download('maxent_ne_chunker') #Chunker
nltk.download('stopwords') #Stop word lists (many languages)
nltk.download('words') #200,000+ word list in alphabetical order
nltk.download('punkt') #Tokenizer
nltk.download('verbnet') #For Description of Verbs
nltk.download('omw')
nltk.download('omw-1.4') #Multilingual Wordnet
nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
nltk.download('shakespeare')
nltk.download('dolch') #Sight words
nltk.download('names') #People Names NER
nltk.download('gazetteers') #Location NER
nltk.download('opinion_lexicon') #Sentiment words
nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging


spacy.cli.download("en_core_web_sm")

nlp = spacy.load('en_core_web_sm')
translator = Translator()

def Sentencechunker(sentence):
    Sentchunks = sentence.split(" ")
    chunks = []
    for i in range(len(Sentchunks)):
        chunks.append(" ".join(Sentchunks[:i+1]))
    return " | ".join(chunks)

def ReverseSentenceChunker(sentence):
    reversed_sentence = " ".join(reversed(sentence.split()))
    chunks = Sentencechunker(reversed_sentence)
    return chunks

def three_words_chunk(sentence):
    words = sentence.split()
    chunks = [words[i:i+3] for i in range(len(words)-2)]
    chunks = [" ".join(chunk) for chunk in chunks]
    return " | ".join(chunks)

def keep_nouns_verbs(sentence):
    doc = nlp(sentence)
    nouns_verbs = []
    for token in doc:
        if token.pos_ in ['NOUN','VERB','PUNCT']:
            nouns_verbs.append(token.text)
    return " ".join(nouns_verbs)

def unique_word_count(text="", state=None):
    if state is None:
        state = {}
    words = text.split()
    word_counts = state
    for word in words:
        if word in word_counts:
            word_counts[word] += 1
        else:
            word_counts[word] = 1
    sorted_word_counts = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)
    return sorted_word_counts

def Wordchunker(word):
    chunks = []
    for i in range(len(word)):
        chunks.append(word[:i+1])
    return chunks
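# Example of the letter-by-letter build-up, e.g.:
#   Wordchunker("tea") -> ["t", "te", "tea"]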

def BatchWordChunk(sentence):
  words = sentence.split(" ")
  FinalOutput = ""
  Currentchunks = ""
  ChunksasString = ""
  for word in words:
    ChunksasString = ""
    Currentchunks = Wordchunker(word)
    for chunk in Currentchunks:
      ChunksasString += chunk + " "
    FinalOutput += "\n" + ChunksasString
  return FinalOutput

# Translate from English to French

langdest = gr.Dropdown(choices=["af", "de", "es", "ko", "ja", "zh-cn"], label="Choose Language", value="de")

ChunkModeDrop = gr.Dropdown(choices=["Chunks", "Reverse", "Three Word Chunks", "Spelling Chunks"], label="Choose Chunk Type", value="Chunks")

def FrontRevSentChunk(Chunkmode, Translate, Text, langdest):
  FinalOutput = ""
  TransFinalOutput = ""
  if Chunkmode=="Chunks": 
    FinalOutput += Sentencechunker(Text)
  if Chunkmode=="Reverse":
    FinalOutput += ReverseSentenceChunker(Text)
  if Chunkmode=="Three Word Chunks": 
    FinalOutput += three_words_chunk(Text) 
  if Chunkmode=="Spelling Chunks":
    FinalOutput += BatchWordChunk(Text)
  
  if Translate: 
    TransFinalOutput = FinalOutput
    translated = translator.translate(TransFinalOutput, dest=langdest)
    FinalOutput += "\n" + translated.text
  return FinalOutput
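# Example call without translation (Translate=False avoids the googletrans request), e.g.:
#   FrontRevSentChunk("Chunks", False, "I drink tea", "de") -> "I | I drink | I drink tea"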

# Define a function to filter out non-verb, noun, or adjective words
def filter_words(words):
    # Use NLTK to tag each word with its part of speech
    tagged_words = nltk.pos_tag(words)

    # Define a set of parts of speech to keep (verbs, nouns, adjectives)
    keep_pos = {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}

    # Filter the list to only include words with the desired parts of speech
    filtered_words = [word for word, pos in tagged_words if pos in keep_pos]

    return filtered_words

def SepHypandSynExpansion(text):
  # Tokenize the text
  tokens = nltk.word_tokenize(text)
  NoHits = ""
  FinalOutput = ""

  # Find synonyms and hypernyms of each word in the text
  for token in tokens:
      synonyms = []
      hypernyms = []
      for synset in wordnet.synsets(token):
          synonyms += synset.lemma_names()
          hypernyms += [hypernym.name() for hypernym in synset.hypernyms()]
      if not synonyms and not hypernyms:
          NoHits += f"{token} | "
      else:
          FinalOutput += "\n" f"{token}: hypernyms={hypernyms}, synonyms={synonyms} \n"
  NoHits = set(NoHits.split(" | "))  
  NoHits = filter_words(NoHits)
  NoHits = "Words to pay special attention to: \n" + str(NoHits)
  return NoHits, FinalOutput
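# Returns two strings: first the tokens with no WordNet hits (filtered to nouns/verbs/adjectives
# as "words to pay special attention to"), then a per-token listing of hypernyms and synonyms.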


def WikiSearch(term):
    termtoks = term.split(" ")
    results = []
    for item in termtoks:
        # Search for each word on Wikipedia and collect up to 20 article titles
        results += wikipedia.search(item, results=20)
    return results

def create_dictionary(word_list, word_dict=None):
    # Avoid a shared mutable default argument between calls
    if word_dict is None:
        word_dict = {}
    word_list = set(word_list.split(" "))
    for word in word_list:
        key = word[:2]
        if key not in word_dict:
            word_dict[key] = [word]
        else:
            word_dict[key].append(word)
    return word_dict
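# Groups words by their first two letters, e.g. (ordering may vary because of the set):
#   create_dictionary("tea ten dog") -> {"te": ["tea", "ten"], "do": ["dog"]}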

def merge_lines(roman_file, w4w_file, full_mean_file, macaronic_file):
    merged_lines = []

    with open(roman_file.name, "r") as f1, open(w4w_file.name, "r") as f2, \
            open(full_mean_file.name, "r") as f3, open(macaronic_file.name, "r") as f4:
        for lines in zip(f1, f2, f3, f4):
            merged_line = "\n".join(line.strip() for line in lines)
            merged_lines.append(merged_line)

    return "\n".join(merged_lines)

def TTSforListeningPractice(text):
	return "not finished"

def group_words(inlist):
    inlisttoks = inlist.split(" ")
    inlistset = set(inlisttoks)

    word_groups = []
    current_group = []

    for word in inlisttoks:
        current_group.append(word)
        if len(current_group) == 10:
            word_groups.append(current_group)
            current_group = []
    if current_group:
        word_groups.append(current_group)

    current_group_index = 0
    current_group_time = 0

    while True:
        if current_group_time == 60:
            current_group_index = (current_group_index + 1) % len(word_groups)
            current_group_time = 0
        else:
            if current_group_time % 10 == 0:
                random.shuffle(word_groups[current_group_index])
            current_group_time += 10

        yield " ".join(word_groups[current_group_index])
        time.sleep(10) 
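# Note: this is an infinite generator intended for the queued Gradio interface below;
# it re-shuffles the current 10-word group on each 10-second step and rotates to the
# next group after 60 seconds.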

def split_verbs_nouns(text):
    # Reuse the module-level spaCy pipeline rather than reloading the model on every call
    doc = nlp(text)
    
    verbs_nouns = []
    other_words = []
    
    for token in doc:
        if token.pos_ in ["VERB", "NOUN"]:
            verbs_nouns.append(token.text)
        elif token.is_punct:
            # Keep punctuation in both outputs so each stays readable
            verbs_nouns.append(token.text)
            other_words.append(token.text)
        else:
            other_words.append(token.text)
    
    verbs_nouns_text = " ".join(verbs_nouns)
    other_words_text = " ".join(other_words)
    
    return verbs_nouns_text, other_words_text
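# Splits a sentence into a noun/verb skeleton and the remaining words (punctuation is kept
# in both outputs), so roughly:
#   split_verbs_nouns("The quick dog runs fast.") -> ("dog runs .", "The quick fast .")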

def split_srt_file(text):
    srt_contents = text
    
    # Split the SRT file by timestamp
    srt_sections = srt_contents.split('\n\n')

    # Loop through each section of the SRT file
    for i in range(len(srt_sections)):
        # Split the section into its index, timestamp and subtitle text
        section_lines = srt_sections[i].split('\n')
        if len(section_lines) < 3:
            continue  # skip empty or malformed sections (e.g. a trailing blank block)
        timestamp = section_lines[1]
        subtitle_text = ' | '.join(section_lines[2:])

        # Replace spaces in the subtitle text with " | "
        subtitle_text = subtitle_text.replace(' ', ' | ')

        # Reconstruct the section with the updated subtitle text
        srt_sections[i] = f"{section_lines[0]}\n{timestamp}\n{subtitle_text}"

    # Join the SRT sections back together into a single string
    return '\n\n'.join(srt_sections)
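# Example of the word-for-word split on one SRT section, e.g.:
#   "1\n00:00:01,000 --> 00:00:03,000\nHello there"
#   becomes
#   "1\n00:00:01,000 --> 00:00:03,000\nHello | there"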

def find_string_positions(s, string):
    positions = []
    start = 0
    while True:
        position = s.find(string, start)
        if position == -1:
            break
        positions.append(position)
        start = position + len(string)
    return positions
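# Example: find_string_positions("a-->b-->c", "-->") -> [1, 5]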

def splittext(string):
  split_positions = find_string_positions(string, " --> ")  
  split_strings = []
  prepos = 0
  for pos in split_positions:
      pos -= 12
      split_strings.append(string[prepos:pos])
      prepos = pos
  
  FinalOutput = ""
  stoutput = ""
  linenumber = 1
  for item in split_strings[1:]:
    stoutput = item[0:29] + "\n" + item[30:]
    stspaces = find_string_positions(stoutput, " ")
    FinalOutput += str(linenumber) + "\n" + stoutput[:stspaces[-2]] + "\n"
    FinalOutput += "\n"
    linenumber += 1
  return FinalOutput[2:]   

def VideotoSegment(video_file, subtitle_file):
    # Read the subtitle file and extract the timings for each subtitle
    timings = []
    for line in subtitle_file:
        if '-->' in line:
            start, end = line.split('-->')
            start_time = start.strip().replace(',', '.')
            end_time = end.strip().replace(',', '.')
            timings.append((start_time, end_time))

    # Cut the video into segments based on the subtitle timings
    video_segments = []
    for i, (start_time, end_time) in enumerate(timings):
        output_file = f'segment_{i}.mp4'
        ffmpeg.input(video_file, ss=start_time, to=end_time).output(output_file, codec='copy').run()
        video_segments.append(output_file)

    # Convert each segment to an MP3 audio file using FFmpeg
    audio_segments = []
    for i in range(len(timings)):
        output_file = f'segment_{i}.mp3'
        ffmpeg.input(video_segments[i]).output(output_file, codec='libmp3lame', qscale='4').run()
        audio_segments.append(output_file)

    # Create a ZIP archive containing all of the segmented files
    zip_file = zipfile.ZipFile('segmented_files.zip', 'w')
    for segment in video_segments + audio_segments:
        zip_file.write(segment)
        os.remove(segment)
    zip_file.close()

    # Return the ZIP archive for download
    return 'segmented_files.zip'


# Define the Gradio interface inputs and outputs for video split
spvvideo_file_input = gr.File(label='Video File')
spvsubtitle_file_input = gr.File(label='Subtitle File')
spvdownload_output = gr.File(label='Download Segmented Files')


groupinput_text = gr.Textbox(lines=2, label="Enter a list of words")
groupoutput_text = gr.Textbox(label="Grouped words")

with gr.Blocks() as lliface:
  with gr.Tab("Welcome"):
    gr.HTML("""<h1> Spaces Test - Still Undercontruction </h1> <p> You only learn when you convert things you dont know to known --> Normally Repetition is the only reliable method for everybody </p>  
    <p> Knowledge is a Language but productive knowledge is find replace as well </p> <p>LingQ is good option for per word state management</p> <p> Arrows app json creator for easy knowledge graphing and spacy POS graph? </p> 
    <p> Vocab = Glossary + all non text wall(lists, diagrams, etc.)</p>
	<p> https://huggingface.co/spaces/vumichien/whisper-speaker-diarization<br></p>
    <p> In Language the goal is bigger vocab --> Knowledge equivalent = question answer pairs but to get to those you need related information pairs</p> 
    <p> ChatGPT Turns Learning into a read only what you dont know ask only what you dont know feedback loop --> All you have to do is keep track of what prompts you have asked in the past</p>
    <p> Spell multiple words simultaneously for simultaneous access </p>
    """)
    with gr.Tab("Unique word ID"):
      gr.Interface(fn=unique_word_count, inputs="text", outputs="text", title="Wordcounter")
      gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], title="Word suggestions")
      gr.Interface(fn=WikiSearch, inputs="text", outputs="text", title="Unique word suggestions(wiki articles)")
    with gr.Tab("Automating related information linking"):
      gr.HTML("Questions - Tacking and suggesting questions to ask = new education")  
  with gr.Tab("Spelling and Chunks"):
    gr.Text("Merged Spelling Practice Placeholder - Spell multiple words simultaneously for simultaneous access")  
    gr.HTML("<p> Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
    with gr.Tab("Spelling Simplification - Use a dual language list"):
        gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
    with gr.Tab("Chunks"): 
        gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text")
        gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", title="Noun and Verbs only (Plus punctuation)")
  with gr.Tab("Timing Practice - Repitition"):
    gr.HTML("<p>Run from it, Dread it, Repitition is inevitable - Thanos</p> <p>Next Milestone is Turning this interface handsfree</p>")  
    with gr.Tab("Gradio Version"):
      gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, title="Word Grouping and Rotation", description="Group a list of words into sets of 10 and rotate them every 60 seconds.").queue()          
    with gr.Tab("HTML Version"):
      gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/preview/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
  See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj">
  Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>)
  on <a href="https://codepen.io">CodePen</a>.
</iframe>""")
  with gr.Tab("Knowledge Ideas"):
      gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
      <p>My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient</p>
      <p>Big Picture = Expand the Heading and the subheadings and compare them to each other</p>
      <p>Application of Knowledge = App Version of the text (eg. Jupyter Notebooks) is what you create and learn first</p>
	  """)    
  with gr.Tab("Beginner - Songs - Chorus"):
      gr.HTML("Essentially if the sounds are repeated or long notes they are easy to remember")
      gr.Interface(fn=TTSforListeningPractice, inputs="text", outputs="text", title="Placeholder - paste chorus here and use TTS or make notes to save here")
  with gr.Tab("Transcribe - RASMUS Whisper"):
    gr.HTML("""<p>If this tab doesnt work use the link below ⬇️</p> <a href="https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles">https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles</a>""")
    gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
  with gr.Tab("Advanced - LingQ Addons ideas"):
    gr.HTML("Extra functions needed - Persitent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
    with gr.Tab("Merged Subtitles"):
        gr.HTML("SRT Contents to W4W Split SRT for Google Translate")
        gr.Interface(fn=split_srt_file, inputs="text", outputs="text")
        gr.HTML("Text for w4w creation in G Translate")
        gr.Interface(fn=splittext, inputs="text", outputs="text")
        with gr.Row():
            RomanFile = gr.File(label="Paste Roman")
            W4WFile = gr.File(label="Paste Word 4 Word")
            FullMeanFile = gr.File(label="Paste Full Meaning")
            MacaronicFile = gr.File(label="Paste Macaronic Text")
            SentGramFormula = gr.File(label="Paste Sentence Grammar Formula Text")
        with gr.Row():
            MergeButton = gr.Button()
        with gr.Row():
            MergeOutput = gr.TextArea(label="Output")
            MergeButton.click(merge_lines, inputs=[RomanFile, W4WFile, FullMeanFile, MacaronicFile], outputs=[MergeOutput])
    with gr.Tab("Split video to segments"):
        gr.Interface(VideotoSegment, inputs=[spvvideo_file_input, spvsubtitle_file_input], outputs=spvdownload_output)
    with gr.Tab("Sentence to Format"):
        gr.Interface(fn=split_verbs_nouns, inputs="text", outputs=["text", "text"], title="Comprehension reading and Sentence Format Creator")
        gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
  with gr.Tab("Dictionary from text"):
    gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Two Letter Dictionary")
  
lliface.launch()