KwabsHug committed on
Commit 864431b
1 Parent(s): a36ca8b

Update app.py

Files changed (1):
  app.py +351 -87
app.py CHANGED
@@ -25,8 +25,11 @@ from langdetect import detect
 import datetime
 import cv2
 import math
+from langchain.document_loaders import YoutubeLoader #needs youtube_transcript_api and pytube installed
+from youtube_transcript_api import YouTubeTranscriptApi
+from spacy_syllables import SpacySyllables #https://spacy.io/universe/project/spacy_syllables/

-#When I forgot about the readme file ChatGPT suggested these - Leaving to remember the ReadmeF.md must be updated as well
+#When I forgot about the readme file ChatGPT suggested these - Leaving to remember the Readme.md must be updated as well
 #print(gr.__version__)
 #import subprocess
 #subprocess.run(["pip", "install", "--upgrade", "gradio==3.47.1"]) #For huggingface as they sometimes install specific versions on container build
@@ -48,6 +51,7 @@ nltk.download('opinion_lexicon') #Sentiment words
 nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
 nltk.download('udhr') # Declaration of Human rights in many languages

+
 spacy.cli.download("en_core_web_sm")
 spacy.cli.download('ko_core_news_sm')
 spacy.cli.download('ja_core_news_sm')
@@ -61,6 +65,8 @@ nlp_es = spacy.load("es_core_news_sm")
 nlp_ko = spacy.load("ko_core_news_sm")
 nlp_ja = spacy.load("ja_core_news_sm")
 nlp_zh = spacy.load("zh_core_web_sm")
+nlp_en_syllable = spacy.load("en_core_web_sm")
+nlp_en_syllable.add_pipe("syllables", after="tagger") #https://spacy.io/universe/project/spacy_syllables/

 nlp = spacy.load('en_core_web_sm')
 translator = Translator()
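
A side note on the new pipe: per the spacy_syllables project linked above, it attaches syllable data to every token, roughly like this (the sentence and output are illustrative, not from this commit):

    doc = nlp_en_syllable("terribly long sentences")
    for token in doc:
        print(token.text, token._.syllables, token._.syllables_count)
    # e.g. terribly ['ter', 'ri', 'bly'] 3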
@@ -91,6 +97,16 @@ def keep_nouns_verbs(sentence):
             nouns_verbs.append(token.text)
     return " ".join(nouns_verbs)

+def keep_nouns(sentence):
+    doc = nlp(sentence)
+    nouns = []
+    for token in doc:
+        if token.pos_ in ['NOUN', 'PUNCT']:
+            nouns.append(token.text)
+        if token.text == '.':
+            nouns.append("\n")
+    return " ".join(nouns)
+
 def unique_word_count(text="", state=None):
     if state is None:
         state = {}
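
A quick illustration of what the new keep_nouns helper returns (POS tags depend on the loaded model, so treat the output as approximate):

    keep_nouns("The quick fox jumps over the lazy dog. Dogs sleep.")
    # -> "fox dog . \n Dogs . \n"  - nouns and punctuation only, with a newline after each full stop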
@@ -287,7 +303,7 @@ def group_words(inlist):
         random.shuffle(word_groups[current_group_index])
         current_group_time += 10

-        yield " ".join(word_groups[current_group_index])
+        yield " ".join(word_groups[current_group_index])
         time.sleep(10)

 def split_verbs_nouns(text):
@@ -1213,6 +1229,10 @@ The player who finds the <<Chinese>> word and makes the mnemonic should write th

 <hr>

+Try to make separate sentences with this acronym as the best wordplay expert in the world - SI(AGM, KPHSIO, ACFJG, DYNAGJ, JBMNNA, HNPDM, BSED, WUENN |
+
+<hr>
+
 Make jokes while following rules for a syllogism jokes game:

 The game can be played with any number of people.
@@ -1245,11 +1265,6 @@ next sentence is AI Town is a virtual town where AI characters live, chat and so

 LLPromptIdeasasbtns = LLPromptIdeas.split("<hr>")

-def loadforcopybuttonllmpromptideas():
-    global LLPromptIdeasasbtns
-    list = LLPromptIdeasasbtns
-    return "Load the examples with the button below, Alternatively copy paste manually above", list[0], list[1], list[2], list[3], list[4]
-
 def display_website(link):
     html = f"<iframe src='{link}' width='100%' height='1000px'></iframe>"
     gr.Info("If 404 then the space/page has probably been disabled - normally due to a better alternative")
@@ -1261,7 +1276,6 @@ def RepititionPracticeTimeCalculator(text, reps_per_item, seconds_per_item):
    FinalOutput = f"Total Time is estimated: { lines * reps_per_item * seconds_per_item / 60 } minutes ( {lines} lines)"
    return FinalOutput

-
 randomExposuremessageText = ["Great Test for LLM function calling (with Gradio Client)", "Unknown Tracker Tab = Incomplete Reading Assistant Idea - HTML app based on text to be read", "Bing mnemonic - lost = dont ignore unusual sounds here inside lost cave", "1000 verbs in lists of 100, verbs = easy setence structure estimation (SVO, SOV, etc.)", "Can put any message here in the navigatoin tab"]

 def randommarquee():
@@ -1291,7 +1305,7 @@ def segment_video_with_opencv(file_path, segment_duration=60):
    # Define the codec and create VideoWriter object
    # For .mp4 output, use the H.264 codec with the tag 'mp4v'
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-   output_filename = f'chunk_{segment}.mp4'
+   output_filename = f'./splitvideo/chunk_{segment}.mp4'
    out = cv2.VideoWriter(output_filename, fourcc, fps, (int(cap.get(3)), int(cap.get(4))))

    for frame_num in range(fps * segment_duration):
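
One caveat on the new output path: cv2.VideoWriter does not create missing directories, it just returns a writer whose isOpened() is False, so something along these lines (hypothetical, not part of this commit) has to run before the chunks are written:

    import os
    os.makedirs("./splitvideo", exist_ok=True)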
@@ -1341,26 +1355,235 @@ def TestSplitandUpdate(Text):

 TestSplitandUpdateinput = gr.Textbox(placeholder="Counter and Placeholder one point of entry for the text to be analysed across the whole app")

-def RepititionInjectedReading(learning, reading):
+def RepititionInjectedReading(splitby, learning, reading):
     readingdoc = nlp(reading)
     learninglist = learning.splitlines()
     FinalOutput = ""
-    numofsentencesinreading = sum(1 for _ in readingdoc.sents) #len(readingdoc.sents) is wrong because of generator
-    numofsentencesinlearning = len(learninglist)
+    if splitby == "sentences":
+        numofsplitsinreading = sum(1 for _ in readingdoc.sents) #len(readingdoc.sents) is wrong because of generator
+    if splitby == "words":
+        numofsplitsinreading = sum(1 for _ in readingdoc) #iterating the doc yields tokens; .sents would count sentences
+
+    numofsplitsinlearning = len(learninglist)
     RepInjectedText = "\n"

-    for i in range(0, numofsentencesinlearning):
+    for i in range(0, numofsplitsinlearning):
         for sent in readingdoc.sents:
             RepInjectedText += sent.text + " (" + learninglist[i] + ") "

-    FinalOutput = f"{ numofsentencesinreading } repitition oppurtunities between the sentences: \n { RepInjectedText }"
+    FinalOutput = f"{ numofsplitsinreading } repetition opportunities between the sentences: \n { RepInjectedText }"

     return FinalOutput

+Repsplitdropdown = gr.Dropdown(choices=["sentences", "words"], value="sentences", label="Split by")
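
The reworked function now takes the split mode as its first argument, with the new Repsplitdropdown presumably feeding it (the interface hookup is not visible in this commit, so the wiring below is an assumption):

    # Hypothetical wiring and call; the sample inputs are illustrative
    # gr.Interface(fn=RepititionInjectedReading, inputs=[Repsplitdropdown, "text", "text"], outputs="text")
    print(RepititionInjectedReading("sentences", "target word one\ntarget word two", "First sentence. Second sentence."))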
+def hidingbuttontesttranslate(text):
+    html = """
+    <html>
+    <head>
+    <style>
+    #container {
+        display: flex;
+        flex-direction: column;
+    }
+
+    button {
+        width: 200px;
+        padding: 12px 20px;
+        margin: 8px 0;
+    }
+
+    .hidden {
+        display: none;
+    }
+    </style>
+    </head>
+    <body>
+    <div id="container">
+    """
+
+    doc = nlp(text)
+    sentences = [sent.text for sent in doc.sents]
+
+    for i, sentence in enumerate(sentences):
+        html += f"""
+        <button id="sentence{i}" class="sentence">
+            {sentence}
+        </button>
+        """
+
+    html += """
+    </div>
+
+    <script>
+    let activeBtn;
+
+    const buttons = document.querySelectorAll('.sentence');
+
+    buttons.forEach(button => {
+        button.addEventListener('click', () => {
+            buttons.forEach(b => b.classList.add('hidden'))
+
+            if (activeBtn) {
+                activeBtn.classList.remove('hidden');
+            }
+
+            activeBtn = button;
+            activeBtn.classList.remove('hidden');
+        });
+    });
+    </script>
+
+    </body>
+    </html>
+    """
+
+    return gr.Code(html, language="html"), gr.HTML(html)
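
Since hidingbuttontesttranslate returns a (gr.Code, gr.HTML) pair, a minimal hookup would be something like the sketch below (an assumption - the wiring is not shown in this commit). Note that HTML injected into a page via innerHTML generally does not execute its <script> tags, so the click-to-hide behaviour may only work when the generated code is saved as a standalone page:

    # Hypothetical wiring for the helper above
    # gr.Interface(fn=hidingbuttontesttranslate, inputs="text", outputs=[gr.Code(language="html"), gr.HTML()])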
+def extract_video_id(youtube_url):
+    # Regular expression patterns for different YouTube URL formats
+    regex_patterns = [
+        r"(?<=v=)[a-zA-Z0-9_-]+",     # Pattern for 'https://www.youtube.com/watch?v=VIDEO_ID'
+        r"(?<=be/)[a-zA-Z0-9_-]+",    # Pattern for 'https://youtu.be/VIDEO_ID'
+        r"(?<=embed/)[a-zA-Z0-9_-]+"  # Pattern for 'https://www.youtube.com/embed/VIDEO_ID'
+    ]
+
+    video_id = None
+
+    for pattern in regex_patterns:
+        match = re.search(pattern, youtube_url)
+        if match:
+            video_id = match.group(0)
+            break
+
+    return video_id
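
Behaviour sketch for the helper above (the ID is a placeholder):

    # All three URL shapes fall through to the same ID
    extract_video_id("https://www.youtube.com/watch?v=abc123XYZ")  # -> "abc123XYZ"
    extract_video_id("https://youtu.be/abc123XYZ")                 # -> "abc123XYZ"
    extract_video_id("https://www.youtube.com/embed/abc123XYZ")    # -> "abc123XYZ"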
+YTTtranscriptSubtitleOutput = []
+YTTtrancriptAnalysedSubtitleOutput = []
+
+def YTTransciptAnalysisandLoad(link):
+    global YTTtranscriptSubtitleOutput, YTTtrancriptAnalysedSubtitleOutput
+    if re.match(r'https?:\/\/youtu\.be\/', link) is not None:
+        print("Needs Reformat")
+        video_id = link.split('/')[3]
+        link = 'https://www.youtube.com/watch?v={}'.format(video_id)
+    else: video_id = extract_video_id(link)
+
+    #loader = YoutubeLoader.from_youtube_url(f"{ link }", add_video_info=True)
+    #YTTtranscriptloader = loader.load()
+
+    try:
+        YTTtranscript = YouTubeTranscriptApi.get_transcript(video_id)
+        #YTTtranscript = YTTtranscriptloader[0]
+        YTTtranscriptSubtitleOutput = YTTtranscript
+    except (IndexError, AttributeError):
+        print("No Transcript Found")
+        YTTtranscript = []  #leave empty so the analysis loop below is skipped cleanly
+
+    YTTtrancriptAnalysed = []
+    for subtitle in YTTtranscript:
+        YTTtrancriptAnalysed.append({'text': keep_nouns(subtitle['text']), 'start': subtitle['start'], 'duration': subtitle['duration']})
+    #YTTtrancriptAnalysed = str(len(YTTtranscript.page_content)) + "" + str(YTTtranscript.metadata)
+
+    YTTtrancriptAnalysedSubtitleOutput = YTTtrancriptAnalysed
+
+    if re.match(r'https?:\/\/(?:www\.)?youtube\.com\/watch', link) is not None:
+        video_id = re.search(r'v=([^&]+)', link).group(1)
+        link = 'https://www.youtube.com/embed/{}'.format(video_id)
+
+    return f'<iframe width="100%" height="640" src="{ link }" title="" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>', YTTtranscript, YTTtrancriptAnalysed
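
For reference, YouTubeTranscriptApi.get_transcript returns a list of dicts in exactly the shape the loop above consumes:

    # One element of the returned list (values illustrative)
    entry = {'text': 'a subtitle line', 'start': 12.34, 'duration': 3.21}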
+def TimedList(items, duration, splits=0):  #parameter renamed so it no longer shadows the builtin list
+    if splits == 0:
+        splits = duration // len(items)
+    for item in items:
+        yield item
+        time.sleep(splits)
+
+def synctextboxes(text):
+    return text
+
+def subtitle_generator():
+    global YTTtranscriptSubtitleOutput, YTTtrancriptAnalysedSubtitleOutput
+    """
+    A generator that yields the subtitle based on the current playback time.
+
+    :param subtitles: List of subtitles, where each subtitle is a dictionary with 'start', 'duration', and 'text' keys.
+    :param playback_start_time: The time when playback started, used to calculate the current playback time.
+    """
+    if not YTTtranscriptSubtitleOutput:  #no transcript loaded yet
+        yield ("No subtitle", "No subtitle")
+        return
+
+    playback_start_time = time.time()
+    while True:
+        current_time = time.time() - playback_start_time
+        for index, subtitle in enumerate(YTTtranscriptSubtitleOutput):
+            start_time = int(subtitle['start'])
+            end_time = start_time + int(subtitle['duration'])
+            if start_time <= current_time < end_time:
+                yield (YTTtrancriptAnalysedSubtitleOutput[index]['text'], subtitle['text'])
+                break
+        else:
+            yield ("", "")
+        time.sleep(1)  # Wait for 1 second before updating
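
Outside Gradio, the generator above can be exercised directly once a transcript has been loaded into the globals (a sketch, not part of the commit):

    # Prints the (nouns-only, original) subtitle pair roughly once per second
    for analysed_text, full_text in subtitle_generator():
        print(analysed_text, "|", full_text)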
+def word_to_k8s_format(word):
+    if len(word) <= 2:
+        return word
+    else:
+        return word[0] + str(len(word) - 2) + word[-1]
+
+def ForeignSyllableListenFormat(text):
+    FinalOutput = ""
+    words = nlp_en_syllable(text)
+    FirstSyllablesonly = ""
+    tempsyllablelist = None
+
+    #Keep only the first syllable of every word
+    for item in words:
+        if item.pos_ != "PUNCT":
+            FinalOutput += item.text + " "
+            tempsyllablelist = item._.syllables
+            if type(tempsyllablelist) == list:
+                FirstSyllablesonly += str(tempsyllablelist[0]) + " "
+                FinalOutput += str(tempsyllablelist) + " " #str(item._.syllables) + " "
+                FinalOutput += str(item._.syllables_count) + " | "
+        else:
+            FinalOutput += item.text + " "
+            FirstSyllablesonly += item.text + " "
+
+    FinalOutput = "The first Syllables (Listening Practice): \n" + FirstSyllablesonly + "\nSyllable Analysis:\n" + FinalOutput
+    return FinalOutput
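
The k8s-style shortener above follows the familiar numeronym convention, e.g.:

    word_to_k8s_format("kubernetes")            # -> "k8s"
    word_to_k8s_format("internationalization")  # -> "i18n"
    word_to_k8s_format("an")                    # -> "an" (two letters or fewer pass through)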
 # For testing purposes
 # file_paths = segment_video_with_opencv("path_to_your_video.mp4")
 # print(file_paths)

+def FirstLetAccronymsSpacy(text):
+    FinalOutput = ""
+    doc = nlp(text)
+    for sent in doc.sents:
+        for word in sent:
+            FinalOutput += word.text[0]
+        FinalOutput += "\n"
+
+    return FinalOutput
+
+def MultiOutputInterface(inputtext):
+    k8sformat = ""
+    inputwordlist = inputtext.split(" ")
+    for word in inputwordlist:
+        k8sformat += word_to_k8s_format(word) + " "
+    FirstLetAccronyms = FirstLetAccronymsSpacy(inputtext)
+    AcronymMap = create_acronym_map(inputtext)
+    Output1 = keep_nouns_verbs(inputtext)
+    Output2 = keep_nouns(inputtext)
+    Plurals = onlyplurals(inputtext)
+    Output3 = TestSplitandUpdate(inputtext)
+    Output4 = ForeignSyllableListenFormat(inputtext)
+    return Output3[0], FirstLetAccronyms, AcronymMap[0], AcronymMap[1], Output1, Output2, Plurals, k8sformat, Output4, Output3[1]

 # Define the Gradio interface inputs and outputs for video split
 spvvideo_file_input = gr.File(label='Video File')
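
A behaviour sketch for the acronym helper above (tokenisation is model-dependent, so treat the output as approximate):

    FirstLetAccronymsSpacy("The quick fox jumps. Dogs sleep.")
    # -> "Tqfj.\nDs.\n"  - first letter of every token, one line per sentence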
@@ -1381,32 +1604,45 @@ randomExposuremessage2 = randommarquee()
 VideoTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
 VideoTestSubtitleInput = gr.File(label="select a subtitle file", file_types=[".txt", ".srt", ".vtt"])
 VideoSplitTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
+SplitVideoOutput = gr.FileExplorer(root='./splitvideo')

 with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
- gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Very Slowly figuring out what AI intergrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy paste once and for all? ---- </div> <div style="display: flex; justify-content: center; align-items: center; height: 100%;"> All the apis from the below space need to be treated like RAG as notes for the LLM to read before providing its answer </div>')
+    gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> Reading comprehension speed through picture based compression (collage), Live Image Subtitles and Listening Comprehension Test </div><div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Very Slowly figuring out what an AI integrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy paste once and for all? ---- </div> <div style="display: flex; justify-content: center; align-items: center; height: 100%;"> All the apis from the below space need to be treated like RAG as notes for the LLM to read before providing its answer </div>')
+    with gr.Accordion("Translation HF Spaces/Sites (Click Here to Open) - Use to get rough translations", open=False):
+        with gr.Row():
+            linktotranslate = gr.Dropdown(choices=["https://kadirnar-multilingual-translation.hf.space", "https://geonmo-nllb-translation-demo.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+            translatespacebtn = gr.Button("Use the chosen URL to load interface with a translate model")
+        translatespace = gr.HTML("Translate Space Chosen will load here")
+        translatespacebtn.click(display_website, inputs=linktotranslate, outputs=translatespace)
     with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
         with gr.Row():
  linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
-            chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left")
+            chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left. For lmsys / chat arena copy the link and use a new tab")
        with gr.Accordion("Some prompt ideas", open=False):
            with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
                gr.HTML(LLPromptIdeas)
-           gr.Interface(loadforcopybuttonllmpromptideas, inputs=None, outputs=["html", "code", "code", "code", "code", "code"])
+           with gr.Group():
+               promptidea0 = gr.Code(label="Prompt Idea 1", value=LLPromptIdeasasbtns[0])
+               promptidea1 = gr.Code(label="Prompt Idea 2", value=LLPromptIdeasasbtns[1])
+               promptidea2 = gr.Code(label="Prompt Idea 3", value=LLPromptIdeasasbtns[2])
+               promptidea3 = gr.Code(label="Prompt Idea 4", value=LLPromptIdeasasbtns[3])
+               promptidea4 = gr.Code(label="Prompt Idea 5", value=LLPromptIdeasasbtns[4])
        chatspace = gr.HTML("Chat Space Chosen will load here")
        chatspacebtn.click(display_website, inputs=linktochat, outputs=chatspace)
    with gr.Accordion("Image HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
        with gr.Row():
-           linktoimagegen = gr.Dropdown(choices=["https://simianluo-latent-consistency-model.hf.space", "https://google-sdxl.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+           linktoimagegen = gr.Dropdown(choices=["https://segmind-segmind-stable-diffusion.hf.space", "https://simianluo-latent-consistency-model.hf.space", "https://google-sdxl.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
            imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
        imagegenspace = gr.HTML("Chat Space Chosen will load here")
        imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
-    gr.HTML("<div style='display: flex; justify-content: center; align-items: center; height: 100%;'> Agents = Custom Software (Personalised UI and Mods, among other things) = Custom Environments (AR) - <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter -- </a> | <a href='https://github.com/microsoft/autogen'> -- Microsoft Autogen -- </a> | </div>")
+    gr.HTML("<div style='display: flex; justify-content: center; align-items: center; height: 100%;'> Agents = Custom Software (Personalised UI and Mods, among other things) = Custom Environments (AR) <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter -- </a> | <a href='https://github.com/microsoft/autogen'> -- Microsoft Autogen -- </a> | </div>")
    with gr.Row():
        with gr.Column(scale=1):
+           gr.HTML(""" <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | """)
            with gr.Tabs() as nav1:
                with gr.Tab("Rep - HTML"):
                    gr.HTML("UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know")
- gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
 See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
+                    gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/preview/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
 See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
                with gr.Tab("Rep - Gradio"):
                    gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
@@ -1425,47 +1661,63 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                with gr.Tab("Vector Database = Memorisation"):
                    gr.HTML("Phrasebook on demand in realtime <br><br> Open AI - 10000 * 1000tokens (+- 4000 characters) = 1$ (0.0001 per 1000 tokens / 750 words), Cohere Multilingual = free for personal use / Commercial use = \n Vector Database query = Better than text search but not for logical relationships")
                with gr.Tab("Time Estimate Calculator"):
-                   gr.HTML("Repitition = A subconcious time gaame - transparent screens + below repitition assist (Vision) or (Audio) ")
+                   gr.HTML("Repetition = A subconscious time game - transparent screens + below repetition assist (Vision) or (Audio)")
                    gr.Interface(fn=RepititionPracticeTimeCalculator, inputs=["text", "number", "number"], outputs="text")
-       with gr.Column(scale=3):
-           with gr.Tab("Workflows"):
-               with gr.Row():
-                   gr.HTML("<span style:{'fontsize: 20'}>Start at Unkown Tracker if unseure<span> <br> UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know <br><br> General Ideas in this space - Speed of Learning = Avoid Things you know like the plague -- How to track what you know -- Counter is easiest and How you feel is the hardest (The more you know, the more confusion on what you dont know as you probably werent keeping track) <br><br> Visulisation of long text - Bottom of this page <br> Wordlist - 1 new word at a time per minute in the space to the left <br> Youtube Video Watching - Subtitles Tab <br> Reading - Unknown Tracker Tabs <br> Longer Text Memorising - Acronym Map Creation Tab and Transition Tab <br> Brainstorming - Reading Assistant <br> Random Exposure <br> ")
-                   gr.Interface(fn=TestSplitandUpdate, inputs=TestSplitandUpdateinput, outputs=["text", "button"])
-               with gr.Row():
+       with gr.Row():
            PracticeExposure = gr.HTML(randomExposuremessage)
            PracticeExposure2 = gr.HTML(randomExposuremessage2)
            PracticeExposurebtn.click(fn=changeexposuretext, inputs=PracticeExposureInput, outputs=PracticeExposure)
-       with gr.Row():
-           with gr.Column(scale=1):
-               gr.HTML("Advanced Repitition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
-           with gr.Column(scale=1):
-               gr.HTML("<p>Timing Practice - Repitition: Run from it, Dread it, Repitition is inevitable - Thanos --> Repitition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repitition is a multitask activity like driving must be subconcious process to show mastery </p>")
- gr.HTML(""" <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a 
href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | """)
-               gr.HTML("Placeholder for every images of each sentence - Good ChatGPT + Dall-E ")
-               with gr.Row():
-                   with gr.Column(scale=4):
-                       imageplaceholderinput = gr.TextArea()
-                   with gr.Column(scale=1):
-                       gr.Label("Enter Text and Get a line by line placeholder for image associated with the text")
-                       imageplaceholderdownload = gr.File()
-               imageplaceholderbtn = gr.Button("Create the image placeholder")
-               with gr.Row():
-                   with gr.Column(scale=3):
-                       imageplaceholderoutput = gr.HTML("Preview will load here")
-                   with gr.Column(scale=2):
-                       imageplaceholdertextoutput = gr.Code("The code for the HTML created will come here")
-               imageplaceholderbtn.click(fn=imagebasedreading, inputs=[imageplaceholderinput], outputs=[imageplaceholderdownload, imageplaceholderoutput, imageplaceholdertextoutput])
+       with gr.Row():
+           with gr.Column(scale=1):
+               gr.HTML("Advanced Repetition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
+           with gr.Column(scale=1):
+               gr.HTML("<p>Timing Practice - Repetition: Run from it, Dread it, Repetition is inevitable - Thanos --> Repetition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repetition is a multitask activity like driving must be subconscious process to show mastery </p>")
+           with gr.Column(scale=3):
+               with gr.Tab("General"):
+                   with gr.Row():
+                       gr.HTML("<span style='font-size: 20px'>Start at Unknown Tracker if unsure</span> <br> UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know <br><br> General Ideas in this space - Speed of Learning = Avoid Things you know like the plague -- How to track what you know -- Counter is easiest and How you feel is the hardest (The more you know, the more confusion on what you dont know as you probably werent keeping track) <br><br> Visualisation of long text - Bottom of this page <br> Wordlist - 1 new word at a time per minute in the space to the left <br> Youtube Video Watching - Subtitles Tab <br> Reading - Unknown Tracker Tabs <br> Longer Text Memorising - Acronym Map Creation Tab and Transition Tab <br> Brainstorming - Reading Assistant <br> Random Exposure <br> ")
+                       gr.Interface(fn=MultiOutputInterface, inputs=TestSplitandUpdateinput, outputs=["text", "text", "text", "text", "text", "text", "text", "text", "text", "button"])
        with gr.Tab("Repetition Injected Text"):
            gr.Label("Optimal Study Reps is inbetween new information acquisition - i.e. any thing you havent read already")
-           gr.Interface(fn=RepititionInjectedReading, inputs=["text", "text"], outputs="text")
-       with gr.Tab("Progress Tracking"):
            gr.Label("Missing is database integration for the counter and non-english - ALSO TODO - Parralell interface for the html and acronym creator")
            gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["file", "html", "text"], description="HTML mini App - UNNWFWO (To track verbs you dont know for listening practice). Use the text from here to create lists you use for the TTS section")
            gr.Interface(create_acronym_map, inputs='text', outputs=['text', 'text'])
            gr.HTML("On the Acronyms you need to underline the verbs")
            gr.HTML("Aim for 1000 reps per item in your mind - the end goal for full sentences is to identify the SOV equivalent ASAP")
            gr.Interface(fill_lines, inputs=["text", RepSched_Num_lines], outputs="text")

        with gr.Tab("Beginner - Listen + Read"):
            gr.Label("Closed Eye Recital per new word | 1 new word a minute while recycling the words from the previous minutes")
            with gr.Row():
@@ -1505,8 +1757,8 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
        with gr.Tab("Unique word ID - use in Infranodus"):
            with gr.Accordion(label="Infranodus", open=False):
                gr.HTML(" <a href='https://infranodus.com/'> -- Infranodus - Word Level Knowledge graphs -- </a> | <br> Use the below interfaces to find the items that dont have entries --> These will represent new concepts or people which need to be understood individually to fully understand the text --> Infranodus search will help find related and unrelated investigation paths <br><br> TODO Figure Output Zoom / Image Dimensions")
-               gr.Image(source="upload", label="Open Infranodus Screenshot")
-               gr.Image(source="upload", label="Open Infranodus Screenshot")
            gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
            gr.HTML("Use the below interface to fill in the space in this format and then use the chat iframe at the top to ask llm to analyse this: <br><br> Consider how the following sentence meaning will change if the each if the selected word is replaced with one hypernym at a time: <br>Sentence: <br>Hypernyms: ")
            gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
@@ -1520,33 +1772,30 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
            gr.HTML("Parts of speech recognition = comprehension <br> Three word sentences will give a easier guessing chance")
            gr.HTML('<iframe src="https://spacy-gradio-pipeline-visualizer.hf.space" frameborder="0" width="100%" height="600"></iframe>')
        with gr.Tab("Advanced - Making Questions = Reading"):
-           gr.HTML("Some Example Prompts <br><br>Please make 10 questions baseed on this text: <br>")
            with gr.Row():
                gr.TextArea("Paste the text to read here", interactive=True)
                gr.TextArea("Make as many questions on the text as you can in native language and then translate", interactive=True)
            gr.Dropdown(["Placeholder chunk 1", "Placeholder chunk 2", "Placeholder chunk 3"])
            gr.HTML("Load the current chunk here and Put a Dataframe where you have only one column for the questions")
-       with gr.Tab('Acronym Map Creation Space'):
-           gr.HTML("Acronym cant be read with previous attentive reading - accurate measure of known vs unknown")
-           with gr.Row():
-               with gr.Accordion('Acronym Map/Skeleton Creator'):
-                   gr.HTML("Moved to Progress for now")
-               with gr.Accordion('Test with LLM'):
-                   gr.Label('Letters are always easier to recall than whole words. GPT 4 and above best suited for this prompt but can test anywhere')
-                   gr.HTML('Please help me study by making a acronym map for the maths ontology (Ask if theres questions)')
-                   gr.TextArea('', label='Paste LLM response')
-                   gr.HTML('Good but we need to now create a 9 Acronym based words - 1 for the headings together and then one each for the subheadings')
-                   gr.TextArea('', label='Paste LLM response')
-               with gr.Accordion(''):
-                   gr.HTML('If study content was a map the first letters shape of the whole text = Roads')
-                   gr.HTML('Known = ability to match an item to a retrieval cue instantly - Retrieval cue for the whole text = Acronym Map')
        with gr.Tab("Advanced - Youtube - Subtitles - LingQ Addon Ideas"):
            gr.HTML("<a href='https://www.lingq.com/en/'>Find LingQ Here --> https://www.lingq.com/en/</a>")
-           with gr.Tab("Visual - Multiline Custom Video Subtitles"):
                gr.HTML("LingQ Companion Idea - i.e. Full Translation Read along, and eventually Videoplayer watch along like RAMUS whisper space <br><br>Extra functions needed - Persitent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
                gr.HTML("""<hr> <p>For Transcripts to any video on youtube use the link below ⬇️</p> <a href="https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles">https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles</a> | <a href="https://huggingface.co/spaces/vumichien/whisper-speaker-diarization">https://huggingface.co/spaces/vumichien/whisper-speaker-diarization</a>""")
                #gr.HTML("<p>If Space not loaded its because of offline devopment errors please message for edit</p> <hr>")
-           with gr.Tab("Merged Subtitles"):
                gr.HTML(""" Core Idea = Ability to follow one video from start to finish is more important than number of words (except for verbs) <hr>
 Step 1 - Get foreign transcript - WHISPER (Need to download video though - booo) / Youtube / Youtube transcript api / SRT websites <br>
 Step 2 - Get Translation of foreign transcript <br>
@@ -1564,13 +1813,13 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                    MacaronicFile = gr.File(label="Paste Macaronic Text")
                    SentGramFormula = gr.File(label="Paste Sentence Grammar Formula Text")
                with gr.Row():
-                   MergeButton = gr.Button(label='Merge the seperate files into one interpolated file (Line by line merge)')
                with gr.Row():
                    MergeOutput = gr.TextArea(label="Output")
                    MergeButton.click(merge_lines, inputs=[RomanFile, W4WFile, FullMeanFile, MacaronicFile], outputs=[MergeOutput], )
                with gr.Row():
                    gr.Text("Make sure there are 4 spaces after the last subtitle block (Otherwise its skipped)")
-                   CleanedMergeButton = gr.Button(label='Create a Usable file for SRT')
                with gr.Row():
                    CleanedMergeOutput = gr.TextArea(label="Output")
                    CleanedMergeButton.click(fn=SRTLineSort, inputs=[MergeOutput], outputs=[CleanedMergeOutput])
@@ -1600,15 +1849,6 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
            gr.HTML("<p> Spell multiple words simultaneously for simultaneous access </p> <p> Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
            gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
            gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Noun and Verbs only (Plus punctuation)")
-       with gr.Tab("Thinking Practice (POS)"):
-           gr.HTML("By removing all nouns and verbs you get a format to practice thinking about your words to use to make sentences which make sense within constraints")
-           with gr.Row():
-               with gr.Column():
-                   with gr.Tab("Sentence to Practice Format"):
-                       gr.Interface(fn=split_verbs_nouns , inputs="text", outputs=["text", "text", "text"], description="Comprehension reading and Sentence Format Creator")
-               with gr.Column():
-                   gr.HTML("<a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> |")
-                   gr.Textbox(label='Use this text to hold translations of the SQL rows in the above linked dataset (A kind of What I say vs what I want)')
        with gr.Tab("Knowledge Ideas - Notetaking"):
            gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
 <p>My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient</p>
@@ -1619,18 +1859,21 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
            gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
            gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textbox", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
        with gr.Tab("Real-Time AI - Video/Audio/AR"):
-           gr.HTML("HUD Experiment (Waiting for GPT4V API)- Full context of user situation + Ability to communicate in real-time to user using images (H100+ and low enough resolution and low enough steps - it/s = fps) - just like google maps but for real life")
            gr.Interface(fn=ImageTranslationTest , inputs=[VideoTestInput, VideoTestSubtitleInput], outputs="video")
-           with gr.Accordion("Whisper Spaces"):
                Whisperspaceoptions = gr.Dropdown(choices=["https://sanchit-gandhi-whisper-jax-diarization.hf.space", "https://sanchit-gandhi-whisper-jax.hf.space", "https://sanchit-gandhi-whisper-large-v2.hf.space", "https://facebook-seamless-m4t.hf.space"], label="existing whisper spaces")
                Whisperspaceoptionsbtn = gr.Button("Load Whisper Space")
                WhisperspaceoptionsOut = gr.HTML()
                Whisperspaceoptionsbtn.click(fn=display_website, inputs=Whisperspaceoptions, outputs=WhisperspaceoptionsOut)
            with gr.Accordion("Image as prompt Spaces"):
-               Imagepromptspaceoptions = gr.Dropdown(choices=["https://badayvedat-llava.hf.space", "https://xinyu1205-recognize-anything.hf.space"], label="existing whisper spaces")
                Imagepromptspaceoptionsbtn = gr.Button("Load a Image as prompt Space")
                ImagepromptspaceoptionsOut = gr.HTML()
                Imagepromptspaceoptionsbtn.click(fn=display_website, inputs=Imagepromptspaceoptions, outputs=ImagepromptspaceoptionsOut)

        with gr.Accordion("Old Ideas to consider", open=False):
            gr.HTML("Nicolai Nielsen Youtube channel - aruco markers = position --> can test using premade ones from an image search")
            gr.Textbox("Alpha Test version = Real time Lablling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid --> Adjusted Demo")
@@ -1672,14 +1915,35 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
            gr.Interface(fn=FirstLetterSummary, inputs=["text"], outputs=["text"], title="Order fast fast practice --> 1 letter a word = fastest read")
            gr.Interface(fn=imagebasedreading, inputs=["text"], outputs=["file", "html", "text"], title="Placeholder for every newline")
        with gr.Tab("Long Text Analysis"):
-           gr.Interface(fn=LoadNLTKUDHRText, inputs=NLTKudhr, outputs=["text", "textarea"])
            gr.HTML("For Long text searches are useful under time pressure and also bring all direct interactions with search terms - a word is defined by those around it")
-           gr.Interface(fn=onlyplurals, inputs=["text"], outputs=["text"], title="Only plurals = optimal concepts to learn first as work = repitition")
            gr.Label("Placeholder for old code for concordance and word counting in other test space")
        with gr.Tab("Video Segmentation with OpenCV Test"):
-           gr.Interface(fn=segment_video_with_opencv, inputs=VideoSplitTestInput, outputs="fileexplorer")
        with gr.Tab("State Management and Education"):
            gr.HTML("Education = Learning things you didnt know yesterday and not forgetting more than you learn <br><br> What you didnt know forms = <br> Glossary <br> Lists <br> Formulas <br> graphs <br> Procedures <br> <br> for each you will need a seperate way to track the progress but amount of times + recency = approximate state ")

-
-lliface.queue().launch(share="true") #(inbrowser="true")
 
25
  import datetime
26
  import cv2
27
  import math
28
+ from langchain.document_loaders import YoutubeLoader #need youtube_transcpt_api and pytube installed
29
+ from youtube_transcript_api import YouTubeTranscriptApi
30
+ from spacy_syllables import SpacySyllables #https://spacy.io/universe/project/spacy_syllables/
31
 
32
+ #When I forgot about the readme file ChatGPT suggested these - Leaving to remember the Readme.md must be updated as well
33
  #print(gr.__version__)
34
  #import subprocess
35
  #subprocess.run(["pip", "install", "--upgrade", "gradio==3.47.1"]) #For huggingface as they sometimes install specific versions on container build
 
51
  nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
52
  nltk.download('udhr') # Declaration of Human rights in many languages
53
 
54
+
55
  spacy.cli.download("en_core_web_sm")
56
  spacy.cli.download('ko_core_news_sm')
57
  spacy.cli.download('ja_core_news_sm')
 
65
  nlp_ko = spacy.load("ko_core_news_sm")
66
  nlp_ja = spacy.load("ja_core_news_sm")
67
  nlp_zh = spacy.load("zh_core_web_sm")
68
+ nlp_en_syllable = spacy.load("en_core_web_sm")
69
+ nlp_en_syllable.add_pipe("syllables", after="tagger") #https://spacy.io/universe/project/spacy_syllables/
70
 
71
  nlp = spacy.load('en_core_web_sm')
72
  translator = Translator()
 
97
  nouns_verbs.append(token.text)
98
  return " ".join(nouns_verbs)
99
 
100
+ def keep_nouns(sentence):
101
+ doc = nlp(sentence)
102
+ nouns = []
103
+ for token in doc:
104
+ if token.pos_ in ['NOUN', 'PUNCT']:
105
+ nouns.append(token.text)
106
+ if token.text == '.':
107
+ nouns.append("\n")
108
+ return " ".join(nouns)
109
+
110
  def unique_word_count(text="", state=None):
111
  if state is None:
112
  state = {}
 
303
  random.shuffle(word_groups[current_group_index])
304
  current_group_time += 10
305
 
306
+ yield " ".join(word_groups[current_group_index])
307
  time.sleep(10)
308
 
309
  def split_verbs_nouns(text):
 
1229
 
1230
  <hr>
1231
 
1232
+ Try make sepreate sentences with this acronym as the best wordplay expert in the world - SI(AGM, KPHSIO, ACFJG, DYNAGJ, JBMNNA, HNPDM, BSED, WUENN |
1233
+
1234
+ <hr>
1235
+
1236
  Make jokes while following rules for a syllogism jokes game:
1237
 
1238
  The game can be played with any number of people.
 
1265
 
1266
  LLPromptIdeasasbtns = LLPromptIdeas.split("<hr>")
1267
 
 
 
 
 
 
1268
  def display_website(link):
1269
  html = f"<iframe src='{link}' width='100%' height='1000px'></iframe>"
1270
  gr.Info("If 404 then the space/page has probably been disabled - normally due to a better alternative")
 
1276
  FinalOutput = f"Total Time is estimated: { lines * reps_per_item * seconds_per_item / 60 } minutes ( {lines} lines)"
1277
  return FinalOutput
1278
 
 
1279
  randomExposuremessageText = ["Great Test for LLM function calling (with Gradio Client)", "Unknown Tracker Tab = Incomplete Reading Assistant Idea - HTML app based on text to be read", "Bing mnemonic - lost = dont ignore unusual sounds here inside lost cave", "1000 verbs in lists of 100, verbs = easy setence structure estimation (SVO, SOV, etc.)", "Can put any message here in the navigatoin tab"]
1280
 
1281
  def randommarquee():
 
1305
  # Define the codec and create VideoWriter object
1306
  # For .mp4 output, use the H.264 codec with the tag 'mp4v'
1307
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
1308
+ output_filename = f'./splitvideo/chunk_{segment}.mp4'
1309
  out = cv2.VideoWriter(output_filename, fourcc, fps, (int(cap.get(3)), int(cap.get(4))))
1310
 
1311
  for frame_num in range(fps * segment_duration):
 
1355
 
1356
  TestSplitandUpdateinput = gr.Textbox(placeholder="Counter and Placeholder one point of entry for the text to be analysed across the whole app")
1357
 
1358
+ def RepititionInjectedReading(splitby, learning, reading):
1359
  readingdoc = nlp(reading)
1360
  learninglist = learning.splitlines()
1361
  FinalOutput = ""
1362
+ if splitby == "sentences":
1363
+ numofsplitsinreading = sum(1 for _ in readingdoc.sents) #len(readingdoc.sents) is wrong because of generator
1364
+ if splitby == "words":
1365
+ numofsplitsinreading = sum(1 for _ in readingdoc.sents) #len(readingdoc.tokens) is wrong because of generator
1366
+
1367
+ numofsplitsinlearning = len(learninglist)
1368
  RepInjectedText = "\n"
1369
 
1370
+ for i in range(0, numofsplitsinlearning):
1371
  for sent in readingdoc.sents:
1372
  RepInjectedText += sent.text + " (" + learninglist[i] + ") "
1373
 
1374
+ FinalOutput = f"{ numofsplitsinreading } repitition oppurtunities between the sentences: \n { RepInjectedText }"
1375
 
1376
  return FinalOutput
1377
 
1378
+ Repsplitdropdown = gr.Dropdown(choices=["sentences", "words"], value="sentences", label="Split by")
1379
+
1380
+ def hidingbuttontesttranslate(text):
1381
+ html = """
1382
+ <html>
1383
+ <head>
1384
+ <style>
1385
+ #container {
1386
+ display: flex;
1387
+ flex-direction: column;
1388
+ }
1389
+ button {
1390
+ width: 200px;
1391
+ padding: 12px 20px;
1392
+ margin: 8px 0;
1393
+ }
1394
+ .hidden {
1395
+ display: none;
1396
+ }
1397
+ </style>
1398
+ </head>
1399
+ <body>
1400
+ <div id="container">
1401
+ """
1402
+
1403
+ doc = nlp(text)
1404
+ sentences = [sent.text for sent in doc.sents]
1405
+
1406
+ for i, sentence in enumerate(sentences):
1407
+ html += f"""
1408
+ <button id="sentence{i}" class="sentence">
1409
+ {sentence}
1410
+ </button>
1411
+ """
1412
+
1413
+ html += """
1414
+ </div>
1415
+
1416
+ <script>
1417
+ let activeBtn;
1418
+
1419
+ const buttons = document.querySelectorAll('.sentence');
1420
+
1421
+ buttons.forEach(button => {
1422
+
1423
+ button.addEventListener('click', () => {
1424
+
1425
+ buttons.forEach(b => b.classList.add('hidden'))
1426
+
1427
+ if (activeBtn) {
1428
+ activeBtn.classList.remove('hidden');
1429
+ }
1430
+
1431
+ activeBtn = button;
1432
+ activeBtn.classList.remove('hidden');
1433
+
1434
+ });
1435
+ });
1436
+ </script>
1437
+
1438
+ </body>
1439
+ </html>
1440
+ """
1441
+
1442
+ return gr.Code(html, language="html"), gr.HTML(html)
1443
+
1444
+
1445
+
1446
+ def extract_video_id(youtube_url):
1447
+ # Regular expression patterns for different YouTube URL formats
1448
+ regex_patterns = [
1449
+ r"(?<=v=)[a-zA-Z0-9_-]+", # Pattern for 'https://www.youtube.com/watch?v=VIDEO_ID'
1450
+ r"(?<=be/)[a-zA-Z0-9_-]+", # Pattern for 'https://youtu.be/VIDEO_ID'
1451
+ r"(?<=embed/)[a-zA-Z0-9_-]+" # Pattern for 'https://www.youtube.com/embed/VIDEO_ID'
1452
+ ]
1453
+
1454
+ video_id = None
1455
+
1456
+ for pattern in regex_patterns:
1457
+ match = re.search(pattern, youtube_url)
1458
+ if match:
1459
+ video_id = match.group(0)
1460
+ break
1461
+
1462
+ return video_id
1463
+
1464
+ YTTtranscriptSubtitleOutput = []
1465
+ YTTtrancriptAnalysedSubtitleOutput = []
1466
+ def YTTransciptAnalysisandLoad(link):
1467
+ global YTTtranscriptSubtitleOutput, YTTtrancriptAnalysedSubtitleOutput
1468
+ if re.match(r'https?:\/\/youtu\.be\/', link) is not None:
1469
+ print("Needs Reformat")
1470
+ video_id = link.split('/')[3]
1471
+ link = 'https://www.youtube.com/watch?v={}'.format(video_id)
1472
+ else: video_id = extract_video_id(link)
1473
+
1474
+ #loader = YoutubeLoader.from_youtube_url(f"{ link }", add_video_info=True)
1475
+ #YTTtranscriptloader = loader.load()
1476
+
1477
+ try:
1478
+ YTTtranscript = YouTubeTranscriptApi.get_transcript(video_id)
1479
+ #YTTtranscript = YTTtranscriptloader[0]
1480
+ YTTtranscriptSubtitleOutput = YTTtranscript
1481
+ except IndexError or AttributeError:
1482
+ print("No Transcript Found")
1483
+ YTTtranscript = "No Transcript found"
1484
+
1485
+ YTTtrancriptAnalysed = []
1486
+ for subtitle in YTTtranscript:
1487
+ YTTtrancriptAnalysed.append({'text': keep_nouns(subtitle['text']), 'start': subtitle['start'], 'duration': subtitle['duration']})
1488
+ #YTTtrancriptAnalysed = str(len(YTTtranscript.page_content)) + "" + str(YTTtranscript.metadata)
1489
+
1490
+ YTTtrancriptAnalysedSubtitleOutput = YTTtrancriptAnalysed
1491
+
1492
+ if re.match(r'https?:\/\/(?:www\.)?youtube\.com\/watch', link) is not None:
1493
+ video_id = re.search(r'v=([^&]+)', link).group(1)
1494
+ link = 'https://www.youtube.com/embed/{}'.format(video_id)
1495
+
1496
+ return f'<iframe width="100%" height="640" src="{ link }" title="" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>', YTTtranscript, YTTtrancriptAnalysed
1497
+
1498
+ def TimedList(list, duration, splits=0):
1499
+ if splits == 0:
1500
+ splits = duration // len(list)
1501
+ for item in list:
1502
+ yield item
1503
+ time.sleep(splits)
1504
+
1505
+ def synctextboxes(text):
1506
+ return text
1507
+
1508
+ def subtitle_generator():
1509
+ global YTTtranscriptSubtitleOutput, YTTtrancriptAnalysedSubtitleOutput
1510
+ """
1511
+ A generator that yields the subtitle based on the current playback time.
1512
+
1513
+ :param subtitles: List of subtitles, where each subtitle is a dictionary with 'start', 'duration', and 'text' keys.
1514
+ :param playback_start_time: The time when playback started, used to calculate the current playback time.
1515
+ """
1516
+ if YTTtranscriptSubtitleOutput == "":
1517
+ return ("No subtitle", "No subtitle")
1518
+
1519
+ playback_start_time = time.time()
1520
+ while True:
1521
+ current_time = time.time() - playback_start_time
1522
+ for index, subtitle in enumerate(YTTtranscriptSubtitleOutput):
1523
+ start_time = int(subtitle['start'])
1524
+ end_time = start_time + int(subtitle['duration'])
1525
+ if start_time <= current_time < end_time:
1526
+ yield (YTTtrancriptAnalysedSubtitleOutput[index]['text'], subtitle['text'])
1527
+ break
1528
+ else:
1529
+ yield ("", "")
1530
+ time.sleep(1) # Wait for 1 second before updating
1531
+
1532
+ def word_to_k8s_format(word):
1533
+ if len(word) <= 2:
1534
+ return word
1535
+ else:
1536
+ return word[0] + str(len(word) - 2) + word[-1]
1537
+
1538
+ def ForeignSyllableListenFormat(text):
1539
+ FinalOutput = ""
1540
+ words = nlp_en_syllable(text)
1541
+ FirstSyllablesonly = ""
1542
+ tempsyllablelist = None
1543
+
1544
+ #Keep only the first syllable of every word
1545
+ for item in words:
1546
+ if item.pos_ != "PUNCT":
1547
+ FinalOutput += item.text + " "
1548
+ tempsyllablelist = item._.syllables
1549
+ if type(tempsyllablelist) == list:
1550
+ FirstSyllablesonly += str(tempsyllablelist[0]) + " "
1551
+ FinalOutput += str(tempsyllablelist) + " " #str(item._.syllables) + " "
1552
+ FinalOutput += str(item._.syllables_count) + " | "
1553
+ else:
1554
+ FinalOutput += item.text + " "
1555
+ FirstSyllablesonly += item.text + " "
1556
+
1557
+ FinalOutput = "The first Syllables (Listening Practice): \n" + FirstSyllablesonly + "\nSyllable Analysis:\n" + FinalOutput
1558
+ return FinalOutput
1559
+
1560
  # For testing purposes
1561
  # file_paths = segment_video_with_opencv("path_to_your_video.mp4")
1562
  # print(file_paths)
1563
 
1564
+ def FirstLetAccronymsSpacy(text):
1565
+ FinalOutput = ""
1566
+ doc = nlp(text)
1567
+ for sent in doc.sents:
1568
+ for word in sent:
1569
+ FinalOutput += word.text[0]
1570
+ FinalOutput += "\n"
1571
+
1572
+ return FinalOutput
1573
+
1574
+ def MultiOutputInterface(inputtext):
1575
+ k8sformat = ""
1576
+ inputwordlist = inputtext.split(" ")
1577
+ for word in inputwordlist:
1578
+ k8sformat += word_to_k8s_format(word) + " "
1579
+ FirstLetAccronyms = FirstLetAccronymsSpacy(inputtext)
1580
+ AcronymMap = create_acronym_map(inputtext)
1581
+ Output1 = keep_nouns_verbs(inputtext)
1582
+ Output2 = keep_nouns(inputtext)
1583
+ Plurals = onlyplurals(inputtext)
1584
+ Output3 = TestSplitandUpdate(inputtext)
1585
+ Output4 = ForeignSyllableListenFormat(inputtext)
1586
+ return Output3[0], FirstLetAccronyms, AcronymMap[0], AcronymMap[1], Output1, Output2, Plurals, k8sformat, Output4, Output3[1]
1587
 
1588
  # Define the Gradio interface inputs and outputs for video split
1589
  spvvideo_file_input = gr.File(label='Video File')
 
1604
  VideoTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
1605
  VideoTestSubtitleInput = gr.File(label="select a subtitle file", file_types=[".txt", ".srt", ".vtt"])
1606
  VideoSplitTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
1607
+ SplitVideoOutput = gr.FileExplorer(root='./splitvideo')
1608
 
1609
  with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
1610
+ gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> Reading comprehension speed through picture based compression (collage), Live Image Subtitles and Listening Comprehension Test </div><div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Very Slowly figuring out what AI intergrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy paste once and for all? ---- </div> <div style="display: flex; justify-content: center; align-items: center; height: 100%;"> All the apis from the below space need to be treated like RAG as notes for the LLM to read before providing its answer </div>')
1611
+ with gr.Accordion("Translation HF Spaces/Sites (Click Here to Open) - Use to get rough translations", open=False):
1612
+ with gr.Row():
1613
+ linktotranslate = gr.Dropdown(choices=["https://kadirnar-multilingual-translation.hf.space", "https://geonmo-nllb-translation-demo.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
1614
+ translatespacebtn = gr.Button("Use the chosen URL to load interface with a translate model")
1615
+ translatespace = gr.HTML("Translate Space Chosen will load here")
1616
+ translatespacebtn.click(display_website, inputs=linktotranslate, outputs=translatespace)
1617
  with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
1618
  with gr.Row():
1619
  linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
1620
+ chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left. For lymsys / chat arena copy the link and use a new tab")
1621
  with gr.Accordion("Some prompt ideas", open=False):
1622
  with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
1623
  gr.HTML(LLPromptIdeas)
1624
+ with gr.Group():
1625
+ promptidea0 = gr.Code(label="Prompt Idea 1", value=LLPromptIdeasasbtns[0])
1626
+ promptidea1 = gr.Code(label="Prompt Idea 2", value=LLPromptIdeasasbtns[1])
1627
+ promptidea2 = gr.Code(label="Prompt Idea 3", value=LLPromptIdeasasbtns[2])
1628
+ promptidea3 = gr.Code(label="Prompt Idea 4", value=LLPromptIdeasasbtns[3])
1629
+ promptidea4 = gr.Code(label="Prompt Idea 5", value=LLPromptIdeasasbtns[4])
1630
  chatspace = gr.HTML("Chat Space Chosen will load here")
1631
  chatspacebtn.click(display_website, inputs=linktochat, outputs=chatspace)
1632
  with gr.Accordion("Image HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
1633
  with gr.Row():
1634
+ linktoimagegen = gr.Dropdown(choices=["https://segmind-segmind-stable-diffusion.hf.space", "https://simianluo-latent-consistency-model.hf.space", "https://google-sdxl.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
1635
  imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
1636
  imagegenspace = gr.HTML("Chat Space Chosen will load here")
1637
  imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
1638
+ gr.HTML("<div style='display: flex; justify-content: center; align-items: center; height: 100%;'> Agents = Custom Software (Personalised UI and Mods, among other things) = Custom Environments (AR) <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter -- </a> | <a href='https://github.com/microsoft/autogen'> -- Microsoft Autogen -- </a> | </div>")
1639
  with gr.Row():
1640
  with gr.Column(scale=1):
1641
+ gr.HTML(""" <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a 
href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | """)
  with gr.Tabs() as nav1:
  with gr.Tab("Rep - HTML"):
  gr.HTML("UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know")
+ gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/preview/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
  See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
  with gr.Tab("Rep - Gradio"):
  gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
 
  with gr.Tab("Vector Database = Memorisation"):
  gr.HTML("Phrasebook on demand in realtime <br><br> Open AI - 10000 * 1000tokens (+- 4000 characters) = 1$ (0.0001 per 1000 tokens / 750 words), Cohere Multilingual = free for personal use / Commercial use = \n Vector Database query = Better than text search but not for logical relationships")
  with gr.Tab("Time Estimate Calculator"):
+ gr.HTML("Repetition = A subconscious time game - transparent screens + below repetition assist (Vision) or (Audio)")
  gr.Interface(fn=RepititionPracticeTimeCalculator, inputs=["text", "number", "number"], outputs="text")
+ with gr.Row():
  PracticeExposure = gr.HTML(randomExposuremessage)
  PracticeExposure2 = gr.HTML(randomExposuremessage2)
  PracticeExposurebtn.click(fn=changeexposuretext, inputs=PracticeExposureInput, outputs=PracticeExposure)
+ with gr.Row():
+ with gr.Column(scale=1):
+ gr.HTML("Advanced Repetition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
+ with gr.Column(scale=1):
+ gr.HTML("<p>Timing Practice - Repetition: Run from it, Dread it, Repetition is inevitable - Thanos --> Repetition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repetition is a multitask activity like driving must be subconscious process to show mastery </p>")
+ with gr.Column(scale=3):
+ with gr.Tab("General"):
+ with gr.Row():
+ gr.HTML("<span style='font-size: 20px'>Start at Unknown Tracker if unsure</span> <br> UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know <br><br> General Ideas in this space - Speed of Learning = Avoid Things you know like the plague -- How to track what you know -- Counter is easiest and How you feel is the hardest (The more you know, the more confusion on what you dont know as you probably werent keeping track) <br><br> Visualisation of long text - Bottom of this page <br> Wordlist - 1 new word at a time per minute in the space to the left <br> Youtube Video Watching - Subtitles Tab <br> Reading - Unknown Tracker Tabs <br> Longer Text Memorising - Acronym Map Creation Tab and Transition Tab <br> Brainstorming - Reading Assistant <br> Random Exposure <br> ")
+ gr.Interface(fn=MultiOutputInterface, inputs=TestSplitandUpdateinput, outputs=["text", "text", "text", "text", "text", "text", "text", "text", "text", "button"])
+ gr.Interface(fn=LoadNLTKUDHRText, inputs=NLTKudhr, outputs=["text", "textarea"], description="UDHR as some test texts")
+ with gr.Tab("Speed through Imagery"):
+ gr.HTML("Categories for Image Creation <br>1. Extract only nouns <br>2. Extract Acronyms <br>3. Convert verbs to Careers <br>Dall-E 3 best for multi concept images - everything else = one picture at a time <br>Ask the LLM to create a prompt from the nouns extracted or to create English words or sentences from the acronyms")
+ ImageryInput = gr.Textbox(placeholder='Enter the text here and use in this tab')
+ with gr.Tab("Filter Text"):
+ gr.Interface(fn=onlyplurals, inputs=["text"], outputs=["text"], description="Only plurals = optimal concepts to learn first as LT work = repetition")
+ gr.Interface(fn=create_acronym_map, inputs="textbox", outputs="textbox", description="Acronyms")
+ gr.Interface(fn=keep_nouns, inputs="textbox", outputs="textbox", description="Nouns only")
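+ # Hedged sketches of the three filters above (assumptions - the real functions are defined earlier in this file and may differ):
+ # def onlyplurals_sketch(text):
+ #     return " ".join(t.text for t in nlp(text) if t.tag_ in ("NNS", "NNPS"))  # plural noun POS tags
+ # def keep_nouns_sketch(text):
+ #     return " ".join(t.text for t in nlp(text) if t.pos_ == "NOUN")
+ # def acronym_map_sketch(text):  # first letter of every word, line by line
+ #     return "\n".join("".join(w[0] for w in line.split()) for line in text.splitlines() if line.strip())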
+ with gr.Tab("Placeholder Generation"):
+ gr.HTML("Placeholder images for each sentence - Good ChatGPT + Dall-E ")
+ with gr.Row():
+ with gr.Column(scale=4):
+ imageplaceholderinput = gr.TextArea()
+ with gr.Column(scale=1):
+ gr.Label("Enter Text and Get a line by line placeholder for image associated with the text")
+ imageplaceholderdownload = gr.File()
+ imageplaceholderbtn = gr.Button("Create the image placeholder")
+ with gr.Row():
+ with gr.Column(scale=3):
+ imageplaceholderoutput = gr.HTML("Preview will load here")
+ with gr.Column(scale=2):
+ imageplaceholdertextoutput = gr.Code("The code for the HTML created will come here")
+ imageplaceholderbtn.click(fn=imagebasedreading, inputs=[imageplaceholderinput], outputs=[imageplaceholderdownload, imageplaceholderoutput, imageplaceholdertextoutput])
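+ # Rough sketch of what the imagebasedreading output could look like (assumption - the real function also returns a file and the code):
+ # def image_placeholder_html(text):
+ #     rows = [f"<div><img src='placeholder.png' alt='image for line' width='120'> {line}</div>"
+ #             for line in text.splitlines() if line.strip()]
+ #     return "\n".join(rows)  # one placeholder <img> per non-empty line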
+ with gr.Tab('Picture Subtitles / Image Slide Show'):
+ gr.Image()
+ gr.HTML('placeholder for button to start generator for time based image recognition i.e. Picture Subtitles')
  with gr.Tab("Repetition Injected Text"):
  gr.Label("Optimal Study Reps happen in between new information acquisition - i.e. anything you havent read already")
+ gr.Interface(fn=RepititionInjectedReading, inputs=[Repsplitdropdown, "text", "text"], outputs="text")
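+ # Sketch of repetition injection (assumption - the actual function's split logic isn't shown here): recycle one already-read line after each new line.
+ # def repetition_injected_sketch(known_text, new_text):
+ #     known = [l for l in known_text.splitlines() if l.strip()]
+ #     out = []
+ #     for i, line in enumerate(l for l in new_text.splitlines() if l.strip()):
+ #         out.append(line)
+ #         if known:
+ #             out.append(known[i % len(known)])
+ #     return "\n".join(out)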
+ with gr.Tab("Stateful Reading - Progress Tracking"):
+ gr.HTML("The problem is to learn you need to repeat things you know that you didnt know this morning - The repetition space to the right is only half of the solution <br>You need to read new stuff while revising the stuff you didnt know that you just learned aka the things you write as notes")
+ gr.HTML("The sentences used in the notes repetition interface can then count as recently known sentences that you can ignore in favour of the sentences you didnt interact with")
+ gr.Label("Some Tests - click to hide - unknown word only list")
+ gr.Interface(fn=hidingbuttontesttranslate, inputs="text", outputs=["code", "html"])
  gr.Label("Missing is database integration for the counter and non-english - ALSO TODO - Parallel interface for the html and acronym creator")
  gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["file", "html", "text"], description="HTML mini App - UNNWFWO (To track verbs you dont know for listening practice). Use the text from here to create lists you use for the TTS section")
  gr.Interface(create_acronym_map, inputs='text', outputs=['text', 'text'])
  gr.HTML("On the Acronyms you need to underline the verbs")
  gr.HTML("Aim for 1000 reps per item in your mind - the end goal for full sentences is to identify the SOV equivalent ASAP")
  gr.Interface(fill_lines, inputs=["text", RepSched_Num_lines], outputs="text")
+
  with gr.Tab("Beginner - Listen + Read"):
  gr.Label("Closed Eye Recital per new word | 1 new word a minute while recycling the words from the previous minutes")
  with gr.Row():

  with gr.Tab("Unique word ID - use in Infranodus"):
  with gr.Accordion(label="Infranodus", open=False):
  gr.HTML(" <a href='https://infranodus.com/'> -- Infranodus - Word Level Knowledge graphs -- </a> | <br> Use the below interfaces to find the items that dont have entries --> These will represent new concepts or people which need to be understood individually to fully understand the text --> Infranodus search will help find related and unrelated investigation paths <br><br> TODO Figure Output Zoom / Image Dimensions")
+ gr.Image(label="Open Infranodus Screenshot")
+ gr.Image(label="Open Infranodus Screenshot")
  gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
  gr.HTML("Use the below interface to fill in the space in this format and then use the chat iframe at the top to ask the llm to analyse this: <br><br> Consider how the following sentence meaning will change if each of the selected words is replaced with one hypernym at a time: <br>Sentence: <br>Hypernyms: ")
  gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
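+ # A possible hypernym/synonym expansion via NLTK's WordNet corpus (assumption - requires nltk.download('wordnet'); the real function may differ):
+ # from nltk.corpus import wordnet
+ # def hyp_and_syn_sketch(word):
+ #     synonyms = {lemma.name() for syn in wordnet.synsets(word) for lemma in syn.lemmas()}
+ #     hypernyms = {hyp.name().split(".")[0] for syn in wordnet.synsets(word) for hyp in syn.hypernyms()}
+ #     return sorted(synonyms), sorted(hypernyms)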
 
  gr.HTML("Parts of speech recognition = comprehension <br> Three word sentences will give an easier guessing chance")
  gr.HTML('<iframe src="https://spacy-gradio-pipeline-visualizer.hf.space" frameborder="0" width="100%" height="600"></iframe>')
  with gr.Tab("Advanced - Making Questions = Reading"):
+ gr.HTML("Some Example Prompts (Even Mistral 7b is good at this) <br><br>Please make a comprehension test for the following: <br>Please make 10 questions based on this text: <br>")
  with gr.Row():
  gr.TextArea("Paste the text to read here", interactive=True)
  gr.TextArea("Make as many questions on the text as you can in native language and then translate", interactive=True)
  gr.Dropdown(["Placeholder chunk 1", "Placeholder chunk 2", "Placeholder chunk 3"])
  gr.HTML("Load the current chunk here and Put a Dataframe where you have only one column for the questions")
  with gr.Tab("Advanced - Youtube - Subtitles - LingQ Addon Ideas"):
  gr.HTML("<a href='https://www.lingq.com/en/'>Find LingQ Here --> https://www.lingq.com/en/</a>")
+ with gr.Tab("New - Learning with Youtube"):
+ gr.HTML("TODO: 1st syllable subtitle. First Syllable of any word is all you need to listen - pair this with Youtube subtitle interface - Listening is ability to spell really fast (real time)")
+ gr.HTML(" <a href='https://huggingface.co/spaces/artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION'> -- artificialguybr's Video Translation/Transcription Space -- </a> | ")
+ YTTransciptOutDropdown = gr.Dropdown(choices=["https://www.youtube.com/watch?v=UYk43fncV68&pp=ygULcnVubmluZyBtYW4%3D", "https://youtu.be/dxVaP0-aFIE"], value="https://www.youtube.com/watch?v=UYk43fncV68&pp=ygULcnVubmluZyBtYW4%3D", allow_custom_value=True)
+ YTTransciptOutbtn = gr.Button("Transcript to text")
+ YTTransciptOutVid = gr.HTML('Video will load here')
+ gr.Interface(fn=subtitle_generator, inputs=None, outputs=["textbox", "textbox"], description='Modified Subtitles Test - will only work after video has been loaded')
+ with gr.Row():
+ YTTransciptAnalysedOut = gr.Textbox(placeholder="The analysed transcript will load here (Click on the youtube logo to open the video and copy)")
+ YTTransciptOut = gr.Textbox(placeholder="Place the translated transcript here (Click on the youtube logo to open the video and copy)")
+ YTTransciptOutbtn.click(fn=YTTransciptAnalysisandLoad, inputs=YTTransciptOutDropdown, outputs=[YTTransciptOutVid, YTTransciptOut, YTTransciptAnalysedOut])
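+ # The transcript fetch could use the youtube-transcript-api package (assumption - YTTransciptAnalysisandLoad's actual approach isn't shown here):
+ # from youtube_transcript_api import YouTubeTranscriptApi
+ # def fetch_transcript_text(url):
+ #     video_id = url.split("v=")[-1].split("&")[0] if "v=" in url else url.rsplit("/", 1)[-1]
+ #     return " ".join(entry["text"] for entry in YouTubeTranscriptApi.get_transcript(video_id))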
+ with gr.Tab("Old - Visual - Multiline Custom Video Subtitles"):
  gr.HTML("LingQ Companion Idea - i.e. Full Translation Read along, and eventually Videoplayer watch along like RASMUS whisper space <br><br>Extra functions needed - Persistent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
  gr.HTML("""<hr> <p>For Transcripts to any video on youtube use the link below ⬇️</p> <a href="https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles">https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles</a> | <a href="https://huggingface.co/spaces/vumichien/whisper-speaker-diarization">https://huggingface.co/spaces/vumichien/whisper-speaker-diarization</a>""")
  #gr.HTML("<p>If Space not loaded its because of offline development errors please message for edit</p> <hr>")
+ with gr.Tab("Merged Subtitles (Incomplete)"):
  gr.HTML(""" Core Idea = Ability to follow one video from start to finish is more important than number of words (except for verbs) <hr>
  Step 1 - Get foreign transcript - WHISPER (Need to download video though - booo) / Youtube / Youtube transcript api / SRT websites <br>
  Step 2 - Get Translation of foreign transcript <br>
 
  MacaronicFile = gr.File(label="Paste Macaronic Text")
  SentGramFormula = gr.File(label="Paste Sentence Grammar Formula Text")
  with gr.Row():
+ MergeButton = gr.Button('Merge the separate files into one interpolated file (Line by line merge)')
  with gr.Row():
  MergeOutput = gr.TextArea(label="Output")
  MergeButton.click(merge_lines, inputs=[RomanFile, W4WFile, FullMeanFile, MacaronicFile], outputs=[MergeOutput])
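+ # merge_lines presumably interleaves the four files line by line (assumption); a minimal version:
+ # def merge_lines_sketch(*texts):
+ #     merged = []
+ #     for group in zip(*(t.splitlines() for t in texts)):  # one line from each file, in order
+ #         merged.extend(group)
+ #     return "\n".join(merged)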
  with gr.Row():
  gr.Text("Make sure there are 4 spaces after the last subtitle block (Otherwise its skipped)")
+ CleanedMergeButton = gr.Button('Create a Usable file for SRT')
  with gr.Row():
  CleanedMergeOutput = gr.TextArea(label="Output")
  CleanedMergeButton.click(fn=SRTLineSort, inputs=[MergeOutput], outputs=[CleanedMergeOutput])
 
  gr.HTML("<p> Spell multiple words simultaneously for simultaneous access </p> <p> Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need to leverage them to remember random sequences</p>")
  gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
  gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Nouns and Verbs only (Plus punctuation)")
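+ # Sketch of a first-two-letters sort as in create_dictionary above (assumption - the real implementation is elsewhere in this file):
+ # from collections import defaultdict
+ # def sort_by_prefix_sketch(text):
+ #     groups = defaultdict(list)
+ #     for word in sorted(text.split()):
+ #         groups[word[:2].lower()].append(word)
+ #     return "\n".join(f"{prefix}: {' '.join(words)}" for prefix, words in sorted(groups.items()))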
  with gr.Tab("Knowledge Ideas - Notetaking"):
  gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
  <p>My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient</p>
 
  gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
  gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textbox", description="Button and Div HTML Generator - Generate the HTML for a button and the corresponding div element.")
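+ # A likely shape for the collapsible button/div HTML (assumption - create_collapsiblebutton itself is defined earlier):
+ # def collapsible_sketch(button_label, div_id, content):
+ #     return (f"<button onclick=\"var d=document.getElementById('{div_id}'); d.hidden=!d.hidden\">{button_label}</button>"
+ #             f"<div id='{div_id}' hidden>{content}</div>")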
  with gr.Tab("Real-Time AI - Video/Audio/AR"):
+ gr.HTML("Pydroid and OpenCV and Tkinter = Frontend for OpenAI / OSS vision API as ChatGPT doesnt support video / real-time screenshot yet")
+ gr.HTML("HUD Experiment (Waiting for GPT4V API) - Full context of user situation + Ability to communicate in real-time to user using images (H100+ and low enough resolution and low enough steps - it/s = fps) - just like google maps but for real life")
+ gr.HTML("Some sample Open Interpreter Code - https://huggingface.co/spaces/ysharma/open-interpreter/blob/main/app.py")
  gr.Interface(fn=ImageTranslationTest, inputs=[VideoTestInput, VideoTestSubtitleInput], outputs="video")
+ with gr.Accordion("Whisper Spaces (sanchit-gandhi spaces have microphone input)"):
  Whisperspaceoptions = gr.Dropdown(choices=["https://sanchit-gandhi-whisper-jax-diarization.hf.space", "https://sanchit-gandhi-whisper-jax.hf.space", "https://sanchit-gandhi-whisper-large-v2.hf.space", "https://facebook-seamless-m4t.hf.space"], label="existing whisper spaces")
  Whisperspaceoptionsbtn = gr.Button("Load Whisper Space")
  WhisperspaceoptionsOut = gr.HTML()
  Whisperspaceoptionsbtn.click(fn=display_website, inputs=Whisperspaceoptions, outputs=WhisperspaceoptionsOut)
  with gr.Accordion("Image as prompt Spaces"):
+ Imagepromptspaceoptions = gr.Dropdown(choices=["https://adept-fuyu-8b-demo.hf.space", "https://badayvedat-llava.hf.space", "https://xinyu1205-recognize-anything.hf.space"], label="existing Image prompt spaces")
  Imagepromptspaceoptionsbtn = gr.Button("Load an Image as prompt Space")
  ImagepromptspaceoptionsOut = gr.HTML()
  Imagepromptspaceoptionsbtn.click(fn=display_website, inputs=Imagepromptspaceoptions, outputs=ImagepromptspaceoptionsOut)
+ gr.HTML("Video Dubbing - <a href='https://huggingface.co/spaces/artificialguybr/video-dubbing'> -- artificialguybr's video dubbing -- </a> | <br> Real Time video dubbing will be a true gamechanger")
  with gr.Accordion("Old Ideas to consider", open=False):
  gr.HTML("Nicolai Nielsen Youtube channel - aruco markers = position --> can test using premade ones from an image search")
  gr.Textbox("Alpha Test version = Real time Labelling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid --> Adjusted Demo")
 
  gr.Interface(fn=FirstLetterSummary, inputs=["text"], outputs=["text"], title="Order fast fast practice --> 1 letter a word = fastest read")
  gr.Interface(fn=imagebasedreading, inputs=["text"], outputs=["file", "html", "text"], title="Placeholder for every newline")
  with gr.Tab("Long Text Analysis"):
  gr.HTML("For long texts searches are useful under time pressure and also bring up all direct interactions with the search terms - a word is defined by those around it")
  gr.Label("Placeholder for old code for concordance and word counting in other test space")
  with gr.Tab("Video Segmentation with OpenCV Test"):
+ gr.Interface(fn=segment_video_with_opencv, inputs=VideoSplitTestInput, outputs=SplitVideoOutput, description="Split video into even increments for better study tracking")
  with gr.Tab("State Management and Education"):
  gr.HTML("Education = Learning things you didnt know yesterday and not forgetting more than you learn <br><br> What you didnt know forms = <br> Glossary <br> Lists <br> Formulas <br> graphs <br> Procedures <br> <br> for each you will need a separate way to track the progress but amount of times + recency = approximate state ")
+ with gr.Tab('Acronym Map Creation Space'):
+ gr.HTML("An Acronym cant be read without previous attentive reading - accurate measure of known vs unknown")
+ with gr.Row():
+ with gr.Accordion('Acronym Map/Skeleton Creator'):
+ gr.HTML("Moved to Progress for now")
+ with gr.Accordion('Test with LLM'):
+ gr.Label('Letters are always easier to recall than whole words. GPT 4 and above best suited for this prompt but can test anywhere')
+ gr.HTML('Please help me study by making an acronym map for the maths ontology (Ask if theres questions)')
+ gr.TextArea('', label='Paste LLM response')
+ gr.HTML('Good but we need to now create 9 Acronym based words - 1 for the headings together and then one each for the subheadings')
+ gr.TextArea('', label='Paste LLM response')
1934
+ gr.TextArea('', label='Paste LLM response')
1935
+ with gr.Accordion(''):
1936
+ gr.HTML('If study content was a map the first letters shape of the whole text = Roads')
1937
+ gr.HTML('Known = ability to match an item to a retrieval cue instantly - Retrieval cue for the whole text = Acronym Map')
1938
+ with gr.Tab("Thinking Practice (POS)"):
1939
+ gr.HTML("By removing all nouns and verbs you get a format to practice thinking about your words to use to make sentences which make sense within constraints")
+ with gr.Row():
+ with gr.Column():
+ with gr.Tab("Sentence to Practice Format"):
+ gr.Interface(fn=split_verbs_nouns, inputs="text", outputs=["text", "text", "text"], description="Comprehension reading and Sentence Format Creator")
+ with gr.Column():
+ gr.HTML("<a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> |")
+ gr.Textbox(label='Use this text to hold translations of the SQL rows in the above linked dataset (A kind of What I say vs what I want)')
+
+ lliface.queue().launch() #(inbrowser="true")