kwabs22 committed
Commit
d7fc782
1 Parent(s): cf6fafe

Random links and ideas

Files changed (1)
  1. app.py +348 -132
app.py CHANGED
@@ -27,8 +27,7 @@ from langdetect import detect
27
  import datetime
28
  import cv2
29
  import math
30
- #from langchain_community.document_loaders import YoutubeLoader #Suggested solution for the deprecated import below
31
- #from langchain.document_loaders import YoutubeLoader #need youtube_transcpt_api and pytube installed
32
  from youtube_transcript_api import YouTubeTranscriptApi
33
  from spacy_syllables import SpacySyllables #https://spacy.io/universe/project/spacy_syllables/
34
  import torch
@@ -164,23 +163,23 @@ langdest = gr.Dropdown(choices=langdropdown_choices, label="Choose Language", va
164
 
165
  ChunkModeDrop = gr.Dropdown(choices=["Chunks", "Reverse", "Three Word Chunks", "Spelling Chunks"], label="Choose Chunk Type", value="Chunks")
166
 
167
- # def FrontRevSentChunk (Chunkmode, Translate, Text, langdest):
168
- # FinalOutput = ""
169
- # TransFinalOutput = ""
170
- # if Chunkmode=="Chunks":
171
- # FinalOutput += Sentencechunker(Text)
172
- # if Chunkmode=="Reverse":
173
- # FinalOutput += ReverseSentenceChunker(Text)
174
- # if Chunkmode=="Three Word Chunks":
175
- # FinalOutput += three_words_chunk(Text)
176
- # if Chunkmode=="Spelling Chunks":
177
- # FinalOutput += BatchWordChunk(Text)
178
 
179
- # if Translate:
180
- # TransFinalOutput = FinalOutput
181
- # translated = translator.translate(TransFinalOutput, dest=langdest[:2])
182
- # FinalOutput += "\n" + translated.text
183
- # return FinalOutput
184
 
185
  # Define a function to filter out non-verb, noun, or adjective words
186
  def filter_words(words):
@@ -537,25 +536,6 @@ def removeTonalMarks(string):
537
  return noTonalMarksStr
538
 
539
 
540
- # def add_text_to_image(input_image, text, output_image_path="output.png", border_size=2):
541
- # text = removeTonalMarks(text)
542
- # imagearr = np.asarray(input_image) #Image.open(input_image_path)
543
- # width, height = imagearr.shape[:2] #width, height = image.size
544
- # img = Image.fromarray(imagearr)
545
- # draw = ImageDraw.Draw(img)
546
- # font = ImageFont.truetype("ShortBaby.ttf", 36) #ShortBaby-Mg2w.ttf
547
- # text_width, text_height = draw.textbbox((0, 0), text, font=font)[2:] #draw.textsize(text, font)
548
- # # calculate the x, y coordinates of the text box
549
- # x = (width - text_width) / 2
550
- # y = (height - text_height) / 2
551
- # # put the text on the image with a border
552
- # for dx, dy in [(0, 0), (border_size, border_size), (-border_size, -border_size), (border_size, -border_size), (-border_size, border_size)]:
553
- # draw.text((x + dx, y + dy), text, font=font, fill=(255, 255, 255))
554
- # draw.text((x, y), text, font=font, fill=(0, 0, 0))
555
- # img.save(output_image_path, "PNG")
556
- # return "output.png"
557
-
558
-
559
  def calculate_max_chars(image_width, font_size, font_path="ShortBaby.ttf", margin=20):
560
  # Create a temporary image to calculate character width
561
  img_temp = Image.new('RGB', (100, 100))
@@ -1606,65 +1586,118 @@ Repsplitdropdown = gr.Dropdown(choices=["sentences", "words"], value="sentences"
1606
 
1607
  def hidingbuttontesttranslate(text):
1608
  html = """
1609
- <html>
1610
- <head>
1611
- <style>
1612
- #container {
1613
- display: flex;
1614
- flex-direction: column;
1615
- }
1616
- button {
1617
- width: 200px;
1618
- padding: 12px 20px;
1619
- margin: 8px 0;
1620
- }
1621
- .hidden {
1622
- display: none;
1623
- }
1624
- </style>
1625
- </head>
1626
- <body>
1627
- <div id="container">
1628
- """
1629
 
1630
  doc = nlp(text)
1631
  sentences = [sent.text for sent in doc.sents]
1632
-
1633
  for i, sentence in enumerate(sentences):
1634
  html += f"""
1635
- <button id="sentence{i}" class="sentence">
1636
- {sentence}
1637
- </button>
1638
- """
1639
 
1640
  html += """
1641
- </div>
1642
-
1643
- <script>
1644
- let activeBtn;
1645
-
1646
- const buttons = document.querySelectorAll('.sentence');
1647
-
1648
- buttons.forEach(button => {
1649
-
1650
- button.addEventListener('click', () => {
1651
-
1652
- buttons.forEach(b => b.classList.add('hidden'))
1653
-
1654
- if (activeBtn) {
1655
- activeBtn.classList.remove('hidden');
1656
- }
1657
-
1658
- activeBtn = button;
1659
- activeBtn.classList.remove('hidden');
1660
-
1661
- });
1662
- });
1663
- </script>
1664
 
1665
- </body>
1666
- </html>
1667
- """
1668
 
1669
  return gr.Code(html, language="html"), gr.HTML(html)
1670
 
@@ -2064,36 +2097,114 @@ VideoTestSubtitleInput = gr.File(label="select a subtitle file", file_types=[".t
2064
  VideoSplitTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
2065
  SplitVideoOutput = gr.FileExplorer(root_dir='./splitvideo')
2066
 
2067
  with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
2068
 gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> Reading comprehension speed through picture based compression (collage), Live Image Subtitles and Listening Comprehension Test - <a href="https://chat.openai.com/g/g-bYMSVlb8y-lingua-link"> -- Lingua Link (Simple GPT for assisting image creation) -- </a> | </div><div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Very slowly figuring out what an AI integrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy paste once and for all? ---- </div> <div style="display: flex; justify-content: center; align-items: center; height: 100%;"> All the APIs from the spaces below need to be treated like RAG, as notes for the LLM to read before providing its answer </div>')
2069
  with gr.Accordion("Some Useful Spaces", open=False):
2070
  with gr.Accordion("Translation or STT HF Spaces/Sites (Click Here to Open) - Use to get rough translations", open=False):
2071
  with gr.Row():
2072
- linktotranslate = gr.Dropdown(choices=["https://facebook-seamless-m4t-v2-large.hf.space", "https://hf-audio-whisper-large-v3.hf.space", "https://pyf98-owsm-v3-demo.hf.space", "https://kadirnar-multilingual-translation.hf.space", "https://geonmo-nllb-translation-demo.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2073
  translatespacebtn = gr.Button("Use the chosen URL to load interface with a translate model")
2074
  translatespace = gr.HTML("Translate Space Chosen will load here")
2075
  translatespacebtn.click(display_website, inputs=linktotranslate, outputs=translatespace)
2076
  with gr.Accordion("Audio Gen HF Spaces/Sites (Click Here to Open)", open=False):
2077
  with gr.Row():
2078
- linktoaudiogen = gr.Dropdown(choices=["https://coqui-xtts.hf.space", "https://suno-bark.hf.space", "https://mrfakename-metavoice-1b-v0-1.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2079
  audiogenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
2080
  audiogenspace = gr.HTML("Chat Space Chosen will load here")
2081
  audiogenspacebtn.click(display_website, inputs=linktoaudiogen, outputs=audiogenspace)
2082
  with gr.Accordion("Image Gen or Animation HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
2083
  with gr.Row():
2084
- linktoimagegen = gr.Dropdown(choices=["https://modelscope-transferanything.hf.space", "https://visionmaze-magic-me.hf.space", "https://wangfuyun-animatelcm.hf.space", "https://artgan-diffusion-api.hf.space", "https://multimodalart-stable-cascade.hf.space", "https://radames-real-time-text-to-image-sdxl-lightning.hf.space", "https://ap123-sdxl-lightning.hf.space", "https://google-sdxl.hf.space", "https://guoyww-animatediff.hf.space", "https://segmind-segmind-stable-diffusion.hf.space", "https://simianluo-latent-consistency-model.hf.space", "https://artificialguybr-studio-ghibli-lora-sdxl.hf.space", "https://artificialguybr-pixel-art-generator.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2085
  imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
2086
  imagegenspace = gr.HTML("Chat Space Chosen will load here")
2087
  imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
2088
- with gr.Accordion("Vision HF Spaces/Sites (Click Here to Open)", open=False):
2089
  with gr.Row():
2090
- linktovisionund = gr.Dropdown(choices=["https://badayvedat-llava.hf.space", "https://languagebind-moe-llava.hf.space", "https://vision-cair-minigpt4.hf.space", "https://fffiloni-live-vision.hf.space", "https://ysharma-gemini-pro-vision-chat.hf.space", "https://kvikontent-chatgpt-vision.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2091
  visionundspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
2092
  visionundspace = gr.HTML("Chat Space Chosen will load here")
2093
  visionundspacebtn.click(display_website, inputs=linktovisionund, outputs=visionundspace)
2094
  with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
2095
  with gr.Row():
2096
- linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://stabilityai-stablelm-2-1-6b-zephyr.hf.space", "https://qwen-qwen1-5-72b-chat.hf.space", "https://deepseek-ai-deepseek-coder-7b-instruct.hf.space", "https://01-ai-yi-34b-chat.hf.space", "https://ysharma-zephyr-playground.hf.space", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2097
 chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left. For lmsys / chat arena copy the link and use a new tab")
2098
  with gr.Accordion("Some prompt ideas", open=False):
2099
  with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
@@ -2110,7 +2221,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
2110
  #-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2111
  with gr.Row():
2112
  with gr.Column(scale=1):
2113
- gr.HTML(""" <div style="height: 350px; width: 100%; border: 1px solid black; overflow: auto;"> Some useful links <br> <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://openxlab.org.cn/apps'> -- Openxlabs - Huggingface Alternative -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> 
-- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | <a href='https://huggingface.co/spaces/guoyww/AnimateDiff'> -- AnimateDiff: Create an image make a video -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless-m4t-v2-large'> -- Seamless m4t v2 -- </a> | <a href='https://huggingface.co/spaces/Otter-AI/OtterHD-Demo'> -- OtterHD: Multimodal model -- </a> | <a href='https://ai.meta.com/blog/ego-exo4d-video-learning-perception/'> -- Ego-exo4d Multimodal 
dataset -- </a> | <a href='https://imagine.meta.com/'> -- Meta Imagine images (Free) -- </a> | <a href='https://www.mage.space/'> -- Mage Space images (Free) -- </a> | <a href='https://www.bing.com/images/create?FORM=GENILP'> -- Bing Image Creator (Free) -- </a> | <a href='https://jalammar.github.io/'> -- Jay Alammar Blog - Illustrated Transformer, Stable Diffusion and More -- </a> | <a href='https://huggingface.co/spaces/myshell-ai/OpenVoice'> -- OpenVoice - Open Source Voice Clone -- </a> | <a href='https://huggingface.co/spaces/fffiloni/live-vision'> -- Live-Vision HF Space - Live commentary on a video feed demo -- </a> | <a href='https://xenova.github.io/transformers.js/'> -- Transformers JS demo - Xenova (HF) -- </a> | <a href='https://huggingface.co/chat/assistants'> -- Huggingface Assistants -- </a> | <a href='https://huggingface.co/spaces/AP123/SDXL-Lightning'> -- 4-step SDXL Inference through LORA -- </a> | <a href='https://huggingface.co/datasets/HuggingFaceTB/cosmopedia'> -- Cosmopedia - 92 GB synthetic dataset made using Mixtral (25 billion tokens) -- </a> | </div>""")
2114
  with gr.Tabs() as nav1:
2115
  with gr.Tab("Rep - HTML"):
2116
 gr.HTML("UNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you don't know")
@@ -2151,10 +2262,10 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
2151
 gr.HTML("<b>FINAL VERSION = Image placeholder + Merged Images + Side by Side Audio + UNWFWO Reader script + Spotify/Youtube integration in one interface</b> <br>True mastery is from the relations between each item aka how every word relates to each other - Repetition in the form of combinatorics - an LLM turns these into full sentences / ideas ")
2152
 gr.HTML("Focus = Thinking = Audio = Repetition = This space is just ideas for optimising the audio content. - Audio of side by side version -- listen till you can say the foreign before the audio plays it (Knowledge version is Glossary as vocab you must master before reading)")
2153
  with gr.Accordion("Some Current Incomplete Tests", open=False):
2154
- with gr.Group():
2155
- gr.HTML("Stable LM 2 zephyr 1.6 Placeholder - llama-cpp-python issues locally")
2156
- gr.Interface(fn=lambda name: f"Placeholder to talk to Stable LM. Prompt = {name}", inputs="text", outputs="text")
2157
- gr.Interface(fn=whisperlocaltts, inputs="file", outputs="text", description="Incomplete - Whisper base Test - Can record and then copy the text for use")
2158
  gr.HTML('Memorisation by string comparison idea <br><br>Result of prompt chain starting with: Lets say I have the strings "red" and "ppalgan" how can I guess the second from the first from just spelling (eg. similar words and distance in the alphabet, ...), how can I use python to do this i.e. output of no matching letters, closest letter to r, then e, then d, a dictionary of letters that look similar eg. d and p, l and I a and d etc.')
2159
  gr.Interface(fn=letterbased_guess_word, inputs=["text", "text"], outputs="text", description="letter based guess suggestions (one word to one word is the designed use case)")
2160
  gr.HTML("Side by side reading creator (Google Translate) TODO - Roman output of Non roman characters")
@@ -2205,6 +2316,135 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
2205
  nllbtranscpubtnword4word.click(fn=nllbtransctranslationoptionalw4w, inputs=[nllbtranscpulangsrc, nllbtranscpulangdest, nllbtranscpuinput], outputs=[nllbtranscpuOutputword4word, nllbtranscpudetailsword4wordOutput])
2206
  nllbtranscpubtn.click(fn=nllbtransctranslation, inputs=[nllbtranscpulangsrc, nllbtranscpulangdest, nllbtranscpuinput], outputs=[nllbtranscpuOutput, nllbtranscpudetailsOutput])
2207
  gr.Interface(fn=LoadNLTKUDHRText, inputs=NLTKudhr, outputs=["text", "textarea"], description="UDHR as some test texts")
2208
  with gr.Tab("Youtube Subs Listening Comprehension"):
2209
  gr.HTML("<a href='https://www.lingq.com/en/'>State Management Solution for Word --> Find LingQ Here --> https://www.lingq.com/en/</a>")
2210
  with gr.Tab("New - Learning with Youtube"):
@@ -2265,7 +2505,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
2265
  gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
2266
  with gr.Tab("Audio - Only English thoughts as practice"):
2267
  gr.HTML("For Audio Most productive is real time recall of native (where your full reasoning ability will always be) <br><hr> Find Replace new lines of the foreign text with full stops or | to get per word translation")
2268
- # gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions2], outputs="audio", description="Paste only english words in foreign order and then keep removing the words from this to practice as effectively")
2269
  with gr.Tab("Speed through Imagery"):
2270
 gr.HTML("<a href='https://chat.openai.com/g/g-bYMSVlb8y-lingua-link'> -- Lingua Link (Simple GPT for assisting image creation) -- </a> <br>Use with placeholder generator tab below <br> Best for this is 2 nouns as one phrase i.e. nouns as adjectives and then you can add a verb (1000 of those will take you far)")
2271
  with gr.Accordion("More Details - conversation example", open=False):
@@ -2304,8 +2544,9 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
2304
 gr.Interface(fn=onlyplurals, inputs=["text"], outputs=["text"], description="Only plurals = optimal concepts to learn first as LT work = repetition")
2305
  gr.Interface(fn=create_acronym_map, inputs="textbox", outputs="textbox", description="Acronyms")
2306
  gr.Interface(fn=keep_nouns, inputs="textbox", outputs="textbox", description="Nouns only")
2307
- with gr.Tab("Placeholder Genration"):
2308
  gr.HTML("Placeholder for every image of each sentence - Good for ChatGPT + Dall-E (First 16 Characters is part of the filename if you get error)")
 
2309
  with gr.Row():
2310
  with gr.Column(scale=4):
2311
  imageplaceholderinput = gr.TextArea(placeholder="Enter Text and Get a line by line (stand in for sentences for now) placeholder for image associated with the text")
@@ -2318,7 +2559,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
2318
  with gr.Column(scale=2):
2319
  imageplaceholdertextoutput = gr.Code("The code for the HTML created will come here")
2320
  imageplaceholderbtn.click(fn=imagebasedreading, inputs=[imageplaceholderinput], outputs=[imageplaceholderdownload, imageplaceholderoutput, imageplaceholdertextoutput])
2321
- with gr.Tab("Word level Placeholder Genration"):
2322
  gr.HTML("Placeholder for every image of each word - Good for ChatGPT + Dall-E (First 16 Characters is part of the filename if you get error)")
2323
  with gr.Row():
2324
  with gr.Column(scale=4):
@@ -2357,34 +2598,6 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
2357
  <br><br> # https://docs.aws.amazon.com/polly/latest/dg/ref-phoneme-tables-shell.html
2358
  <br><br> # https://docs.aws.amazon.com/polly/latest/dg/ph-table-english-za.html
2359
  <br><br> # https://docs.aws.amazon.com/polly/latest/dg/ph-table-korean.html""")
2360
- with gr.Tab("Beginner - Listen + Read"):
2361
- gr.Label("Closed Eye Recital per new word | 1 new word a minute while recycling the words from the previous minutes")
2362
- with gr.Row():
2363
- with gr.Column(scale=1):
2364
- gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <br> <b>Tiny Stories dataset is like a graded reader</b> <br>")
2365
- # gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions, "checkbox"], outputs="audio", description="Paste chorus lyrics from below here and use TTS or make notes to save here (Or paste anything)")
2366
- with gr.Accordion("TTS Spaces", open=False):
2367
- TTSspaceoptions = gr.Dropdown(choices=["https://suno-bark.hf.space", "https://coqui-xtts.hf.space"], label="existing whisper spaces")
2368
- TTSspaceoptionsbtn = gr.Button("Load a Image as prompt Space")
2369
- TTSspaceoptionsOut = gr.HTML()
2370
- TTSspaceoptionsbtn.click(fn=display_website, inputs=TTSspaceoptions, outputs=TTSspaceoptionsOut)
2371
- gr.HTML("<p>Fastest way to learn words = is to have your own sound reference --> probably why babies learn fast as they make random noise</p> <p>If you know the flow of the song you can remember the spelling easier</p><p>Essentially if the sounds are repeated or long notes they are easy to remember</p>")
2372
- gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
2373
- gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repitition for tts practice")
2374
- with gr.Column(scale=1):
2375
- gr.HTML("""Reading - Caption images (SD/Dalle-E) <br> <a href='https://unsplash.com/'> -- Unsplash - free images -- </a> | <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> --Huggingface CLIP-Interrogator Space-- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- Tag2Text is faster than clip -- </a> | <br> <a href='https://huggingface.co/spaces/bkhmsi/Word-To-Image'> -- Transform word to an image -- </a> | <a href='https://huggingface.co/spaces/microsoft/Promptist'> -- Promptist (Microsoft) -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> """)
2376
- with gr.Accordion("RAM/Tag2Text Space - Create Tags here and Copy paste", open=False):
2377
- RAMSpaceLink = gr.Textbox("https://xinyu1205-recognize-anything.hf.space")
2378
- RAMSpacetest = gr.HTML("")
2379
- RAMSpacetestbtn = gr.Button('Load Space')
2380
- RAMSpacetestbtn.click(display_website, RAMSpaceLink, RAMSpacetest)
2381
- with gr.Accordion("SAM Space Test", open=False):
2382
- SAMSpaceLink = gr.Textbox("https://curt-park-segment-anything-with-clip.hf.space")
2383
- SAMSpacetest = gr.HTML("")
2384
- SAMSpacetestbtn = gr.Button('Load Space')
2385
- SAMSpacetestbtn.click(display_website, SAMSpaceLink, SAMSpacetest)
2386
- gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit <br> if theres an error you have to remove the foreign letters and place roman ones")
2387
- gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
2388
  #with gr.Tab("Transcribe - RASMUS Whisper"):
2389
  #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
2390
 with gr.Tab("Beginner - Reading Assistant + Unknown Tracker"):
@@ -2460,7 +2673,7 @@ Each type of knowing involves different cognitive processes and levels of unders
2460
  """)
2461
 
2462
  with gr.Tab("Transition is the end goal (SOV, SVO, VSO)"):
2463
- # gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text", description="Chunks creator")
2464
  with gr.Row():
2465
  with gr.Column():
2466
 gr.Interface(fn=AutoSyllablePractice, inputs="text", outputs="text", description="One Word At A Time | Audio Spelling Practice Using vowels as the separator")
@@ -2586,4 +2799,7 @@ Each type of knowing involves different cognitive processes and levels of unders
2586
  gr.Textbox(label='Use this text to hold translations of the SQL rows in the above linked dataset (A kind of What I say vs what I want)')
2587
 
2588
 
2589
- lliface.queue().launch(share=True) #docker #(inbrowser="true") #colab
 
 
 
 
27
  import datetime
28
  import cv2
29
  import math
30
+ from langchain.document_loaders import YoutubeLoader #needs youtube_transcript_api and pytube installed
 
31
  from youtube_transcript_api import YouTubeTranscriptApi
32
  from spacy_syllables import SpacySyllables #https://spacy.io/universe/project/spacy_syllables/
33
  import torch
 
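Note on the re-added import: the comment deleted in this commit already points out that newer LangChain releases moved the loader to langchain_community. A hedged compatibility sketch, not part of the commit (the video URL is a placeholder; youtube_transcript_api is still required, and pytube only if video info is requested):

```python
# Sketch under the assumption that either LangChain layout may be installed.
try:
    from langchain_community.document_loaders import YoutubeLoader  # LangChain >= 0.1 layout
except ImportError:
    from langchain.document_loaders import YoutubeLoader  # older layout, as used in app.py

# Placeholder URL; load() returns a list of Documents holding the transcript text.
loader = YoutubeLoader.from_youtube_url("https://www.youtube.com/watch?v=VIDEO_ID", add_video_info=False)
docs = loader.load()
print(docs[0].page_content[:200] if docs else "no transcript found")
```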
163
 
164
  ChunkModeDrop = gr.Dropdown(choices=["Chunks", "Reverse", "Three Word Chunks", "Spelling Chunks"], label="Choose Chunk Type", value="Chunks")
165
 
166
+ def FrontRevSentChunk(Chunkmode, Translate, Text, langdest):
167
+     FinalOutput = ""
168
+     TransFinalOutput = ""
169
+     if Chunkmode == "Chunks":
170
+         FinalOutput += Sentencechunker(Text)
171
+     if Chunkmode == "Reverse":
172
+         FinalOutput += ReverseSentenceChunker(Text)
173
+     if Chunkmode == "Three Word Chunks":
174
+         FinalOutput += three_words_chunk(Text)
175
+     if Chunkmode == "Spelling Chunks":
176
+         FinalOutput += BatchWordChunk(Text)
177

178
+     if Translate:
179
+         TransFinalOutput = FinalOutput
180
+         translated = translator.translate(TransFinalOutput, dest=langdest[:2])
181
+         FinalOutput += "\n" + translated.text
182
+     return FinalOutput
183
 
184
  # Define a function to filter out non-verb, noun, or adjective words
185
  def filter_words(words):
 
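The restored FrontRevSentChunk leans on chunk helpers (Sentencechunker, ReverseSentenceChunker, three_words_chunk, BatchWordChunk) and a googletrans-style translator defined elsewhere in app.py; langdest[:2] trims a dropdown value like "en: english" down to the two-letter code the translator expects. A minimal sketch of the cumulative-chunk idea, assuming the real Sentencechunker is more elaborate:

```python
# Assumption: a cumulative prefix chunker in the spirit of Sentencechunker;
# the real helper in app.py may differ, this only illustrates the output shape.
def sentence_chunker_sketch(text: str) -> str:
    words = text.split()
    return "\n".join(" ".join(words[:i + 1]) for i in range(len(words)))

print(sentence_chunker_sketch("The quick brown fox"))
# The
# The quick
# The quick brown
# The quick brown fox
```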
536
  return noTonalMarksStr
537
 
538
 
539
  def calculate_max_chars(image_width, font_size, font_path="ShortBaby.ttf", margin=20):
540
  # Create a temporary image to calculate character width
541
  img_temp = Image.new('RGB', (100, 100))
 
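Only the first lines of calculate_max_chars fall inside this hunk. The measurement trick is that the temporary canvas is never shown: it exists purely to measure an average glyph width, which then divides the usable image width into a characters-per-line budget. A hedged reconstruction of that idea (the real function continues beyond the lines shown and may differ):

```python
# Hedged sketch, not the verbatim app.py function.
from PIL import Image, ImageDraw, ImageFont

def calculate_max_chars_sketch(image_width, font_size, font_path="ShortBaby.ttf", margin=20):
    img_temp = Image.new('RGB', (100, 100))      # throwaway canvas, only used for measuring
    draw_temp = ImageDraw.Draw(img_temp)
    font = ImageFont.truetype(font_path, font_size)
    sample = "abcdefghijklmnopqrstuvwxyz"
    avg_char_width = draw_temp.textlength(sample, font=font) / len(sample)  # Pillow >= 8.0
    return int((image_width - 2 * margin) // avg_char_width)
```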
1586
 
1587
  def hidingbuttontesttranslate(text):
1588
  html = """
1589
+ <html>
1590
+ <head>
1591
+ <style>
1592
+ #container {
1593
+ display: flex;
1594
+ flex-direction: column;
1595
+ }
1596
+ button {
1597
+ width: 200px;
1598
+ padding: 12px 20px;
1599
+ margin: 8px 0;
1600
+ }
1601
+ .hidden {
1602
+ display: none;
1603
+ }
1604
+ .collapsible {
1605
+ cursor: pointer;
1606
+ padding: 10px;
1607
+ width: 100%;
1608
+ border: none;
1609
+ text-align: left;
1610
+ outline: none;
1611
+ font-size: 16px;
1612
+ background-color: #f1f1f1;
1613
+ }
1614
+ .active, .collapsible:hover {
1615
+ background-color: #ddd;
1616
+ }
1617
+ .content {
1618
+ padding: 0 18px;
1619
+ display: none;
1620
+ overflow: hidden;
1621
+ background-color: #f1f1f1;
1622
+ }
1623
+ </style>
1624
+ </head>
1625
+ <body>
1626
+ <div id="container">
1627
+ <button type="button" class="collapsible">Overview of current progress</button>
1628
+ <div class="content">
1629
+ <ul id="overview"></ul>
1630
+ </div>
1631
+ <hr>
1632
+ <div id="buttons">
1633
+ """
1634
 
1635
  doc = nlp(text)
1636
  sentences = [sent.text for sent in doc.sents]
 
1637
  for i, sentence in enumerate(sentences):
1638
  html += f"""
1639
+ <button onclick="toggleVisibility({i})" id="sentence{i}" class="sentence">
1640
+ {sentence}
1641
+ </button>
1642
+ """
1643
 
1644
  html += """
1645
+ </div>
1646
+ </div>
1647
+ <script>
1648
+ const shownItems = new Set();
1649
+ const hiddenItems = new Set();
1650
+
1651
+ function toggleVisibility(index) {
1652
+ const button = document.getElementById(`sentence${index}`);
1653
+ const overviewItem = document.getElementById(`item${index}`);
1654
+
1655
+ if (shownItems.has(index)) {
1656
+ shownItems.delete(index);
1657
+ hiddenItems.add(index);
1658
+ button.classList.add('hidden');
1659
+ overviewItem.style.textDecoration = 'line-through';
1660
+ } else {
1661
+ shownItems.add(index);
1662
+ hiddenItems.delete(index);
1663
+ button.classList.remove('hidden');
1664
+ overviewItem.style.textDecoration = 'none';
1665
+ }
1666
+ }
 
1667
 
1668
+ function initializeOverview() {
1669
+ const overview = document.getElementById('overview');
1670
+ const sentences = document.querySelectorAll('.sentence');
1671
+
1672
+ sentences.forEach((sentence, index) => {
1673
+ const listItem = document.createElement('li');
1674
+ listItem.id = `item${index}`;
1675
+ listItem.textContent = sentence.textContent;
1676
+ overview.appendChild(listItem);
1677
+ shownItems.add(index);
1678
+ });
1679
+ }
1680
+
1681
+ function toggleCollapsible() {
1682
+ this.classList.toggle("active");
1683
+ const content = this.nextElementSibling;
1684
+ if (content.style.display === "block") {
1685
+ content.style.display = "none";
1686
+ } else {
1687
+ content.style.display = "block";
1688
+ }
1689
+ }
1690
+
1691
+ initializeOverview();
1692
+
1693
+ const coll = document.getElementsByClassName("collapsible");
1694
+ for (let i = 0; i < coll.length; i++) {
1695
+ coll[i].addEventListener("click", toggleCollapsible);
1696
+ }
1697
+ </script>
1698
+ </body>
1699
+ </html>
1700
+ """
1701
 
1702
  return gr.Code(html, language="html"), gr.HTML(html)
1703
 
 
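hidingbuttontesttranslate returns the generated markup twice, once as inspectable source (gr.Code) and once rendered (gr.HTML). A hedged wiring sketch with illustrative component names; note that Gradio sanitizes HTML output in some versions, so the inline <script> may not execute everywhere:

```python
# Sketch only; hidingbuttontesttranslate is assumed to be in scope from app.py.
import gradio as gr

with gr.Blocks() as demo:
    sent_input = gr.Textbox(label="Text to split into hideable sentence buttons")
    build_btn = gr.Button("Build sentence buttons")
    html_source = gr.Code(language="html")  # inspect the generated markup
    html_render = gr.HTML()                 # rendered buttons plus the collapsible overview
    build_btn.click(fn=hidingbuttontesttranslate, inputs=sent_input, outputs=[html_source, html_render])

demo.launch()
```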
2097
  VideoSplitTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
2098
  SplitVideoOutput = gr.FileExplorer(root_dir='./splitvideo')
2099
 
2100
+ #---------------------------------------------------------------------------------------------------------
2101
+ def build_sentence_basic(subject, verb, obj, order):
2102
+     if order == "SVO":
2103
+         return f"{subject} {verb} {obj}."
2104
+     elif order == "SOV":
2105
+         return f"{subject} {obj} {verb}."
2106
+     else:  # VSO
2107
+         return f"{verb} {subject} {obj}."
2108
+
2109
+ def build_sentence(subject, verb, obj, adjective, adverb, prep_phrase, conjunction, structure):
2110
+     structures = {
2111
+         "SVOAAPC": f"{subject} {verb} {obj} {adjective} {adverb} {prep_phrase} {conjunction}.",
2112
+         "SVOAPC": f"{subject} {verb} {obj} {adjective} {prep_phrase} {conjunction}.",
2113
+         "SVOAC": f"{subject} {verb} {obj} {adjective} {conjunction}.",
2114
+         "SVOAPAC": f"{subject} {verb} {obj} {adverb} {prep_phrase} {adjective} {conjunction}.",
2115
+         "SVCAOAP": f"{subject} {verb} {conjunction} {adverb} {obj} {adjective} {prep_phrase}.",
2116
+         "SVACOAP": f"{subject} {verb} {adverb} {conjunction} {obj} {adjective} {prep_phrase}.",
2117
+         "SVACOP": f"{subject} {verb} {adverb} {conjunction} {obj} {prep_phrase}.",
2118
+         "SVCAPO": f"{subject} {verb} {conjunction} {adjective} {prep_phrase} {obj}.",
2119
+         "SVAPO": f"{subject} {verb} {adverb} {prep_phrase} {obj}.",
2120
+         "SVCPO": f"{subject} {verb} {conjunction} {prep_phrase} {obj}.",
2121
+         "SVOAA": f"{subject} {verb} {obj} {adjective} {adverb}.",
2122
+         "SVAPA": f"{subject} {verb} {adverb} {prep_phrase} {adjective}.",
2123
+         "SVPAO": f"{subject} {verb} {prep_phrase} {adjective} {obj}.",
2124
+         "SVOAP": f"{subject} {verb} {obj} {adverb} {prep_phrase}.",
2125
+         "SVPOA": f"{subject} {verb} {prep_phrase} {obj} {adjective}.",
2126
+         "SVOPA": f"{subject} {verb} {obj} {prep_phrase} {adjective}.",
2127
+         "SADVV": f"{subject} {adverb} {verb}.",
2128
+         "SADJV": f"{subject} {adjective} {verb}.",
2129
+         "SPPV": f"{subject} {prep_phrase} {verb}.",
2130
+         "SCONJV": f"{subject} {conjunction} {verb}.",
2131
+         "SADVPPV": f"{subject} {adverb} {prep_phrase} {verb}.",
2132
+         "SADJCONJV": f"{subject} {adjective} {conjunction} {verb}.",
2133
+         "ASVOP": f"{adverb}, {subject} {verb} {obj} {prep_phrase}.",
2134
+         "PSVOAC": f"{prep_phrase}, {subject} {verb} {obj} {adjective} {conjunction}.",
2135
+         "CSVO": f"{conjunction} {subject} {verb} {obj}.",
2136
+         "SVOPC": f"{subject} {verb} {obj} {prep_phrase} {conjunction}.",
2137
+         "SVCOP": f"{subject} {verb} {conjunction} {obj} {prep_phrase}.",
2138
+         "SVOCA": f"{subject} {verb} {obj} {conjunction} {adverb}."
2139
+     }
2140
+     return structures.get(structure, "Invalid structure.").capitalize()
2141
+
2142
+ sentbuildchoices = [
2143
+     "SVOAAPC", "SVOAPC", "SVOAC", "SVOAPAC", "SVCAOAP", "SVACOAP", "SVACOP", "SVCAPO",
2144
+     "SVAPO", "SVCPO", "SVOAA", "SVAPA", "SVPAO", "SVOAP", "SVPOA", "SVOPA",
2145
+     "SADVV", "SADJV", "SPPV", "SCONJV", "SADVPPV", "SADJCONJV",
2146
+     "ASVOP", "PSVOAC", "CSVO", "SVOPC", "SVCOP", "SVOCA"
2147
+ ]
2148
+
2149
+ sentbuildsubjects = ["The curious cat", "The playful dog", "The majestic eagle", "The wise owl", "The friendly dolphin", "The cat", "The dog", "The bird", "The elephant", "The lion", "The monkey", "The rabbit", "The turtle"]
2150
+ sentbuildverbs = ["chases", "discovers", "explores", "investigates", "observes", "chases", "eats", "watches", "plays with", "jumps over", "runs from", "hides from", "sleeps on"]
2151
+ sentbuildobjects = ["the colorful butterfly", "the mysterious cave", "the ancient ruins", "the hidden treasure", "the underwater world"]
2152
+ sentbuildadjectives = ["beautiful", "enchanting", "fascinating", "intriguing", "marvelous", "the mouse", "the bone", "the worm", "the ball", "the fence", "the hunter", "the predator", "the leaf"]
2153
+ sentbuildadverbs = ["cautiously", "eagerly", "enthusiastically", "patiently", "swiftly"]
2154
+ sentbuildprep_phrases = ["in the dense forest", "near the babbling brook", "on the sandy beach", "under the starry sky", "within the deep ocean"]
2155
+ sentbuildconjunctions = ["and", "but", "yet", "for", "so"]
2156
+
2157
+ def sentbuildgenerate_quiz_question():
2158
+     structure = random.choice(sentbuildchoices)
2159
+     subject = random.choice(sentbuildsubjects)
2160
+     verb = random.choice(sentbuildverbs)
2161
+     obj = random.choice(sentbuildobjects)
2162
+     adjective = random.choice(sentbuildadjectives)
2163
+     adverb = random.choice(sentbuildadverbs)
2164
+     prep_phrase = random.choice(sentbuildprep_phrases)
2165
+     conjunction = random.choice(sentbuildconjunctions)
2166
+
2167
+     sentence = build_sentence(subject, verb, obj, adjective, adverb, prep_phrase, conjunction, structure)
2168
+     return sentence, "Identify the sentence structure:", gr.Radio(sentbuildchoices, label="Options"), structure
2169
+
2170
+ def sentbuildcheck_answer(selected, correct):
2171
+     if selected == correct:
2172
+         return "Correct!", gr.update(interactive=False)
2173
+     else:
2174
+         return "Incorrect. Please try again.", gr.update(interactive=True)
2175
+
2176
+ #--------------------------------------------------------------------------------------------------------------------------------------------
2177
+
2178
  with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
2179
 gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> Reading comprehension speed through picture based compression (collage), Live Image Subtitles and Listening Comprehension Test - <a href="https://chat.openai.com/g/g-bYMSVlb8y-lingua-link"> -- Lingua Link (Simple GPT for assisting image creation) -- </a> | </div><div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Very slowly figuring out what an AI integrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy paste once and for all? ---- </div> <div style="display: flex; justify-content: center; align-items: center; height: 100%;"> All the APIs from the spaces below need to be treated like RAG, as notes for the LLM to read before providing its answer </div>')
2180
  with gr.Accordion("Some Useful Spaces", open=False):
2181
  with gr.Accordion("Translation or STT HF Spaces/Sites (Click Here to Open) - Use to get rough translations", open=False):
2182
  with gr.Row():
2183
+ linktotranslate = gr.Dropdown(choices=["https://hf-audio-whisper-large-v3.hf.space", "https://pyf98-owsm-v3-demo.hf.space", "https://kadirnar-multilingual-translation.hf.space", "https://geonmo-nllb-translation-demo.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2184
  translatespacebtn = gr.Button("Use the chosen URL to load interface with a translate model")
2185
  translatespace = gr.HTML("Translate Space Chosen will load here")
2186
  translatespacebtn.click(display_website, inputs=linktotranslate, outputs=translatespace)
2187
  with gr.Accordion("Audio Gen HF Spaces/Sites (Click Here to Open)", open=False):
2188
  with gr.Row():
2189
+ linktoaudiogen = gr.Dropdown(choices=["https://facebook-seamless-m4t-v2-large.hf.space", "https://mms-meta-mms.hf.space", "https://coqui-xtts.hf.space", "https://suno-bark.hf.space", "https://mrfakename-metavoice-1b-v0-1.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2190
  audiogenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
2191
  audiogenspace = gr.HTML("Chat Space Chosen will load here")
2192
  audiogenspacebtn.click(display_website, inputs=linktoaudiogen, outputs=audiogenspace)
2193
  with gr.Accordion("Image Gen or Animation HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
2194
  with gr.Row():
2195
+ linktoimagegen = gr.Dropdown(choices=["https://gparmar-img2img-turbo-sketch.hf.space", "https://kadirnar-open-sora.hf.space", "https://bytedance-animatediff-lightning.hf.space", "https://radames-real-time-text-to-image-sdxl-lightning.hf.space", "https://cagliostrolab-animagine-xl-3-1.hf.space", "https://wangfuyun-animatelcm-svd.hf.space", "https://modelscope-transferanything.hf.space", "https://visionmaze-magic-me.hf.space", "https://wangfuyun-animatelcm.hf.space", "https://artgan-diffusion-api.hf.space", "https://multimodalart-stable-cascade.hf.space", "https://ap123-sdxl-lightning.hf.space", "https://google-sdxl.hf.space", "https://guoyww-animatediff.hf.space", "https://segmind-segmind-stable-diffusion.hf.space", "https://simianluo-latent-consistency-model.hf.space", "https://artificialguybr-studio-ghibli-lora-sdxl.hf.space", "https://artificialguybr-pixel-art-generator.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2196
  imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
2197
  imagegenspace = gr.HTML("Chat Space Chosen will load here")
2198
  imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
2199
+ with gr.Accordion("Image Understanding/Vision Conversation HF Spaces/Sites (Click Here to Open)", open=False):
2200
  with gr.Row():
2201
+ linktovisionund = gr.Dropdown(choices=["https://linfanluntan-grounded-sam.hf.space", "https://merve-llava-next.hf.space", "https://badayvedat-llava.hf.space", "https://otter-ai-otterhd-demo.hf.space", "https://adept-fuyu-8b-demo.hf.space", "https://xinyu1205-recognize-anything.hf.space", "https://languagebind-moe-llava.hf.space", "https://vision-cair-minigpt4.hf.space", "https://fffiloni-live-vision.hf.space", "https://ysharma-gemini-pro-vision-chat.hf.space", "https://kvikontent-chatgpt-vision.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2202
  visionundspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
2203
  visionundspace = gr.HTML("Chat Space Chosen will load here")
2204
  visionundspacebtn.click(display_website, inputs=linktovisionund, outputs=visionundspace)
2205
  with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
2206
  with gr.Row():
2207
+ linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://cyzgab-catch-me-if-you-can.hf.space", "https://databricks-dbrx-instruct.hf.space", "https://qwen-qwen1-5-moe-a2-7b-chat-demo.hf.space", "https://stabilityai-stablelm-2-1-6b-zephyr.hf.space", "https://qwen-qwen1-5-72b-chat.hf.space", "https://deepseek-ai-deepseek-coder-7b-instruct.hf.space", "https://01-ai-yi-34b-chat.hf.space", "https://ysharma-zephyr-playground.hf.space", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
2208
 chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left. For lmsys / chat arena copy the link and use a new tab")
2209
  with gr.Accordion("Some prompt ideas", open=False):
2210
  with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
 
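A hedged usage sketch for the new quiz helpers, assumed to run in app.py's namespace (random and gradio are imported at the top of the file, since sentbuildgenerate_quiz_question calls random.choice and sentbuildcheck_answer calls gr.update). Two caveats worth noting: str.capitalize() lowercases everything after the first character, so proper nouns fed into build_sentence would come out mangled (the bundled lowercase word lists avoid this), and sentbuildadjectives mixes in noun phrases like "the mouse", which the quiz tolerates because answers are judged on structure rather than sense:

```python
# "SVOAP" maps to f"{subject} {verb} {obj} {adverb} {prep_phrase}."
print(build_sentence("The curious cat", "chases", "the colorful butterfly",
                     "beautiful", "swiftly", "in the dense forest", "and", "SVOAP"))
# -> The curious cat chases the colorful butterfly swiftly in the dense forest.

sentence, prompt, options_radio, answer = sentbuildgenerate_quiz_question()
print(prompt, sentence, "| correct structure:", answer)
print(sentbuildcheck_answer("SVOAP", answer)[0])  # "Correct!" only when the guess matches
```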
2221
  #-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2222
  with gr.Row():
2223
  with gr.Column(scale=1):
2224
+ gr.HTML(""" <div style="height: 350px; width: 100%; border: 1px solid black; overflow: auto;"> Some useful links <br> <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://openxlab.org.cn/apps'> -- Openxlabs - Huggingface Alternative -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> 
-- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | <a href='https://huggingface.co/spaces/guoyww/AnimateDiff'> -- AnimateDiff: Create an image make a video -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless-m4t-v2-large'> -- Seamless m4t v2 -- </a> | <a href='https://huggingface.co/spaces/Otter-AI/OtterHD-Demo'> -- OtterHD: Multimodal model -- </a> | <a href='https://ai.meta.com/blog/ego-exo4d-video-learning-perception/'> -- Ego-exo4d Multimodal 
dataset -- </a> | <a href='https://imagine.meta.com/'> -- Meta Imagine images (Free) -- </a> | <a href='https://www.mage.space/'> -- Mage Space images (Free) -- </a> | <a href='https://www.bing.com/images/create?FORM=GENILP'> -- Bing Image Creator (Free) -- </a> | <a href='https://jalammar.github.io/'> -- Jay Alammar Blog - Illustrated Transformer, Stable Diffusion and More -- </a> | <a href='https://huggingface.co/spaces/myshell-ai/OpenVoice'> -- OpenVoice - Open Source Voice Clone -- </a> | <a href='https://huggingface.co/spaces/fffiloni/live-vision'> -- Live-Vision HF Space - Live commentary on a video feed demo -- </a> | <a href='https://xenova.github.io/transformers.js/'> -- Transformers JS demo - Xenova (HF) -- </a> | <a href='https://huggingface.co/chat/assistants'> -- Huggingface Assistants -- </a> | <a href='https://huggingface.co/spaces/AP123/SDXL-Lightning'> -- 4-step SDXL Inference through LORA -- </a> | <a href='https://huggingface.co/datasets/HuggingFaceTB/cosmopedia'> -- Cosmopedia - 92 GB synthetic dataset made using Mixtral (25 billion tokens) -- </a> | <a href='https://huggingface.co/spaces/dylanebert/LGM-mini'> -- LGM-mini: image to ply -- </a> | <a href='https://playgroundai-playground-v2-5.hf.space'> -- Playground v2.5 -- </a> | <a href='https://github.com/openai/transformer-debugger'> -- OpenAI - Transformer Debugger -- </a> | <a href='https://huggingface.co/datasets/princeton-nlp/SWE-bench'> -- SWE-bench dataset (Real world github issues) -- </a> | <a href='https://huggingface.co/papers/2402.17764'> -- The Era of 1-bit LLMs - All Large Language Models are in 1.58 Bits -- </a> | <a href='https://github.com/microsoft/unilm/tree/master'> -- Microsoft Repo for AI research (Bitnet andd others will be here) -- </a> | <a href='https://huggingface.co/spaces/cyzgab/catch-me-if-you-can'> -- Realtime response using GroqCloud and live gradio interface -- </a> | <a href='https://console.groq.com/docs/showcase-applications'> -- GroqCloud Application showcase -- </a> | <a href='https://kadirnar-open-sora.hf.space'> -- Open version of Open AI SORA -- </a> | <a href='https://huggingface.co/spaces/mms-meta/MMS'> -- MMS (Meta) - TTS for 1000 languages -- </a> | <a href='https://huggingface.co/pyp1/VoiceCraft'> -- VoiceCraft (Audio Clone Model) -- </a> | <a href='https://huggingface.co/papers/2403.09629'> -- QuietStar Paper (HF) - Models linked -- </a> | <a href='https://huggingface.co/ai21labs/Jamba-v0.1'> -- JAMBA - mamba based 52B with 140K context on one gpu!! -- </a> | <a href='https://huggingface.co/papers/2403.16627'> -- SDXS for realtime generation (upto 100FPS) -- </a> | </div>""")
  with gr.Tabs() as nav1:
  with gr.Tab("Rep - HTML"):
  gr.HTML("UNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you don't know")
 
  gr.HTML("<b>FINAL VERSION = Image placeholder + Merged Images + Side by Side Audio + UNWFWO Reader script + Spotify/Youtube integration in one interface</b> <br>True mastery comes from the relations between each item i.e. how every word relates to every other - repetition in the form of combinatorics - an LLM turns these into full sentences / ideas")
  gr.HTML("Focus = Thinking = Audio = Repetition = This space is just ideas for optimising the audio content - audio of the side by side version -- listen till you can say the foreign text before the audio plays it (the knowledge version is a glossary of vocab you must master before reading)")
  with gr.Accordion("Some Current Incomplete Tests", open=False):
+ # with gr.Group():
+ # gr.HTML("Stable LM 2 Zephyr 1.6B Placeholder - llama-cpp-python issues locally") #Separate space tested
+ # gr.Interface(fn=lambda name: f"Placeholder to talk to Stable LM. Prompt = {name}", inputs="text", outputs="text")
+ # gr.Interface(fn=whisperlocaltts, inputs="file", outputs="text", description="Incomplete - Whisper base Test - Can record and then copy the text for use") #Separate space tested
  gr.HTML('Memorisation by string comparison idea <br><br>Result of prompt chain starting with: Lets say I have the strings "red" and "ppalgan" - how can I guess the second from the first from just spelling (e.g. similar words and distance in the alphabet, ...), how can I use python to do this i.e. output of no matching letters, closest letter to r, then e, then d, a dictionary of letters that look similar e.g. d and p, l and I, a and d etc.')
  gr.Interface(fn=letterbased_guess_word, inputs=["text", "text"], outputs="text", description="Letter-based guess suggestions (one word to one word is the designed use case)")
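  # A hedged sketch (not the actual letterbased_guess_word wired above, which is defined
  # earlier in this file): one plausible per-letter comparison using alphabet distance
  # plus a small lookalike dictionary, as the prompt-chain idea above describes.
  # LOOKALIKES = {"d": "pbq", "p": "dbq", "l": "I", "I": "l", "a": "d"}  # assumption: extend as needed
  # def letter_guess_sketch(known, guess):
  #     report = []
  #     for k, g in zip(known, guess):
  #         if k == g:
  #             report.append(f"{k}: exact match")
  #         elif g in LOOKALIKES.get(k, ""):
  #             report.append(f"{k} vs {g}: lookalike letters")
  #         else:
  #             report.append(f"{k} vs {g}: alphabet distance {abs(ord(k.lower()) - ord(g.lower()))}")
  #     return "\n".join(report)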
  gr.HTML("Side by side reading creator (Google Translate) TODO - Romanised output of non-Roman characters")
 
  nllbtranscpubtnword4word.click(fn=nllbtransctranslationoptionalw4w, inputs=[nllbtranscpulangsrc, nllbtranscpulangdest, nllbtranscpuinput], outputs=[nllbtranscpuOutputword4word, nllbtranscpudetailsword4wordOutput])
  nllbtranscpubtn.click(fn=nllbtransctranslation, inputs=[nllbtranscpulangsrc, nllbtranscpulangdest, nllbtranscpuinput], outputs=[nllbtranscpuOutput, nllbtranscpudetailsOutput])
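  # A hedged sketch (assumption - the real nllbtransctranslation wired above is defined
  # earlier in this file): CPU translation with NLLB-200 via the transformers pipeline is
  # one way to back this wiring. Languages use FLORES-200 codes such as "eng_Latn" or "kor_Hang".
  # from transformers import pipeline
  # nllb_sketch = pipeline("translation", model="facebook/nllb-200-distilled-600M")
  # def nllb_translate_sketch(src_lang, dest_lang, text):
  #     return nllb_sketch(text, src_lang=src_lang, tgt_lang=dest_lang)[0]["translation_text"]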
  gr.Interface(fn=LoadNLTKUDHRText, inputs=NLTKudhr, outputs=["text", "textarea"], description="UDHR as some test texts")
+ with gr.Tab("Beginner - Listen + Read"):
+ gr.Label("Closed Eye Recital per new word | 1 new word a minute while recycling the words from the previous minutes")
+ with gr.Row():
+ with gr.Column(scale=1):
+ gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics - that is why songs are easy: if you have heard one 10 times already, your capacity to anticipate the words is great <br><br> This is where TTS helps, as you are ignoring all words except the words just before the current one <br> <b>The TinyStories dataset is like a graded reader</b> <br>")
+ gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions, "checkbox"], outputs="audio", description="Paste chorus lyrics from below here and use TTS, or make notes to save here (or paste anything)")
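+ # A hedged sketch (assumption - TTSforListeningPractice is defined earlier in this file):
+ # gTTS is one simple way to back this interface; it writes an mp3 that gr.Audio can play.
+ # from gtts import gTTS
+ # def tts_practice_sketch(text, lang="en", slow=False):
+ #     gTTS(text=text, lang=lang, slow=slow).save("ttspractice.mp3")
+ #     return "ttspractice.mp3"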
+ with gr.Accordion("TTS Spaces", open=False):
+ TTSspaceoptions = gr.Dropdown(choices=["https://facebook-seamless-m4t-v2-large.hf.space", "https://mms-meta-mms.hf.space", "https://suno-bark.hf.space", "https://coqui-xtts.hf.space"], label="existing TTS spaces")
+ TTSspaceoptionsbtn = gr.Button("Load a TTS Space")
+ TTSspaceoptionsOut = gr.HTML()
+ TTSspaceoptionsbtn.click(fn=display_website, inputs=TTSspaceoptions, outputs=TTSspaceoptionsOut)
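+ # A hedged sketch (assumption - display_website is defined earlier in this file): embedding
+ # another HF Space only needs an iframe string for the gr.HTML output to render.
+ # def display_website_sketch(url):
+ #     return f'<iframe src="{url}" width="100%" height="1000px"></iframe>'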
+ gr.HTML("<p>The fastest way to learn words is to have your own sound reference --> probably why babies learn fast, as they make random noise</p> <p>If you know the flow of the song you can remember the spelling more easily</p><p>Essentially, if the sounds are repeated or are long notes they are easy to remember</p>")
+ gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste full lyrics to try to find only the chorus lines")
+ gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repetition for TTS practice")
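+ # A hedged sketch (assumption - AutoChorusInvestigator is defined earlier in this file): the
+ # chorus is usually the most repeated block, so counting duplicate lines is a cheap heuristic.
+ # from collections import Counter
+ # def chorus_sketch(lyrics):
+ #     lines = [l.strip() for l in lyrics.splitlines() if l.strip()]
+ #     counts = Counter(lines)
+ #     return "\n".join(l for l in dict.fromkeys(lines) if counts[l] > 1)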
+ with gr.Column(scale=1):
+ gr.HTML("""Reading - Caption images (SD/DALL-E) <br> <a href='https://unsplash.com/'> -- Unsplash - free images -- </a> | <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> -- Huggingface CLIP-Interrogator Space -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- CLIP Interrogator 2 -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- Tag2Text is faster than CLIP -- </a> | <br> <a href='https://huggingface.co/spaces/bkhmsi/Word-To-Image'> -- Transform word to an image -- </a> | <a href='https://huggingface.co/spaces/microsoft/Promptist'> -- Promptist (Microsoft) -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with CLIP -- </a> """)
+ with gr.Accordion("RAM/Tag2Text Space - Create Tags here and Copy paste", open=False):
+ RAMSpaceLink = gr.Textbox("https://xinyu1205-recognize-anything.hf.space")
+ RAMSpacetest = gr.HTML("")
+ RAMSpacetestbtn = gr.Button('Load Space')
+ RAMSpacetestbtn.click(display_website, RAMSpaceLink, RAMSpacetest)
+ with gr.Accordion("Grounded SAM Space Test", open=False):
+ SAMSpaceLink = gr.Textbox("https://linfanluntan-grounded-sam.hf.space")
+ SAMSpacetest = gr.HTML("")
+ SAMSpacetestbtn = gr.Button('Load Space')
+ SAMSpacetestbtn.click(display_website, SAMSpaceLink, SAMSpacetest)
+ gr.HTML("Use Shift+Enter to put text on new lines if the text doesn't fit <br> If there is an error you have to remove the foreign letters and replace them with Roman ones")
+ gr.Interface(fn=add_text_to_image, inputs=["image", "text"], outputs="image", description="Create annotated images (can create using Stable Diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
+ with gr.Tab("Basic Sentence Builder"):
+ with gr.Tab("SVO"):
+ gr.Markdown(
+ """
+ ## Subject-Verb-Object (SVO) Order
+
+ Some languages that follow the SVO order:
+ - English
+ - Spanish
+ - French
+ - Italian
+ - Chinese
+ """
+ )
+ svo_subject = gr.Dropdown(sentbuildsubjects, label="Subject")
+ svo_verb = gr.Dropdown(sentbuildverbs, label="Verb")
+ svo_object = gr.Dropdown(sentbuildobjects, label="Object")
+ svo_output = gr.Textbox(label="Sentence (SVO)")
+ svo_btn = gr.Button("Generate SVO Sentence")
+ svo_btn.click(build_sentence_basic, inputs=[svo_subject, svo_verb, svo_object, gr.State("SVO")], outputs=svo_output)
+
+ with gr.Tab("SOV"):
+ gr.Markdown(
+ """
+ ## Subject-Object-Verb (SOV) Order
+
+ Some languages that follow the SOV order:
+ - Japanese
+ - Korean
+ - Turkish
+ - Hindi
+ - Latin
+ """
+ )
+ sov_subject = gr.Dropdown(sentbuildsubjects, label="Subject")
+ sov_object = gr.Dropdown(sentbuildobjects, label="Object")
+ sov_verb = gr.Dropdown(sentbuildverbs, label="Verb")
+ sov_output = gr.Textbox(label="Sentence (SOV)")
+ sov_btn = gr.Button("Generate SOV Sentence")
+ sov_btn.click(build_sentence_basic, inputs=[sov_subject, sov_verb, sov_object, gr.State("SOV")], outputs=sov_output)
+
+ with gr.Tab("VSO"):
+ gr.Markdown(
+ """
+ ## Verb-Subject-Object (VSO) Order
+
+ Some languages that follow the VSO order:
+ - Arabic
+ - Hebrew
+ - Irish
+ - Welsh
+ - Samoan
+ """
+ )
+ vso_verb = gr.Dropdown(sentbuildverbs, label="Verb")
+ vso_subject = gr.Dropdown(sentbuildsubjects, label="Subject")
+ vso_object = gr.Dropdown(sentbuildobjects, label="Object")
+ vso_output = gr.Textbox(label="Sentence (VSO)")
+ vso_btn = gr.Button("Generate VSO Sentence")
+ vso_btn.click(build_sentence_basic, inputs=[vso_subject, vso_verb, vso_object, gr.State("VSO")], outputs=vso_output)
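+ # A hedged sketch (assumption - build_sentence_basic is defined earlier in this file): all
+ # three word-order tabs above can share one function that just permutes the three slots.
+ # def build_sentence_basic_sketch(subject, verb, obj, order):
+ #     slots = {"S": subject, "V": verb, "O": obj}
+ #     return " ".join(slots[c] for c in order) + "."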
+
+ with gr.Tab("Complex Sentence Builder"):
+ gr.Markdown(
+ """
+ ## Complex Sentence Builder
+
+ Create intricate sentences using various grammatical components.
+ """
+ )
+ complex_subject = gr.Dropdown(sentbuildsubjects, label="Subject")
+ complex_verb = gr.Dropdown(sentbuildverbs, label="Verb")
+ complex_object = gr.Dropdown(sentbuildobjects, label="Object")
+ complex_adjective = gr.Dropdown(sentbuildadjectives, label="Adjective")
+ complex_adverb = gr.Dropdown(sentbuildadverbs, label="Adverb")
+ complex_prep_phrase = gr.Dropdown(sentbuildprep_phrases, label="Prepositional Phrase")
+ complex_conjunction = gr.Dropdown(sentbuildconjunctions, label="Conjunction")
+ complex_structure = gr.Radio(sentbuildchoices, label="Structure")
+ complex_output = gr.Textbox(label="Complex Sentence")
+ complex_btn = gr.Button("Generate Complex Sentence")
+ complex_btn.click(build_sentence, inputs=[complex_subject, complex_verb, complex_object, complex_adjective, complex_adverb, complex_prep_phrase, complex_conjunction, complex_structure], outputs=complex_output)
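+ # A hedged sketch (assumption - build_sentence and the sentbuildchoices labels are defined
+ # earlier in this file; the structure name below is hypothetical): the optional parts slot
+ # into a template chosen by the Structure radio.
+ # def build_sentence_sketch(subj, verb, obj, adj, adv, prep, conj, structure):
+ #     core = f"{subj} {adv} {verb} the {adj} {obj} {prep}"
+ #     if structure == "Compound":  # hypothetical structure label
+ #         return f"{core}, {conj} {subj} {verb} the {obj}."
+ #     return core + "."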
+
+ with gr.Tab("Quiz"):
+ gr.Markdown(
+ """
+ ## Sentence Order Quiz
+
+ Test your knowledge of sentence orders by identifying the correct order for each given sentence.
+ """
+ )
+ quiz_sentence = gr.Textbox(label="Sentence")
+ quiz_question = gr.Textbox(label="Question")
+ quiz_choices = gr.Radio(["SVO", "SOV", "VSO"], label="Options")
+ quiz_answer = gr.Textbox(label="Answer")
+ quiz_feedback = gr.Textbox(label="Feedback")
+
+ quiz_button = gr.Button("Generate Quiz Question")
+ quiz_button.click(sentbuildgenerate_quiz_question, inputs=[], outputs=[quiz_sentence, quiz_question, quiz_choices, quiz_answer])
+
+ submit_button = gr.Button("Submit Answer")
+ submit_button.click(sentbuildcheck_answer, inputs=[quiz_choices, quiz_answer], outputs=[quiz_feedback, submit_button])
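+ # A hedged sketch (assumption - sentbuildcheck_answer is defined earlier in this file):
+ # compare the chosen order with the stored answer; gr.update toggles the submit button.
+ # def check_answer_sketch(choice, answer):
+ #     if choice == answer:
+ #         return "Correct!", gr.update(interactive=False)
+ #     return f"Not quite - the answer was {answer}.", gr.update(interactive=True)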
  with gr.Tab("Youtube Subs Listening Comprehension"):
  gr.HTML("<a href='https://www.lingq.com/en/'>State Management Solution for Words --> Find LingQ Here --> https://www.lingq.com/en/</a>")
  with gr.Tab("New - Learning with Youtube"):
 
  gr.Text("Text to Closed Class + Adjectives + Punctuation, or Noun + Verb + Punctuation")
  with gr.Tab("Audio - Only English thoughts as practice"):
  gr.HTML("For audio, the most productive practice is real-time recall of your native language (where your full reasoning ability will always be) <br><hr> Find-and-replace the new lines of the foreign text with full stops or | to get a per-word translation")
+ gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions2], outputs="audio", description="Paste only English words in foreign word order and then keep removing words from this to practise effectively")
  with gr.Tab("Speed through Imagery"):
  gr.HTML("<a href='https://chat.openai.com/g/g-bYMSVlb8y-lingua-link'> -- Lingua Link (Simple GPT for assisting image creation) -- </a> <br>Use with the placeholder generator tab below <br> Best for this is 2 nouns as one phrase i.e. nouns as adjectives, and then you can add a verb (1000 of those will take you far)")
  with gr.Accordion("More Details - conversation example", open=False):
 
  gr.Interface(fn=onlyplurals, inputs=["text"], outputs=["text"], description="Only plurals = optimal concepts to learn first, as LT work = repetition")
  gr.Interface(fn=create_acronym_map, inputs="textbox", outputs="textbox", description="Acronyms")
  gr.Interface(fn=keep_nouns, inputs="textbox", outputs="textbox", description="Nouns only")
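  # A hedged sketch (assumption - keep_nouns and a module-level spaCy pipeline `nlp` are
  # defined earlier in this file): POS tagging makes the nouns-only reduction a one-liner.
  # def keep_nouns_sketch(text):
  #     return " ".join(tok.text for tok in nlp(text) if tok.pos_ in ("NOUN", "PROPN"))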
+ with gr.Tab("Placeholder Generation"):
  gr.HTML("Placeholder for every image of each sentence - Good for ChatGPT + DALL-E (the first 16 characters are part of the filename if you get an error)")
+ gr.Interface(fn=lambda s: ".".join(s.split()), inputs=["text"], outputs=["text"], description="Use full stops before input below to make a word-level version")
  with gr.Row():
  with gr.Column(scale=4):
  imageplaceholderinput = gr.TextArea(placeholder="Enter text and get a line-by-line (stand-in for sentences for now) placeholder for the image associated with the text")
 
  with gr.Column(scale=2):
  imageplaceholdertextoutput = gr.Code("The code for the HTML created will come here")
  imageplaceholderbtn.click(fn=imagebasedreading, inputs=[imageplaceholderinput], outputs=[imageplaceholderdownload, imageplaceholderoutput, imageplaceholdertextoutput])
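  # A hedged sketch (assumption - imagebasedreading is defined earlier in this file and also
  # returns a downloadable file): one <img> placeholder per line, named by the first 16
  # characters as the HTML above warns.
  # def imagebasedreading_sketch(text):
  #     blocks = [f'<img src="{line[:16]}.png" alt="{line}"> <p>{line}</p>'
  #               for line in text.splitlines() if line.strip()]
  #     return "\n".join(blocks)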
+ with gr.Tab("Word level Placeholder Generation"):
  gr.HTML("Placeholder for every image of each word - Good for ChatGPT + DALL-E (the first 16 characters are part of the filename if you get an error)")
  with gr.Row():
  with gr.Column(scale=4):
 
  <br><br> # https://docs.aws.amazon.com/polly/latest/dg/ref-phoneme-tables-shell.html
  <br><br> # https://docs.aws.amazon.com/polly/latest/dg/ph-table-english-za.html
  <br><br> # https://docs.aws.amazon.com/polly/latest/dg/ph-table-korean.html""")
  #with gr.Tab("Transcribe - RASMUS Whisper"):
  #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
  with gr.Tab("Beginner - Reading Assistant + Unknown Tracker"):
 
  """)

  with gr.Tab("Transition is the end goal (SOV, SVO, VSO)"):
+ gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text", description="Chunks creator")
  with gr.Row():
  with gr.Column():
  gr.Interface(fn=AutoSyllablePractice, inputs="text", outputs="text", description="One Word At A Time | Audio Spelling Practice using vowels as the separator")
 
  gr.Textbox(label='Use this text to hold translations of the SQL rows in the above linked dataset (A kind of what I say vs what I want)')

+ lliface.queue().launch(share=True) #docker #(inbrowser="true") #colab
+
+ #httpcore and googletrans seem to be the cause of all my bugs ---> these are packages to watch
+ #not pinning the exact package versions from your local environment will lead to problems in the future when backwards compatibility is not maintained