#!/usr/bin/env python
from __future__ import annotations
import gradio as gr
# from model import AppModel
# Shown to users while the Space is offline for maintenance.
MAINTENANCE_NOTICE = 'Sorry, this space is under maintenance, and will be restored soon. '

# Markdown header rendered at the top of the demo UI.
DESCRIPTION = '''# <a href="https://github.com/THUDM/CogVideo">CogVideo</a>
Currently, this Space only supports the first stage of the CogVideo pipeline due to hardware limitations.
The model accepts only Chinese as input.
By checking the "Translate to Chinese" checkbox, the results of English to Chinese translation with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) will be used as input.
Since the translation model may mistranslate, you may want to use the translation results from other translation services.
'''

# Attribution note for the upstream demo repository this app is adapted from.
NOTES = 'This app is adapted from <a href="https://github.com/hysts/CogVideo_demo">https://github.com/hysts/CogVideo_demo</a>. It would be recommended to use the repo if you want to run the app yourself.'

# Visitor-counter badge embedded at the bottom of the page.
FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=THUDM.CogVideo" />'
def main():
    """Build and launch the Gradio demo.

    The Space is currently in maintenance mode: the full CogVideo UI
    (text/seed/image inputs wired to ``AppModel.run_with_translation``)
    is disabled, and only :data:`MAINTENANCE_NOTICE` is rendered.
    The previous interactive layout lives in the git history; restore it
    from there (and re-enable the ``AppModel`` import at the top of the
    file) when the Space comes back online.
    """
    # NOTE(review): the original body carried the disabled UI inside a
    # no-op triple-quoted string literal that referenced an undefined
    # `model` variable — removed here, since a discarded string has no
    # runtime effect and dead code is better kept in version control.
    with gr.Blocks(css='style.css') as demo:
        gr.Markdown(MAINTENANCE_NOTICE)
    demo.launch()


if __name__ == '__main__':
    main()