prithivMLmods committed on
Commit
bab3303
·
verified ·
1 Parent(s): b7ae39e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -6
app.py CHANGED
@@ -24,7 +24,7 @@ from transformers.image_utils import load_image
24
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
25
 
26
  DESCRIPTION = """
27
- # Gen Vision 💬
28
  """
29
 
30
  css = '''
@@ -47,10 +47,6 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
47
 
48
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
49
 
50
- # ------------------------------
51
- # Text Generation Models & TTS
52
- # ------------------------------
53
-
54
  # Load text-only model and tokenizer for text generation
55
  model_id = "prithivMLmods/FastThink-0.5B-Tiny"
56
  tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -329,7 +325,7 @@ demo = gr.ChatInterface(
329
  description=DESCRIPTION,
330
  css=css,
331
  fill_height=True,
332
- textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
333
  stop_btn="Stop Generation",
334
  multimodal=True,
335
  )
 
24
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
25
 
26
  DESCRIPTION = """
27
+ # Gen Vision ⚛️
28
  """
29
 
30
  css = '''
 
47
 
48
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
49
 
 
 
 
 
50
  # Load text-only model and tokenizer for text generation
51
  model_id = "prithivMLmods/FastThink-0.5B-Tiny"
52
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
325
  description=DESCRIPTION,
326
  css=css,
327
  fill_height=True,
328
+ textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple", placeholder="scroll down examples to explore more art styles"),
329
  stop_btn="Stop Generation",
330
  multimodal=True,
331
  )