Akshayram1 committed on
Commit 1761d18
1 Parent(s): a22baae

Create app.py

Files changed (1)
app.py +68 -0
app.py ADDED
@@ -0,0 +1,68 @@
import gradio as gr
import subprocess
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

# import os
# import random
# from gradio_client import Client


# Install flash-attn at runtime, skipping the CUDA build step (common Spaces workaround).
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

# Initialize the Florence-2 model and processor on GPU if available.
device = "cuda" if torch.cuda.is_available() else "cpu"
florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)

# api_key = os.getenv("HF_READ_TOKEN")

def generate_caption(image):
    """Generate a detailed caption for an image using Florence-2."""
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)

    inputs = florence_processor(text="<MORE_DETAILED_CAPTION>", images=image, return_tensors="pt").to(device)
    generated_ids = florence_model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=1024,
        early_stopping=False,
        do_sample=False,
        num_beams=3,
    )
    # Keep special tokens so post_process_generation can parse the task output.
    generated_text = florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed_answer = florence_processor.post_process_generation(
        generated_text,
        task="<MORE_DETAILED_CAPTION>",
        image_size=(image.width, image.height)
    )
    prompt = parsed_answer["<MORE_DETAILED_CAPTION>"]
    print("\n\nGeneration completed!: " + prompt)
    return prompt
    # yield prompt, None
    # image_path = generate_image(prompt, random.randint(0, 4294967296))
    # yield prompt, image_path

# def generate_image(prompt, seed=42, width=1024, height=1024):
#     try:
#         result = Client("KingNish/Realtime-FLUX", hf_token=api_key).predict(
#             prompt=prompt,
#             seed=seed,
#             width=width,
#             height=height,
#             api_name="/generate_image"
#         )
#         # Extract the image path from the result tuple
#         image_path = result[0]
#         return image_path
#     except Exception as e:
#         raise Exception(f"Error generating image: {str(e)}")

# Gradio UI: image in, generated caption/prompt out.
io = gr.Interface(
    generate_caption,
    inputs=[gr.Image(label="Input Image")],
    outputs=[
        gr.Textbox(label="Output Prompt", lines=2, show_copy_button=True),
        # gr.Image(label="Output Image")
    ],
)
io.launch(debug=True)
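
For quick local testing without launching the Gradio UI, the captioning function can also be called directly on a PIL image. This is a minimal sketch, assuming app.py's definitions have already been loaded (for example in an interactive session) and that a local file named example.jpg exists; the filename is illustrative and not part of this commit.

from PIL import Image

img = Image.open("example.jpg").convert("RGB")  # example.jpg is a placeholder path
caption = generate_caption(img)                 # returns the <MORE_DETAILED_CAPTION> text
print(caption)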