Rathapoom committed
Commit 30beb89 · verified · 1 Parent(s): e495bc7

Update app.py

Files changed (1)
  app.py +23 -34
app.py CHANGED
@@ -4,65 +4,52 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from PIL import Image
 import requests
 import gradio as gr
-import spaces  # Import Hugging Face Spaces package
+import spaces
 
 # Load model and tokenizer
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
 model_name = 'scb10x/llama-3-typhoon-v1.5-8b-instruct-vision-preview'
 
-@spaces.GPU(duration=60)  # Decorate the function to dynamically request and release GPU
+@spaces.GPU(duration=120)  # Use the GPU for 120 seconds
 def load_model():
     model = AutoModelForCausalLM.from_pretrained(
         model_name,
-        revision='main',  # Or a specific commit hash
-        torch_dtype=torch.float16 if device == 'cuda' else torch.float32,
+        torch_dtype=torch.float16,
         device_map='auto',
         trust_remote_code=True
     )
-
     return model
 
 model = load_model()
-
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
-def prepare_inputs(text, image, device='cuda'):
+def prepare_inputs(text, image):
     messages = [
         {"role": "system", "content": "You are a helpful vision-capable assistant who eagerly converses with the user in their language."},
+        {"role": "user", "content": f"<image>\n{text}"}
     ]
-    messages.append({"role": "user", "content": "<|image|>\n" + text})
-
-    inputs_formatted = tokenizer.apply_chat_template(
-        messages,
-        add_generation_prompt=True,
-        tokenize=False
-    )
-
-    text_chunks = [tokenizer(chunk).input_ids for chunk in inputs_formatted.split('<|image|>')]
-    input_ids = torch.tensor(text_chunks[0] + [-200] + text_chunks[1][1:], dtype=torch.long).unsqueeze(0).to(device)
-    attention_mask = torch.ones_like(input_ids).to(device)
-
-    return input_ids, attention_mask
+    inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
+    return inputs
 
-@spaces.GPU(duration=60)  # Decorate the function for GPU use
+@spaces.GPU(duration=60)  # Use the GPU for 60 seconds
 def predict(prompt, img_url):
     try:
-        image = Image.open(requests.get(img_url, stream=True).raw)
-        image_tensor = model.process_images([image], model.config).to(dtype=model.dtype, device=device)
-
-        input_ids, attention_mask = prepare_inputs(prompt, image, device=device)
-
+        image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
+        image = image.resize((model.config.image_size, model.config.image_size))
+        image_tensor = model.preprocess_images([image]).to(model.device)
+
+        inputs = prepare_inputs(prompt, image)
+
         output_ids = model.generate(
-            input_ids,
+            inputs,
             images=image_tensor,
             max_new_tokens=100,
-            use_cache=True,
+            do_sample=True,
             temperature=0.2,
             top_p=0.2,
             repetition_penalty=1.0
         )[0]
 
-        result = tokenizer.decode(output_ids[input_ids.shape[1]:], skip_special_tokens=True).strip()
+        result = tokenizer.decode(output_ids[inputs.shape[1]:], skip_special_tokens=True).strip()
         return result
     except Exception as e:
         return str(e)
@@ -72,10 +59,12 @@ inputs = [
     gr.Textbox(label="Prompt", placeholder="Ask about the food in the image"),
     gr.Textbox(label="Image URL", placeholder="Enter an image URL")
 ]
-
 outputs = gr.Textbox(label="Generated Output")
 
 gr.Interface(
-    fn=predict, inputs=inputs, outputs=outputs, title="Food Image AI Assistant",
+    fn=predict,
+    inputs=inputs,
+    outputs=outputs,
+    title="Food Image AI Assistant",
     description="This model can analyze food images and answer questions about them."
 ).launch()
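For context, the removed prepare_inputs followed the LLaVA-style convention of splicing a sentinel image-token id (-200) into the text token stream, with the model substituting the projected image features at that position during generation. A minimal sketch of that splicing pattern, reconstructed from the removed lines; splice_image_token is an illustrative name, not part of the original file:

import torch

# LLaVA-style convention from the removed code: -200 marks the image position.
IMAGE_TOKEN_INDEX = -200

def splice_image_token(tokenizer, rendered_prompt):
    # Tokenize the text on either side of the textual placeholder...
    chunks = [tokenizer(chunk).input_ids for chunk in rendered_prompt.split('<|image|>')]
    # ...then rejoin with the sentinel id in between, dropping the BOS token
    # the tokenizer prepends to the second chunk.
    ids = chunks[0] + [IMAGE_TOKEN_INDEX] + chunks[1][1:]
    return torch.tensor(ids, dtype=torch.long).unsqueeze(0)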
 
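Once the Space is running, the Interface can also be exercised programmatically with gradio_client; the Space id below is a placeholder assumption, and the two positional arguments map to the Prompt and Image URL textboxes in order:

from gradio_client import Client

# Placeholder Space id (assumption); substitute the real owner/space-name.
client = Client("Rathapoom/food-image-assistant")

# Positional args follow the Interface's input order: prompt, then image URL.
answer = client.predict(
    "What dish is this, and what are its main ingredients?",
    "https://example.com/pad-thai.jpg",
    api_name="/predict",
)
print(answer)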