VinitT committed
Commit 2f43b7c (verified)
1 Parent(s): 1ae2aba

Update app.py

Files changed (1)
  1. app.py +4 -17
app.py CHANGED
@@ -1,12 +1,11 @@
 import streamlit as st
-from transformers import AutoProcessor, Qwen2VLForConditionalGeneration, AutoConfig
+from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
 from PIL import Image
 import torch
 
-# Load the processor and model configuration
+# Load the processor and model directly
 processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
-config = AutoConfig.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
-model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", config=config)
+model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
 
 # Streamlit app
 st.title("Image Description Generator")
@@ -37,10 +36,6 @@ if uploaded_file is not None:
         messages, tokenize=False, add_generation_prompt=True
     )
 
-    # Debugging: Display the generated text
-    st.write("Generated text for processing:")
-    st.write(text)
-
     # Pass the image to the processor
     inputs = processor(
         text=[text],
@@ -50,10 +45,6 @@ if uploaded_file is not None:
     )
     inputs = inputs.to("cpu")
 
-    # Debugging: Display the inputs
-    st.write("Inputs for the model:")
-    st.write(inputs)
-
     # Inference: Generation of the output
     generated_ids = model.generate(**inputs, max_new_tokens=128)
     generated_ids_trimmed = [
@@ -63,9 +54,5 @@ if uploaded_file is not None:
         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
 
-    # Debugging: Display the raw output text
-    st.write("Raw output text:")
-    st.write(output_text)
-
     st.write("Description:")
-    st.write(output_text[0])
+    st.write(output_text)
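
For context, here is a minimal sketch of the simplified loading and inference path after this commit, with the Streamlit UI omitted. from_pretrained resolves the model configuration from the checkpoint on its own, which is why the separate AutoConfig step could be dropped. The image path and prompt below are illustrative placeholders, not part of the app.

# Minimal sketch of the post-commit loading and inference path.
# "example.jpg" and the prompt are placeholders for illustration only.
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

# from_pretrained loads the model config from the checkpoint itself,
# so no explicit AutoConfig step is required.
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")

image = Image.open("example.jpg")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Build the chat prompt and bundle it with the image for the model.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[text], images=[image], return_tensors="pt").to("cpu")

# Generate, then strip the prompt tokens from each sequence before decoding.
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text[0])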