jvedsaqib committed
Commit ba6748c · Verified · 1 Parent(s): ed0ee75

Update app.py

Files changed (1)
  1. app.py +2 -33
app.py CHANGED
@@ -1,6 +1,5 @@
 import os
 from flask import Flask, render_template, request
-from openai import OpenAI
 from transformers import BlipProcessor, BlipForConditionalGeneration
 from PIL import Image
 
@@ -13,9 +12,6 @@ os.makedirs(UPLOAD_FOLDER, exist_ok=True)
 processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
-# OpenAI client
-client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-
 def generate_caption(image_path):
     """Generate caption from image using BLIP."""
     raw_image = Image.open(image_path).convert("RGB")
@@ -23,38 +19,15 @@ def generate_caption(image_path):
     out = model.generate(**inputs, max_new_tokens=50)
     return processor.decode(out[0], skip_special_tokens=True)
 
-def process_with_openai(user_message, caption=None):
-    """Send user + image caption to OpenAI API."""
-    context = ""
-    if caption:
-        context += f"The user uploaded an image. BLIP caption: '{caption}'.\n"
-    if user_message:
-        context += f"User message: {user_message}\n"
-
-    if not context.strip():
-        return "Please enter a message or upload an image."
-
-    response = client.chat.completions.create(
-        model="gpt-4o-mini",
-        messages=[
-            {"role": "system", "content": "You are FUTURE_ON AI assistant. Combine image info + text queries."},
-            {"role": "user", "content": context}
-        ]
-    )
-    return response.choices[0].message.content.strip()
-
 @app.route("/", methods=["GET", "POST"])
 @app.route("/Query.html", methods=["GET", "POST"])
 def query():
     filename = None
-    user_message = None
-    bot_response = None
+    caption = None
 
     if request.method == "POST":
-        user_message = request.form.get("message-input")
         file = request.files.get("file-input")
 
-        caption = None
         if file and file.filename != "":
             filepath = os.path.join(UPLOAD_FOLDER, file.filename)
             file.save(filepath)
@@ -62,13 +35,9 @@ def query():
             # BLIP generates caption
             caption = generate_caption(filepath)
 
-            # Pass text + BLIP caption to OpenAI
-            bot_response = process_with_openai(user_message, caption)
-
     return render_template("query.html",
                            filename=filename,
-                           user_message=user_message,
-                           bot_response=bot_response)
+                           bot_response=caption)
 
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
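With the OpenAI round-trip removed, the BLIP caption is handed to the template directly as bot_response. A minimal standalone sketch of the captioning path that remains, mirroring the code above (sample.jpg is a hypothetical local test image):

    from PIL import Image
    from transformers import BlipProcessor, BlipForConditionalGeneration

    # Same checkpoint the app loads at startup
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

    # Preprocess the image and generate a short caption, as in generate_caption()
    raw_image = Image.open("sample.jpg").convert("RGB")
    inputs = processor(raw_image, return_tensors="pt")
    out = model.generate(**inputs, max_new_tokens=50)
    print(processor.decode(out[0], skip_special_tokens=True))

The processor handles both the image preprocessing and the token decoding, so no separate tokenizer setup is needed.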
 
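To smoke-test the changed route, a hedged sketch using requests (assumes the app is running locally via python app.py on port 7860; sample.jpg is a placeholder test image):

    import requests

    # POST an image to the upload route; "file-input" is the form field the view reads.
    with open("sample.jpg", "rb") as f:
        resp = requests.post(
            "http://localhost:7860/",
            files={"file-input": ("sample.jpg", f, "image/jpeg")},
        )

    print(resp.status_code)  # 200 if query.html rendered
    # The rendered HTML now embeds the BLIP caption as bot_response.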