Keyven committed on
Commit
75992d9
·
1 Parent(s): dfc4fa8

Update response function

Browse files
Files changed (1) hide show
  1. app.py +26 -10
app.py CHANGED
@@ -46,9 +46,13 @@ def format_text(text):
46
  text = "".join(lines)
47
  return text
48
 
49
- def get_chat_response(chatbot, task_history):
 
50
  """Generate a response using the model."""
 
 
51
  history_cp = copy.deepcopy(task_history)
 
52
 
53
  history_filter = []
54
  pic_idx = 1
@@ -64,16 +68,28 @@ def get_chat_response(chatbot, task_history):
64
  pre = ""
65
  history, message = history_filter[:-1], history_filter[-1][0]
66
 
67
- inputs = tokenizer.encode_plus(message, return_tensors='pt')
68
- inputs = {k: v.to(model.device) for k, v in inputs.items()} # Ensure inputs are on the same device as the model
69
- outputs = model.generate(inputs['input_ids'], max_length=150, num_beams=4, length_penalty=2.0, early_stopping=True)
70
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
71
-
72
- task_history.append((message, response))
73
- chatbot.append((format_text(message), format_text(response)))
 
 
 
 
 
 
 
 
 
 
74
 
75
- return chatbot, task_history
 
76
 
 
77
 
78
 
79
  def handle_text_input(history, task_history, text):
@@ -126,7 +142,7 @@ css = '''
126
  '''
127
 
128
  with gr.Blocks(css=css) as demo:
129
- gr.Markdown("# Qwen-VL-Chat Bot")
130
  gr.Markdown(
131
  "## Developed by Keyvan Hardani (Keyvven on [Twitter](https://twitter.com/Keyvven))\n"
132
  "Special thanks to [@Artificialguybr](https://twitter.com/artificialguybr) for the inspiration from his code.\n"
 
46
  text = "".join(lines)
47
  return text
48
 
49
+
50
+ def get_chat_response(chatbot, task_history, model, tokenizer):
51
  """Generate a response using the model."""
52
+ chat_query = chatbot[-1][0]
53
+ query = task_history[-1][0]
54
  history_cp = copy.deepcopy(task_history)
55
+ full_response = ""
56
 
57
  history_filter = []
58
  pic_idx = 1
 
68
  pre = ""
69
  history, message = history_filter[:-1], history_filter[-1][0]
70
 
71
+ response, history = model.chat(tokenizer, message, history=history)
72
+ image = tokenizer.draw_bbox_on_latest_picture(response, history)
73
+ if image is not None:
74
+ temp_dir = secrets.token_hex(20)
75
+ temp_dir = Path("/tmp") / temp_dir
76
+ temp_dir.mkdir(exist_ok=True, parents=True)
77
+ name = f"tmp{secrets.token_hex(5)}.jpg"
78
+ filename = temp_dir / name
79
+ image.save(str(filename))
80
+ chatbot[-1] = (chat_query, (str(filename),))
81
+ chat_response = response.replace("<ref>", "")
82
+ chat_response = chat_response.replace(r"</ref>", "")
83
+ if chat_response != "":
84
+ chatbot.append((None, chat_response))
85
+ else:
86
+ chatbot[-1] = (chat_query, response)
87
+ full_response = response # modified this line to ensure full_response is not empty
88
 
89
+ task_history.append((message, full_response))
90
+ chatbot.append((format_text(message), format_text(full_response)))
91
 
92
+ return chatbot, task_history
93
 
94
 
95
  def handle_text_input(history, task_history, text):
 
142
  '''
143
 
144
  with gr.Blocks(css=css) as demo:
145
+ gr.Markdown("# Qwen-VL Multimodal-Vision-Insight")
146
  gr.Markdown(
147
  "## Developed by Keyvan Hardani (Keyvven on [Twitter](https://twitter.com/Keyvven))\n"
148
  "Special thanks to [@Artificialguybr](https://twitter.com/artificialguybr) for the inspiration from his code.\n"