Harshveer committed
Commit 85aab5e · verified · 1 Parent(s): bfefae6

Update app.py

Files changed (1)
1. app.py +27 -13
app.py CHANGED
@@ -39,21 +39,35 @@ def respond(
     model = genai.GenerativeModel("gemini-pro-vision")
     ## for image
     response = model.generate_content([messages, img])
-    print (response)
-    return response
-    response = ""
+    try:
+        response = model.generate_content([custom_prompt, img])
+        if not response or not response.text:
+            return "No valid response received. The response might have been blocked."
+
+        # Formatting the response
+        formatted_response = ""
+        for line in response.text.split("\n"):
+            if line.strip().endswith(":"):
+                formatted_response += f"**{line.strip()}**\n"
+            else:
+                formatted_response += line + "\n"
+
+        return formatted_response
+    except ValueError as e:
+        return f"Error in generating response: {e}"
+    # response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
+    # for message in client.chat_completion(
+    #     messages,
+    #     max_tokens=max_tokens,
+    #     stream=True,
+    #     temperature=temperature,
+    #     top_p=top_p,
+    # ):
+    #     token = message.choices[0].delta.content
 
-        response += token
-        yield response
+    #     response += token
+    #     yield response
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
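
For context, here is a minimal standalone sketch of the pattern this commit introduces: calling the Gemini vision model through the `google.generativeai` SDK, guarding against blocked or empty responses, and bolding heading-like lines so the output renders cleanly as Markdown. The `GOOGLE_API_KEY` environment variable, the `format_markdown` and `describe_image` helper names, and the PIL-based image loading are assumptions for illustration and are not part of the commit itself.

```python
# Sketch of the commit's new response path, assuming the google-generativeai
# package is installed and GOOGLE_API_KEY is set in the environment.
import os

import google.generativeai as genai
from PIL import Image

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])


def format_markdown(text: str) -> str:
    # Mirror the commit's formatting loop: bold any line that looks like a
    # section heading (i.e. ends with ":"), pass other lines through as-is.
    formatted = ""
    for line in text.split("\n"):
        if line.strip().endswith(":"):
            formatted += f"**{line.strip()}**\n"
        else:
            formatted += line + "\n"
    return formatted


def describe_image(custom_prompt: str, image_path: str) -> str:
    # Hypothetical wrapper around the same calls the commit makes in respond().
    model = genai.GenerativeModel("gemini-pro-vision")
    img = Image.open(image_path)
    try:
        response = model.generate_content([custom_prompt, img])
        # A response can come back without text when safety filters block it.
        if not response or not response.text:
            return "No valid response received. The response might have been blocked."
        return format_markdown(response.text)
    except ValueError as e:
        # The SDK raises ValueError from response.text when no candidate
        # text is available (e.g. the candidate was blocked).
        return f"Error in generating response: {e}"


if __name__ == "__main__":
    print(describe_image("Describe this image:", "example.jpg"))
```

The `except ValueError` branch matches how the SDK signals blocked output: accessing `response.text` raises `ValueError` when the response carries no candidate text, so the commit's handler returns a readable error string instead of crashing the Gradio app.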