Update app.py
app.py CHANGED
@@ -39,21 +39,35 @@ def respond(
     model = genai.GenerativeModel("gemini-pro-vision")
     ## for image
     response = model.generate_content([messages, img])
-
-
-
+    try:
+        response = model.generate_content([custom_prompt, img])
+        if not response or not response.text:
+            return "No valid response received. The response might have been blocked."
+
+        # Formatting the response
+        formatted_response = ""
+        for line in response.text.split("\n"):
+            if line.strip().endswith(":"):
+                formatted_response += f"**{line.strip()}**\n"
+            else:
+                formatted_response += line + "\n"
+
+        return formatted_response
+    except ValueError as e:
+        return f"Error in generating response: {e}"
+    # response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
+    # for message in client.chat_completion(
+    #     messages,
+    #     max_tokens=max_tokens,
+    #     stream=True,
+    #     temperature=temperature,
+    #     top_p=top_p,
+    # ):
+    #     token = message.choices[0].delta.content
 
-        response += token
-        yield response
+    #     response += token
+    #     yield response
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
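For reference, the formatting loop added in this commit can be exercised on its own, without calling the Gemini API. The sketch below is a minimal standalone rendition of that logic; the format_response helper name and the sample text are illustrative and do not appear in app.py. Lines ending in a colon are treated as headings and wrapped in Markdown bold, everything else is passed through unchanged.

# Minimal standalone sketch of the formatting logic added in this commit.
# format_response is an illustrative name; app.py keeps the loop inline.
def format_response(text: str) -> str:
    formatted_response = ""
    for line in text.split("\n"):
        if line.strip().endswith(":"):
            # Lines that read like section headings get Markdown bold.
            formatted_response += f"**{line.strip()}**\n"
        else:
            formatted_response += line + "\n"
    return formatted_response


if __name__ == "__main__":
    sample = "Ingredients:\n- rice\n- beans\nSteps:\n1. Cook the rice."
    print(format_response(sample))

As for the surrounding try/except: in the google-generativeai client, reading response.text can raise ValueError when the model returns no valid candidate (for example, a blocked response), which is what the new except ValueError branch and the "might have been blocked" message in app.py appear to guard against.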