Sujithanumala committed on
Commit 8f714ea · verified · 1 Parent(s): 7ab3752

Update app.py

Files changed (1)
  1. app.py +24 -30
app.py CHANGED
@@ -6,41 +6,35 @@ from langchain_community.vectorstores import FAISS
 from langchain_google_genai import ChatGoogleGenerativeAI
 import re
 
+
 genai.configure(api_key="AIzaSyD2o8vjePJb6z8vT_PVe82lVWMD3_cBL0g")
 
+
 def format_gemini_response(text):
-    bold_pattern = r"\*\*(.*?)\*\*"
-    italic_pattern = r"\*(.*?)\*"
-    code_pattern = r"```(.*?)```"
-    text = text.replace('\n', '<br>')
-    formatted_text = re.sub(code_pattern, "<pre><code>\\1</code></pre>", text)
-    formatted_text = re.sub(bold_pattern, "<b>\\1</b>", formatted_text)
-    formatted_text = re.sub(italic_pattern, "<i>\\1</i>", formatted_text)
-    return formatted_text
 
-def predict(message: str, chat_his: List[List[str]], d: dict) -> str:
-    if not message.strip():
-        return "Error: Message cannot be empty.", chat_his, d
+    bold_pattern = r"\*\*(.*?)\*\*"
+    italic_pattern = r"\*(.*?)\*"
+    code_pattern = r"```(.*?)```"
+    text = text.replace('\n', '<br>')
+    formatted_text = re.sub(code_pattern,"<pre><code>\\1</code></pre>",text)
+    formatted_text = re.sub(bold_pattern, "<b>\\1</b>", formatted_text)
+    formatted_text = re.sub(italic_pattern, "<i>\\1</i>", formatted_text)
 
+    return formatted_text
+def predict(message :str , chat_his ,d ) -> str:
     model = genai.GenerativeModel("gemini-pro")
     his = []
-    for i, j in chat_his:
-        his.extend([
-            {"role": "user", "parts": i},
-            {"role": "model", "parts": j},
-        ])
-
-    chat = model.start_chat(history=his)
+    # for i,j in history:
+    #     his.extend([
+    #         {"role": "user", "parts": i},
+    #         {"role": "model", "parts": j},
+    #     ])
+    chat = model.start_chat(
+        history=his
+    )
     response = chat.send_message(message)
-
-    # Update chat history
-    chat_his.append((message, response.text))
-
-    return format_gemini_response(response.text), chat_his, d
-
-iface = gr.Interface(
-    fn=predict,
-    inputs=["text", "list", "json"],
-    outputs="html" # Change to HTML for proper rendering
-)
-iface.launch(share=True)
+    return format_gemini_response(response.text),chat_his, d
+iface = gr.Interface(fn = predict,inputs = ["text","list","json"],outputs = "text")
+iface.launch()
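
For reference, the regex-to-HTML conversion that format_gemini_response performs (unchanged in substance between the two versions) can be exercised on its own, without the Gemini or Gradio dependencies. The sketch below is illustrative only; the to_html name and the sample string are not part of the Space:

import re

def to_html(text: str) -> str:
    bold_pattern = r"\*\*(.*?)\*\*"
    italic_pattern = r"\*(.*?)\*"
    code_pattern = r"```(.*?)```"
    # Newlines are replaced first, so a fenced block that spanned several lines
    # collapses onto one line and the non-greedy code_pattern can match it.
    text = text.replace('\n', '<br>')
    text = re.sub(code_pattern, r"<pre><code>\1</code></pre>", text)
    text = re.sub(bold_pattern, r"<b>\1</b>", text)
    text = re.sub(italic_pattern, r"<i>\1</i>", text)
    return text

print(to_html("**Hello** *world*\n```print('hi')```"))
# -> <b>Hello</b> <i>world</i><br><pre><code>print('hi')</code></pre>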
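
The new predict comments out the history replay, so every request starts a fresh chat and chat_his is returned unchanged. For comparison, here is a minimal sketch of the replay pattern the removed code used, built on the same google.generativeai calls; treat the placeholder key and the "role"/"parts" dict shape as assumptions to verify against the installed google-generativeai release:

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder, not the Space's key

def predict_with_history(message, chat_his):
    model = genai.GenerativeModel("gemini-pro")
    his = []
    # Rebuild the conversation from (user message, model reply) pairs,
    # mirroring the loop that the commit comments out.
    for user_msg, model_msg in chat_his:
        his.extend([
            {"role": "user", "parts": user_msg},
            {"role": "model", "parts": model_msg},
        ])
    chat = model.start_chat(history=his)   # replay earlier turns
    response = chat.send_message(message)  # send the new user turn
    chat_his.append((message, response.text))
    return response.text, chat_his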
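
On the Gradio side, the commit declares three inputs but a single "text" output, while predict returns three values. A wiring sketch that keeps all three return values visible is shown below; the explicit components (gr.Textbox, gr.JSON, gr.HTML) and the echo stand-in for the Gemini call are assumptions, not what this commit ships:

import gradio as gr

def demo_predict(message, chat_his, d):
    # Stand-in for the Gemini call so the wiring can be tried in isolation.
    reply = f"<b>Echo:</b> {message}"
    chat_his = (chat_his or []) + [[message, reply]]
    return reply, chat_his, d

iface = gr.Interface(
    fn=demo_predict,
    inputs=[gr.Textbox(label="message"),
            gr.JSON(label="chat history"),
            gr.JSON(label="extra state")],
    outputs=[gr.HTML(label="reply"),
             gr.JSON(label="chat history"),
             gr.JSON(label="extra state")],
)

if __name__ == "__main__":
    iface.launch()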