# Scraped-page residue (HuggingFace Spaces header), commented out so the
# file is valid Python; original content preserved below.
# Spaces:
# Running
# Running
# File size: 1,269 Bytes
# 1e156c7 52631c2 6fe5c25 8f714ea 372d723 52631c2 8f714ea 6fe5c25 7ab3752 8f714ea dc582ee 8f714ea 6fe5c25 8f714ea 52631c2 8f714ea 7ab3752 8f714ea dc582ee |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
import os
import re
from typing import List

import google.generativeai as genai
import gradio as gr
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai import GoogleGenerativeAIEmbeddings

# SECURITY: an API key was hardcoded here, so it must be considered leaked —
# rotate it. Prefer supplying the key via the GOOGLE_API_KEY environment
# variable; the hardcoded value is kept only as a fallback for backward
# compatibility until the key is rotated.
genai.configure(
    api_key=os.environ.get("GOOGLE_API_KEY", "AIzaSyD2o8vjePJb6z8vT_PVe82lVWMD3_cBL0g")
)
def format_gemini_response(text):
    """Convert a small subset of Gemini's Markdown output to inline HTML.

    Handles fenced code blocks (```...```), **bold**, *italic*, and
    newlines (rendered as ``<br>``).

    Parameters
    ----------
    text : str
        Raw model output, possibly containing Markdown markup.

    Returns
    -------
    str
        The text with the supported Markdown converted to HTML tags.
    """
    # Fenced code blocks first, with DOTALL so multi-line code matches.
    # BUG FIX: the previous pattern r"(.*?)" matched the empty string at
    # every position, injecting empty <pre><code></code></pre> tags
    # between every character of the input.
    formatted = re.sub(
        r"```(.*?)```", r"<pre><code>\1</code></pre>", text, flags=re.DOTALL
    )
    formatted = formatted.replace('\n', '<br>')
    # Bold before italic so ** is consumed before the single-* pattern runs.
    formatted = re.sub(r"\*\*(.*?)\*\*", r"<b>\1</b>", formatted)
    formatted = re.sub(r"\*(.*?)\*", r"<i>\1</i>", formatted)
    return formatted
def predict(message: str, chat_his, d) -> tuple:
    """Send *message* to Gemini and return the HTML-formatted reply.

    Parameters
    ----------
    message : str
        The user's prompt.
    chat_his
        Prior conversation turns; assumed to be (user, model) string pairs
        as produced by a Gradio chat widget — TODO confirm against the UI.
        Passed through unchanged as the second return value.
    d
        Opaque extra state from the UI; passed through unchanged.

    Returns
    -------
    tuple
        ``(formatted_reply_html, chat_his, d)``.
    """
    model = genai.GenerativeModel("gemini-pro")
    # Rebuild the Gemini chat history from the (user, model) pairs so the
    # model keeps conversational context across turns. (This logic was
    # previously commented out, which silently dropped all context.)
    history = []
    for user_msg, model_msg in (chat_his or []):
        history.append({"role": "user", "parts": user_msg})
        history.append({"role": "model", "parts": model_msg})
    chat = model.start_chat(history=history)
    response = chat.send_message(message)
    return format_gemini_response(response.text), chat_his, d
# `predict` returns three values (reply, chat_his, d), so the interface
# needs three matching output components — the previous single "text"
# output would make Gradio fail when the function returned. "html" renders
# the formatted reply instead of showing raw tags.
iface = gr.Interface(
    fn=predict,
    inputs=["text", "list", "json"],
    outputs=["html", "list", "json"],
)
iface.launch(debug=True)
# | (scraped-page residue, commented out so the file parses)