# Gradio chat application backed by Google Gemini (Hugging Face Space).
import os
import re
from typing import List

import google.generativeai as genai
import gradio as gr
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
# Configure the Gemini SDK.
# SECURITY: an API key was previously hard-coded here and is therefore
# exposed in source control — it must be rotated. Read the key from the
# environment instead of embedding it in the file.
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY", ""))
def format_gemini_response(text):
    """Convert a small subset of Markdown in *text* to inline HTML.

    Handles fenced code blocks (```...```), bold (**...**) and italic
    (*...*) markers, and turns newlines outside code fences into <br>
    tags.

    Args:
        text: Raw model output, possibly containing Markdown markup.

    Returns:
        An HTML string suitable for rendering in a browser.
    """
    # Carve out code fences FIRST (re.DOTALL lets a fence span lines) so
    # that newlines inside <pre><code> are preserved verbatim — <pre>
    # already renders whitespace, so injecting <br> there would be wrong.
    # re.split with one capture group alternates: even indices are plain
    # text, odd indices are fence contents.
    segments = re.split(r"```(.*?)```", text, flags=re.DOTALL)
    out = []
    for idx, segment in enumerate(segments):
        if idx % 2 == 1:  # fence content: wrap, leave untouched
            out.append("<pre><code>" + segment + "</code></pre>")
        else:
            out.append(_format_plain_segment(segment))
    return "".join(out)


def _format_plain_segment(segment):
    """Apply bold/italic/newline formatting to a non-code text segment."""
    # Bold must run before italic, otherwise "**x**" would be consumed by
    # the single-asterisk pattern.
    segment = re.sub(r"\*\*(.*?)\*\*", r"<b>\1</b>", segment)
    segment = re.sub(r"\*(.*?)\*", r"<i>\1</i>", segment)
    return segment.replace("\n", "<br>")
def predict(message: str, chat_his: List[List[str]], d: dict) -> tuple:
    """Send *message* to Gemini with the accumulated chat history.

    Fixes the original return annotation: every path returns a 3-tuple,
    not a str.

    Args:
        message: The user's new message; must be non-blank.
        chat_his: Prior turns as (user, model) pairs; mutated in place
            with the new exchange on success.
        d: Opaque state dict, passed through unchanged.

    Returns:
        ``(html, chat_his, d)`` where ``html`` is the HTML-formatted model
        reply, or an error string when *message* is blank.
    """
    # Guard clause: reject blank input before touching the API.
    if not message.strip():
        return "Error: Message cannot be empty.", chat_his, d
    model = genai.GenerativeModel("gemini-pro")
    # Re-encode (user, model) pairs into the role/parts structure the
    # Gemini SDK expects for start_chat().
    history = []
    for user_turn, model_turn in chat_his:
        history.append({"role": "user", "parts": user_turn})
        history.append({"role": "model", "parts": model_turn})
    chat = model.start_chat(history=history)
    response = chat.send_message(message)
    # Record the raw (unformatted) reply in the history; only the value
    # returned for display is HTML-formatted.
    chat_his.append((message, response.text))
    return format_gemini_response(response.text), chat_his, d
# Wire predict() into a simple Gradio UI. predict() returns THREE values
# (html reply, updated history, state dict), so three output components
# are declared to match — the original declared only "html", which makes
# Gradio fail at call time on the extra return values. The first output
# renders as HTML so the formatted reply displays properly.
iface = gr.Interface(
    fn=predict,
    inputs=["text", "list", "json"],
    outputs=["html", "list", "json"],
)

iface.launch(share=True)