# BioSphere-AI / app.py
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
# Load the shared FLAN-T5 model used for both ethical inquiry and learning support
ethics_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
ethics_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
ethics_pipeline = pipeline("text2text-generation", model=ethics_model, tokenizer=ethics_tokenizer)
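
# Illustrative only (not executed below): the text2text-generation pipeline accepts
# standard generation kwargs, so response length could be capped per call. The
# value 128 and the sample prompt are assumptions, not part of the original app.
#   sample = ethics_pipeline("Is gene editing ethical?", max_new_tokens=128)
#   print(sample[0]["generated_text"])
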
# Query Classification
def classify_query(query):
    """Classify the query into Ethical Inquiry or Learning Support."""
    query_lower = query.lower()  # case-insensitive keyword matching
    if "ethics" in query_lower or "privacy" in query_lower:
        return "ethical_inquiry"
    else:
        return "learning_support"
# Process Query
def handle_query(query):
    """Route the query to the appropriate branch and generate a response."""
    task = classify_query(query)
    if task == "ethical_inquiry":
        try:
            # Ethical guidance response
            response = ethics_pipeline(query)
            return f"Ethical Inquiry Response: {response[0]['generated_text']}"
        except Exception as e:
            return f"Error in Ethical Inquiry: {e}"
    else:
        try:
            # Learning support or general question response
            response = ethics_pipeline(query)
            return f"Learning Support Response: {response[0]['generated_text']}"
        except Exception as e:
            return f"Error in Learning Support: {e}"
# Gradio Interface
def chatbot(query):
    return handle_query(query)
# Deploy with Gradio
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="BioSphere AI Chatbot",
    description="A chatbot for Ethical Guidance and Learning Support in Biotech.",
)
# Gemmini API Key Integration (note: the key is only printed here; it is not
# consumed by the FLAN-T5 pipeline or by Gradio)
def deploy_with_gemmini(api_key):
    print(f"Deploying using Gemmini API Key: {api_key}")
    interface.launch()

# Replace 'your_api_key' with your actual Gemmini API key
gemmini_api_key = "your_api_key"
deploy_with_gemmini(gemmini_api_key)
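
# A minimal sketch of a safer alternative to hard-coding the key (assumption:
# an environment variable named GEMMINI_API_KEY would hold it):
#   import os
#   gemmini_api_key = os.environ.get("GEMMINI_API_KEY", "")
#   deploy_with_gemmini(gemmini_api_key)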