File size: 2,675 Bytes
7a2a71d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM, AutoModelForSeq2SeqLM

# Load DNA Analysis Model
# ESM-2 (6-layer, 8M-param) masked language model + its tokenizer, wrapped in a
# fill-mask pipeline so masked positions in a sequence can be predicted.
# NOTE(review): ESM-2 is trained on *protein* sequences, not nucleotide DNA —
# confirm this is the intended model for "DNA" analysis.
dna_tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
dna_model = AutoModelForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
dna_pipeline = pipeline("fill-mask", model=dna_model, tokenizer=dna_tokenizer)

# Load Ethical Inquiry and Learning Support Model
# FLAN-T5-base serves both the "ethical_inquiry" and "learning_support" routes
# via a single text2text-generation pipeline.
ethics_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
ethics_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
ethics_pipeline = pipeline("text2text-generation", model=ethics_model, tokenizer=ethics_tokenizer)

# Query Classification
def classify_query(query):
    """Classify a query into one of three routes.

    Args:
        query: Raw user text.

    Returns:
        One of "dna_analysis", "ethical_inquiry", or "learning_support"
        (the catch-all default).

    Fix: matching is now case-insensitive — the original only matched the
    exact spellings "DNA", "sequence", "ethics", "privacy", so e.g.
    "dna", "Sequence", or "Privacy" fell through to learning_support.
    """
    q = query.lower()
    if "dna" in q or "sequence" in q:
        return "dna_analysis"
    if "ethics" in q or "privacy" in q:
        return "ethical_inquiry"
    return "learning_support"

# Process Query
def handle_query(query):
    """Route the query to the appropriate model and generate a response.

    Args:
        query: Raw user text.

    Returns:
        A formatted response string; on model failure, an error string
        (errors are reported to the user rather than raised).
    """
    task = classify_query(query)

    if task == "dna_analysis":
        try:
            # Fix: ESM-2's mask token is "<mask>", not BERT's "[MASK]" — the
            # original literal would not be recognized by the fill-mask
            # pipeline. Ask the tokenizer for its own mask token instead.
            # NOTE(review): "X" as the user-facing mask placeholder is a
            # convention assumed here — confirm with the UI docs.
            masked_sequence = query.replace("X", dna_tokenizer.mask_token)
            output = dna_pipeline(masked_sequence)
            return f"DNA Analysis Result: {output}"
        except Exception as e:
            return f"Error in DNA Analysis: {e}"

    # Both remaining routes use the same FLAN-T5 pipeline; only the label
    # in the response/error text differs, so handle them in one branch.
    label = "Ethical Inquiry" if task == "ethical_inquiry" else "Learning Support"
    try:
        response = ethics_pipeline(query)
        return f"{label} Response: {response[0]['generated_text']}"
    except Exception as e:
        return f"Error in {label}: {e}"

# Gradio Interface
def chatbot(query):
    """Gradio callback: delegate the user's message to the query router."""
    answer = handle_query(query)
    return answer

# Deploy with Gradio
# Single-page UI: one free-text input box, one text output panel.
interface = gr.Interface(
    fn=chatbot,
    title="BioSphere AI Chatbot",
    description=(
        "A chatbot for DNA Analysis, Ethical Guidance, "
        "and Learning Support in Biotech."
    ),
    inputs="text",
    outputs="text",
)

# Add Gemmini API Key Integration
def deploy_with_gemmini(api_key):
    """Launch the Gradio app after logging a masked form of the API key.

    Args:
        api_key: Gemmini API key string (may be a placeholder).

    NOTE(review): the key is only logged here — it is never passed to any
    Gemini/Gemmini service in this file. Confirm whether the actual API
    integration is still TODO.
    """
    # Security fix: never print credentials in full — log only a masked
    # suffix so deployments can be correlated without leaking the secret.
    masked = f"...{api_key[-4:]}" if api_key else "(none)"
    print(f"Deploying using Gemmini API Key: {masked}")
    interface.launch()

# Replace 'your_api_key' with your actual Gemmini API key
# NOTE(review): prefer loading the secret from an environment variable
# rather than hard-coding it in source.
gemmini_api_key = "your_api_key"
deploy_with_gemmini(gemmini_api_key)