Safwanahmad619 committed
Commit 7a2a71d · verified · 1 Parent(s): c7a1d37

Create app.py

Files changed (1)
  1. app.py +74 -0
app.py ADDED
@@ -0,0 +1,74 @@
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM, AutoModelForSeq2SeqLM

# Load the sequence-analysis model.
# Note: facebook/esm2_t6_8M_UR50D is ESM-2, a protein language model; it is used
# here as a masked-token (fill-mask) predictor for sequence queries.
dna_tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
dna_model = AutoModelForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
dna_pipeline = pipeline("fill-mask", model=dna_model, tokenizer=dna_tokenizer)

# Load the Ethical Inquiry and Learning Support model (FLAN-T5, text-to-text generation)
ethics_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
ethics_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
ethics_pipeline = pipeline("text2text-generation", model=ethics_model, tokenizer=ethics_tokenizer)
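# For illustration only (hypothetical inputs, not part of the original file):
#   dna_pipeline("MKT<mask>LLV") returns the top-scoring candidates for the masked token;
#   ethics_pipeline("Summarize the ethics of gene editing") returns a list like
#   [{"generated_text": "..."}].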

# Query Classification
def classify_query(query):
    """Classify the query into DNA Analysis, Ethical Inquiry, or Learning Support."""
    q = query.lower()  # case-insensitive keyword matching
    if "dna" in q or "sequence" in q:
        return "dna_analysis"
    elif "ethics" in q or "privacy" in q:
        return "ethical_inquiry"
    else:
        return "learning_support"

# Process Query
def handle_query(query):
    """Route the query to the appropriate model and generate a response."""
    task = classify_query(query)

    if task == "dna_analysis":
        try:
            # Mark unknown positions: replace the "X" placeholder with the tokenizer's
            # mask token (ESM-2 uses "<mask>", not "[MASK]").
            masked_sequence = query.replace("X", dna_tokenizer.mask_token)
            output = dna_pipeline(masked_sequence)
            return f"DNA Analysis Result: {output}"
        except Exception as e:
            return f"Error in DNA Analysis: {e}"

    elif task == "ethical_inquiry":
        try:
            # Ethical guidance response
            response = ethics_pipeline(query)
            return f"Ethical Inquiry Response: {response[0]['generated_text']}"
        except Exception as e:
            return f"Error in Ethical Inquiry: {e}"

    else:
        try:
            # Learning support or general question response
            response = ethics_pipeline(query)
            return f"Learning Support Response: {response[0]['generated_text']}"
        except Exception as e:
            return f"Error in Learning Support: {e}"

# Gradio Interface
def chatbot(query):
    return handle_query(query)

# Deploy with Gradio
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="BioSphere AI Chatbot",
    description="A chatbot for DNA Analysis, Ethical Guidance, and Learning Support in Biotech.",
)
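# Usage note (assumption, not part of the original file): on Hugging Face Spaces the
# interface.launch() call below is sufficient; when running locally,
# interface.launch(share=True) additionally exposes a temporary public URL.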

# Gemmini API key hook
# Note: the key is not passed to any external API here; it is only printed before
# launching the Gradio interface. Avoid hard-coding real keys in source files.
def deploy_with_gemmini(api_key):
    print(f"Deploying using Gemmini API Key: {api_key}")
    interface.launch()

# Replace 'your_api_key' with your actual Gemmini API key
gemmini_api_key = "your_api_key"
deploy_with_gemmini(gemmini_api_key)