Esther-Gail Fraser committed
Commit a9d0d7b · 1 Parent(s): b08814e

Add application file

Files changed (1)
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
+ # -*- coding: utf-8 -*-
+ """NLP - GRADIO.ipynb
+
+ Automatically generated by Colab.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1mrV6DhKMqFwtVE1E89DygsKexajnb60N
+ """
+
+ import gradio as gr
+ import torch
+ from transformers import pipeline
+
+ # Choose a device and load the model once at startup, not on every request
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ pipe = pipeline(
+     "text-generation",
+     model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+     torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
+     device=device,
+ )
+
+ def generate_chat_response(query):
+     # System message that sets the chatbot's context
+     system_message = "You are a medical chatbot who is answering questions about cancer. Please be considerate."
+
+     # Build the chat message list expected by the tokenizer's chat template
+     messages = [{"role": "system", "content": system_message}, {"role": "user", "content": query}]
+
+     # Render the messages into a single prompt string, ending with an open assistant turn
+     prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
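+     # With TinyLlama's Zephyr-style chat template, the rendered prompt
+     # should look roughly like this (shown for reference):
+     #   <|system|>
+     #   {system_message}</s>
+     #   <|user|>
+     #   {query}</s>
+     #   <|assistant|>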
+
+     # Generate a response with sampling
+     outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+
+     # The pipeline echoes the prompt at the start of "generated_text", so
+     # strip it and return only the newly generated reply as a plain string
+     return outputs[0]["generated_text"][len(prompt):].strip()
+
+ # Define the Gradio interface
+ demo = gr.Interface(
+     fn=generate_chat_response,
+     inputs="text",
+     outputs="text",
+     title="Medical Chatbot",
+     description="Enter your question about cancer to interact with the medical chatbot.",
+ )
+
+ # Launch the Gradio interface
+ demo.launch()
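
Once the app is running, the same endpoint can also be exercised programmatically. Below is a minimal sketch using gradio_client, assuming the app is served at Gradio's default local URL and that the Interface is exposed under its default "/predict" endpoint; the sample question is purely illustrative.

from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # assumed default local Gradio URL
# Send one question to the Interface's default endpoint and print the reply
result = client.predict("What are common early symptoms of lung cancer?", api_name="/predict")
print(result)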