# Medico / app.py — Gradio demo: medical Q&A with a fine-tuned GPT-2 model.
# Hugging Face Space by schakkir; revision 52e4b68 (verified).
import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoModelWithLMHead,
    AutoTokenizer,
    pipeline,
)
# Load the fine-tuned GPT-2 model and tokenizer from the Hugging Face Hub.
# (Earlier experiment kept for reference: 'schakkir/demogpt2'.)
#model = AutoModelForCausalLM.from_pretrained('schakkir/demogpt2')
#tokenizer = AutoTokenizer.from_pretrained('schakkir/demogpt2')
prasad_model_path = "PrasadJali/demogpt2"
# AutoModelForCausalLM replaces the deprecated AutoModelWithLMHead (removed in
# transformers v5); GPT-2 is a causal LM, so the loaded weights are identical.
model = AutoModelForCausalLM.from_pretrained(prasad_model_path)
tokenizer = AutoTokenizer.from_pretrained(prasad_model_path)
# Build the text-generation pipeline ONCE at import time: the original code
# re-created it on every request, which re-wraps model/tokenizer needlessly.
_generator = pipeline('text-generation', model=model, tokenizer=tokenizer)


def generate_query_response(prompt, max_length=200):
    """
    Generate a response to a given prompt using the fine-tuned GPT-2 model.

    Args:
        prompt (str): The input prompt text.
        max_length (int, optional): The maximum length (in tokens) of the
            generated response. Defaults to 200. Gradio sliders may deliver
            a float, so the value is coerced to int before use.

    Returns:
        str: The generated response text.
    """
    # Coerce to int: gr.Slider can pass a float, which generation rejects.
    max_length = int(max_length)
    # num_return_sequences=1 -> the result list holds a single candidate.
    response = _generator(prompt, max_length=max_length, num_return_sequences=1)
    # Extract and return the generated text of that single candidate.
    return response[0]['generated_text']
# --- Gradio UI wiring ---
# Input widgets: free-text medical query plus a length cap for the answer.
query_box = gr.Textbox(
    lines=2,
    placeholder="Enter your medical query here...",
    label="Medical Query",
)
length_slider = gr.Slider(
    minimum=50,
    maximum=500,
    value=200,
    step=10,
    label="Max Response Length",
)
# Output widget: the model's generated answer.
answer_box = gr.Textbox(label="Generated Response")

# Assemble the interface: (prompt, max length) in, generated text out.
iface = gr.Interface(
    fn=generate_query_response,
    inputs=[query_box, length_slider],
    outputs=answer_box,
    title="Medical Q&A with GPT-2",
    description="Ask medical questions and get answers from a fine-tuned GPT-2 model.",
)

# Launch the UI; share=True also exposes a temporary public tunnel link.
iface.launch(share=True)