import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the fine-tuned model and tokenizer from the Hugging Face Hub.
# (AutoModelWithLMHead is deprecated; AutoModelForCausalLM is the
# current class for GPT-2-style models.)
# Alternative checkpoint, kept for reference:
# model = AutoModelForCausalLM.from_pretrained('schakkir/demogpt2')
# tokenizer = AutoTokenizer.from_pretrained('schakkir/demogpt2')
prasad_model_path = "PrasadJali/demogpt2"
model = AutoModelForCausalLM.from_pretrained(prasad_model_path)
tokenizer = AutoTokenizer.from_pretrained(prasad_model_path)
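# GPT-2 checkpoints ship without a padding token, so the text-generation
# pipeline may warn about pad_token_id at inference time. Reusing the EOS
# token as the pad token is a common workaround; this line is an added
# assumption, not part of the original script.
tokenizer.pad_token = tokenizer.eos_token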

# Build the text-generation pipeline once at module load so it is not
# re-created on every request
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)

# Function for response generation
def generate_query_response(prompt, max_length=200):
    """
    Generates a response to a given prompt using the fine-tuned GPT-2 model.

    Args:
        prompt (str): The input prompt text.
        max_length (int, optional): The maximum length of the generated response. Defaults to 200.

    Returns:
        str: The generated response text.
    """
    # Gradio sliders return floats, so cast before passing to the pipeline
    response = generator(prompt, max_length=int(max_length), num_return_sequences=1)
    # Extract and return the generated text
    return response[0]['generated_text']
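
# Optional sanity check before wiring up the UI (a minimal sketch; the
# prompt is only an illustration, not taken from the original script):
# print(generate_query_response("What are the common symptoms of anemia?", max_length=100))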
# Input and output Gradio components
in_prompt = gr.Textbox(lines=2, placeholder="Enter your medical query here...", label="Medical Query")
in_max_length = gr.Slider(minimum=50, maximum=500, value=200, step=10, label="Max Response Length")
out_response = gr.Textbox(label="Generated Response")
# Create the Gradio Interface object
iface = gr.Interface(
fn=generate_query_response,
inputs=[in_prompt, in_max_length],
outputs=out_response,
title="Medical Q&A with GPT-2",
description="Ask medical questions and get answers from a fine-tuned GPT-2 model.",
)
# Launch the interface; share=True also serves the app through a temporary public link
iface.launch(share=True)