# app.py — Gradio demo app serving the 'guptavishal79/aimlops' fine-tuned
# GPT-2 medical question-answering model from the Hugging Face Hub.
import gradio
# NOTE(review): this import was missing in the original file — GPT2LMHeadModel
# and GPT2Tokenizer were used below without being brought into scope (NameError).
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the fine-tuned GPT-2 model and its tokenizer from the Hugging Face Hub.
# Both are loaded once at import time and reused for every request.
hub_path = 'guptavishal79/aimlops'
loaded_model = GPT2LMHeadModel.from_pretrained(hub_path)
loaded_tokenizer = GPT2Tokenizer.from_pretrained(hub_path)
# Function for response generation
def generate_response(model, tokenizer, prompt, max_length):
    """Generate a text continuation of *prompt* with the GPT-2 model.

    NOTE(review): this helper was undefined in the original file, so the app
    raised NameError on the first query. Reconstructed from its call site
    (model, tokenizer, prompt, max_length) — confirm decoding options
    (greedy vs. sampling) against the original training/inference setup.
    """
    # Gradio's Number component yields floats; generate() requires an int.
    max_length = int(max_length)
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(
        inputs["input_ids"],
        attention_mask=inputs.get("attention_mask"),
        max_length=max_length,
        # GPT-2 has no pad token; reuse EOS to silence the generate() warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)


def generate_query_response(prompt, max_length=200):
    """Answer a medical question using the fine-tuned GPT-2 model.

    prompt: the user's question text.
    max_length: maximum total token length of the generated sequence.
    Returns the decoded model output as a string.
    """
    model = loaded_model
    tokenizer = loaded_tokenizer
    # The model was fine-tuned with question/answer sentinel markers;
    # wrap the raw question so generation continues after "<answer>".
    prompt = f"<question>{prompt}<answer>"
    response = generate_response(model, tokenizer, prompt, max_length)
    return response
# ---- Gradio UI wiring ----

# User-facing input widgets: the question text and the desired answer length.
question_input = gradio.Textbox(
    lines=2, placeholder=None, value="", label='Enter Medical Question'
)
length_input = gradio.Number(value=200, label='Answer Length')

# Output widget showing the generated answer.
answer_output = gradio.Textbox(type="text", label='Answer')

# Assemble the interface and serve it with a public share link.
app = gradio.Interface(
    fn=generate_query_response,
    inputs=[question_input, length_input],
    outputs=[answer_output],
)
app.launch(share=True)