Spaces:
Sleeping
Sleeping
File size: 1,697 Bytes
193b0c9 675fbf9 193b0c9 6c761bc 8e9f7e9 a378f63 193b0c9 f06cd88 193b0c9 9bca116 193b0c9 8fced6f 193b0c9 c07c1e4 193b0c9 ffe00c4 193b0c9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
# -*- coding: utf-8 -*-
"""gradio_deploy.ipynb
Automatically generated by Colaboratory.
"""
import os
import gradio
from PIL import Image
from timeit import default_timer as timer
from tensorflow import keras
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers import AutoModelWithLMHead, AutoTokenizer
import numpy as np
# Load the fine-tuned GPT-2 medical Q&A checkpoint and its tokenizer once at
# module import time (downloads from the Hugging Face Hub on first run).
# NOTE: AutoModelWithLMHead is deprecated in transformers (and removed in
# recent versions); the checkpoint is a GPT-2 causal LM, so load it with
# GPT2LMHeadModel, which is already imported above.
loaded_model = GPT2LMHeadModel.from_pretrained("runaksh/medquad-finetuned-gpt2")
loaded_tokenizer = AutoTokenizer.from_pretrained("runaksh/medquad-finetuned-gpt2")
def generate_query_response(prompt, max_length=200):
    """Generate an answer to a medical question with the fine-tuned GPT-2 model.

    Parameters
    ----------
    prompt : str
        The question text entered by the user.
    max_length : int or float, optional
        Maximum total token length (prompt + generation) of the output
        sequence. Defaults to 200. Gradio's Number component delivers a
        float (or None when the field is left empty), so the value is
        normalized here.

    Returns
    -------
    str
        The decoded model output; includes the prompt tokens, since the
        full generated sequence is decoded.
    """
    model = loaded_model
    tokenizer = loaded_tokenizer
    # Gradio's Number input yields a float, and None when left blank;
    # model.generate requires an integer length, so coerce/default here.
    max_length = 200 if not max_length else int(max_length)
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # GPT-2 defines no pad token; supply EOS as pad and an explicit all-ones
    # attention mask so generate() does not warn for the single unpadded input.
    attention_mask = torch.ones_like(input_ids)
    pad_token_id = tokenizer.eos_token_id
    output = model.generate(
        input_ids,
        max_length=max_length,
        num_return_sequences=1,
        attention_mask=attention_mask,
        pad_token_id=pad_token_id
    )
    return tokenizer.decode(output[0])
title = "Question and Answer"
description = "Medical Question and Answer"

# Gradio UI components.
# NOTE: gradio.inputs.* / gradio.outputs.* were deprecated in Gradio 3.x and
# removed in 4.x; components are now exposed directly on the gradio module.
# Input from user
in_prompt = gradio.Textbox(lines=2, label='Enter the question')
in_max_length = gradio.Number(label='Enter the max length')
# Output response
out_response = gradio.Textbox(label='Answer')

# Gradio interface wiring (question, max length) -> generated answer;
# launch() blocks and serves the UI (debug=True surfaces errors in the console).
iface = gradio.Interface(fn=generate_query_response,
                         inputs=[in_prompt, in_max_length],
                         outputs=out_response,
                         title=title,
                         description=description)
iface.launch(debug=True)
|