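"""Gradio demo for the Polymer Knowledge Model (rish13/polymers text-generation pipeline)."""
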
import gradio as gr
from transformers import pipeline
import torch

# Run on the first GPU if available, otherwise on CPU
device = 0 if torch.cuda.is_available() else -1

# Load the fine-tuned polymer model as a text-generation pipeline
model = pipeline(
    "text-generation",
    model="rish13/polymers",
    device=device,
)


def generate_response(prompt):
    # Sampling parameters (temperature, top_k, top_p) only take effect when
    # do_sample=True; note that max_length also counts the prompt tokens.
    response = model(
        prompt,
        max_length=50,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.5,
        top_k=50,
        top_p=0.9,
    )

    # The pipeline returns a list of dicts with a 'generated_text' key
    generated_text = response[0]['generated_text']
    return generated_text


# Simple Gradio UI: a prompt textbox in, generated text out
interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(
        lines=2,
        placeholder="Enter your prompt here...",
        label="Prompt",
        elem_id="input-textbox",
    ),
    outputs=gr.Textbox(
        label="Generated Text",
        elem_id="output-textbox",
    ),
    title="Polymer Knowledge Model",
    description=(
        "This application uses a fine-tuned model to generate text related to polymers. "
        "Enter a prompt to get started, and the model will generate relevant text."
    ),
    # The Gradio 2.x-era arguments theme="huggingface" and layout="horizontal"
    # are no longer accepted by gr.Interface in current Gradio releases, so
    # they are dropped here.
    live=True,  # re-run generation automatically as the prompt changes
)

interface.launch()