# NOTE(review): removed non-Python web-viewer extraction artifacts here
# (file-size banner, git-blame hash gutter, line-number gutter) — they were
# not part of the program and would break parsing.
import gradio as gr
from transformers import pipeline
import torch
# Pick the inference device: CUDA GPU 0 when available, otherwise CPU (-1),
# following the transformers pipeline device convention.
if torch.cuda.is_available():
    device = 0
else:
    device = -1

# Build the text-generation pipeline for the fine-tuned polymers model
# on the chosen device.
model = pipeline(
    "text-generation",
    model="rish13/polymers",
    device=device,
)
def generate_response(prompt: str) -> str:
    """Generate a short text continuation of *prompt* with the polymers model.

    Args:
        prompt: Free-form text to continue.

    Returns:
        The single generated sequence (which includes the original prompt,
        per the transformers text-generation pipeline's default behavior).
    """
    # Run the model; sampling parameters below shape the output.
    response = model(
        prompt,
        max_length=70,  # cap total length (prompt + continuation) to keep outputs short
        num_return_sequences=1,  # only one candidate is needed
        temperature=0.6,  # below the 1.0 default: *reduces* randomness for more focused text
        top_k=100,  # sample only from the 100 most likely next tokens
        top_p=0.95  # nucleus sampling: restrict to the top 95% cumulative probability mass
    )
    # The pipeline returns a list of dicts; take the text of the first (only) sequence.
    generated_text = response[0]['generated_text']
    return generated_text
# Assemble the Gradio UI: a two-line prompt box feeding generate_response,
# with plain-text output.
prompt_box = gr.Textbox(
    lines=2,
    placeholder="Enter your prompt here...",
    label="Prompt",
)

interface = gr.Interface(
    fn=generate_response,
    inputs=prompt_box,
    outputs="text",
    title="Polymer Knowledge Model",
    description="A model fine-tuned for generating text related to polymers.",
)

# Start the local web server for the demo.
interface.launch()