import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import numpy as np
# Load pre-trained GPT-2 model and tokenizer
model = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# Define a function to generate a color based on a text prompt
def generate_color(prompt):
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    output = model.generate(input_ids, max_length=50, num_return_sequences=1, no_repeat_ngram_size=2, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, not the echoed prompt
    color_name = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True).strip()
    # Map the first three characters to RGB values, padding short strings and clamping to 0-255
    padded = (color_name + "   ")[:3]
    color = [min(int(ord(char) * 255 / 122), 255) for char in padded]
    # Create a solid 100x100 image filled with the derived color
    img = np.full((100, 100, 3), color, dtype=np.uint8)
    return img
# Create Gradio interface
inputs = gr.Textbox(lines=2, label="Enter a text prompt (e.g., 'a color that represents happiness')")
output = gr.Image(type="numpy", label="Generated color")
gr.Interface(fn=generate_color, inputs=inputs, outputs=output, title="AI Color Generator", description="Generate a color based on a text prompt using the GPT-2 model.").launch()
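# Note: launch() serves the app on a local URL (http://127.0.0.1:7860 by default);
# pass share=True to launch() if a temporary public link is needed.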