# NOTE: pasted viewer metadata (not code) — file size 1,452 bytes, commit e990e13.
#!/usr/bin/env python3
from pathlib import Path

import numpy as np
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif
from huggingface_hub import HfApi
# --- Configuration -------------------------------------------------------
api = HfApi()

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

batch_size = 1          # images generated per prompt
guidance_scale = 15.0   # classifier-free guidance strength

# Seed global RNG for any ops not driven by the explicit generator below.
torch.manual_seed(0)

# Load the Shap-E text-to-3D pipeline and move it to the target device.
repo = "openai/shap-e"
pipe = ShapEPipeline.from_pretrained(repo)
pipe = pipe.to(device)

# BUG FIX: the generator was hard-coded to device="cuda", which raises on
# CPU-only machines even though `device` falls back to CPU above.
# Tie it to the selected device instead. (The two dead `prompt = ...`
# assignments were removed: the loop below rebinds `prompt` immediately.)
generator = torch.Generator(device=device).manual_seed(0)
# Text prompts to turn into 3D renders; one GIF is produced and uploaded
# per entry by the loop below.
prompts = [
"A chair that looks like an avocado",
"An airplane that looks like a banana",
"A spaceship",
"A birthday cupcake",
"A chair that looks like a tree",
"A green boot",
"A penguin",
"Ube ice cream cone",
"A bowl of vegetables",
]
# Output directory for the rendered GIFs; create it up front so
# export_to_gif does not fail on a machine where it doesn't exist yet.
output_dir = Path("/home/patrick/images")
output_dir.mkdir(parents=True, exist_ok=True)

for prompt in prompts:
    # Render the prompt to a list of PIL frames (one 3D turntable view).
    images = pipe(
        prompt,
        num_images_per_prompt=batch_size,
        generator=generator,
        guidance_scale=guidance_scale,
        num_inference_steps=64,
        frame_size=256,
        output_type='pil'
    ).images

    # Derive the file name once (e.g. "A_chair_that_looks_like_an_avocado.gif")
    # instead of re-splitting the path at every use site.
    filename = f"{'_'.join(prompt.split())}.gif"
    path = str(output_dir / filename)
    export_to_gif(images[0], path)

    # Push the GIF to the shared dataset repo and print a viewable URL.
    api.upload_file(
        path_or_fileobj=path,
        path_in_repo=filename,
        repo_id="patrickvonplaten/images",
        repo_type="dataset",
    )
    print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{filename}")