# 3DGenTripoSR / app.py
import os
import tempfile
import gradio as gr
import numpy as np
import rembg
import requests
import torch
from io import BytesIO
from PIL import Image
from serpapi import GoogleSearch
from tsr.system import TSR
from tsr.utils import remove_background, resize_foreground, to_gradio_3d_orientation
# Set your SerpApi key here
SERPAPI_KEY = "YOUR_SERPAPI_KEY"
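# Note: on a hosted Space the key would normally come from an environment
# secret rather than being hard-coded, e.g.
# SERPAPI_KEY = os.environ.get("SERPAPI_KEY", "YOUR_SERPAPI_KEY")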
HEADER = """
**TripoSR** is a state-of-the-art open-source model for **fast** feedforward 3D reconstruction from a single image, developed in collaboration between [Tripo AI](https://www.tripo3d.ai/) and [Stability AI](https://stability.ai/).
**Tips:**
1. If the result is unsatisfactory, try changing the foreground ratio; it can improve the reconstruction.
2. Disable the "Remove Background" option only if your input image is already RGBA with a transparent background and its content is centered and occupies more than 70% of the image width or height.
"""
def get_motorcycle_image(make, model_name):
    params = {
        "api_key": SERPAPI_KEY,
        "engine": "google",
        "q": f"{make} {model_name} motorcycle product photo",
        "tbm": "isch"
    }
search = GoogleSearch(params)
results = search.get_dict()
if "images_results" in results:
first_image = results["images_results"][0]
image_url = first_image.get("original")
if image_url:
            image_response = requests.get(image_url, timeout=10)
image = Image.open(BytesIO(image_response.content))
return image
else:
print("Image URL not found in results.")
return None
else:
print("No image results found.")
return None
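# Prepare the input image for TripoSR: optionally strip the background with
# rembg, rescale the foreground, and composite any alpha channel onto a
# neutral grey background.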
def preprocess(input_image, do_remove_background, foreground_ratio):
def fill_background(image):
image = np.array(image).astype(np.float32) / 255.0
image = image[:, :, :3] * image[:, :, 3:4] + (1 - image[:, :, 3:4]) * 0.5
image = Image.fromarray((image * 255.0).astype(np.uint8))
return image
if do_remove_background:
image = input_image.convert("RGB")
image = remove_background(image, rembg_session)
image = resize_foreground(image, foreground_ratio)
image = fill_background(image)
else:
image = input_image
if image.mode == "RGBA":
image = fill_background(image)
return image
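# Run TripoSR on the preprocessed image, extract the mesh, reorient it for
# Gradio's 3D viewer, and export it to temporary .obj and .glb files.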
def generate(image):
scene_codes = model(image, device=device)
mesh = model.extract_mesh(scene_codes)[0]
mesh = to_gradio_3d_orientation(mesh)
mesh_path = tempfile.NamedTemporaryFile(suffix=".obj", delete=False)
mesh_path2 = tempfile.NamedTemporaryFile(suffix=".glb", delete=False)
mesh.export(mesh_path.name)
mesh.export(mesh_path2.name)
return mesh_path.name, mesh_path2.name
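# End-to-end pipeline behind the "Generate" button: look up a product photo,
# remove its background, preprocess it, and reconstruct the 3D model.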
def run_example(make, model_name):
    image = get_motorcycle_image(make, model_name)
    if image is None:
        raise gr.Error("Image could not be fetched.")
    # Save the fetched image to a temporary file (the original hard-coded
    # /content/ paths only exist on Colab); PNG preserves any alpha channel.
    input_image_path = os.path.join(tempfile.gettempdir(), "motorcycle.png")
    image.save(input_image_path)
    img = Image.open(input_image_path)
    # Remove the background with the shared rembg session
    img_no_bg = rembg.remove(img, session=rembg_session)
    output_image_path = os.path.join(tempfile.gettempdir(), "motorcyclebg.png")
    img_no_bg.save(output_image_path)
    # Preprocess (background already removed above) and generate the 3D model
    preprocessed = preprocess(img_no_bg, False, 0.9)
    mesh_name, mesh_name2 = generate(preprocessed)
    return preprocessed, mesh_name, mesh_name2
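# Pick the inference device: prefer CUDA when available, with an optional
# override via the DEVICE environment variable.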
device = "cuda:0" if torch.cuda.is_available() else "cpu"
device = os.environ.get("DEVICE", device)
model = TSR.from_pretrained(
"stabilityai/TripoSR",
config_name="config.yaml",
weight_name="model.ckpt",
)
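# A larger render chunk size speeds up rendering at the cost of GPU memory.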
model.renderer.set_chunk_size(131072)
model.to(device)
rembg_session = rembg.new_session()
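# Gradio UI: make/model inputs and preprocessing controls on the left,
# obj/glb model viewers on the right. Note that the background-removal and
# foreground-ratio controls are displayed but not currently passed to run_example.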
with gr.Blocks() as demo:
gr.Markdown(HEADER)
with gr.Row(variant="panel"):
with gr.Column():
with gr.Row():
make_input = gr.Textbox(label="Motorcycle Make", placeholder="Enter motorcycle make")
model_input = gr.Textbox(label="Motorcycle Model", placeholder="Enter motorcycle model")
processed_image = gr.Image(label="Processed Image", interactive=False)
with gr.Row():
with gr.Group():
do_remove_background = gr.Checkbox(
label="Remove Background", value=True
)
foreground_ratio = gr.Slider(
label="Foreground Ratio",
minimum=0.5,
maximum=1.0,
value=0.85,
step=0.05,
)
with gr.Row():
submit = gr.Button("Generate", elem_id="generate", variant="primary")
with gr.Column():
with gr.Tab("obj"):
output_model = gr.Model3D(
label="Output Model",
interactive=False,
)
with gr.Tab("glb"):
output_model2 = gr.Model3D(
label="Output Model",
interactive=False,
)
submit.click(fn=run_example, inputs=[make_input, model_input], outputs=[processed_image, output_model, output_model2])
demo.queue(max_size=10)
demo.launch()