# Text-to-image via the Hugging Face Inference API (fal-ai provider)
from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="fal-ai",
    api_key="",  # supply your Hugging Face access token here
)

# output is a PIL.Image object
image = client.text_to_image(
    "Astronaut riding a horse",
    model="multimodalart/isometric-skeumorphic-3d-bnb",
)

# Image-to-video via the Lightricks/ltx-video-distilled Space using gradio_client
from gradio_client import Client, handle_file

client = Client("Lightricks/ltx-video-distilled")
result = client.predict(
    prompt="The creature from the image starts to move",
    negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
    input_image_filepath=handle_file('https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png'),
    input_video_filepath="Hello!!",  # placeholder string from the auto-generated API snippet
    height_ui=512,
    width_ui=704,
    mode="image-to-video",
    duration_ui=2,
    ui_frames_to_use=9,
    seed_ui=42,
    randomize_seed=True,
    ui_guidance_scale=1,
    improve_texture_flag=True,
    api_name="/image_to_video"
)
print(result)
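
A minimal follow-up sketch, not part of the original snippet: the object returned by text_to_image is a PIL.Image, so it can be written to disk with its save method (the filename "astronaut.png" below is an arbitrary example).

# Save the generated image locally (assumed example filename)
image.save("astronaut.png")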