# import gradio as gr
# gr.load("models/Qwen/Qwen2.5-Coder-32B-Instruct").launch()

from huggingface_hub import InferenceClient

# Authenticate with a Hugging Face access token (replace the placeholder with your own).
client = InferenceClient(api_key="hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")

# Single-turn conversation: one user message.
messages = [
    {
        "role": "user",
        "content": "What is the capital of France?"
    }
]

# Request a chat completion from the hosted Qwen2.5-Coder-32B-Instruct model.
completion = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=messages,
    max_tokens=2048
)

# Print the assistant's reply message (role and content).
print(completion.choices[0].message)