import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the Inference API client for the target model
client = InferenceClient(
    model="davnas/Italian_Cousine_2.1",
    headers={"Content-Type": "application/json"}
)
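
# Hypothetical sanity check (not part of the app): confirm the endpoint responds
# before wiring up the UI. This assumes the hosted model is live on the
# Inference API; if the Space shows a runtime error, try this first.
#   print(client.text_generation("Ciao!", max_new_tokens=10))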

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build a plain-text prompt from the system message, history, and current turn
    prompt = ""

    # Add the system message if provided
    if system_message:
        prompt += f"{system_message}\n"

    # Add conversation history (Gradio's default tuple format: [user, assistant] pairs)
    for msg in history:
        if isinstance(msg, (list, tuple)) and len(msg) == 2:
            prompt += f"User: {msg[0]}\nAssistant: {msg[1]}\n"

    # Add the current message and leave the prompt open for the model's reply
    prompt += f"User: {message}\nAssistant:"

    # Generation parameters forwarded to the text-generation endpoint
    parameters = {
        "max_new_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "return_full_text": False
    }

    response = ""
    try:
        # Stream tokens and yield the accumulated text so the UI updates live
        for token in client.text_generation(
            prompt,
            stream=True,
            **parameters
        ):
            response += token
            yield response
    except Exception as e:
        yield f"Error: {str(e)}"

# Create the chat interface with tunable generation settings
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a helpful assistant knowledgeable about Italian cuisine.",
            label="System message"
        ),
        gr.Slider(
            minimum=1,
            maximum=2048,
            value=512,
            step=1,
            label="Max new tokens"
        ),
        gr.Slider(
            minimum=0.1,
            maximum=4.0,
            value=0.7,
            step=0.1,
            label="Temperature"
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)"
        ),
    ]
)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)