File size: 1,352 Bytes
dffd394
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Initialize your REAL AI model (with Hugging Face API token)
def get_client():
    """Create a Hugging Face InferenceClient for text generation.

    The API token is read from the ``HF_TOKEN`` environment variable
    rather than being hardcoded in source: the previous inline token was
    a committed credential (a security leak) and should be revoked.

    Returns:
        InferenceClient authenticated with ``HF_TOKEN`` if set, otherwise
        an unauthenticated client (rate-limited by the Hub).
    """
    api_token = os.environ.get("HF_TOKEN")
    return InferenceClient(token=api_token)

# Hub repo id of the hosted model used for every generation request.
model_name = "HuggingFaceH4/zephyr-7b-beta"

# Function to generate AI-driven response
def generate_response(prompt, max_length, temperature, repetition_penalty):
    """Send the prompt to the hosted model and return its generated text.

    Args:
        prompt: User prompt forwarded verbatim to the inference endpoint.
        max_length: Maximum number of new tokens to generate.
        temperature: Sampling temperature controlling output randomness.
        repetition_penalty: Factor (>1.0) discouraging repeated tokens.

    Returns:
        The text produced by the remote model.
    """
    generation_options = {
        "model": model_name,
        "max_new_tokens": max_length,
        "temperature": temperature,
        "repetition_penalty": repetition_penalty,
        "do_sample": True,
    }
    return get_client().text_generation(prompt, **generation_options)

# Enhanced UI with Gradio
# Gradio UI: a prompt box plus three sliders controlling generation.
prompt_box = gr.Textbox(
    lines=4,
    label="Enter your prompt",
    placeholder="Provide clear instructions or questions to avoid repetitive outputs.",
)
length_slider = gr.Slider(50, 500, value=200, step=50, label="Response Length")
temperature_slider = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Creativity (temperature)")
penalty_slider = gr.Slider(1.0, 2.0, value=1.1, step=0.1, label="Repetition Penalty")

demo = gr.Interface(
    fn=generate_response,
    inputs=[prompt_box, length_slider, temperature_slider, penalty_slider],
    outputs=gr.Textbox(label="AI Response"),
    title="AI Assistant",
    description="Provide your prompt below and get a dynamic, well-structured AI-generated response.",
)

# Start the Gradio server (blocks until interrupted).
demo.launch()