import gradio as gr

# Quickstart: load a hosted Hub model as a ready-made demo and serve it
# (launch() does not return a callable model, so there is nothing to assign)
gr.load("models/Qwen/Qwen2-7B").launch()
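# Note: with a models/ path, gr.load appears to proxy requests through Hugging
# Face's hosted inference API, so the 7B weights never have to fit on the
# Space's own hardware. The rest of this script instead loads the model
# locally for full control over generation.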
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

MODEL_NAME = "Qwen/Qwen2-7B"

# Load the model and tokenizer only once at startup, not per request
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Inference pipeline: GPU (device 0) if available, otherwise CPU
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)

# Chat function: max_new_tokens counts only generated tokens, matching the slider below
def chat_with_model(prompt, max_tokens=100):
    responses = generator(prompt, max_new_tokens=max_tokens, do_sample=True, temperature=0.7, top_k=50)
    return responses[0]["generated_text"]
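# Optional smoke test (an addition, not in the original): generates once at
# startup, which is slow for a 7B model, so feel free to drop it.
print(chat_with_model("Tell me a fun fact about space.", max_tokens=80))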
# Gradio interface
with gr.Blocks() as chat_interface:
    gr.Markdown("# 🚀 Super Fast ChatGPT")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="Enter your message", placeholder="Type something...")
            max_tokens = gr.Slider(50, 300, value=100, step=10, label="Max Tokens")
            send_button = gr.Button("Send")
        with gr.Column():
            chat_output = gr.Textbox(label="ChatGPT's Response", placeholder="Response will appear here...", interactive=False)

    send_button.click(fn=chat_with_model, inputs=[user_input, max_tokens], outputs=chat_output)
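    # Optional addition (not in the original snippet): Textbox also fires a
    # submit event, so pressing Enter reuses the same handler as the button.
    user_input.submit(fn=chat_with_model, inputs=[user_input, max_tokens], outputs=chat_output)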
# Optional: reload the model in 4-bit to cut memory use (requires the
# bitsandbytes package), then rebuild the pipeline above so it picks up
# the quantized weights.
from transformers import BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_4bit=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, quantization_config=quant_config)
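# To check the savings (an added sketch, not original code):
# get_memory_footprint() reports the model's weight memory in bytes.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")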
# Launch the app
chat_interface.launch(share=False, server_name="0.0.0.0", server_port=7860)
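# If the Space gets concurrent visitors, Gradio's built-in queue serializes
# generation requests; a sketch of an alternative launch (would replace the
# call above, since launch() blocks):
# chat_interface.queue().launch(share=False, server_name="0.0.0.0", server_port=7860)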