Shankarm08 committed on
Commit
7728085
·
verified ·
1 Parent(s): 143adc4

Create app.py

Files changed (1)
  1. app.py +24 -0
app.py ADDED
@@ -0,0 +1,24 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ # Load model and tokenizer
+ model_id = "Qwen/Qwen3-0.6B"
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
+
+ # Create pipeline
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+ # Chat function
+ def chat(prompt):
+     output = generator(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
+     return output[0]["generated_text"]
+
+ # Gradio UI
+ gr.Interface(
+     fn=chat,
+     inputs=gr.Textbox(lines=3, placeholder="Enter your prompt here..."),
+     outputs="text",
+     title="Qwen3-0.6B Chatbot",
+     description="A simple demo using Qwen3-0.6B from Hugging Face"
+ ).launch()
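
Note: as committed, the text-generation pipeline returns the prompt echoed back followed by the completion in generated_text. A minimal sketch of a chat function that returns only the newly generated text, using the pipeline's return_full_text flag; this variant is not part of the commit above:

# Sketch (not in the commit): same pipeline call, but drop the echoed prompt.
def chat(prompt):
    output = generator(
        prompt,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # return only the continuation, not prompt + continuation
    )
    return output[0]["generated_text"]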