Create app.py
app.py CHANGED
@@ -1,64 +1,46 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, pipeline
+
+# Model ID
+model_id = "large-traversaal/Alif-1.0-8B-Instruct"
+
+# 4-bit quantization configuration
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.float16,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4"
+)
+
+# Load tokenizer and model in 4-bit
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    quantization_config=quantization_config,
+    device_map="auto"
+)
+
+# Create text generation pipeline
+chatbot = pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")
+
+# Function to generate responses
+def chat(message):
+    response = chatbot(message, max_new_tokens=100, do_sample=True, temperature=0.3)
+    return response[0]["generated_text"]
+
+# Gradio UI
+with gr.Blocks() as demo:
+    gr.Markdown("# 🤖 Alif Chatbot - Urdu Language AI Model")
+    with gr.Row():
+        user_input = gr.Textbox(label="User Input", placeholder="اپنا سوال یہاں لکھیں...")
+    with gr.Row():
+        submit_btn = gr.Button("Send")
+    with gr.Row():
+        bot_response = gr.Textbox(label="AI Response")
+
+    submit_btn.click(fn=chat, inputs=user_input, outputs=bot_response)
+
+# Launch the app
 if __name__ == "__main__":
     demo.launch()
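
One caveat with the new chat() function: it passes the raw user string straight into the text-generation pipeline, so the instruction-tuned model never sees the prompt format it was fine-tuned on, and generated_text echoes the prompt back along with the reply. A minimal sketch of an alternative, assuming the model's tokenizer ships a chat template (worth verifying for this checkpoint); chat_with_template is an illustrative name, not part of the commit:

# Sketch: route the message through the tokenizer's chat template
# (assumes large-traversaal/Alif-1.0-8B-Instruct defines one).
def chat_with_template(message):
    messages = [{"role": "user", "content": message}]
    # Render the conversation in the model's expected prompt format.
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    outputs = chatbot(
        prompt,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.3,
        return_full_text=False,  # return only the newly generated reply
    )
    return outputs[0]["generated_text"]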
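This commit only adds app.py. For the imports to resolve when the Space builds, a requirements.txt along these lines would likely also be needed (an assumption, not part of this commit). Note that bitsandbytes 4-bit loading requires a CUDA GPU, and the 8B weights still occupy roughly 4-5 GB in NF4 versus ~16 GB in float16:

# requirements.txt (assumed, not in this commit)
gradio
torch
transformers
accelerate     # required for device_map="auto"
bitsandbytes   # required for 4-bit BitsAndBytesConfig loading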