MOHAMMED AMEEN committed
Commit · 185ae59
Parent(s): 340a292
🚀 Add Gemma model with Claude Reasoning and a Gradio interface
- app.py +19 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,19 @@
+import os
+from huggingface_hub import login
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+import gradio as gr
+
+# Log in to the Hugging Face account using the token stored in a Secret
+login(token=os.environ["HF_TOKEN"])
+
+model_id = "reedmayhew/claude-3.7-sonnet-reasoning-gemma3-12B"
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=True)
+model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto", use_auth_token=True)
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+def chat(prompt):
+    output = pipe(prompt, max_new_tokens=200)[0]["generated_text"]
+    return output
+
+demo = gr.Interface(fn=chat, inputs="text", outputs="text", title="Gemma + Claude Reasoning Bot")
+demo.launch()
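The chat function in app.py feeds the raw prompt straight into the text-generation pipeline. A minimal alternative sketch, reusing the tokenizer and pipe objects defined above and assuming the checkpoint ships a Gemma-style chat template (not verified for this model), would wrap the prompt in that template before generating:

# Hedged sketch, not part of the commit: apply the model's chat template
# (if one is defined) so the instruction-tuned weights see the expected format.
def chat_with_template(prompt):
    if tokenizer.chat_template:
        messages = [{"role": "user", "content": prompt}]
        prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
    # return_full_text=False drops the echoed prompt from the pipeline output
    return pipe(prompt, max_new_tokens=200, return_full_text=False)[0]["generated_text"]

Swapping this in for chat would only require changing fn=chat to fn=chat_with_template in the gr.Interface call.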
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+transformers
+huggingface_hub
+torch
+accelerate
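Once the Space is running, the same Gradio endpoint can also be queried programmatically with the gradio_client package. A short usage sketch; the Space id below is a placeholder, not something taken from this commit:

# Hypothetical client-side call; replace the Space id with the real one.
from gradio_client import Client

client = Client("username/gemma-claude-reasoning")  # placeholder Space id
answer = client.predict(
    "Summarize the transformer architecture in two sentences.",
    api_name="/predict",  # default endpoint name for a single gr.Interface
)
print(answer)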