Update app.py
app.py CHANGED
@@ -1,76 +1,25 @@
-
-from flask import Flask, request, jsonify
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-import gradio as gr
+from flask import Flask, request, jsonify, render_template
+from transformers import pipeline
 
 app = Flask(__name__)
 
-# Load the model
-model = None
-tokenizer = None
+# Load the text-generation model
+generator = pipeline("text-generation", model="gpt2")  # you can change the model name to suit your needs
 
-
-def load_model():
-    global model, tokenizer
-    print("Loading the model...")
-    tokenizer = AutoTokenizer.from_pretrained("amd/AMD-OLMo-1B")
-    model = AutoModelForCausalLM.from_pretrained(
-        "amd/AMD-OLMo-1B",
-        torch_dtype=torch.float16,
-        device_map="auto"
-    )
-    print("Model loaded successfully!")
+@app.route("/")
+def index():
+    return render_template("index.html")
 
-
-
-def generate_response(prompt):
-    try:
-        if model is None:
-            load_model()
-
-        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-        with torch.no_grad():
-            outputs = model.generate(
-                **inputs,
-                max_length=200,
-                num_return_sequences=1,
-                temperature=0.7,
-                top_p=0.9,
-                repetition_penalty=1.2,
-                pad_token_id=tokenizer.eos_token_id
-            )
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        return response.replace(prompt, "").strip()
-    except Exception as e:
-        print(f"Error while generating the response: {str(e)}")
-        return "Sorry, an error occurred while processing your message."
+@app.route("/generate-response", methods=["POST"])
+def generate_response():
+    data = request.get_json()
+    user_input = data.get("text", "")
 
-
-
-
+    # Generate a response from the model
+    result = generator(user_input, max_length=100, num_return_sequences=1)
+    response = result[0]["generated_text"]
 
-
-def chat():
-    try:
-        data = request.json
-        if not data:
-            return jsonify({"response": "No data was received"}), 400
-
-        user_message = data.get('message', '')
-        if not user_message:
-            return jsonify({"response": "The message is empty"}), 400
-
-        print(f"Received message: {user_message}")
-        response = generate_response(user_message)
-        print(f"Response: {response}")
-
-        return jsonify({"response": response})
-
-    except Exception as e:
-        print(f"Error while processing the message: {str(e)}")
-        return jsonify({"response": "Sorry, an error occurred while processing your message"}), 500
+    return jsonify({"response": response})
 
 if __name__ == "__main__":
-
-    app.run()
+    app.run(host="0.0.0.0", port=7860)
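For reference, a minimal sketch of how a client could exercise the new endpoint once this version of app.py is running. The route, port, and JSON keys come from the diff above; the use of the requests library and the sample prompt are illustrative assumptions, not part of the commit:

import requests

# Assumes the updated app is running locally on the port set in app.run().
resp = requests.post(
    "http://localhost:7860/generate-response",
    json={"text": "Once upon a time"},  # hypothetical prompt; the endpoint reads the "text" key
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])  # the endpoint replies with {"response": <generated text>}

Since gpt2 is a small English-only checkpoint, the model argument of pipeline() is the single place to swap in a different Hugging Face text-generation model, as the inline comment in the diff suggests.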