Upload 14 files
- Dockerfile +5 -4
- backend/__init__.py +0 -0
- backend/__pycache__/__init__.cpython-313.pyc +0 -0
- backend/__pycache__/main.cpython-313.pyc +0 -0
- backend/__pycache__/models.cpython-313.pyc +0 -0
- backend/__pycache__/utils.cpython-313.pyc +0 -0
- backend/main.py +56 -18
- backend/models.py +46 -0
- backend/utils.py +15 -0
- frontend/index.html +25 -1
- frontend/script.js +64 -5
- frontend/style.css +26 -0
Dockerfile
CHANGED
@@ -1,16 +1,17 @@
-# Use a base image
+# Use a Python base image
 FROM python:3.8

 # Set the working directory
 WORKDIR /app

-# Copy
+# Copy the project files into the container
 COPY backend/ backend/
 COPY frontend/ frontend/
 COPY requirements.txt requirements.txt
+COPY .env .env

-# Install the libraries
+# Install the required libraries
 RUN pip install --no-cache-dir -r requirements.txt

-# Run the application
+# Run the application when the container starts
 CMD ["uvicorn", "backend.main:app", "--host", "0.0.0.0", "--port", "7860"]
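
As a quick sanity check of the container, the home route can be hit with a small client script. This is only a sketch: it assumes the image has been built and started locally with the port published (for example `docker run -p 7860:7860 <image>`, where `<image>` is whatever tag was used at build time).

# Minimal sketch: verify the containerized app responds on the published port.
# Assumes the container is reachable at localhost:7860.
import requests

resp = requests.get("http://localhost:7860/")
print(resp.status_code, resp.json())  # expected: 200 {"message": "AI Web App is Running!"}
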
backend/__init__.py
ADDED
File without changes

backend/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (139 Bytes)

backend/__pycache__/main.cpython-313.pyc
CHANGED
Binary files a/backend/__pycache__/main.cpython-313.pyc and b/backend/__pycache__/main.cpython-313.pyc differ

backend/__pycache__/models.cpython-313.pyc
ADDED
Binary file (2.18 kB)

backend/__pycache__/utils.cpython-313.pyc
ADDED
Binary file (778 Bytes)

backend/main.py
CHANGED
@@ -1,31 +1,69 @@
-from fastapi import FastAPI, UploadFile, File
+from fastapi import FastAPI, UploadFile, File, Form
 from pydantic import BaseModel
-import
-import tika
-from tika import parser
+import shutil
 import os

-
-

-# Create the FastAPI application
 app = FastAPI()

-
-
-
+from backend.models import summarizer, image_captioning, qa_pipeline, translator, code_generator
+from backend.utils import extract_text_from_pdf, extract_text_from_document
+
+UPLOAD_DIR = "uploads"
+os.makedirs(UPLOAD_DIR, exist_ok=True)
+
+class QARequest(BaseModel):
+    question: str
+    context: str

 @app.get("/")
 async def home():
     return {"message": "AI Web App is Running!"}

-#
+# 1️⃣ Extract text from documents
 @app.post("/extract_text/")
 async def extract_text(file: UploadFile = File(...)):
-
-    with open(
-
-
-
-    os.remove(
-    return {"text":
+    file_path = f"{UPLOAD_DIR}/{file.filename}"
+    with open(file_path, "wb") as f:
+        shutil.copyfileobj(file.file, f)
+
+    text = extract_text_from_document(file_path)
+    os.remove(file_path)
+    return {"text": text}
+
+# 2️⃣ Summarize documents
+@app.post("/summarize/")
+async def summarize_text(text: str = Form(...)):
+    summary = summarizer(text)
+    return {"summary": summary[0]['summary_text']}
+
+# 3️⃣ Analyze images and generate captions
+@app.post("/image_caption/")
+async def caption_image(file: UploadFile = File(...)):
+    file_path = f"{UPLOAD_DIR}/{file.filename}"
+    with open(file_path, "wb") as f:
+        shutil.copyfileobj(file.file, f)
+
+    with open(file_path, "rb") as img:
+        image_bytes = img.read()
+
+    caption = image_captioning(image_bytes)
+    os.remove(file_path)
+    return {"caption": caption[0]['generated_text']}
+
+# 4️⃣ Answer questions from text
+@app.post("/qa/")
+async def answer_question(request: QARequest):
+    answer = qa_pipeline(request.question, request.context)
+    return {"answer": answer["answer"]}
+
+# 5️⃣ Translate documents
+@app.post("/translate/")
+async def translate_text(text: str = Form(...)):
+    translation = translator(text)
+    return {"translation": translation[0]['translation_text']}
+
+# 6️⃣ Generate visualization code from text prompts
+@app.post("/generate_code/")
+async def generate_code(prompt: str = Form(...)):
+    code = code_generator(prompt)
+    return {"code": code}
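
A rough way to exercise the new routes without starting uvicorn is FastAPI's TestClient. The sketch below is not part of the commit; it assumes it is run from the repository root so `backend` is importable, and that HUGGING_FACE_API_KEY is set so the calls in backend/models.py can succeed.

# Sketch only: drives two of the new endpoints in-process via the test client.
from fastapi.testclient import TestClient
from backend.main import app

client = TestClient(app)

# /summarize/ takes form data because the route parameter uses Form(...)
print(client.post("/summarize/", data={"text": "FastAPI is a web framework for building APIs with Python."}).json())

# /qa/ takes a JSON body matching the QARequest model
print(client.post("/qa/", json={"question": "What is FastAPI?", "context": "FastAPI is a web framework for building APIs."}).json())
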
backend/models.py
CHANGED
@@ -0,0 +1,46 @@
+import os
+import requests
+from dotenv import load_dotenv
+
+# Load environment variables
+HUGGING_FACE_API_KEY = os.environ.get("HUGGING_FACE_API_KEY")
+
+
+# Text summarization function
+def summarizer(text):
+    url = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
+    headers = {"Authorization": f"Bearer {HUGGING_FACE_API_KEY}"}
+    payload = {"inputs": text}
+
+    response = requests.post(url, headers=headers, json=payload)
+    return response.json()
+
+# Image caption generation function
+def image_captioning(image_bytes):
+    url = "https://api-inference.huggingface.co/models/nlpconnect/vit-gpt2-image-captioning"
+    headers = {"Authorization": f"Bearer {HUGGING_FACE_API_KEY}"}
+
+    response = requests.post(url, headers=headers, files={"file": ("image.jpg", image_bytes, "image/jpeg")})
+    return response.json()
+
+# Question answering function
+def qa_pipeline(question, context):
+    url = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"
+    headers = {"Authorization": f"Bearer {HUGGING_FACE_API_KEY}"}
+    payload = {"inputs": {"question": question, "context": context}}
+
+    response = requests.post(url, headers=headers, json=payload)
+    return response.json()
+
+# Translation function
+def translator(text):
+    url = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-fr"
+    headers = {"Authorization": f"Bearer {HUGGING_FACE_API_KEY}"}
+    payload = {"inputs": text}
+
+    response = requests.post(url, headers=headers, json=payload)
+    return response.json()
+
+# Chart code generation function
+def code_generator(prompt):
+    return f"import matplotlib.pyplot as plt\n# {prompt}"
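
The route handlers in backend/main.py index directly into the Inference API response (e.g. summary[0]['summary_text']), which assumes a successful call; while a hosted model is still loading, the API typically returns a dict with an "error" key instead. A small sketch of calling summarizer() directly with that case guarded, not part of the commit:

# Sketch: call summarizer() and guard against a non-list response
# (e.g. {"error": ..., "estimated_time": ...} while the model warms up).
from backend.models import summarizer

result = summarizer("The Hugging Face Inference API exposes hosted models over HTTP.")
if isinstance(result, list) and result and "summary_text" in result[0]:
    print(result[0]["summary_text"])
else:
    print("Unexpected response from the Inference API:", result)
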
backend/utils.py
CHANGED
@@ -0,0 +1,15 @@
+import fitz  # PyMuPDF, used to extract text from PDFs
+from tika import parser
+
+# Function to extract text from a PDF file
+def extract_text_from_pdf(file_path):
+    doc = fitz.open(file_path)
+    text = ""
+    for page in doc:
+        text += page.get_text()
+    return text.strip()
+
+# Function to extract text from any document (PDF, DOCX, PPTX)
+def extract_text_from_document(file_path):
+    parsed = parser.from_file(file_path)
+    return parsed["content"]
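
A usage sketch for the two extractors follows; the file path is a placeholder. Note that tika's parser.from_file() needs a Java runtime available, since the library launches a local Tika server on first use, and parsed["content"] can be None for empty documents, which the sketch guards against.

# Sketch with a hypothetical input file; point sample_path at a real document.
from backend.utils import extract_text_from_pdf, extract_text_from_document

sample_path = "uploads/example.pdf"  # placeholder path
print(extract_text_from_pdf(sample_path)[:200])
print((extract_text_from_document(sample_path) or "")[:200])
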
frontend/index.html
CHANGED
@@ -8,8 +8,32 @@
 </head>
 <body>
     <h1>AI Web App</h1>
+
+    <!-- ✅ Upload documents -->
+    <h3>🔹 Upload a document</h3>
     <input type="file" id="fileInput">
-    <button onclick="uploadFile()"
+    <button onclick="uploadFile('extract_text')">📄 Extract text</button>
+    <button onclick="uploadFile('image_caption')">🖼️ Analyze image</button>
+
+    <!-- ✅ Enter text -->
+    <h3>🔹 Enter text</h3>
+    <textarea id="textInput" rows="5" cols="50"></textarea><br>
+    <button onclick="processText('summarize')">📝 Summarize</button>
+    <button onclick="processText('translate')">🌍 Translate</button>
+
+    <!-- ✅ Enter a question plus reference text -->
+    <h3>🔹 Question answering</h3>
+    <textarea id="contextInput" rows="3" cols="50" placeholder="Enter the reference text"></textarea><br>
+    <input type="text" id="questionInput" placeholder="Enter your question">
+    <button onclick="askQuestion()">❓ Answer a question</button>
+
+    <!-- ✅ Enter a request to generate chart code -->
+    <h3>🔹 Generate chart code</h3>
+    <input type="text" id="codeInput" placeholder="Enter a description of the chart">
+    <button onclick="generateCode()">📊 Generate code</button>
+
+    <!-- ✅ Results area -->
+    <h3>🔹 Result</h3>
     <pre id="output"></pre>

     <script src="script.js"></script>
frontend/script.js
CHANGED
@@ -1,20 +1,79 @@
-async function uploadFile() {
+async function uploadFile(endpoint) {
     const fileInput = document.getElementById("fileInput");
     const file = fileInput.files[0];

     if (!file) {
-        alert("
+        alert("Please choose a file first.");
         return;
     }

     const formData = new FormData();
     formData.append("file", file);

-    const response = await fetch(
+    const response = await fetch(`http://127.0.0.1:8000/${endpoint}/`, {
         method: "POST",
         body: formData
     });

     const data = await response.json();
-    document.getElementById("output").innerText = data
+    document.getElementById("output").innerText = JSON.stringify(data, null, 2);
 }
+
+async function processText(endpoint) {
+    const text = document.getElementById("textInput").value;
+
+    if (!text) {
+        alert("Please enter some text first.");
+        return;
+    }
+
+    const formData = new FormData();
+    formData.append("text", text);
+
+    const response = await fetch(`http://127.0.0.1:8000/${endpoint}/`, {
+        method: "POST",
+        body: formData
+    });
+
+    const data = await response.json();
+    document.getElementById("output").innerText = JSON.stringify(data, null, 2);
+}
+
+async function askQuestion() {
+    const context = document.getElementById("contextInput").value;
+    const question = document.getElementById("questionInput").value;
+
+    if (!context || !question) {
+        alert("Please enter both the reference text and the question.");
+        return;
+    }
+
+    const response = await fetch("http://127.0.0.1:8000/qa/", {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({ context: context, question: question })
+    });
+
+    const data = await response.json();
+    document.getElementById("output").innerText = JSON.stringify(data, null, 2);
+}
+
+async function generateCode() {
+    const prompt = document.getElementById("codeInput").value;
+
+    if (!prompt) {
+        alert("Please enter a description of the chart.");
+        return;
+    }
+
+    const formData = new FormData();
+    formData.append("prompt", prompt);
+
+    const response = await fetch("http://127.0.0.1:8000/generate_code/", {
+        method: "POST",
+        body: formData
+    });
+
+    const data = await response.json();
+    document.getElementById("output").innerText = JSON.stringify(data, null, 2);
+}
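
The frontend posts to http://127.0.0.1:8000, which matches a local `uvicorn backend.main:app --port 8000` run rather than the container's port 7860. The same requests can be reproduced in Python for quick testing; the sketch below assumes the API is running locally on port 8000.

# Sketch mirroring the browser calls made by script.js.
import requests

BASE = "http://127.0.0.1:8000"

# processText('summarize') sends form data
print(requests.post(f"{BASE}/summarize/", data={"text": "Some text to summarize."}).json())

# askQuestion() sends a JSON body
print(requests.post(f"{BASE}/qa/", json={"context": "FastAPI is a web framework.", "question": "What is FastAPI?"}).json())
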
frontend/style.css
CHANGED
@@ -0,0 +1,26 @@
+body {
+    font-family: Arial, sans-serif;
+    text-align: center;
+    background-color: #f4f4f4;
+    margin: 20px;
+}
+
+h1 {
+    color: #333;
+}
+
+input, button {
+    padding: 10px;
+    margin: 10px;
+    font-size: 16px;
+}
+
+pre {
+    background: white;
+    padding: 10px;
+    border-radius: 5px;
+    text-align: left;
+    max-width: 600px;
+    margin: 20px auto;
+    box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.1);
+}