Update app.py
app.py CHANGED

@@ -1,13 +1,40 @@
 import gradio as gr
 from transformers import pipeline, AutoTokenizer
 import torch
-import spaces
+import spaces
+import datetime
+import json
+import os
+from datasets import Dataset
+from huggingface_hub import HfFolder
+
+# ✅ Authenticate Hugging Face account (save token only once)
+HfFolder.save_token("hf_your_token_here")  # ⚠️ Replace with your real HF token
 
 # Define dropdown options
 grade_options = ["1", "2", "3", "4", "5", "6"]
 topic_options = ["Addition", "Subtraction", "Counting", "Number Recognition", "Multiplication", "Division"]
 level_options = ["Beginner", "Intermediate", "Advanced"]
 
+# ✅ Save lesson to Hugging Face Datasets
+def save_to_hf_dataset(prompt, output, repo_id="Pisethan/khmer-lesson-history"):
+    timestamp = datetime.datetime.now().isoformat()
+    record = {
+        "timestamp": timestamp,
+        "prompt": prompt,
+        "lesson": output
+    }
+
+    # Save locally
+    os.makedirs("history", exist_ok=True)
+    local_path = f"history/lesson_{timestamp.replace(':', '-')}.json"
+    with open(local_path, "w", encoding="utf-8") as f:
+        json.dump(record, f, ensure_ascii=False, indent=2)
+
+    # Push to HF Dataset
+    dataset = Dataset.from_list([record])
+    dataset.push_to_hub(repo_id)
+
 @spaces.GPU
 def generate_lesson(grade, topic, level):
     device = 0 if torch.cuda.is_available() else -1
@@ -34,7 +61,6 @@ Grade: {grade}
 Topic: {topic}
 TaRL Level: {level}
 """
-
     output = pipe(
         prompt,
         max_new_tokens=300,
@@ -43,8 +69,10 @@ TaRL Level: {level}
         eos_token_id=tokenizer.eos_token_id
     )
 
-
+    # ✅ Save lesson
+    save_to_hf_dataset(prompt, output[0]["generated_text"])
 
+    return output[0]['generated_text']
 
 @spaces.GPU
 def generate_all_lessons():
@@ -84,9 +112,9 @@ TaRL Level: {level}
                     eos_token_id=tokenizer.eos_token_id
                 )
                 results += f"πΉ ααααΆαα {grade} | {topic} | {level}\n{output[0]['generated_text']}\n\n{'-'*50}\n\n"
+                save_to_hf_dataset(prompt, output[0]["generated_text"])  # ✅ Save each one
     return results
 
-
 # Gradio Interface
 with gr.Blocks() as demo:
     gr.Markdown("## π€ α’ααααααα½ααααααΎααααααααα·ααα·ααααΆ")
@@ -114,5 +142,5 @@ with gr.Blocks() as demo:
     gen_all_btn.click(fn=generate_all_lessons, outputs=output_box)
     clear_btn.click(fn=lambda: "", outputs=output_box)
 
-demo.queue()
+demo.queue()
 demo.launch()
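A note on the authentication line added at the top of the file: HfFolder.save_token("hf_your_token_here") stores whatever token is pasted there, and that token then lives in app.py, where anyone who can read the Space's files can see it. A common alternative is to keep the token as a Space secret and read it from the environment at startup. The sketch below assumes a secret named HF_TOKEN (that name is an assumption, not part of this commit) and uses huggingface_hub.login in place of HfFolder.save_token:

import os
from huggingface_hub import login

# Hypothetical alternative to the hard-coded HfFolder.save_token(...) call:
# read the token from a Space secret / environment variable instead.
hf_token = os.environ.get("HF_TOKEN")  # assumed secret name, not in the commit
if hf_token:
    login(token=hf_token)  # authenticates the later push_to_hub calls
else:
    print("HF_TOKEN is not set; pushing to the Hub will fail without credentials.")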
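On the new save_to_hf_dataset helper: Dataset.from_list([record]).push_to_hub(repo_id) uploads a one-row dataset, and push_to_hub replaces the contents of the target split rather than appending to it, so each call leaves only the most recent lesson in Pisethan/khmer-lesson-history. If the goal is a growing history, one option is to merge the new record with the existing rows before pushing. A minimal sketch, assuming the history sits in the default "train" split and that re-reading it on every save is acceptable (the function name is hypothetical, not part of the commit):

import datetime
from datasets import Dataset, load_dataset, concatenate_datasets

def save_to_hf_dataset_append(prompt, output, repo_id="Pisethan/khmer-lesson-history"):
    # Hypothetical append-style variant of save_to_hf_dataset: pull the existing
    # history, add the new record, and push the merged dataset back.
    record = {
        "timestamp": datetime.datetime.now().isoformat(),
        "prompt": prompt,
        "lesson": output,
    }
    new_row = Dataset.from_list([record])
    try:
        history = load_dataset(repo_id, split="train")  # existing rows, if any
        merged = concatenate_datasets([history, new_row])
    except Exception:
        merged = new_row  # first save, or the dataset repo is not readable yet
    merged.push_to_hub(repo_id)

This keeps the whole history in one split at the cost of re-downloading it on every save; for a long history, accumulating records locally and pushing periodically would scale better.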
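One more observation on generate_all_lessons: with 6 grades, 6 topics, and 3 levels it now calls save_to_hf_dataset for every combination, roughly 108 separate Hub uploads per button click, all inside a @spaces.GPU function. Collecting the records and pushing once at the end would reduce that to a single upload. A small sketch of such a helper (the function name and the records list are assumptions, not part of the commit):

from datasets import Dataset

def push_lesson_batch(records, repo_id="Pisethan/khmer-lesson-history"):
    # Hypothetical batching helper: push many lesson records in one call instead of
    # pushing a one-row dataset for every generated lesson.
    if records:
        Dataset.from_list(records).push_to_hub(repo_id)

Usage sketch inside generate_all_lessons: append {"timestamp": ..., "prompt": prompt, "lesson": output[0]["generated_text"]} to a records list where the loop currently calls save_to_hf_dataset, then call push_lesson_batch(records) once before return results. As with the append variant above, push_to_hub still replaces the split, so combine this with the merge step if the history should accumulate across clicks.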