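# Demo script for a Hugging Face Space: fine-tunes Mistral-7B-v0.1 on a Turkish
# OSCAR subset with LoRA adapters, then launches a placeholder Gradio chat UI.
# The @spaces.GPU decorators assume a ZeroGPU Space environment.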
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling, TrainingArguments, Trainer
from peft import LoraConfig, get_peft_model
from datasets import load_dataset
import gradio as gr
import time
import spaces

# === 1️⃣ LOAD MODEL AND TOKENIZER ===
MODEL_NAME = "mistralai/Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Mistral's tokenizer ships without a pad token; reuse EOS so padded batches work.
tokenizer.pad_token = tokenizer.eos_token

torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch_dtype, device_map="auto")
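# device_map="auto" lets accelerate place layers across the available devices,
# and float16 roughly halves memory use when a GPU is present.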

# === 2️⃣ LoRA SETTINGS ===
lora_config = LoraConfig(
    r=8,               # rank of the low-rank update matrices
    lora_alpha=32,     # scaling factor applied to the adapter output (alpha / r)
    lora_dropout=0.1,  # dropout on the adapter path during training
    bias="none",
    target_modules=["q_proj", "v_proj"],  # adapt the attention query/value projections
)
# The base model was already placed by device_map="auto"; moving a dispatched
# model with .to() is unnecessary and can raise an error under accelerate.
model = get_peft_model(model, lora_config)
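# Sanity check (PEFT utility): reports trainable vs. total parameters, which
# should be well under 1% trainable with r=8 on q_proj/v_proj.
model.print_trainable_parameters()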

# === 3️⃣ DATASET ===
# Note: loading and tokenizing the dataset is CPU-bound, so no GPU is requested here.
def load_and_prepare_dataset():
    dataset = load_dataset("oscar", "unshuffled_deduplicated_tr", trust_remote_code=True)
    subset = dataset["train"].shuffle(seed=42).select(range(10000))
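    # A 10k-example subset keeps this demo's training run short; the full
    # deduplicated Turkish OSCAR split is far larger.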
    
    def tokenize_function(examples):
        return tokenizer(examples["text"], truncation=True, max_length=512)
    
    # Drop the raw columns so only token fields reach the data collator.
    tokenized_datasets = subset.map(tokenize_function, batched=True, remove_columns=subset.column_names)
    return tokenized_datasets

tokenized_dataset = load_and_prepare_dataset()

# === 4️⃣ TRAINING SETTINGS ===
batch_size = 1
gradient_accumulation_steps = 16
num_epochs = 1
# max_steps counts optimizer updates, so divide by the effective batch size
# (per-device batch * accumulation steps), not by batch_size alone.
max_steps = (len(tokenized_dataset) // (batch_size * gradient_accumulation_steps)) * num_epochs

training_args = TrainingArguments(
    output_dir="./mistral_lora",
    per_device_train_batch_size=batch_size,
    gradient_accumulation_steps=gradient_accumulation_steps,
    learning_rate=5e-4,
    num_train_epochs=num_epochs,
    max_steps=max_steps,  # when both are set, max_steps takes precedence over epochs
    save_steps=500,
    save_total_limit=2,
    logging_dir="./logs",
    logging_steps=10,
    optim="adamw_torch",
    fp16=torch.cuda.is_available(),  
)
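# A comparatively high learning rate (5e-4) is common when only the small LoRA
# adapters are trained; the frozen base weights are never updated.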

# Without a collator the tokenized batches carry no labels and the Trainer
# cannot compute a loss; for causal LM the collator copies input_ids to labels.
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator,
)

@spaces.GPU
def train_model():
    trainer.train()
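    # Optionally persist just the adapter weights once training finishes;
    # PeftModel.save_pretrained writes only the small LoRA files.
    model.save_pretrained(training_args.output_dir)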

# === 5️⃣ CHAT INTERFACE ===
@spaces.GPU
def slow_echo(message, history):
    # `model` is always truthy here; a real check would track training completion.
    if model is None:
        yield "The model has not been trained yet. Please start training."
        return
    response = f"You typed: {message}"
    # Stream the placeholder echo back character by character.
    for i in range(len(response)):
        time.sleep(0.05)
        yield response[: i + 1]

demo = gr.ChatInterface(
    slow_echo,
    type="text",
    flagging_mode="manual",
    flagging_options=["Like", "Spam", "Inappropriate", "Other"],
    save_history=True,
)

if __name__ == "__main__":
    train_model()
    demo.launch(share=True)