import time

from askui import VisionAgent

# Automate a Google image search, delegating each click to a
# different grounding model via the model_name parameter
with VisionAgent() as agent:
    # Open Google in the default web browser
    agent.tools.webbrowser.open_new("http://www.google.com")
    time.sleep(0.5)  # give the page a moment to load

    # Locate and click the search field using Qwen2-VL
    agent.click("search field in the center of the screen", model_name="Qwen/Qwen2-VL-7B-Instruct")
    agent.type("cats")
    agent.keyboard("enter")
    time.sleep(0.5)

    # Switch to the image results tab using PTA-1
    agent.click("text 'Images'", model_name="AskUI/PTA-1")
    time.sleep(0.5)

    # Click one of the results using OS-Atlas
    agent.click("second cat image", model_name="OS-Copilot/OS-Atlas-Base-7B")
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
from peft import LoraConfig, TaskType, get_peft_model
from datasets import load_dataset

# Load the pre-trained BERT model and its matching tokenizer
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Configure the LoRA parameters; task_type=SEQ_CLS keeps the new
# classification head trainable alongside the low-rank adapters
lora_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
)

# Apply LoRA to the model
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only a small fraction of weights will train

# Load the SST-2 dataset and tokenize it for classification
dataset = load_dataset("glue", "sst2")

def tokenize(batch):
    return tokenizer(batch["sentence"], truncation=True, padding="max_length", max_length=128)

train_dataset = dataset["train"].map(tokenize, batched=True)

# Set the training arguments
training_args = TrainingArguments(
    output_dir="./results",
    per_device_train_batch_size=16,
    num_train_epochs=3,
    logging_dir="./logs",
)

# Create a Trainer instance for fine-tuning
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
)

# Finally, fine-tune the model
trainer.train()
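Because LoRA leaves the base model frozen, only the small adapter needs to be persisted after training. Below is a minimal sketch of that workflow; the ./lora-sst2-adapter path is illustrative, not part of the original example.

from transformers import AutoModelForSequenceClassification
from peft import PeftModel

# Save only the LoRA adapter weights (a few MB), not the full BERT model;
# the path here is an illustrative assumption
model.save_pretrained("./lora-sst2-adapter")

# Later: rebuild the frozen base model and attach the trained adapter
base_model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model = PeftModel.from_pretrained(base_model, "./lora-sst2-adapter")
model.eval()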