Update pages/19_ResNet.py
pages/19_ResNet.py  CHANGED  (+19 -50)
@@ -27,7 +27,6 @@ In this exercise, we will fine-tune a pre-trained ResNet model on a custom image

 # User Inputs
 st.sidebar.header("Model Parameters")
-data_dir = st.sidebar.text_input("Path to Dataset Directory", 'path_to_caltech101_dataset')
 input_size = st.sidebar.number_input("Input Size", value=224)
 batch_size = st.sidebar.number_input("Batch Size", value=32)
 num_epochs = st.sidebar.number_input("Number of Epochs", value=25)
@@ -37,30 +36,24 @@ momentum = st.sidebar.number_input("Momentum", value=0.9)
 # Data Preparation Section
 st.markdown("""
 ### Data Preparation
-We will use the
+We will use the CIFAR-10 dataset, which contains 60,000 images from 10 classes. The dataset will be split into training and validation sets, and transformations will be applied to augment the data and normalize it.
 """)

-data_transforms = {
-    ...
-}
-
-image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
-                  for x in ['train', 'val']}
-dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4)
-               for x in ['train', 'val']}
-dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
-class_names = image_datasets['train'].classes
+transform = transforms.Compose([
+    transforms.Resize(input_size),
+    transforms.ToTensor(),
+    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+])
+
+train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
+val_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
+
+train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
+val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
+
+dataloaders = {'train': train_loader, 'val': val_loader}
+dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}
+class_names = train_dataset.classes

 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
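The replacement pipeline keeps the `dataloaders` / `dataset_sizes` / `class_names` interface the rest of the page builds on, and the `Normalize` constants are the ImageNet statistics the pretrained backbone expects. Below is a minimal sketch of how the pretrained head could be resized to the ten CIFAR-10 classes and fine-tuned against these loaders, reusing the names defined in this hunk; the `resnet18` variant, the learning rate, and the loop structure are assumptions, since the page's own model and training code are not shown here.

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models

# Assumption: a ResNet-18 backbone; the variant the page actually uses is not
# visible in this hunk. The pretrained head has 1000 ImageNet outputs, so it
# is swapped for a layer matching the 10 CIFAR-10 classes.
model_ft = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
model_ft.fc = nn.Linear(model_ft.fc.in_features, len(class_names))
model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()
# momentum and num_epochs come from the sidebar; the learning rate is a
# placeholder, since its sidebar input is not shown in this diff.
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=momentum)

for epoch in range(int(num_epochs)):
    for phase in ['train', 'val']:
        if phase == 'train':
            model_ft.train()
        else:
            model_ft.eval()
        running_loss, running_corrects = 0.0, 0
        for inputs, labels in dataloaders[phase]:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer_ft.zero_grad()
            with torch.set_grad_enabled(phase == 'train'):
                outputs = model_ft(inputs)
                loss = criterion(outputs, labels)
                if phase == 'train':
                    loss.backward()
                    optimizer_ft.step()
            running_loss += loss.item() * inputs.size(0)
            running_corrects += (outputs.argmax(dim=1) == labels).sum().item()
        st.write(f"{phase} epoch {epoch + 1}/{int(num_epochs)}: "
                 f"loss={running_loss / dataset_sizes[phase]:.4f}, "
                 f"acc={running_corrects / dataset_sizes[phase]:.4f}")
```

Freezing the backbone (setting `requires_grad = False` on every parameter except the new `fc` layer) is a common variant when only the classifier head needs to adapt.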
@@ -204,32 +197,8 @@ st.markdown("""
 We will use the Hugging Face library to load the dataset and prepare it for training. This integration will allow us to leverage the benefits of Hugging Face's powerful tools and APIs.
 """)

-
-
-def preprocess_function(examples):
-    return {'pixel_values': [data_transforms['train'](image) for image in examples['image']], 'labels': examples['label']}
-
-dataset = dataset.map(preprocess_function, batched=True)
-
-training_args = TrainingArguments(
-    output_dir='./results',
-    evaluation_strategy="epoch",
-    per_device_train_batch_size=8,
-    per_device_eval_batch_size=8,
-    num_train_epochs=3,
-    save_strategy="epoch",
-    logging_dir='./logs',
-)
-
-trainer = Trainer(
-    model=model_ft,
-    args=training_args,
-    train_dataset=dataset['train'],
-    eval_dataset=dataset['val'],
-    tokenizer=None,
-)
-
+# This part is just illustrative since Hugging Face's Trainer does not natively support ResNet.
+# However, you can still follow a similar approach for transformer models and NLP datasets.
 if st.button('Train with Hugging Face'):
-
-    st.write("Model trained using Hugging Face")
+    st.write("This section is illustrative and typically used for NLP tasks with Hugging Face transformers.")
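The removed block wired a torchvision ResNet (`model_ft`) directly into Hugging Face's `Trainer`, which is what the new comments call out as not really supported. If the Hugging Face route is wanted, a closer-to-working sketch would load an image-classification model from `transformers` and feed it tensors through a collator. Everything below, including the `microsoft/resnet-50` checkpoint, the `cifar10` Hub dataset with its `img`/`label` columns, and the argument values, is an assumption for illustration, not code from this page.

```python
import torch
from datasets import load_dataset
from transformers import AutoModelForImageClassification, Trainer, TrainingArguments

# CIFAR-10 from the Hugging Face Hub; its splits are "train" and "test".
ds = load_dataset("cifar10")

# A ResNet checkpoint published in transformers format, with a fresh 10-way head.
hf_model = AutoModelForImageClassification.from_pretrained(
    "microsoft/resnet-50", num_labels=10, ignore_mismatched_sizes=True
)

def collate_fn(batch):
    # Reuse the torchvision `transform` from the data-preparation section to
    # turn PIL images into normalized tensors.
    pixel_values = torch.stack([transform(example["img"]) for example in batch])
    labels = torch.tensor([example["label"] for example in batch])
    return {"pixel_values": pixel_values, "labels": labels}

training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",   # renamed to eval_strategy in newer releases
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    save_strategy="epoch",
    logging_dir="./logs",
    remove_unused_columns=False,   # keep the raw "img" column for the collator
)

trainer = Trainer(
    model=hf_model,
    args=training_args,
    train_dataset=ds["train"],
    eval_dataset=ds["test"],
    data_collator=collate_fn,
)
```

Calling `trainer.train()` inside the `if st.button('Train with Hugging Face'):` block would then replace the placeholder `st.write` message.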
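On the page's main PyTorch path, the fine-tuning loop sketched after the data-preparation hunk would normally sit behind a button as well, so training only runs on demand. The button label, the `st.spinner` wrapper, and the `train_model` helper below are assumptions about how the page is organized, not code shown in this diff.

```python
# Hypothetical wiring: run the PyTorch fine-tuning loop only when requested.
if st.button("Train Model"):
    with st.spinner("Fine-tuning ResNet on CIFAR-10..."):
        # the training loop from the sketch above, factored into a helper
        train_model(model_ft, dataloaders, dataset_sizes, num_epochs)
    st.success("Training complete")
```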