Abdulkader committed on
Commit 3899a9a · 1 Parent(s): 5ed23cb

Rename HumanMotionsClassificationZiad.py to Train.py

HumanMotionsClassificationZiad.py → Train.py RENAMED
@@ -364,92 +364,6 @@ torch.save(model.state_dict(), 'model')
 print('TRAINING COMPLETE')
 
 
-# In[ ]:
-
-
-#Testing.....................................
-
-
-# In[43]:
-
-
-import torch
-import numpy as np
-import argparse
-import joblib
-import cv2
-import torch.nn as nn
-import torch.nn.functional as F
-import time
-import albumentations
-from torchvision.transforms import transforms
-from torch.utils.data import Dataset, DataLoader
-from PIL import Image
-
-
-# In[44]:
-
-
-aug = albumentations.Compose([
-    albumentations.Resize(224, 224),
-])
-
-
-# In[ ]:
-
-
-
-
-
-# In[51]:
-
-
-import cv2
-cap = cv2.VideoCapture(r'C:\Users\abdul\Desktop\Research\work\mhamad syrian\ziad\dataVideos\bend\daria_bend.avi')
-if (cap.isOpened() == False):
-    print('Error while trying to read video. Please check again...')
-# get the frame width and height
-frame_width = int(cap.get(3))
-frame_height = int(cap.get(4))
-# define codec and create VideoWriter object
-out = cv2.VideoWriter(str('output'), cv2.VideoWriter_fourcc(*'mp4v'), 30, (frame_width, frame_height))
-
-
-# In[52]:
-
-
-# read until end of video
-while(cap.isOpened()):
-    # capture each frame of the video
-    ret, frame = cap.read()
-    if ret == True:
-        model.eval()
-        with torch.no_grad():
-            # convert to PIL RGB format before predictions
-            pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-            pil_image = aug(image=np.array(pil_image))['image']
-            pil_image = np.transpose(pil_image, (2, 0, 1)).astype(np.float32)
-            pil_image = torch.tensor(pil_image, dtype=torch.float).cuda()
-            pil_image = pil_image.unsqueeze(0)
-
-            outputs = model(pil_image)
-            _, preds = torch.max(outputs.data, 1)
-
-        cv2.putText(frame, lb.classes_[preds], (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 200, 0), 2)
-        cv2.imshow('image', frame)
-        out.write(frame)
-        # press `q` to exit
-        if cv2.waitKey(27) & 0xFF == ord('q'):
-            break
-    else:
-        break
-# release VideoCapture()
-cap.release()
-# close all frames and video windows
-cv2.destroyAllWindows()
-
-
-# In[ ]:
 
 
 
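The testing block removed above was not self-contained: it reused `model` and `lb` from the training half of the script, hard-coded `.cuda()`, and never released the `VideoWriter`. Below is a minimal standalone sketch of the same inference loop, as it might live in a separate Test.py. The checkpoint name 'model' comes from the hunk header; the label-binarizer file 'lb.pkl', the input path 'input.avi', and the resnet18 stand-in architecture are assumptions, since this diff does not show what Train.py actually defines.

# Hypothetical standalone Test.py -- a sketch under the assumptions above,
# not the author's actual script.
import cv2
import joblib
import numpy as np
import torch
import albumentations
import torchvision.models as models

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Assumed training artifacts: a fitted sklearn LabelBinarizer dumped to
# 'lb.pkl', and the state_dict saved as 'model' (the latter appears in the
# hunk header above).
lb = joblib.load('lb.pkl')
model = models.resnet18(num_classes=len(lb.classes_))  # stand-in architecture
model.load_state_dict(torch.load('model', map_location=device))
model = model.to(device).eval()

# Same preprocessing as the removed block: resize only, no normalization.
aug = albumentations.Compose([albumentations.Resize(224, 224)])

cap = cv2.VideoCapture('input.avi')  # hypothetical input path
if not cap.isOpened():
    raise SystemExit('Error while trying to read video. Please check again...')

frame_width, frame_height = int(cap.get(3)), int(cap.get(4))
out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30,
                      (frame_width, frame_height))

with torch.no_grad():
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # BGR -> RGB, resize to 224x224, HWC -> CHW, add a batch dimension
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = aug(image=rgb)['image']
        tensor = torch.tensor(
            np.transpose(image, (2, 0, 1)).astype(np.float32)
        ).unsqueeze(0).to(device)
        _, preds = torch.max(model(tensor).data, 1)
        label = str(lb.classes_[int(preds)])
        cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    0.9, (0, 200, 0), 2)
        cv2.imshow('image', frame)
        out.write(frame)
        if cv2.waitKey(27) & 0xFF == ord('q'):  # press `q` to exit
            break

cap.release()
out.release()  # the removed block never released the writer
cv2.destroyAllWindows()

The per-frame pipeline (BGR to RGB, Resize(224, 224), HWC to CHW, float32 batch of one) mirrors the removed block; the PIL round-trip is dropped because albumentations accepts the NumPy array from cv2.cvtColor directly.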
 