#!/bin/bash
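#
# Download the KABR dataset from Hugging Face into ./KABR_files/, reassemble
# the split image archives, verify them against their published MD5 sums, and
# extract the verified zips.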

# Base URL of the Hugging Face repository
BASE_URL="https://huggingface.co/datasets/imageomics/KABR/resolve/main/KABR"

# Array of relative file paths
FILES=(
"README.txt"
"annotation/classes.json"
"annotation/distribution.xlsx"
"annotation/train.csv"
"annotation/val.csv"
"configs/I3D.yaml"
"configs/SLOWFAST.yaml"
"configs/X3D.yaml"
"dataset/image2video.py"
"dataset/image2visual.py"
"dataset/image/giraffes_md5.txt"
"dataset/image/giraffes_part_aa"
"dataset/image/giraffes_part_ab"
"dataset/image/giraffes_part_ac"
"dataset/image/giraffes_part_ad"
"dataset/image/giraffes.zip"
"dataset/image/zebras_grevys_md5.txt"
"dataset/image/zebras_grevys_part_aa"
"dataset/image/zebras_grevys_part_ab"
"dataset/image/zebras_grevys_part_ac"
"dataset/image/zebras_grevys_part_ad"
"dataset/image/zebras_grevys_part_ae"
"dataset/image/zebras_grevys_part_af"
"dataset/image/zebras_grevys_part_ag"
"dataset/image/zebras_grevys_part_ah"
"dataset/image/zebras_grevys_part_ai"
"dataset/image/zebras_grevys_part_aj"
"dataset/image/zebras_grevys_part_ak"
"dataset/image/zebras_grevys_part_al"
"dataset/image/zebras_grevys_part_am"
"dataset/image/zebras_plains_md5.txt"
"dataset/image/zebras_plains_part_aa"
"dataset/image/zebras_plains_part_ab"
"dataset/image/zebras_plains_part_ac"
"dataset/image/zebras_plains_part_ad"
"dataset/image/zebras_plains_part_ae"
"dataset/image/zebras_plains_part_af"
"dataset/image/zebras_plains_part_ag"
"dataset/image/zebras_plains_part_ah"
"dataset/image/zebras_plains_part_ai"
"dataset/image/zebras_plains_part_aj"
"dataset/image/zebras_plains_part_ak"
"dataset/image/zebras_plains_part_al"
)

# Loop through each relative file path
for FILE_PATH in "${FILES[@]}"; do
  # Construct the full URL
  FULL_URL="$BASE_URL/$FILE_PATH"

  # Create the necessary directories based on the file path
  mkdir -p "$(dirname "KABR_files/$FILE_PATH")"

  # Download the file, preserving its relative path; --fail keeps curl from
  # saving an HTML error page as data if the request fails
  curl -L --fail -o "KABR_files/$FILE_PATH" "$FULL_URL"

done

# Species whose image archives are distributed as split part files
ANIMALS=("giraffes" "zebras_grevys" "zebras_plains")

# Loop through each animal name
for ANIMAL in "${ANIMALS[@]}"; do
  # Concatenate the split files back into their archive. The glob expands in
  # lexicographic order (_part_aa, _part_ab, ...), which preserves byte order.
  PART_FILES=( "./KABR_files/dataset/image/${ANIMAL}"_part_* )
  if [ -e "${PART_FILES[0]}" ]; then
    cat "${PART_FILES[@]}" > "./KABR_files/dataset/image/${ANIMAL}.zip"
  else
    echo "No part files found for $ANIMAL."
    continue
  fi

  # Calculate the MD5 sum of the ZIP file
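  # Portability note: md5sum is the GNU coreutils tool; on macOS, `md5 -q`
  # prints the equivalent digest.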
  ZIP_MD5=$(md5sum "./KABR_files/dataset/image/${ANIMAL}.zip" | awk '{ print $1 }')

  # Read the expected MD5 sum from the associated txt file
  EXPECTED_MD5=$(awk '{ print $1 }' "./KABR_files/dataset/image/${ANIMAL}_md5.txt")

  # Compare the calculated MD5 sum with the expected MD5 sum
  if [ "$ZIP_MD5" == "$EXPECTED_MD5" ]; then
    echo "MD5 sum for ${ANIMAL}.zip is correct."
    # Delete the part files
    rm "${PART_FILES[@]}"
    unzip -d "./KABR_files/dataset/image/" "./KABR_files/dataset/image/${ANIMAL}.zip"
    rm "./KABR_files/dataset/image/${ANIMAL}.zip"
    rm "./KABR_files/dataset/image/${ANIMAL}_md5.txt"
  else
    echo "MD5 sum for ${ANIMAL}.zip is incorrect. Expected: $EXPECTED_MD5, but got: $ZIP_MD5."
    echo "There may be data corruption. Please try to download and reconstruct the data again or reach out to the corresponding authors for assistance."
  fi

done

echo "Download, reconstruction, extraction, and verification completed."