zhoubingyu egrace479 committed on
Commit
f01fe46
·
0 Parent(s):

Duplicate from imageomics/KABR

Browse files

Co-authored-by: Elizabeth Campolongo <egrace479@users.noreply.huggingface.co>

Files changed (45) hide show
  1. .gitattributes +59 -0
  2. KABR/README.txt +55 -0
  3. KABR/annotation/classes.json +1 -0
  4. KABR/annotation/distribution.xlsx +0 -0
  5. KABR/annotation/train.csv +3 -0
  6. KABR/annotation/val.csv +3 -0
  7. KABR/configs/I3D.yaml +99 -0
  8. KABR/configs/SLOWFAST.yaml +108 -0
  9. KABR/configs/X3D.yaml +98 -0
  10. KABR/dataset/image/giraffes_md5.txt +1 -0
  11. KABR/dataset/image/giraffes_part_aa +3 -0
  12. KABR/dataset/image/giraffes_part_ab +3 -0
  13. KABR/dataset/image/giraffes_part_ac +3 -0
  14. KABR/dataset/image/giraffes_part_ad +3 -0
  15. KABR/dataset/image/zebras_grevys_md5.txt +1 -0
  16. KABR/dataset/image/zebras_grevys_part_aa +3 -0
  17. KABR/dataset/image/zebras_grevys_part_ab +3 -0
  18. KABR/dataset/image/zebras_grevys_part_ac +3 -0
  19. KABR/dataset/image/zebras_grevys_part_ad +3 -0
  20. KABR/dataset/image/zebras_grevys_part_ae +3 -0
  21. KABR/dataset/image/zebras_grevys_part_af +3 -0
  22. KABR/dataset/image/zebras_grevys_part_ag +3 -0
  23. KABR/dataset/image/zebras_grevys_part_ah +3 -0
  24. KABR/dataset/image/zebras_grevys_part_ai +3 -0
  25. KABR/dataset/image/zebras_grevys_part_aj +3 -0
  26. KABR/dataset/image/zebras_grevys_part_ak +3 -0
  27. KABR/dataset/image/zebras_grevys_part_al +3 -0
  28. KABR/dataset/image/zebras_grevys_part_am +3 -0
  29. KABR/dataset/image/zebras_plains_md5.txt +1 -0
  30. KABR/dataset/image/zebras_plains_part_aa +3 -0
  31. KABR/dataset/image/zebras_plains_part_ab +3 -0
  32. KABR/dataset/image/zebras_plains_part_ac +3 -0
  33. KABR/dataset/image/zebras_plains_part_ad +3 -0
  34. KABR/dataset/image/zebras_plains_part_ae +3 -0
  35. KABR/dataset/image/zebras_plains_part_af +3 -0
  36. KABR/dataset/image/zebras_plains_part_ag +3 -0
  37. KABR/dataset/image/zebras_plains_part_ah +3 -0
  38. KABR/dataset/image/zebras_plains_part_ai +3 -0
  39. KABR/dataset/image/zebras_plains_part_aj +3 -0
  40. KABR/dataset/image/zebras_plains_part_ak +3 -0
  41. KABR/dataset/image/zebras_plains_part_al +3 -0
  42. KABR/dataset/image2video.py +67 -0
  43. KABR/dataset/image2visual.py +67 -0
  44. README.md +274 -0
  45. download.py +154 -0
.gitattributes ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ # Audio files - uncompressed
37
+ *.pcm filter=lfs diff=lfs merge=lfs -text
38
+ *.sam filter=lfs diff=lfs merge=lfs -text
39
+ *.raw filter=lfs diff=lfs merge=lfs -text
40
+ # Audio files - compressed
41
+ *.aac filter=lfs diff=lfs merge=lfs -text
42
+ *.flac filter=lfs diff=lfs merge=lfs -text
43
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
44
+ *.ogg filter=lfs diff=lfs merge=lfs -text
45
+ *.wav filter=lfs diff=lfs merge=lfs -text
46
+ # Image files - uncompressed
47
+ *.bmp filter=lfs diff=lfs merge=lfs -text
48
+ *.gif filter=lfs diff=lfs merge=lfs -text
49
+ *.png filter=lfs diff=lfs merge=lfs -text
50
+ *.tiff filter=lfs diff=lfs merge=lfs -text
51
+ # Image files - compressed
52
+ *.jpg filter=lfs diff=lfs merge=lfs -text
53
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
54
+ *.webp filter=lfs diff=lfs merge=lfs -text
55
+ # Split data files
56
+ *_part_* filter=lfs diff=lfs merge=lfs -text
57
+ # Custom
58
+ KABR/annotation/train.csv filter=lfs diff=lfs merge=lfs -text
59
+ KABR/annotation/val.csv filter=lfs diff=lfs merge=lfs -text
KABR/README.txt ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ KABR: High-Quality Dataset for Kenyan Animal Behavior Recognition from Drone Videos
2
+
3
+ ---------------------------------------------------------------------------------------------------
4
+
5
+ We present a novel high-quality dataset for animal behavior recognition from drone videos. The dataset is focused on Kenyan wildlife and contains behaviors of giraffes, plains zebras, and Grevy's zebras. The dataset consists of more than 10 hours of annotated videos, and it includes eight different classes, encompassing seven types of animal behavior and an additional category for occluded instances. In the annotation process for this dataset, a team of 10 people was involved, with an expert zoologist overseeing the process. Each behavior was labeled based on its distinctive features, using a standardized set of criteria to ensure consistency and accuracy across the annotations. The dataset was collected using drones that flew over the animals in the Mpala Research Centre in Kenya, providing high-quality video footage of the animals' natural behaviors. We believe that this dataset will be a valuable resource for researchers working on animal behavior recognition, as it provides a diverse and high-quality set of annotated videos that can be used for evaluating deep learning models. Additionally, the dataset can be used to study the behavior patterns of Kenyan animals and can help to inform conservation efforts and wildlife management strategies. We provide a detailed description of the dataset and its annotation process, along with some initial experiments on the dataset using conventional deep learning models. The results demonstrate the effectiveness of the dataset for animal behavior recognition and highlight the potential for further research in this area.
6
+
7
+ ---------------------------------------------------------------------------------------------------
8
+
9
+ The KABR dataset follows the Charades format. The Charades format:
10
+
11
+ KABR
12
+ /images
13
+ /video_1
14
+ /image_1.jpg
15
+ /image_2.jpg
16
+ ...
17
+ /image_n.jpg
18
+ /video_2
19
+ /image_1.jpg
20
+ /image_2.jpg
21
+ ...
22
+ /image_n.jpg
23
+ ...
24
+ /video_n
25
+ /image_1.jpg
26
+ /image_2.jpg
27
+ /image_3.jpg
28
+ ...
29
+ /image_n.jpg
30
+ /annotation
31
+ /classes.json
32
+ /train.csv
33
+ /val.csv
34
+
35
+ The dataset can be directly loaded and processed by the SlowFast (https://github.com/facebookresearch/SlowFast) framework.
36
+
37
+ ---------------------------------------------------------------------------------------------------
38
+
39
+ Naming:
40
+ G0XXX.X - Giraffes
41
+ ZP0XXX.X - Plains Zebras
42
+ ZG0XXX.X - Grevy's Zebras
43
+
44
+ ---------------------------------------------------------------------------------------------------
45
+
46
+ Information:
47
+
48
+ KABR/configs: examples of SlowFast framework configs.
49
+ KABR/annotation/distribution.xlsx: distribution of classes for all videos.
50
+
51
+ ---------------------------------------------------------------------------------------------------
52
+
53
+ Scripts:
54
+ image2video.py: Encode image sequences into the original video. For example, [image/G0067.1, image/G0067.2, ..., image/G0067.24] will be encoded into video/G0067.mp4.
55
+ image2visual.py: Encode image sequences into the original video with corresponding annotations. For example, [image/G0067.1, image/G0067.2, ..., image/G0067.24] will be encoded into visual/G0067.mp4.
KABR/annotation/classes.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Walk": 0, "Graze": 1, "Browse": 2, "Head Up": 3, "Auto-Groom": 4, "Trot": 5, "Run": 6, "Occluded": 7}
KABR/annotation/distribution.xlsx ADDED
Binary file (5.62 kB). View file
 
KABR/annotation/train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fded23bb35b4bbef7d1d2f606a73afd8996957eea4ffe542b79c6cdcc7eee78
3
+ size 30325892
KABR/annotation/val.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b663c75fa0f2ecadc01798623da56f040050420c3b5db71cc2444319db32df73
3
+ size 10652837
KABR/configs/I3D.yaml ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ TRAIN:
2
+ ENABLE: True
3
+ DATASET: charades
4
+ BATCH_SIZE: 8
5
+ EVAL_PERIOD: 5
6
+ CHECKPOINT_PERIOD: 5
7
+ AUTO_RESUME: True
8
+ # CHECKPOINT_FILE_PATH:
9
+ CHECKPOINT_TYPE: pytorch
10
+ CHECKPOINT_INFLATE: False
11
+ MIXED_PRECISION: True
12
+
13
+ TEST:
14
+ ENABLE: True
15
+ DATASET: charades
16
+ BATCH_SIZE: 8
17
+ NUM_ENSEMBLE_VIEWS: 2
18
+ NUM_SPATIAL_CROPS: 1
19
+ # CHECKPOINT_FILE_PATH:
20
+ CHECKPOINT_TYPE: pytorch
21
+
22
+ DATA:
23
+ NUM_FRAMES: 16
24
+ SAMPLING_RATE: 5
25
+ TRAIN_JITTER_SCALES: [320, 320]
26
+ TRAIN_CROP_SIZE: 320
27
+ TEST_CROP_SIZE: 320
28
+ TRAIN_CROP_NUM_TEMPORAL: 1
29
+ INPUT_CHANNEL_NUM: [3]
30
+ MULTI_LABEL: False
31
+ RANDOM_FLIP: True
32
+ SSL_COLOR_JITTER: True
33
+ SSL_COLOR_BRI_CON_SAT: [0.2, 0.2, 0.2]
34
+ INV_UNIFORM_SAMPLE: True
35
+ ENSEMBLE_METHOD: max
36
+ REVERSE_INPUT_CHANNEL: True
37
+ PATH_TO_DATA_DIR: "./KABR/annotation"
38
+ PATH_PREFIX: "./KABR/dataset/image"
39
+ DECODING_BACKEND: torchvision
40
+
41
+ RESNET:
42
+ ZERO_INIT_FINAL_BN: True
43
+ WIDTH_PER_GROUP: 64
44
+ NUM_GROUPS: 1
45
+ DEPTH: 50
46
+ TRANS_FUNC: bottleneck_transform
47
+ STRIDE_1X1: False
48
+ NUM_BLOCK_TEMP_KERNEL: [[3], [4], [6], [3]]
49
+
50
+ NONLOCAL:
51
+ LOCATION: [[[]], [[]], [[]], [[]]]
52
+ GROUP: [[1], [1], [1], [1]]
53
+ INSTANTIATION: softmax
54
+
55
+ BN:
56
+ USE_PRECISE_STATS: True
57
+ NUM_BATCHES_PRECISE: 100
58
+ NORM_TYPE: sync_batchnorm
59
+ NUM_SYNC_DEVICES: 1
60
+
61
+ SOLVER:
62
+ BASE_LR: 0.1
63
+ LR_POLICY: cosine
64
+ MAX_EPOCH: 120
65
+ MOMENTUM: 0.9
66
+ WEIGHT_DECAY: 1e-4
67
+ WARMUP_EPOCHS: 34.0
68
+ WARMUP_START_LR: 0.01
69
+ OPTIMIZING_METHOD: sgd
70
+
71
+ MODEL:
72
+ NUM_CLASSES: 8
73
+ ARCH: i3d
74
+ MODEL_NAME: ResNet
75
+ LOSS_FUNC: cross_entropy
76
+ DROPOUT_RATE: 0.5
77
+
78
+ DATA_LOADER:
79
+ NUM_WORKERS: 8
80
+ PIN_MEMORY: True
81
+
82
+ NUM_GPUS: 1
83
+ NUM_SHARDS: 1
84
+ RNG_SEED: 0
85
+ OUTPUT_DIR: ./logs/i3d-kabr
86
+ LOG_MODEL_INFO: True
87
+
88
+ TENSORBOARD:
89
+ ENABLE: False
90
+
91
+ DEMO:
92
+ ENABLE: True
93
+ LABEL_FILE_PATH: ./KABR/annotation/classes.json
94
+ # INPUT_VIDEO: # path to input
95
+ # OUTPUT_FILE: # path to output
96
+ THREAD_ENABLE: False
97
+ # THREAD_ENABLE: False  # duplicate key — already set above
98
+ NUM_VIS_INSTANCES: 1
99
+ NUM_CLIPS_SKIP: 1
KABR/configs/SLOWFAST.yaml ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ TRAIN:
2
+ ENABLE: True
3
+ DATASET: charades
4
+ BATCH_SIZE: 8
5
+ EVAL_PERIOD: 5
6
+ CHECKPOINT_PERIOD: 5
7
+ AUTO_RESUME: True
8
+ # CHECKPOINT_FILE_PATH:
9
+ CHECKPOINT_TYPE: pytorch
10
+ CHECKPOINT_INFLATE: False
11
+ MIXED_PRECISION: True
12
+
13
+ TEST:
14
+ ENABLE: True
15
+ DATASET: charades
16
+ BATCH_SIZE: 8
17
+ NUM_ENSEMBLE_VIEWS: 2
18
+ NUM_SPATIAL_CROPS: 1
19
+ # CHECKPOINT_FILE_PATH:
20
+ CHECKPOINT_TYPE: pytorch
21
+
22
+ DATA:
23
+ NUM_FRAMES: 16
24
+ SAMPLING_RATE: 5
25
+ TRAIN_JITTER_SCALES: [256, 256]
26
+ TRAIN_CROP_SIZE: 256
27
+ TEST_CROP_SIZE: 256
28
+ TRAIN_CROP_NUM_TEMPORAL: 1
29
+ INPUT_CHANNEL_NUM: [3, 3]
30
+ MULTI_LABEL: False
31
+ RANDOM_FLIP: True
32
+ SSL_COLOR_JITTER: True
33
+ SSL_COLOR_BRI_CON_SAT: [0.2, 0.2, 0.2]
34
+ INV_UNIFORM_SAMPLE: True
35
+ ENSEMBLE_METHOD: max
36
+ REVERSE_INPUT_CHANNEL: True
37
+ PATH_TO_DATA_DIR: "./KABR/annotation"
38
+ PATH_PREFIX: "./KABR/dataset/image"
39
+ DECODING_BACKEND: torchvision
40
+
41
+ SLOWFAST:
42
+ ALPHA: 4
43
+ BETA_INV: 8
44
+ FUSION_CONV_CHANNEL_RATIO: 2
45
+ FUSION_KERNEL_SZ: 7
46
+
47
+ RESNET:
48
+ ZERO_INIT_FINAL_BN: True
49
+ WIDTH_PER_GROUP: 64
50
+ NUM_GROUPS: 1
51
+ DEPTH: 50
52
+ TRANS_FUNC: bottleneck_transform
53
+ STRIDE_1X1: False
54
+ NUM_BLOCK_TEMP_KERNEL: [[3, 3], [4, 4], [6, 6], [3, 3]]
55
+ SPATIAL_STRIDES: [[1, 1], [2, 2], [2, 2], [2, 2]]
56
+ SPATIAL_DILATIONS: [[1, 1], [1, 1], [1, 1], [1, 1]]
57
+
58
+ NONLOCAL:
59
+ LOCATION: [[[], []], [[], []], [[], []], [[], []]]
60
+ GROUP: [[1, 1], [1, 1], [1, 1], [1, 1]]
61
+ INSTANTIATION: dot_product
62
+
63
+ BN:
64
+ USE_PRECISE_STATS: True
65
+ NUM_BATCHES_PRECISE: 200
66
+ NORM_TYPE: sync_batchnorm
67
+ NUM_SYNC_DEVICES: 1
68
+
69
+ SOLVER:
70
+ BASE_LR: 0.0375
71
+ LR_POLICY: steps_with_relative_lrs
72
+ LRS: [1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
73
+ STEPS: [0, 41, 49]
74
+ MAX_EPOCH: 80
75
+ MOMENTUM: 0.9
76
+ WEIGHT_DECAY: 1e-4
77
+ WARMUP_EPOCHS: 3.0
78
+ WARMUP_START_LR: 0.0001
79
+ OPTIMIZING_METHOD: sgd
80
+
81
+ MODEL:
82
+ NUM_CLASSES: 8
83
+ ARCH: slowfast
84
+ LOSS_FUNC: cross_entropy
85
+ DROPOUT_RATE: 0.5
86
+
87
+ DATA_LOADER:
88
+ NUM_WORKERS: 8
89
+ PIN_MEMORY: True
90
+
91
+ NUM_GPUS: 1
92
+ NUM_SHARDS: 1
93
+ RNG_SEED: 0
94
+ OUTPUT_DIR: ./logs/slowfast-kabr
95
+ LOG_MODEL_INFO: True
96
+
97
+ TENSORBOARD:
98
+ ENABLE: False
99
+
100
+ DEMO:
101
+ ENABLE: True
102
+ LABEL_FILE_PATH: ./KABR/annotation/classes.json
103
+ # INPUT_VIDEO: # path to input
104
+ # OUTPUT_FILE: # path to output
105
+ THREAD_ENABLE: False
106
+ THREAD_ENABLE: False
107
+ NUM_VIS_INSTANCES: 1
108
+ NUM_CLIPS_SKIP: 1
KABR/configs/X3D.yaml ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ TRAIN:
2
+ ENABLE: True
3
+ DATASET: charades
4
+ BATCH_SIZE: 8
5
+ EVAL_PERIOD: 5
6
+ CHECKPOINT_PERIOD: 5
7
+ AUTO_RESUME: True
8
+ # CHECKPOINT_FILE_PATH:
9
+ CHECKPOINT_TYPE: pytorch
10
+ CHECKPOINT_INFLATE: False
11
+ MIXED_PRECISION: True
12
+
13
+ TEST:
14
+ ENABLE: True
15
+ DATASET: charades
16
+ BATCH_SIZE: 8
17
+ NUM_ENSEMBLE_VIEWS: 2
18
+ NUM_SPATIAL_CROPS: 1
19
+ # CHECKPOINT_FILE_PATH:
20
+ CHECKPOINT_TYPE: pytorch
21
+
22
+ DATA:
23
+ NUM_FRAMES: 16
24
+ SAMPLING_RATE: 5
25
+ TRAIN_JITTER_SCALES: [300, 300]
26
+ TRAIN_CROP_SIZE: 300
27
+ TEST_CROP_SIZE: 300
28
+ TRAIN_CROP_NUM_TEMPORAL: 1
29
+ INPUT_CHANNEL_NUM: [3]
30
+ MULTI_LABEL: False
31
+ RANDOM_FLIP: True
32
+ SSL_COLOR_JITTER: True
33
+ SSL_COLOR_BRI_CON_SAT: [0.2, 0.2, 0.2]
34
+ INV_UNIFORM_SAMPLE: True
35
+ ENSEMBLE_METHOD: max
36
+ REVERSE_INPUT_CHANNEL: True
37
+ PATH_TO_DATA_DIR: "./KABR/annotation"
38
+ PATH_PREFIX: "./KABR/dataset/image"
39
+ DECODING_BACKEND: torchvision
40
+
41
+ X3D:
42
+ WIDTH_FACTOR: 2.0
43
+ DEPTH_FACTOR: 5.0
44
+ BOTTLENECK_FACTOR: 2.25
45
+ DIM_C5: 2048
46
+ DIM_C1: 12
47
+
48
+ RESNET:
49
+ ZERO_INIT_FINAL_BN: True
50
+ TRANS_FUNC: x3d_transform
51
+ STRIDE_1X1: False
52
+
53
+ BN:
54
+ USE_PRECISE_STATS: True
55
+ NUM_BATCHES_PRECISE: 200
56
+ NORM_TYPE: sync_batchnorm
57
+ NUM_SYNC_DEVICES: 1
58
+ WEIGHT_DECAY: 0.0
59
+
60
+ SOLVER:
61
+ BASE_LR: 0.05
62
+ BASE_LR_SCALE_NUM_SHARDS: True
63
+ MAX_EPOCH: 120
64
+ LR_POLICY: cosine
65
+ WEIGHT_DECAY: 5e-5
66
+ WARMUP_EPOCHS: 35.0
67
+ WARMUP_START_LR: 0.01
68
+ OPTIMIZING_METHOD: sgd
69
+
70
+ MODEL:
71
+ NUM_CLASSES: 8
72
+ ARCH: x3d
73
+ MODEL_NAME: X3D
74
+ LOSS_FUNC: cross_entropy
75
+ DROPOUT_RATE: 0.5
76
+
77
+ DATA_LOADER:
78
+ NUM_WORKERS: 8
79
+ PIN_MEMORY: True
80
+
81
+ NUM_GPUS: 1
82
+ NUM_SHARDS: 1
83
+ RNG_SEED: 0
84
+ OUTPUT_DIR: ./logs/x3d-l-kabr
85
+ LOG_MODEL_INFO: True
86
+
87
+ TENSORBOARD:
88
+ ENABLE: False
89
+
90
+ DEMO:
91
+ ENABLE: True
92
+ LABEL_FILE_PATH: ./KABR/annotation/classes.json
93
+ # INPUT_VIDEO: # path to input
94
+ # OUTPUT_FILE: # path to output
95
+ # THREAD_ENABLE: False  # duplicate key — already set above
96
+ THREAD_ENABLE: False
97
+ NUM_VIS_INSTANCES: 1
98
+ NUM_CLIPS_SKIP: 1
KABR/dataset/image/giraffes_md5.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 7aa9270fffa9ca10d2fe3a61f34770ba giraffes.zip
KABR/dataset/image/giraffes_part_aa ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bac568ae6c6015f82509b6e950e691a89f31662ddf175176a087544143db290
3
+ size 2147483648
KABR/dataset/image/giraffes_part_ab ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee62913b9c0c2080351a7df6ee5b52a4820c488727460375a470acf947575986
3
+ size 2147483648
KABR/dataset/image/giraffes_part_ac ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7cc2d6a05efc1b9b070c15a9a89382864885ac0422c345f8e09c2c520143a1bd
3
+ size 2147483648
KABR/dataset/image/giraffes_part_ad ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87372e3547b2f39b6ffa0c10952a406e9c8d6a54499f325d35aedc25f7c015fa
3
+ size 1951376838
KABR/dataset/image/zebras_grevys_md5.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 9084ac4bbda00ff527951384ef2da313 zebras_grevys.zip
KABR/dataset/image/zebras_grevys_part_aa ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e78e55b9f534b79df031afcd75581dce3aa4c59f244eeeb42ed5eedce4c8465
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ab ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9788a9b699d339bfe74e2265b4d3b68389b474332bf9b7be7d9cbbb9ebc33960
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ac ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1987838498e42bd052c5cf6f453e8dcb9c0611071262119037218eaf3b4e320
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ad ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee8555402ba584771c43e4b3e4b4edfcfbffd08996a2bc7aae1b49035826c8b4
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ae ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ec2607254f6b2ddca0e18aabab15a42ca97f4631878962243f6658541a186c7
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_af ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:103a6d435da7743935da5ed3d4e376d1dfdf1dc58bbd5c47880e5d142eef6d0d
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ag ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5dd33bcbc62cb9717854a7f8e2f5e5b31c970e54b4c18e5201aed704cf3d3bc
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ah ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b331e25f572d95f161a1f61bfc065e49c17bbf2d4d128e54c4512dea884f2c6e
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ai ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:435a27f8c8cb1d32823c261eef98b700cf78b843a2f6e80eb7a3284dc33a2f36
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_aj ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43d838db21ef3e4820a0949d628017aa61d5876fffdbeba18639b9a2be5482b6
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_ak ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f46efb8ab0a31058ffe4a7690f904e79881b772b20088710fdee0ea548696cf3
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_al ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ae431c840f7e3d1e4fd01180836478e4054dabb1cd5a6ca416fd7ef76089459
3
+ size 2147483648
KABR/dataset/image/zebras_grevys_part_am ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fd2cad718b4f753c0a23ec08201d5974db51ca4d5a7427d5dc5720ede52249e
3
+ size 129174797
KABR/dataset/image/zebras_plains_md5.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 1f0c62ff5294a0d607807c634a30e04e zebras_plains.zip
KABR/dataset/image/zebras_plains_part_aa ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bff22f5475da31d34cddbe061c80ed4a28dee4110abe932c47aea6bb715066f4
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ab ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5dfe8933c8e80b51686ce17ccf02e4d14fb515b9d2f9e75d561580a60909d897
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ac ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c6e21fe556ef09a314ac1bea1c878c87c0d6732f5fa45f0bb7d23541590e8d5
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ad ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:164934163fffe0671f163bdce7cb1c5156e5c91a64876dd88c27d1433d79303e
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ae ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2573329692e751e2f172a1407f1890c5ebe6ec3d6af9e5be91a2542c6a1aca4c
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_af ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebe28b7135dfb18f9b4bacb051579a0c2364720a005ef96c7323296559a2ec86
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ag ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c5b16e93ed2bb71472a8b4eafbd1a78e21d7d9a1e620201426b41bec7a034d5
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ah ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1cb2a383c57734eb055c88e0da063df4324db0cc98472976e257af2d16f1b3e5
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ai ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:771b8ef309190b619d7b6b88f68b68bce351510bcd46ecb9cd58a5237058ee87
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_aj ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b5ed5d4828741edac73d54c018bbceff59736a1f4e821b1a4f84bb3888d4e30
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_ak ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b59c41aef5c8ae0809abb035b97816a767c72d30be150742550b99c5f4c90eb
3
+ size 2147483648
KABR/dataset/image/zebras_plains_part_al ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3223ffc298ec89e81c99fe6e980cffed9a989468ef741ea72e5432d9e6e464f
3
+ size 91613758
KABR/dataset/image2video.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import sys
import json
import cv2
from natsort import natsorted
import pandas as pd
from tqdm import tqdm

if __name__ == "__main__":
    # Encode the per-segment image sequences back into one .mp4 per source video.
    # Example: image/G0067.1 ... image/G0067.24 -> video/G0067.mp4
    path_to_image = "image"
    path_to_video = "video"
    annotation_train = "../annotation/train.csv"
    annotation_val = "../annotation/val.csv"
    classes_json = "../annotation/classes.json"
    visual = False  # raw frames only; image2visual.py sets this True to draw labels

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(path_to_video, exist_ok=True)

    with open(classes_json, "r") as file:
        label2number = json.load(file)

    # Invert {label: id} so annotation rows can be mapped back to label names.
    number2label = {value: key for key, value in label2number.items()}

    # Charades-format annotation CSVs are space-separated; the column really is
    # named "original_vido_id" (sic) in the Charades specification.
    df_train = pd.read_csv(annotation_train, sep=" ")
    df_val = pd.read_csv(annotation_val, sep=" ")
    df = pd.concat([df_train, df_val], axis=0)
    folders = natsorted(os.listdir(path_to_image))

    # Group mini-scene folders by their parent video id: "G0067.1" -> "G0067".
    hierarchy = {}
    for folder in folders:
        main = folder.split(".")[0]
        hierarchy.setdefault(main, []).append(folder)

    for folder in tqdm(hierarchy, total=len(hierarchy)):
        # NOTE(review): frame size is hard-coded to 400x300; OpenCV silently
        # drops frames whose size differs — confirm all extracted images match.
        vw = cv2.VideoWriter(f"{path_to_video}/{folder}.mp4",
                             cv2.VideoWriter_fourcc(*"mp4v"), 29.97,
                             (400, 300))

        for segment in hierarchy[folder]:
            # Map 1-based frame index -> behavior label for this mini-scene.
            mapping = {}
            for index, row in df[df.original_vido_id == segment].iterrows():
                mapping[row["frame_id"]] = number2label[row["labels"]]

            for j, file in enumerate(natsorted(os.listdir(path_to_image + os.sep + segment))):
                image = cv2.imread(f"{path_to_image}/{segment}/{file}")

                if image is None:
                    # Unreadable or non-image file; skip rather than crash VideoWriter.
                    continue

                if visual:
                    color = (0, 0, 0)
                    label = mapping[j + 1]
                    thickness_in = 1
                    size = 0.7
                    label_length = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, size, thickness_in)
                    copied = image.copy()  # pre-overlay copy, blended back for transparency
                    cv2.rectangle(image, (10, 10), (20 + label_length[0][0], 40), (255, 255, 255), -1)
                    # Renamed comprehension variable (was "i") so it no longer
                    # shadows an outer loop index.
                    cv2.putText(image, label, (16, 31),
                                cv2.FONT_HERSHEY_SIMPLEX, size,
                                tuple(c - 50 for c in color), thickness_in, cv2.LINE_AA)
                    image = cv2.addWeighted(image, 0.4, copied, 0.6, 0.0)

                vw.write(image)

        vw.release()
KABR/dataset/image2visual.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import sys
import json
import cv2
from natsort import natsorted
import pandas as pd
from tqdm import tqdm

if __name__ == "__main__":
    # Re-encode annotated image sequences into per-video .mp4 files with the
    # behavior label drawn on every frame (image2video.py is the raw variant).
    path_to_image = "image"
    path_to_video = "visual"
    annotation_train = "../annotation/train.csv"
    annotation_val = "../annotation/val.csv"
    classes_json = "../annotation/classes.json"
    visual = True

    if not os.path.exists(path_to_video):
        os.makedirs(path_to_video)

    with open(classes_json, "r") as file:
        label2number = json.load(file)

    # Reverse lookup: class id -> human-readable behavior name.
    number2label = {num: name for name, num in label2number.items()}

    # Charades-style annotations are space-delimited CSV files.
    frames_train = pd.read_csv(annotation_train, sep=" ")
    frames_val = pd.read_csv(annotation_val, sep=" ")
    df = pd.concat([frames_train, frames_val], axis=0)

    # Bucket mini-scene folders ("G0067.1", "G0067.2", ...) under their video id.
    hierarchy = {}
    for entry in natsorted(os.listdir(path_to_image)):
        video_id = entry.split(".")[0]
        if video_id in hierarchy:
            hierarchy[video_id].append(entry)
        else:
            hierarchy[video_id] = [entry]

    for i, folder in tqdm(enumerate(hierarchy.keys()), total=len(hierarchy.keys())):
        writer_path = f"{path_to_video}/{folder}.mp4"
        vw = cv2.VideoWriter(writer_path, cv2.VideoWriter_fourcc(*"mp4v"),
                             29.97, (400, 300))

        for segment in hierarchy[folder]:
            # 1-based frame index -> behavior label for this mini-scene.
            mapping = {}
            for _, row in df[df.original_vido_id == segment].iterrows():
                mapping[row["frame_id"]] = number2label[row["labels"]]

            frame_files = natsorted(os.listdir(path_to_image + os.sep + segment))
            for j, file in enumerate(frame_files):
                image = cv2.imread(f"{path_to_image}/{segment}/{file}")

                if visual:
                    color = (0, 0, 0)
                    label = mapping[j + 1]
                    thickness_in = 1
                    size = 0.7
                    label_length = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, size, thickness_in)
                    copied = image.copy()  # unannotated copy, blended back below
                    cv2.rectangle(image, (10, 10), (20 + label_length[0][0], 40), (255, 255, 255), -1)
                    cv2.putText(image, label, (16, 31),
                                cv2.FONT_HERSHEY_SIMPLEX, size,
                                tuple([c - 50 for c in color]), thickness_in, cv2.LINE_AA)
                    image = cv2.addWeighted(image, 0.4, copied, 0.6, 0.0)

                vw.write(image)

        vw.release()
README.md ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc0-1.0
3
+ task_categories:
4
+ - video-classification
5
+ tags:
6
+ - zebra
7
+ - giraffe
8
+ - plains zebra
9
+ - Grevy's zebra
10
+ - video
11
+ - animal behavior
12
+ - behavior recognition
13
+ - annotation
14
+ - annotated video
15
+ - conservation
16
+ - drone
17
+ - UAV
18
+ - imbalanced
19
+ - Kenya
20
+ - Mpala Research Centre
21
+ pretty_name: >-
22
+ KABR: In-Situ Dataset for Kenyan Animal Behavior Recognition from Drone
23
+ Videos
24
+ description: "Initial KABR project release, contains drone video clips (mini-scenes) of giraffes, plains zebras, and Grevy's zebras with behavior labels from a subset of videos collected at the Mpala Research Centre in January 2023."
25
+ size_categories:
26
+ - 1M<n<10M
27
+ ---
28
+ # Dataset Card for KABR: In-Situ Dataset for Kenyan Animal Behavior Recognition from Drone Videos
29
+
30
+ ## Dataset Description
31
+
32
+ - **Homepage:** [KABR Mini-Scene Site](https://kabrdata.xyz/)
33
+ - **Project Page:** [KABR Site](https://imageomics.github.io/KABR/)
34
+ - **Repository:** https://github.com/Imageomics/kabr-tools
35
+ - **Paper:** https://openaccess.thecvf.com/content/WACV2024W/CV4Smalls/papers/Kholiavchenko_KABR_In-Situ_Dataset_for_Kenyan_Animal_Behavior_Recognition_From_Drone_WACVW_2024_paper.pdf
36
+
37
+ ### Dataset Summary
38
+
39
+ We present a novel high-quality dataset for animal behavior recognition from drone videos.
40
+ The dataset is focused on Kenyan wildlife and contains behaviors of giraffes, plains zebras, and Grevy's zebras.
41
+ The dataset consists of more than 10 hours of annotated videos, and it includes eight different classes, encompassing seven types of animal behavior and an additional category for occluded instances.
42
+ In the annotation process for this dataset, a team of 10 people was involved, with an expert zoologist overseeing the process.
43
+ Each behavior was labeled based on its distinctive features, using a standardized set of criteria to ensure consistency and accuracy across the annotations.
44
+ The dataset was collected using drones that flew over the animals in the [Mpala Research Centre](https://mpala.org/) in Kenya, providing high-quality video footage of the animal's natural behaviors.
45
+ The drone footage is captured at a resolution of 5472 x 3078 pixels, and the videos were recorded at a frame rate of 29.97 frames per second.
46
+
47
+
48
+ <!--This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).-->
49
+
50
+ ### Supported Tasks and Leaderboards
51
+
52
+ The results of our evaluation using I3D, SlowFast, and X3D architectures are given in the table below. For each one, the model was trained for 120 epochs with a batch size of 5. For more information on these results, see our [paper](https://openaccess.thecvf.com/content/WACV2024W/CV4Smalls/papers/Kholiavchenko_KABR_In-Situ_Dataset_for_Kenyan_Animal_Behavior_Recognition_From_Drone_WACVW_2024_paper.pdf).
53
+
54
+ | Method | All | Giraffes | Plains Zebras | Grevy’s Zebras |
55
+ | ---- | ---- | ---- | ---- | ---- |
56
+ | I3D (16x5) | 53.41 | 61.82 | 58.75 | 46.73 |
57
+ | SlowFast (16x5, 4x5) | 52.92 | 61.15 | 60.60 | 47.42 |
58
+ | X3D (16x5) | 61.9 | 65.1 | 63.11 | 51.16 |
59
+
60
+ ### Languages
61
+
62
+ English
63
+
64
+ ## Dataset Structure
65
+
66
+ Under `KABR/dataset/image/`, the data has been archived into `.zip` files, which are split into 2GB files. These must be recombined and extracted.
67
+ After cloning and navigating into the repository, you can use the following commands to do the reconstruction:
68
+ ```bash
69
+ cd KABR/dataset/image/
70
+ cat giraffes_part_* > giraffes.zip
71
+ md5sum giraffes.zip # Compare this to what's shown with `cat giraffes_md5.txt`
72
+ unzip giraffes.zip
73
+ rm -rf giraffes_part_*
74
+
75
+ # Similarly for `zebras_grevys_part_*` and `zebras_plains_part_*`
76
+ ```
77
+
78
+ Alternatively, there is a download script, `download.py`, which allows a download of the entire dataset in its established format without requiring one to clone the repository (cloning requires _at least_ double the size of the dataset to store). To proceed with this approach, download `download.py` to the system where you want to access the data.
79
+ Then, in the same directory as the script, run the following to begin the download:
80
+ ```
81
+ pip install requests
82
+ python download.py
83
+ ```
84
+
85
+ This script then downloads all the files present in the repository (without making a clone of the `.git` directory, etc.), concatenates the part files to their ZIP archives, verifies the MD5 checksums, extracts, and cleans up so that the folder structure, as described below, is present.
86
+
87
+ Note that it will require approximately 116GB of free space to complete this process, though the final dataset will only take about 61GB of disk space (the script removes the extra files after checking the download was successful).
88
+
89
+ The KABR dataset follows the Charades format:
90
+
91
+ ```
92
+ KABR
93
+ /dataset
94
+ /image
95
+ /video_1
96
+ /image_1.jpg
97
+ /image_2.jpg
98
+ ...
99
+ /image_n.jpg
100
+ /video_2
101
+ /image_1.jpg
102
+ /image_2.jpg
103
+ ...
104
+ /image_n.jpg
105
+ ...
106
+ /video_n
107
+ /image_1.jpg
108
+ /image_2.jpg
109
+ /image_3.jpg
110
+ ...
111
+ /image_n.jpg
112
+ /annotation
113
+ /classes.json
114
+ /train.csv
115
+ /val.csv
116
+ ```
117
+
118
+ The dataset can be directly loaded and processed by the [SlowFast](https://github.com/facebookresearch/SlowFast) framework.
119
+
120
+ **Informational Files**
121
+ * `KABR/configs`: examples of SlowFast framework configs.
122
+ * `KABR/annotation/distribution.xlsx`: distribution of classes for all videos.
123
+
124
+ **Scripts:**
125
+ * `image2video.py`: Encode image sequences into the original video.
126
+ * For example, `[image/G0067.1, image/G0067.2, ..., image/G0067.24]` will be encoded into `video/G0067.mp4`.
127
+ * `image2visual.py`: Encode image sequences into the original video with corresponding annotations.
128
+ * For example, `[image/G0067.1, image/G0067.2, ..., image/G0067.24]` will be encoded into `visual/G0067.mp4`.
129
+
130
+ ### Data Instances
131
+
132
+ **Naming:** Within the image folder, the `video_n` folders are named as follows (X indicates a number):
133
+ * G0XXX.X - Giraffes
134
+ * ZP0XXX.X - Plains Zebras
135
+ * ZG0XXX.X - Grevy's Zebras
136
+ * Within each of these folders the images are simply `X.jpg`.
137
+
138
+ **Note:** The dataset consists of a total of 1,139,893 frames captured from drone videos. There are 488,638 frames of Grevy's zebras, 492,507 frames of plains zebras, and 158,748 frames of giraffes.
139
+
140
+
141
+ ### Data Fields
142
+
143
+ There are 14,764 unique behavioral sequences in the dataset. These consist of eight distinct behaviors:
144
+ - Walk
145
+ - Trot
146
+ - Run: animal is moving at a canter or gallop
147
+ - Graze: animal is eating grass or other vegetation
148
+ - Browse: animal is eating trees or bushes
149
+ - Head Up: animal is looking around or observing its surroundings
150
+ - Auto-Groom: animal is grooming itself (licking, scratching, or rubbing)
151
+ - Occluded: animal is not fully visible
152
+
153
+ ### Data Splits
154
+
155
+ Training and validation sets are indicated by their respective CSV files (`train.csv` and `val.csv`), located within the `annotation` folder.
156
+
157
+ ## Dataset Creation
158
+
159
+ ### Curation Rationale
160
+
161
+ We present a novel high-quality dataset for animal behavior recognition from drone videos.
162
+ The dataset is focused on Kenyan wildlife and contains behaviors of giraffes, plains zebras, and Grevy's zebras.
163
+ The dataset consists of more than 10 hours of annotated videos, and it includes eight different classes, encompassing seven types of animal behavior and an additional category for occluded instances.
164
+ In the annotation process for this dataset, a team of 10 people was involved, with an expert zoologist overseeing the process.
165
+ Each behavior was labeled based on its distinctive features, using a standardized set of criteria to ensure consistency and accuracy across the annotations.
166
+ The dataset was collected using drones that flew over the animals in the [Mpala Research Centre](https://mpala.org/) in Kenya, providing high-quality video footage of the animal's natural behaviors.
167
+ We believe that this dataset will be a valuable resource for researchers working on animal behavior recognition, as it provides a diverse and high-quality set of annotated videos that can be used for evaluating deep learning models.
168
+ Additionally, the dataset can be used to study the behavior patterns of Kenyan animals and can help to inform conservation efforts and wildlife management strategies.
169
+
170
+ <!-- [To be added:] -->
171
+
172
+ We provide a detailed description of the dataset and its annotation process, along with some initial experiments on the dataset using conventional deep learning models.
173
+ The results demonstrate the effectiveness of the dataset for animal behavior recognition and highlight the potential for further research in this area.
174
+
175
+ ### Source Data
176
+
177
+ #### Initial Data Collection and Normalization
178
+
179
+ Data was collected from 6 January 2023 through 21 January 2023 at the [Mpala Research Centre](https://mpala.org/) in Kenya under a Nacosti research license. We used DJI Mavic 2S drones equipped with cameras to record 5.4K resolution videos (5472 x 3078 pixels) from varying altitudes and distances of 10 to 50 meters from the animals (distance was determined by circumstances and safety regulations).
180
+
181
+ Mini-scenes were extracted from these videos to reduce the impact of drone movement and facilitate human annotation. Animals were detected in frame using YOLOv8, then the SORT tracking algorithm was applied to follow their movement. A 400 by 300 pixel window, centered on the animal, was then extracted; this is the mini-scene.
182
+
183
+ <!--
184
+ #### Who are the source language producers?
185
+
186
+ [More Information Needed]
187
+ -->
188
+
189
+ ### Annotations
190
+
191
+ #### Annotation process
192
+
193
+ In the annotation process for this dataset, a team of 10 people was involved, with an expert zoologist overseeing the process.
194
+ Each behavior was labeled based on its distinctive features, using a standardized set of criteria to ensure consistency and accuracy across the annotations.
195
+
196
+ <!--
197
+ #### Who are the annotators?
198
+
199
+ [More Information Needed]
200
+ -->
201
+
202
+ ### Personal and Sensitive Information
203
+
204
+ Though there are endangered species included in this data, exact locations are not provided and their safety is assured by their location within the preserve.
205
+
206
+ ## Considerations for Using the Data
207
+ <!--
208
+ ### Social Impact of Dataset
209
+
210
+ [More Information Needed]
211
+
212
+ ### Discussion of Biases
213
+
214
+ [More Information Needed]
215
+ -->
216
+
217
+ ### Other Known Limitations
218
+
219
+ This data exhibits a long-tailed distribution due to the natural variation in frequency of the observed behaviors.
220
+
221
+ ## Additional Information
222
+
223
+ ### Authors
224
+
225
+ * Maksim Kholiavchenko (Rensselaer Polytechnic Institute) - ORCID: 0000-0001-6757-1957
226
+ * Jenna Kline (The Ohio State University) - ORCID: 0009-0006-7301-5774
227
+ * Michelle Ramirez (The Ohio State University)
228
+ * Sam Stevens (The Ohio State University)
229
+ * Alec Sheets (The Ohio State University) - ORCID: 0000-0002-3737-1484
230
+ * Reshma Ramesh Babu (The Ohio State University) - ORCID: 0000-0002-2517-5347
231
+ * Namrata Banerji (The Ohio State University) - ORCID: 0000-0001-6813-0010
232
+ * Elizabeth Campolongo (Imageomics Institute, The Ohio State University) - ORCID: 0000-0003-0846-2413
233
+ * Matthew Thompson (Imageomics Institute, The Ohio State University) - ORCID: 0000-0003-0583-8585
234
+ * Nina Van Tiel (Eidgenössische Technische Hochschule Zürich) - ORCID: 0000-0001-6393-5629
235
+ * Jackson Miliko (Mpala Research Centre)
236
+ * Eduardo Bessa (Universidade de Brasília) - ORCID: 0000-0003-0606-5860
237
+ * Tanya Berger-Wolf (The Ohio State University) - ORCID: 0000-0001-7610-1412
238
+ * Daniel Rubenstein (Princeton University) - ORCID: 0000-0001-9049-5219
239
+ * Charles Stewart (Rensselaer Polytechnic Institute)
240
+
241
+ ### Licensing Information
242
+
243
+ This dataset is dedicated to the public domain for the benefit of scientific pursuits. We ask that you cite the dataset and journal paper using the below citations if you make use of it in your research.
244
+
245
+ ### Citation Information
246
+
247
+ #### Dataset
248
+ ```
249
+ @misc{KABR_Data,
250
+ author = {Kholiavchenko, Maksim and Kline, Jenna and Ramirez, Michelle and Stevens, Sam and Sheets, Alec and Babu, Reshma and Banerji, Namrata and Campolongo, Elizabeth and Thompson, Matthew and Van Tiel, Nina and Miliko, Jackson and Bessa, Eduardo and Duporge, Isla and Berger-Wolf, Tanya and Rubenstein, Daniel and Stewart, Charles},
251
+ title = {KABR: In-Situ Dataset for Kenyan Animal Behavior Recognition from Drone Videos},
252
+ year = {2023},
253
+ url = {https://huggingface.co/datasets/imageomics/KABR},
254
+ doi = {10.57967/hf/1010},
255
+ publisher = {Hugging Face}
256
+ }
257
+ ```
258
+
259
+ #### Paper
260
+ ```
261
+ @inproceedings{kholiavchenko2024kabr,
262
+ title={KABR: In-Situ Dataset for Kenyan Animal Behavior Recognition from Drone Videos},
263
+ author={Kholiavchenko, Maksim and Kline, Jenna and Ramirez, Michelle and Stevens, Sam and Sheets, Alec and Babu, Reshma and Banerji, Namrata and Campolongo, Elizabeth and Thompson, Matthew and Van Tiel, Nina and Miliko, Jackson and Bessa, Eduardo and Duporge, Isla and Berger-Wolf, Tanya and Rubenstein, Daniel and Stewart, Charles},
264
+ booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision},
265
+ pages={31-40},
266
+ year={2024}
267
+ }
268
+ ```
269
+
270
+ ### Contributions
271
+
272
+ This work was supported by the [Imageomics Institute](https://imageomics.org), which is funded by the US National Science Foundation's Harnessing the Data Revolution (HDR) program under [Award #2118240](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2118240) (Imageomics: A New Frontier of Biological Information Powered by Knowledge-Guided Machine Learning). Additional support was also provided by the [AI Institute for Intelligent Cyberinfrastructure with Computational Learning in the Environment (ICICLE)](https://icicle.osu.edu/), which is funded by the US National Science Foundation under [Award #2112606](https://www.nsf.gov/awardsearch/showAward?AWD_ID=2112606). Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation.
273
+
274
+ The data was gathered at the [Mpala Research Centre](https://mpala.org/) in Kenya, in accordance with Research License No. NACOSTI/P/22/18214. The data collection protocol adhered strictly to the guidelines set forth by the Institutional Animal Care and Use Committee under permission No. IACUC 1835F.
download.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ import time
4
+ import zipfile
5
+ import glob
6
+ from hashlib import md5
7
+ import concurrent.futures
8
+
9
base_url = "https://huggingface.co/datasets/imageomics/KABR/resolve/main/KABR"

"""
To extend the dataset, add additional animals and parts ranges to the list and dictionary below.
"""

animals = ["giraffes", "zebras_grevys", "zebras_plains"]

# First and last two-letter split suffix (inclusive) for each animal's archive,
# as produced by `split --suffix-length=2`.
animal_parts_range = {
    "giraffes": ("aa", "ad"),
    "zebras_grevys": ("aa", "am"),
    "zebras_plains": ("aa", "al"),
}

dataset_prefix = "dataset/image/"

# Define the static files that are not dependent on the animals list
static_files = [
    "README.txt",
    "annotation/classes.json",
    "annotation/distribution.xlsx",
    "annotation/train.csv",
    "annotation/val.csv",
    "configs/I3D.yaml",
    "configs/SLOWFAST.yaml",
    "configs/X3D.yaml",
    "dataset/image2video.py",
    "dataset/image2visual.py",
]

def generate_part_files(animal, start, end):
    """Return the repo-relative names of an animal's split-archive part files.

    Suffixes advance lexicographically from *start* through *end* inclusive
    (aa, ab, ..., az, ba, ...), matching `split --suffix-length=2` naming.

    The previous implementation iterated the two suffix letters independently,
    which silently skipped suffixes whenever a range crossed a first-letter
    boundary (e.g. ("ay", "bb") produced only ay, ba, bb instead of
    ay, az, ba, bb). Since the module docstring invites extending the ranges,
    this version carries correctly; results are unchanged for all current
    single-letter-prefix ranges.
    """
    if end < start:
        return []
    parts = []
    first, second = start[0], start[1]
    while True:
        suffix = first + second
        parts.append(f"{dataset_prefix}{animal}_part_{suffix}")
        if suffix == end:
            break
        if second == "z":  # carry into the first letter
            first = chr(ord(first) + 1)
            second = "a"
        else:
            second = chr(ord(second) + 1)
    return parts

# Generate the part files for each animal
part_files = [
    part
    for animal, (start, end) in animal_parts_range.items()
    for part in generate_part_files(animal, start, end)
]

archive_md5_files = [f"{dataset_prefix}{animal}_md5.txt" for animal in animals]

files = static_files + archive_md5_files + part_files
58
+
59
def progress_bar(iteration, total, message, bar_length=50):
    """Render a one-line textual progress bar that overwrites itself via '\r'.

    Prints a newline once *iteration* reaches *total* so subsequent output
    starts on a fresh line.
    """
    fraction = iteration / total
    filled = '=' * int(round(fraction * bar_length) - 1)
    padding = ' ' * (bar_length - len(filled))
    padded_msg = f'{message:<100}'  # fixed width so shorter messages erase longer ones
    print(f'[{filled + padding}] {int(fraction * 100)}% {padded_msg}', end='\r', flush=True)

    if iteration == total:
        print()
68
+
69
# Directory to save files
save_dir = "KABR_files"

# Download every repository file, preserving its relative path under save_dir.

print("Downloading the Kenyan Animal Behavior Recognition (KABR) dataset ...")

total = len(files)
for i, file_path in enumerate(files):
    save_path = os.path.join(save_dir, file_path)

    # Resuming a partial run: keep files that are already on disk.
    if os.path.exists(save_path):
        print(f"File {save_path} already exists. Skipping download.")
        continue

    full_url = f"{base_url}/{file_path}"

    # Create the necessary directories based on the file path
    os.makedirs(os.path.join(save_dir, os.path.dirname(file_path)), exist_ok=True)

    # Stream the file to disk in chunks so multi-GB part files are not held
    # in memory, and skip error responses so a 404/500 page is never written
    # out as if it were data (which the "already exists" check above would
    # otherwise pin in place on the next run).
    response = requests.get(full_url, stream=True)
    if not response.ok:
        print(f"Failed to download {full_url} (HTTP {response.status_code}). Skipping.")
        continue
    with open(save_path, 'wb') as file:
        for chunk in response.iter_content(chunk_size=8 * 1024 * 1024):
            file.write(chunk)

    progress_bar(i + 1, total, f"downloaded: {save_path}")

print("Download of repository contents completed.")
98
+
99
+ print(f"Concatenating split files into a full archive for {animals} ...")
100
+
101
def concatenate_files(animal):
    """Reassemble an animal's split archive parts into a single .zip file.

    Part files are concatenated in sorted (suffix) order and deleted as they
    are consumed to keep peak disk usage down.
    """
    print(f"Concatenating files for {animal} ...")
    pattern = f"{save_dir}/dataset/image/{animal}_part_*"
    parts = sorted(glob.glob(pattern))
    if not parts:
        print(f"No part files found for {animal}.")
        return

    chunk_size = 8 * 1024 * 1024  # 8MB
    with open(f"{save_dir}/dataset/image/{animal}.zip", 'wb') as archive:
        for part_path in parts:
            with open(part_path, 'rb') as part:
                # Copy in fixed-size chunks rather than slurping whole parts.
                while True:
                    chunk = part.read(chunk_size)
                    if not chunk:
                        break
                    archive.write(chunk)
            # Delete each part as soon as it has been appended.
            os.remove(part_path)
    print(f"Archive for {animal} concatenated.")
118
+
119
# Reassemble the archives in parallel (I/O-bound, so threads are appropriate).
with concurrent.futures.ThreadPoolExecutor() as executor:
    # Consume the map iterator: Executor.map is lazy, so worker exceptions
    # are only re-raised when results are retrieved. The original discarded
    # the iterator, silently swallowing any concatenation failure.
    list(executor.map(concatenate_files, animals))
121
+
122
def compute_md5(file_path):
    """Return the MD5 hex digest of *file_path*, read in 8MB chunks."""
    digest = md5()
    chunk_size = 8 * 1024 * 1024  # 8MB
    with open(file_path, 'rb') as fh:
        while True:
            block = fh.read(chunk_size)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
129
+
130
def verify_and_extract(animal):
    """Verify the reassembled archive's MD5; if it matches, extract and clean up.

    On a checksum mismatch the archive is left on disk (so the user can retry
    or inspect it) and an error message is printed instead.
    """
    zip_path = f"{save_dir}/dataset/image/{animal}.zip"
    md5_path = f"{save_dir}/dataset/image/{animal}_md5.txt"

    print(f"Confirming data integrity for {animal}.zip ...")
    actual_md5 = compute_md5(zip_path)

    # The md5 file may contain "checksum filename"; only the first token matters.
    with open(md5_path, 'r') as file:
        expected_md5 = file.read().strip().split()[0]

    if actual_md5 != expected_md5:
        print(f"MD5 sum for {animal}.zip is incorrect. Expected: {expected_md5}, but got: {actual_md5}.")
        print("There may be data corruption. Please try to download and reconstruct the data again or reach out to the corresponding authors for assistance.")
        return

    print(f"MD5 sum for {animal}.zip is correct.")

    print(f"Extracting {animal}.zip ...")
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(f"{save_dir}/dataset/image/")
    print(f"{animal}.zip extracted.")

    # Remove the archive and its checksum file now that extraction succeeded.
    print(f"Cleaning up for {animal} ...")
    os.remove(zip_path)
    os.remove(md5_path)
150
+
151
# Verify and extract the archives in parallel (I/O-bound work).
with concurrent.futures.ThreadPoolExecutor() as executor:
    # Consume the map iterator: Executor.map is lazy, so exceptions raised
    # inside verify_and_extract would otherwise never surface.
    list(executor.map(verify_and_extract, animals))

print("Download script finished.")