From 0f9c6c6212401f1577b3289b0358b72fa03e8f8c Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Thu, 25 Apr 2024 11:31:44 -0400 Subject: [PATCH] Add YOLOX object detection (#24) * Add yolox base models * Fix 2d grid for anchors * Change sample image * Add post-processing and inference results * Cleanup * Fix 2d grid repeat with dim=1 * Default to ndarray backend * Switch to YOLOX-tiny for example * Remove dead comment * Use tensor.dims() * Use the new tensor.permute() * Fix comments - Pre-trained weights are from COCO (README) - Remove training outputs TODO - Current example uses YOLOX-Tiny (Nano WIP) * Add YOLOX-Nano w/ depthwise separable conv (enum) * Remove dead code comment * Remove incorrect return comment * Add YOLOX to models README * Fix dead comments and add enum variants doc * Rephrase enum variants doc * Change burn-rs -> tracel-ai links * Upgrade to Burn 0.13.0 - Removed init_with methods - Fixed empty MaxPool2d vec initialization * Update image version --- README.md | 13 +- yolox-burn/.gitignore | 2 + yolox-burn/Cargo.toml | 28 +++ yolox-burn/LICENSE-APACHE | 1 + yolox-burn/LICENSE-MIT | 1 + yolox-burn/NOTICES.md | 16 ++ yolox-burn/README.md | 44 ++++ yolox-burn/examples/inference.rs | 145 ++++++++++++ yolox-burn/samples/dog_bike_man.jpg | Bin 0 -> 39085 bytes yolox-burn/src/lib.rs | 3 + yolox-burn/src/model/blocks.rs | 271 +++++++++++++++++++++++ yolox-burn/src/model/bottleneck.rs | 206 +++++++++++++++++ yolox-burn/src/model/boxes.rs | 138 ++++++++++++ yolox-burn/src/model/darknet.rs | 172 +++++++++++++++ yolox-burn/src/model/head.rs | 192 ++++++++++++++++ yolox-burn/src/model/mod.rs | 10 + yolox-burn/src/model/pafpn.rs | 177 +++++++++++++++ yolox-burn/src/model/weights.rs | 143 ++++++++++++ yolox-burn/src/model/yolox.rs | 329 ++++++++++++++++++++++++++++ 19 files changed, 1885 insertions(+), 6 deletions(-) create mode 100644 yolox-burn/.gitignore create mode 100644 yolox-burn/Cargo.toml create mode 120000 yolox-burn/LICENSE-APACHE create mode 
120000 yolox-burn/LICENSE-MIT create mode 100644 yolox-burn/NOTICES.md create mode 100644 yolox-burn/README.md create mode 100644 yolox-burn/examples/inference.rs create mode 100644 yolox-burn/samples/dog_bike_man.jpg create mode 100644 yolox-burn/src/lib.rs create mode 100644 yolox-burn/src/model/blocks.rs create mode 100644 yolox-burn/src/model/bottleneck.rs create mode 100644 yolox-burn/src/model/boxes.rs create mode 100644 yolox-burn/src/model/darknet.rs create mode 100644 yolox-burn/src/model/head.rs create mode 100644 yolox-burn/src/model/mod.rs create mode 100644 yolox-burn/src/model/pafpn.rs create mode 100644 yolox-burn/src/model/weights.rs create mode 100644 yolox-burn/src/model/yolox.rs diff --git a/README.md b/README.md index 4a35714..3213760 100644 --- a/README.md +++ b/README.md @@ -5,12 +5,13 @@ examples constructed using the [Burn](https://github.com/burn-rs/burn) deep lear ## Collection of Official Models -| Model | Description | Repository Link | -|-------------------------------------------------|-------------------------------------------------------|----------------------------------------------| -| [MobileNetV2](https://arxiv.org/abs/1801.04381) | A CNN model targeted at mobile devices. | [mobilenetv2-burn](mobilenetv2-burn/README.md) | -| [SqueezeNet](https://arxiv.org/abs/1602.07360) | A small CNN-based model for image classification. | [squeezenet-burn](squeezenet-burn/README.md) | -| [ResNet](https://arxiv.org/abs/1512.03385) | A CNN based on residual blocks with skip connections. | [resnet-burn](resnet-burn/README.md) | -| [RoBERTa](https://arxiv.org/abs/1907.11692) | A robustly optimized BERT pretraining approach. 
| [bert-burn](bert-burn/README.md) | +| Model | Description | Repository Link | +|-------------------------------------------------|----------------------------------------------------------|------------------------------------------------| +| [MobileNetV2](https://arxiv.org/abs/1801.04381) | A CNN model targeted at mobile devices. | [mobilenetv2-burn](mobilenetv2-burn/README.md) | +| [SqueezeNet](https://arxiv.org/abs/1602.07360) | A small CNN-based model for image classification. | [squeezenet-burn](squeezenet-burn/README.md) | +| [ResNet](https://arxiv.org/abs/1512.03385) | A CNN based on residual blocks with skip connections. | [resnet-burn](resnet-burn/README.md) | +| [RoBERTa](https://arxiv.org/abs/1907.11692) | A robustly optimized BERT pretraining approach. | [bert-burn](bert-burn/README.md) | +| [YOLOX](https://arxiv.org/abs/2107.08430) | A single-stage object detector based on the YOLO series. | [yolox-burn](yolox-burn/README.md) | ## Community Contributions diff --git a/yolox-burn/.gitignore b/yolox-burn/.gitignore new file mode 100644 index 0000000..ea6f423 --- /dev/null +++ b/yolox-burn/.gitignore @@ -0,0 +1,2 @@ +# Output image +*.output.png diff --git a/yolox-burn/Cargo.toml b/yolox-burn/Cargo.toml new file mode 100644 index 0000000..e27bd68 --- /dev/null +++ b/yolox-burn/Cargo.toml @@ -0,0 +1,28 @@ +[package] +authors = ["guillaumelagrange "] +license = "MIT OR Apache-2.0" +name = "yolox-burn" +version = "0.1.0" +edition = "2021" + +[features] +default = [] +std = [] +pretrained = ["burn/network", "std", "dep:dirs"] + +[dependencies] +# Note: default-features = false is needed to disable std +burn = { version = "0.13.0", default-features = false } +burn-import = { version = "0.13.0" } +itertools = { version = "0.12.1", default-features = false, features = [ + "use_alloc", +] } +dirs = { version = "5.0.1", optional = true } +serde = { version = "1.0.192", default-features = false, features = [ + "derive", + "alloc", +] } # alloc is for no_std, derive 
is needed + +[dev-dependencies] +burn = { version = "0.13.0", features = ["ndarray"] } +image = { version = "0.24.9", features = ["png", "jpeg"] } diff --git a/yolox-burn/LICENSE-APACHE b/yolox-burn/LICENSE-APACHE new file mode 120000 index 0000000..965b606 --- /dev/null +++ b/yolox-burn/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/yolox-burn/LICENSE-MIT b/yolox-burn/LICENSE-MIT new file mode 120000 index 0000000..76219eb --- /dev/null +++ b/yolox-burn/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/yolox-burn/NOTICES.md b/yolox-burn/NOTICES.md new file mode 100644 index 0000000..c3aac0c --- /dev/null +++ b/yolox-burn/NOTICES.md @@ -0,0 +1,16 @@ +# NOTICES AND INFORMATION + +This file contains notices and information required by libraries that this repository copied or derived from. The use of the following resources complies with the licenses provided. + +## Sample Image + +Image Title: Man with Bike and Pet Dog circa 1900 (archive ref DDX1319-2-3) +Author: East Riding Archives +Source: https://commons.wikimedia.org/wiki/File:Man_with_Bike_and_Pet_Dog_circa_1900_%28archive_ref_DDX1319-2-3%29_%2826507570321%29.jpg +License: [Creative Commons](https://www.flickr.com/commons/usage/) + +## Pre-trained Model + +The COCO pre-trained model was ported from the original [YOLOX implementation](https://github.com/Megvii-BaseDetection/YOLOX). + +As opposed to other YOLO variants (YOLOv8, YOLO-NAS, etc.), both the code and pre-trained weights are distributed under the [Apache 2.0](https://github.com/Megvii-BaseDetection/YOLOX/blob/main/LICENSE) open source license. 
diff --git a/yolox-burn/README.md b/yolox-burn/README.md new file mode 100644 index 0000000..3aea944 --- /dev/null +++ b/yolox-burn/README.md @@ -0,0 +1,44 @@ +# YOLOX Burn + +There have been many different object detection models with the YOLO prefix released in the recent +years, though most of them carry a GPL or AGPL license which restricts their usage. For this reason, +we selected [YOLOX](https://arxiv.org/abs/2107.08430) as the first object detection architecture +since both the original code and pre-trained weights are released under the +[Apache 2.0](https://github.com/Megvii-BaseDetection/YOLOX/blob/main/LICENSE) open source license. + +You can find the [Burn](https://github.com/tracel-ai/burn) implementation for the YOLOX variants in +[src/model/yolox.rs](src/model/yolox.rs). + +The model is [no_std compatible](https://docs.rust-embedded.org/book/intro/no-std.html). + +## Usage + +### `Cargo.toml` + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +yolox-burn = { git = "https://github.com/tracel-ai/models", package = "yolox-burn", default-features = false } +``` + +If you want to get the COCO pre-trained weights, enable the `pretrained` feature flag. + +```toml +[dependencies] +yolox-burn = { git = "https://github.com/tracel-ai/models", package = "yolox-burn", features = ["pretrained"] } +``` + +**Important:** this feature requires `std`. + +### Example Usage + +The [inference example](examples/inference.rs) initializes a YOLOX-Tiny from the COCO +[pre-trained weights](https://github.com/Megvii-BaseDetection/YOLOX?tab=readme-ov-file#standard-models) +with the `NdArray` backend and performs inference on the provided input image. 
+ +You can run the example with the following command: + +```sh +cargo run --release --features pretrained --example inference samples/dog_bike_man.jpg +``` diff --git a/yolox-burn/examples/inference.rs b/yolox-burn/examples/inference.rs new file mode 100644 index 0000000..50e6250 --- /dev/null +++ b/yolox-burn/examples/inference.rs @@ -0,0 +1,145 @@ +use std::path::Path; + +use image::{DynamicImage, ImageBuffer}; +use yolox_burn::model::{boxes::nms, weights, yolox::Yolox, BoundingBox}; + +use burn::{ + backend::NdArray, + tensor::{backend::Backend, Data, Device, Element, Shape, Tensor}, +}; + +const HEIGHT: usize = 640; +const WIDTH: usize = 640; + +fn to_tensor( + data: Vec, + shape: [usize; 3], + device: &Device, +) -> Tensor { + Tensor::::from_data(Data::new(data, Shape::new(shape)).convert(), device) + // [H, W, C] -> [C, H, W] + .permute([2, 0, 1]) +} + +/// Draws bounding boxes on the given image. +/// +/// # Arguments +/// +/// * `image`: Original input image. +/// * `boxes` - Bounding boxes, grouped per class. +/// * `color` - [R, G, B] color values to draw the boxes. +/// * `ratio` - [x, y] aspect ratio to scale the predicted boxes. +/// +/// # Returns +/// +/// The image annotated with bounding boxes. +fn draw_boxes( + image: DynamicImage, + boxes: &[Vec], + color: &[u8; 3], + ratio: &[f32; 2], // (x, y) ratio +) -> DynamicImage { + // Assumes x1 <= x2 and y1 <= y2 + fn draw_rect( + image: &mut ImageBuffer, Vec>, + x1: u32, + x2: u32, + y1: u32, + y2: u32, + color: &[u8; 3], + ) { + for x in x1..=x2 { + let pixel = image.get_pixel_mut(x, y1); + *pixel = image::Rgb(*color); + let pixel = image.get_pixel_mut(x, y2); + *pixel = image::Rgb(*color); + } + for y in y1..=y2 { + let pixel = image.get_pixel_mut(x1, y); + *pixel = image::Rgb(*color); + let pixel = image.get_pixel_mut(x2, y); + *pixel = image::Rgb(*color); + } + } + + // Annotate the original image and print boxes information. 
+ let (image_h, image_w) = (image.height(), image.width()); + let mut image = image.to_rgb8(); + for (class_index, bboxes_for_class) in boxes.iter().enumerate() { + for b in bboxes_for_class.iter() { + let xmin = (b.xmin * ratio[0]).clamp(0., image_w as f32 - 1.); + let ymin = (b.ymin * ratio[1]).clamp(0., image_h as f32 - 1.); + let xmax = (b.xmax * ratio[0]).clamp(0., image_w as f32 - 1.); + let ymax = (b.ymax * ratio[1]).clamp(0., image_h as f32 - 1.); + + println!( + "Predicted {} ({:.2}) at [{:.2}, {:.2}, {:.2}, {:.2}]", + class_index, b.confidence, xmin, ymin, xmax, ymax, + ); + + draw_rect( + &mut image, + xmin as u32, + xmax as u32, + ymin as u32, + ymax as u32, + color, + ); + } + } + DynamicImage::ImageRgb8(image) +} + +pub fn main() { + // Parse arguments + let img_path = std::env::args().nth(1).expect("No image path provided"); + + // Create YOLOX-Tiny + let device = Default::default(); + let model: Yolox = Yolox::yolox_tiny_pretrained(weights::YoloxTiny::Coco, &device) + .map_err(|err| format!("Failed to load pre-trained weights.\nError: {err}")) + .unwrap(); + + // Load image + let img = image::open(&img_path) + .map_err(|err| format!("Failed to load image {img_path}.\nError: {err}")) + .unwrap(); + + // Resize to 640x640 + let resized_img = img.resize_exact( + WIDTH as u32, + HEIGHT as u32, + image::imageops::FilterType::Triangle, // also known as bilinear in 2D + ); + + // Create tensor from image data + let x = to_tensor( + resized_img.into_rgb8().into_raw(), + [HEIGHT, WIDTH, 3], + &device, + ) + .unsqueeze::<4>(); // [B, C, H, W] + + // Forward pass + let out = model.forward(x); + + // Post-processing + let [_, num_boxes, num_outputs] = out.dims(); + let boxes = out.clone().slice([0..1, 0..num_boxes, 0..4]); + let obj_scores = out.clone().slice([0..1, 0..num_boxes, 4..5]); + let cls_scores = out.slice([0..1, 0..num_boxes, 5..num_outputs]); + let scores = cls_scores * obj_scores; + let boxes = nms(boxes, scores, 0.65, 0.5); + + // Draw outputs and 
save results + let (h, w) = (img.height(), img.width()); + let img_out = draw_boxes( + img, + &boxes[0], + &[239u8, 62u8, 5u8], + &[w as f32 / WIDTH as f32, h as f32 / HEIGHT as f32], + ); + + let img_path = Path::new(&img_path); + let _ = img_out.save(img_path.with_extension("output.png")); +} diff --git a/yolox-burn/samples/dog_bike_man.jpg b/yolox-burn/samples/dog_bike_man.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5dfd4c734830ca2ab2b969cabfb2ffe6bb9457a4 GIT binary patch literal 39085 zcmbTdc|4Tw`#yZl4931>l${VWQ;Ll28f2R+Gh@qA*^&`5_N5p^n(SLlV!~KcsSzbb zWi3l%d6(4NE<_1Q>vQ+nzMt>&{Qh{JXI`)Cb&uOH_jTXrb)Ls@oX7S1)$d6lO0clD z05Isa54I10-*12!P~0ggD~VE+m6laeQdHF-;xzW{)9|(+=o7>JVxvR-LV{`8EhV(H zoAlri=6LzdN6!X^2F@|2KTr02YUvy3`twg(02ue5tA)Y9zg{pnf{U96$;-zt0DYiI6u@B!1e^=O&CSIH zefm7~d%z{ey<@kg36D6*3#mj!9Zbx*&Z}(N*dalF`AJ2~J2Z)pf9EbqDd|0`YG@1= zr+r9AS5M#Uh`EKOl{LY^(aG86n5&zQ?9fwR?w;O0&hW_S*k7++k55g{%+Ad(ykA^e z`@Fuf`DN?t_P0Otf&s)or}dwi{om##2F(l3#f9KP{+SmH9t91A7#H_$O`aVlB%~Kr zTsm{+%kgsXc3druL7S{lA-7(*LiS{byqTGp}(Vh=4&4 z4&}hN91zi}aCQXaIYpD$8lRaDLF)h&?DG%={lCAF#>f zAsF-r{b0X;Ki7ON^ci-31qp}xH^-js& zPBl(G5lqO&l1er!^bChyAv5y z3I=jcGh8ezuur+jU5WR767jr}$qw5&+dj(k5*{;#WFprc!bxU(<$?S+HYfxg40F8c z}|xmf_T#?eu{iPxG-OG0hVW?hobjY<3P%NX30n%%9D55 zMv}?ZMPdI2tUcfm9nC}v`zuw^p3dp!8MHd83?V4~mHj~1J3BqRf}w@yFWRpYUTysL zH`qO$uT;=u_>DLPpJ@Xcd5q?>fj1MIJ?HC`=1XKVwK~DE7GgD+udnUsz|G_y)T@s1 z@mHlO2aCAn?5{k6>EKBl*Y=k6l}`C2Il}@n8Q4Z1hTe49?Uc6+(1hxHnV%i6?QA~& zOgR7Q{pxfLCJ&~)TA0GyiIw*UiuCCBLdI&OYfl&+n(eYg#9i|}D45O5qwkWsgD2KX zrfD+fs5UeVUD1XD(&%y;|G$SD0S)t)>=`6LM+(Mv4?vO3|1q%Hk~^h5KBIMe+uK*H zp>rRoCBu~0Oz!XOe3965RwwX)$uop?o$}RF&I-7U1FiO+6nHN6T|+x8dA{9vqY-Oi z*c-RO#(Qq}+O5HIk_(SXvkB#)HXHWM; z9;m0fIGCpc--4_mZ?@rR6?;jmA2^(spZIiRc{QdkkFw+X&o_HT;)Mc!)+R`wWZ!B6 zVI0>9i+KO5OFRKe7i2|EZe~viu4S=~biJF|tKwPa1{T`&Iwe>*0gBpH$%*Q8`BCB% zPS>z6tTZ3S54YpC*_{{3Dd1HRwRCI#a!K^Y1vnZmw&Sx|5Ous^d}-MI+MW1MahrAI z$3^odT>%HfdYJ+`4nM1$ulT|0X4Dc1DYx;44qC~0;ZyCsRh-%BT$(ITLW7{DX_6iH 
z-i(LxY*3OpL5^anMZy*drimR)F3FC5RSag%Zej?5^;aMFNhD$OoEv;!h1!9Sc(Lb! zLw#bcql>|YjiM)uiQ#ou4qiG|vLGjPXw2L-A+C=BtUaD98*7%1v_BJc%w5@W=uNkS*%(v`MXL+bqg65lhgq3OsO5JEh*E9z42keCx?P;p?Cl(;3t2dpzh314D#f0J z7n{B!hrvwn1ATJNG9GNQg5SOe{(WebkbpzAM-6IDPNBxK5YS?=BxqU*sRL}IbH84P znp&F{vH@w-4~!($afyr|^z0CK`C6s!3G2G=gSRBq3KEBLucjz`a+-L-bCFfUZ~0}V zgMdNsF1^a1u#Kk82(Lo)xlv|Y&TjF5*k09T#+y6gV_utv~&v7cF za*N6H63VNj{~G0V8xE9~oxH2DA^sg*{!ntded=cy-*f2{Y8fM?{jd%p3=vZDYj4!YxKb- zIGWkm#iz?e?&q6l2eIEiI~wx!L~9&lAib`Kbswy|9efwb6hdAjo}-DdoYuF*lO5fb zqY!Oq=QqWB9OWk%;O?HjoAJQ`i12}UbM+0>+b=Ia*Kib^tBfY9FTgBsGx@s5MO3)9 zeDLB5+d;d2ZaRAOEzGNMP}DIR?$Ghry;*)$F@rK92M?XBsWb8IvWTuVTr`z99VnJA z4H~0Dq=lHDZAsj;b~T

t!n_Ww>6F9KbJHzoP>i&P{{idl{^|?JG9MBcQFVR`VTA zgg|CRLvh59W@h?>0ez^tl+!^rmUb|9q;QQ-MKL*%Y~syvFTOMxE@AN2G$-g%2o*1C zepaS6eMSP&g$P2bY46*gaw&2z!{^TGvvc2v5Guz@PUA)g@zM2KuO+8;AcdB&Rkq-O znWaOgq;5ZAj3!u`pFhbrh!O=`Or!)1)^C&-us6A1c#Ta!d0li%*W@~#Z9wFDObKFH zaY>`<@1)MWtB4Y8Po!H0bt@9NU(2mT^3G1^kH!^`pPq=_s)>^25MlP_4kPQfii!P- zx=cO|EpiZIlpWFV=dWSjA|0AMl}rkWs<7>6xAJ zipiT{t1wW6$gz!9NrykB`{$j7)8s9EY}I6m)p4PeXP)1NVCKi1(rX4;DA=`k42mrZ z_M03?~f(IldQhf`>&}#e!(;2q~Su( zIW>MYGI^89lT$dI?itQHdF}o*V?hnU0*K^ImRDjrNostmvBYsWIPhq*{|7Yukr3*J zwH|K20aRRLhc68hf`A^e8J;>}?7Tv_FPn+6e-Rpk1C8DDvM}fCO+uYVtC`qX08`t5 zvxC%U&gnOU89KNnUjmnVQhp}d$8cu7eyh}L+q-M;A$JS)VudAcfDwah+7>+vMa63y zT`M-r)$xszII;A?GUR9p&zwK((~|~qZUt>(YsL(d7tV|qwn1Dm(5aF|HD`uKL{nlS756cX^KK~3* z(!_J4$U|^)SpWg)1{x)X%Tc}EEEhF3oV39ZJdeSH@86qkk3bdApaAPty6;fYV83lS zP1ZeKI3M$rKL`c$b~QQ9bg+4yzW-%+Y6vgp_%Wb-z5vUvY1JQ;N60uza21N={!unx&)Nv~(pP}LFCc70s z1_9-TRf z{jQm_sNJ zg%nnj%Udt@79F@UojV<#r|aQI+x^48YMu@ntdG9ShVnhZ16uz7MCSkN2g^ViBxmy*XO%4;dPdax-FuxIf-Q+UVeOR;0hf#w6=5e6;|&i}Z%^KC2^Q*ctFxru zGI7Z=w)RawAR^}ab#kL?IegUYhYl-L*;y+#%(%2~sFx>XA?M>Ss#`~-J2qUo$>-?Y zrR1(xA)%TsL8l8N^PhJ}b{&=m_ZkWo@(6tuSB$Vax`3YK3IcoV24==+`=2VFTj?d) zw2U9yrHsftb7*NL{qo%Yp@;mLeAfF^_cX^s(5dFTDM)Q>a)D2pBE_;6oXWN8UmKGRclq2&ajr3x4A` zT{`t7#vCcPBvf4^ON9R^@5R+hAC2>im_$TzWJyG21ulY#L`%(IRBZ0kz%U(NoNcB; zkeaVn;&CM2<2z^i@PS85R*$}DCBCC6*B9=}3fS&)XsN;p%w4fWjyQ7nNUq>_mi(-8 zRW%hKznw zyQsgALmqvCj&XNE<}GfHKmm@^uzO z9j0jzSF4#gPtok(&kbbKB)K@jYWG>~tS&T6tDh@3)*%HwPm|QLu#e65PK9oPrnv!> zl!z})%4s|3ZUlgaYWe?TpvX`t?4TP!NXh!B36rawjs`4L{-hNO%yulB6%0PNJ@Js7 z9z0J|z4Y2juf3k?p?5fZX1pT#dCQZ7lfs+9dyCYqU8H`XVQX(|dm8H*vQ?AxBDJN;_Pq7u}l51;jU82tfd13qCtZcCC6DW&m?3 zygf(_Y}SzH$le~iOonkVLx+9m?w(^hMcK1o^zV9GbM43WbMA5WXwI}ZaZ(yw^KyKy zt1Gu;d(bqvIA**LfB4o$ja*eYUsjt7u4m+grA)+l%vL9Gk9TJJKdU@zC_HNP1h>yj zq%`M1(ttr$F+dA~CDSD#jeHrV>UnYOUV!ohYCju>j*~_fOBe%`w zkV15|tndr1K9cad!Ghg7zC?uSN3Z05*MnD1s)%&eiR3)WU-|kxxgXd!_omB+O^llL zR^x6we@~~nZS-??}*w{ zo%R$FEOs#xiyC`Xeg?QxxU}}(o`6B&dXP?X#LT(slf#8svrIr#L^ 
zUVS|&&BaB$h^tiE3EF5EtT_x@%CKYm+%cLR7vqF{=F=s~qMjlP$JI~E~*p+ed+$_!ZqUQU9&7I5gb za@q9BRYpZb^>Zn8g@H$J&tS*qkGeK@g;)kZbFrmF$xWCHC;uSC5s}7k8q;&gukpvf zohq7)b8dTkr>}k6)#w%3{Yg%k!byk}tL$M#SL*J$gd7?XW+`xa(%b4y=8m`4Ol5u? zytT|ogBjv=-pL68Cp{wyn68YzX8d{0hRHLXn8Se{x}B{gtDGiCGM8e)=@@?)e>O%K z^SU2L2U^aGxu}$_n;si=Ur|Ij`K37oFsJ|I4vgU9fcmbWCcXij?y)ux9XBi1j@UxkS0ogNq4~W5i0bUWGJ*HS@42x0z{Mf=$f5q z<_41s<--W6H1Y><}!ILdXt@4PIc;a|pOg6||G^C{5W;4X$#kG;hlFQKJ4xYy z?|dfZjkzQaTs9+btk^POkC^Ps8;?c)uoB&f8L|5{gC6{ zqmnlH8|bHpcd+cu#%cQQd!Myt?R_fp@KJ%XMeBO5u*pq7zRD!0+(SVWP2CV#8H24+zj2Kj}eW68h zP;)KR5-|I-aG{8RQb9WO=zxvy^c6UCS1}Z*Ko^x4W<$uAibr|VcHDGd{A_dm4obnT zLGlG%B+R`gRs=%k;gQ|nI<}oKEAQUf<_g9txJ~HD6L+VYlSbxUD{Si_{`vw(W_UnU z)B(?=18(f*^;jq}gdlBy1QJ$t+K?={MSB83k>%NQRb_2>flPGgfdC`{5Zpup&@gT^ z6MB3hRn2bEl{kQ@7Pm!x13LVemU0@JK@ftT@1I=wztc}!4ea0L5o6thxD9E&{}kRT zpxHpFPtY&_KpVf7T_;eK9i~;F%4x6?jxXIbWMonc&ueW|3vlaWc{oU_Y6yCQHZI^g z^8jxUq3zWVG*dM*H>G+Go7t(gArreqq?lX-Ci3~ChAW$uhO$vr3-j`97)GxX`$>1j zMm?)KHbgN~2pDIV`lIBFnA}G8ZCC?~j?uKhHCv&aGB=CB^>La`{8)3WW2L8lR`K~| z9jF5cl~d`_s#K{uY^$mJ@=N2)5B-THh{NqkXtfz%$3zt{tL%3 zKI?-Qf^M>AC)fioOvLk7;FeItSRz7_YAY9DKQ&*3#6i#U3bdTyM2!ERTb5`6iIS{N z$!Quuv!G>9ItraI7+KNN5zmeK%uYLVXN73T z_0aqMz*2?vW`5bsy?CfbSD9hToZPDQe&M61N;~+Lph?y+N7r+n2VitXCs)|p)ZL>@a% zoHQ}x=kT_3sMs4(^*Eb5UMIvN?2uBT!PM@ zaTz#Xxki%u$qh)yd=EdN{067HE1EBRm)8c4f+w1rwK7V@yC!~6IHf@DJ*;>tRI7_}|Ac;)n@#fuDp+?mjtJLz zaXxI3)clU8Qc+YgqUP0t{0LZm+`~~d6V;D(xbhqPW%_d!S`dA&s}Uq!Y^ZT+NkdC4 zM1z|e-!xg~MpySxQ>(;tjnFWYPF)b_aY&(mM80IZ)=u}7a!0{mE-OBQpKEj zY-WG5w!6(z40by2OL^-Fx4Ni(__MM6yZ^_?GF5>dvaF& z`?T0DyXQt?c#&fV2f%poW)shw~dBzWv*!ZSBWc=0## zOWg zysz9e?TDtrqFtj||>M4yI*zNjRXYBREqI^ellf2Jwa2GG`8x+((7;yS( z)4c_~8mqw2c7~Wl%sxJ*0FS2-PpBKyTWgHwV&o5Rzbbdva$HzHs>V#MfjdvmA8uK6 zdnVbI9w^R8&w0aeGK$vwM&x?pJ(5bsi1GarH@i->6Gp=pHTA04#o9aWaiVcw@AbAe zLifr{mb6Z;1v6f%EJW<;hTmMzQ~_S!+n>ApLWw??HiTnU2%m_h&&1$huh^762vT=wkN0?pF0OS@m`yoWf1pL8z)xzwed78@Q)v|`Y z3r6Uy)#-~y7a?BT*ea)Kot`*oM42-=VKCwg;*)4H>7L>?YRR2O+sfK9A^vdlL0QhN 
z1A_!}S>oe!M8wtX^8tId!l!oc12=TBIarP z4R=n(^4}zbx7Z4HU+1yO%<_t#GYJxa!gcW`_kqO&_L&|AyNSml=**@T-j@s$uQfj1}ps`c+Gs-c#fqT z2`_}IXn;a)8PBUfq(7X3XaK4hP|eQ`^mIKqItabxv69D;SChKc67hmu43}B?t}Du1 z2X0ahQS*k@@nYE;p)nFNK1$U&JTZo&k!0R}V5Ltq{;9Z#|2_g!3pf*sd0Pu_e2<1} znTE8_DF4keOy8*b4Mds%ffN^F^#0Z1Hd6hXg+FZ0NH~CpE21_oqRI$Dh(bqc?c&|q zBC&aO(xBL5zaDCScb5s*D6w)2HJM`d9QJjH>lwLW4|WLAv4GO)@2yr!?rzZRAe#fz z?qs{HRl3jcU#k#US+>xt<{i*txLnGS7&PP1B;R6?1g$^$jqo(l_9GO?R!;689bPFq zWH!kr#8yt8rh_b5zhe8hW5vmfP#;;VfB)D9F7T`wo?plnsi@#~Qz>k`8gB|9LUlv! zodV%z07o!LtqmLs$UNDCrL6ViyRqb|p){R}{{%F(GA1*|S{M-6tKN-mweo&}4cMx~l2)pNVt2rKZiRrYi6q!bv z7w$pBQ-x!tHK?4MxC_4lZzWQh1swUQL!5k{@f-1|)=y}7i^Ybv`h(l?(fztcSXt;V zLW~xJvZdY_7s$uds*8Mlvxt{ComN~X&wl?VAc)lxXg689$rpl#JD?pNT;21;x>rJ6 z!;2ms^3lVaCVfifTzS?ZXURcSgF+#7JcY?KPrz&td8q%)d*lT=Ap95N|AXsx(g_e< z`47lr!D;*$s)Z2HH*Qj_Qp)MKLJc7b^NBoY9Jsc~yf(7Ru2;~*0pZkahcENl>QK8m z{+&OD4jd1wwJG1TK*Fy`dXNu9;y}77Mt3%=k4cd&87|(|ld2Z{IX}q{Qm#gd#F|=9 zj?6WmF5%A@eNZABg#wrH-`hEeqow|Er*1PlP!HFSl5g5+7e?;-31h-4_VgMpwQz0K zU$Y3{QPSC)6$`vo?Kt=?+;4y>NHelTrN3TCr^o+-1R>3lVOjJRsxr?82a{?y46p8r z>2A=VJL-Dj#e>dHjZ>JuDe*=^2Gm|`*b;gj2JXxfxvmPM`6+#*E6JU*kvN!=XBflh zroCH;srl|F&?WJtEi@Q8$ol2?-DmCNo2Dsb1Fq3aRjJ36vvTg#%5`T~qkNNlIsu_~ zSKx4DjUm6^5^;M4&u^MJ5`=8hiNYb3Xez{tW^D~(oKw*3M&+CR@oP*0L3k#pI*Z^i zJ*RFT`g9(dD~Dr=icPCCwX&S zQT!nNE>XZce?@QB;iXHkfXD!iyB9TQ^QBs;204$KVkd|)c~2bFqG9)_MNc=#ifz@F zp5TdQ@~-zE8CXQ4LnsRdwYlj8NUt60-gnSGHky9Ggsk*~Nj15MYyddZvbPc{RJeu) z=1rKL039Sw3&sGBj28_LuTor~3)`b$?!EFOj-^LqU*cuAm#qo05NWZ73b=qZ#Iyc4 zR{ZDB?0gbl04m>>ZXg6|uTDC+(}RG`O%W42p*Iq!^R`e%MXWQlx(W6Vp28(clisj?E7i6n}BE<_?rNxL^XJxrd+N{B6x$mUyh8kDuN5+c`A zmTg(~d8~9nRu3CQaPg<@N6p{3usdv-*t|yMjvpD1t|G#m8wTV=TI$-C{09IIR)2Fn z2>Q}9-<;*n1ZeRE&PdHVJApra4+Jg@iXixT+x;_36sQd{@vf>ev8V^8T3#E}{jrdy zzB0?{di=;HlR27lQD`tW@baB$Ln8OldEYXAE6El6J2}PfRMo4sf_BFFg=(-Gc*JS- zk&#DUn39c}4wkMM1JtZr@$JskvfZ`gH;CqF7(3fHXb`#nrd;_xPp8wtb?Y%m1>`6E z7bN&^c<_(+7=OT$qhRp!G>Hepln+>a5bIc8Zr6m}WFr4$aM?CY+v3dXNH|~%LvfD` 
zFd#!ZkOQ5FDNiq<;)PU_>0YzzBSZ~|%qmSf-2LWYqM9zBM3ZgEV`Rtjr(Js( zRS~H#4Q``2lVZuqn>7fn2<_kj1T`dQL+;pjBBEjA7W>CyzP-cnr+S~KYbyrTp>d*2 z=bJmx?dI(y8n@2CyNe)ogDD)jezb+X{p&}rBDFHE`bv!5zv1?-Ip#Ev&&hI?r}feC}#Kk<#2s?9nEC;lQ6Mb?nsF zDgNu*uYAyauU_tkL#9;A*@W2Vu`Rp>n%`GDj$9*Mo@g!OXaQ_1e$}J5oVH=JiQq@H^r!m^et z)fE;#Ujt`fnBf}HK7Zr#Aq4#lWwrW>8>qH+;VIkw{<&57w&hlWKBzVKkUm#^Q`6oZ zh`g|7@&ku^hqRL2a$K2bTT2|O(r&+*`(%VhpiLLbM~0sDVhg7mE@TS^DrMIvgwpoI z3b{5q%|Z|L%NzM?mA)%I6#-HSd!1U8RJZesM2;q9yMyc}D0*-?Q>0u5q zdZuQ-4S*zA@=Lt1LddYe$Te})3y4TUYnSJxU8m$A1jYAA9DHM)>2mh_gu~cd>hqn( zmw(}4F2V8avAz=NM~Z$5t&S%{brSUH?ng5o8(yecX-&jSG?V5%sSs)j^gbxU$v$M{ z51SP4H*zVpJyOl=*E`k>r>SyzwF9HSfpk^3r{?vA zuW27wZ1`=#iSM@6XC!`u#|`)qMTgJhu-_m^=0`xG2Ig8n2(YXx(+IpIy77Fc?#}@0 zo2#SAXJ0k+Mws-Us`N9TscC316lk?*y7JRIE1v~7>T^C zHbRbxpQb5y1%yxWSO$1yl{P6&0!YS)Bvsz2$z$zrsc8_!BatkZ9xl%rRPHocAlDW+ zlsEADY*h56M>6@@i+ zJxT7V1b)@^DpGccC#gSitav`Sw=T&-!tnIo5hCL0ozcrNQTq?AJ4MmH8pY?A&-r|> z*kB6KqYj(p*tQ7U1+fZFA7YKGq_{w+9C+_mIvc8{W-q65PQ& ztXZp4p!xYW4(3SBvw0xdEcB5qR?5DTu=XDO+62xiMd8{(DxM;C*z)Um5yg?#UN>-c zz^4~iN*&PbsLB7LttgOLPE+sX*GI4&p9i!?69WXIWQ_CIB8dz@q51w=@vq4kxsXRk zBT8}k&~hY0%jo~u_kzS|1Z}?3NkVj)ER2Q|X`H$m!x59rRUzFxyts(^gW(ILC$4~h`8zBL-rhB(~Z*8-J8^|yJMwajhuQ}f1kxWQGCK3y-@>>!bxvyN6 z%1Jx2X*gep+ks6zz!p!JrN?=A#F(4+)zx-s4Nm=yg6WUDcUcFmd`*c;fb4=ZY zb)C_-*UWm+`5VY`czYNB;?VYQf9V~}eR2L(LW;q6^TvBBRu{i3Gb$Rt-tx8c_i9v72;~sx^B1WKF;RKbK>lf6&6~~{c zWH?ytm}Q|N!lCgEsayb>Z|3_gor9ez)|RwoBcFQ#`a?)GxI=@M(mslC?4u5(zJ2iiqgcy^S=Wt#q23gTovt6c)xW3smnk)l9N9wYO(EMMn8ZVoO%HEg>-CG_J3S%`;%P--@NjDO9#F;4Nlhc@u)#`w(vO5Lq3^-B_hB~-c;B-gj z{j~kODDZi5onbpOcAcpZ0J2;9cb^-kpyBwLw%ysgBpv9d?1P!L{eZ_&&K%9q53iA# zgEMmxqFh@{zVw%LGhBo-9W*Yeu@2f}@dtU8+sqhl;0|yk>N_+-zE#9`R&~w3a+>O8 zfu6dvTfJNLM~5X`f2}cj95}>e$(oM&>`f+Le^;D?O}ah2yt*r%iswH;dIK-nqui|R zed^~sbBACdV56qYlwuCdF3>N8LmfN5hwBr99H~n)5>G}voVku=b$A` z((-k5Wy+g_lXiI3T)&^=x4xT}T%7whygx)#!3-}DS3q)-xb%^p8+ToozasHng@-U3 
z6otm-nPg;AfhoKy@-T#O2ldK8c0KIrLeTdrtY96tS!rswf)qXUoi6P3H^Zu{#5c0Dhhu0R&I8cJ>!~#th=X-C!b0b3D#3&HYRnV~_54&i;?XJY^vjewG0j z^qje=Wg$?%#zx*OcoW-duE~c)940hjEHah9BpurXqB}1f<8gI$A8GXYL1XfHHCZzc zo;`()kN-iuZK7%#!}0W;gxNLcINd*YeX?%9*4NEiwX`=A2y}kx)VUWLh~+%Im{F$q zi*%hreXY|fvD4S1m0^4nQR3{xt;Ws6oSRbJ*~+HhQJ$x7eT~=l==*uq@9_N&<%vu- zNYzN5PgCr6tz7;JgpLI7PXCd1<9s%3yCmG3d*)ZvfN#ds$>a~E_-eNL1ALH?QXIOC=9 zjl*3u>`Uj28)vp|`gAuAerA1cK)UuCxBmtyz>!t&dRHz=;puO1>*#{pfww7HV>GP8 z^rbn^##%okTK(aGbdkcB@sDSnE*n_}jrev;?+>wO0pjOV*1YKUTcpea{Zy4XEBQ^M z(TOfg^iYAHijTrrMan`>vN5N$J%IqcFNGR1p9w$y8PFP1&H6eyxK~b9X_$CMY?otOF|b;Q^}NZJpagTB(rca$tmQFW+g?$ zLyV*u=vBl3twv3G2=-(df%)ZS?Nl!cL5rKGh3H8!dB^vBpadlgwU1mikz0IOwRni9 z3)QFl>>r-7QTm(*2+NQl1 zY165)C5UEI?Hm;~-Y`I#w6oA6S?SsYYcG$X1P&&C0AApj)4pi`dE%BbJyx0PZ6+FS z#JUJ)9W^E1J%kJhr|t?AXG^!C`f|3(s*Y3!xD1q1^xEgk??YD}5F>${&~r6_g=AK39KnnjZt)cqZ)fi6ECLw^EIM@h>r>@8GC{@YZ20QCt&bL?YXBx*vYm;IPmd(+p+Hv)eo zCgR0loszf(F5_`8Z{7E37&Xs=ciAp2SZYvP^$Em#hqw%)DOR2+HDJ2oCv$0M72@;5^OEU|B%$k1tyuYyRd4cCYYZ>!y?@)1`5P$FlwVnj zMC|yfS0%=4_%`eN^sOa*PnYi&lf<&C_pr^cCy59Ptv^|1``&eYjNICy^sw6*g(qf%wB9~@4=Npj|?2Btsbzcwvs^i_n!93sf{?It` z_(WX2#*v~Uk_7ah=*I@Pz_+6_i{(ct3 z$RIGoGC%j^SnjmeB_+e031YAS=J;cE3m@);*HFXlsr3W&*>VNkIbFH#*y3xaP2XG< zo6N43$=>R|7m#tG)cmP?PnhgA?2N>{VrR`Vj3x5x$im@;WrzvRzsr>vROT=)D@xaM1-7^e=Uq8w60ai zWJ?%zm>&0}UO_IQ0J!*pu%oiYia|XR8j0>mLx-c>Y9Jm3YE!*h6IaY?TzO@)+ycZg zQ<1P`2mN!*hq(2AeO~Lw-i$%L?)52Hwsn;W zmV|03r#GeB?Pkwwm2C97BqBv|y1uxwmy5{7;?)}n@PAUExT#vN8kU>D-+Efws{e5l zu(|hoJGu)~>Xw|&-;#q(#~6MeQqI!r#=%@2gMgrSmKxGf`tAjUWCd@XKm4|R(InCF zfb!@1-TC3DvyjAs&DWC+vlkP$3%apwL~**`Za9RnQ4wj-QyNc+lO~}%dMjR#^@qT1 zxc)|U%-&4)-x@OrC5BYDNy)b%IN_=b7}+QCx>wt<7_Bu{G_xfH34Hfvy{jxv8A@mJ zy88z+N0zFi_NEiL(^dgEIP%1Nz(U9g%iFJ~6bxNZ*C$J*0$0m`6nELfs81621%<%T z$K&+N3^d|OB@34i>8Fqg`=|Yc$|vd%!OVI7<#MBNTU9`IRi>y>fFmALS9Jp`RGBx( z0v6(G4OV=cEPhSS6@>o`1DO1!LcbN#@IGM4z2}(5N!HOX)~CL|>IA3W)FM@6ogUJ3 zCF{>r>*`Km9je8KlF)D(Qi|oYP%_`--LxC;skuy0Z`ZYnSjJ1G-$423vaEcm!&z<< 
z=4tl`N%kiLF5S*8QR~G^z3j%B!J-^G^C#|F#C%+IJK>3iqaEx|MR^2&o2Fq{NnD3@ z2edl1z8mrg#)p5TH+~6G=@`H)8D@WvU;Ge9Kv3sNavi3RN)YU`540G;Ox#YZ}E2eJ?zKD`< zCyU_jne=SbI`xI!)q|{>8h6TDzR%>)@2P3MzR}2}i2isg*E+LaRWGgxgMDM99t^xi z@JySXRR{j=xZqa(0`9y=CQFA0+sF?stYMe;xU;PJz8gMr^UYf~?_T_s(BN?masCN% z4ZUh)4#;UcJ>ipCZmt!P=jJG`pWh~6vweci)*)ZMBqjA& zN;xKqiTE8U?RB|Trxcx-yq%`qyCB<}dh6Qf(fVWWRQFtI&{;9rrA`ldNCy1YOaX(Q zMTXh#Ze{sRkBO`)i<%$3$E~`M*O>wqW}Q-gx^;8DL642W2du9_>^feU|8>`@|Er@B z)7hG8IQY}+(56ewMLBynL2ulI+qK+hqOs%xXD(d`tE4}BDpmW^%jM~h2`b(D0}0wE zbK;;CWP?wtonh&bqE2N=>@LZL1{dW=#!Q;S1M%M+lYK-_ zDmpF&C4GARg~=bCu)Tz?-?I|7Ws8D6mnQs|dl~~da{lEWWI-0!04ERmhmLs1-t=|+ zj$4L~bvG>S&M($L`=j=1&8SS%WUs?yG6;JvN17%R;nzb$t*>ju`z&*yx>}nE_8SmY zLoD4WQ+GrsGY>+&lds$P@k%uLzqmqSxV6YPy1F&j3UyRkJoTp2>*Cj6@E(hWBNy;H zeIBN@q!q49QWpt(_}^G-7v?3f&R1-iNzH_C{gSHxJmJGU6W-mtC-%98Ik{m$8M_;T6oGa3uosFm$J#H0Lk!$s(a5?kZ8SM0D4D6&f z)L$fRM^?H2y-%^W(9-5yon$(ZXXbe>uk5$c^A-tr$M5pYZhRYc86%osXx^O)4)kaX zSE({2eLd@BuHA>+n-xbK+12m0ekBa-wRz?njK?kWP9?n*$A)O-O>9*Q%!#mXT0oWW zjB9;Yui~~<$Bm+yiMy^=7L%bFnU4zYt1L=ymt>!jAr}j-4f1FAZnbp+=j803?WgB5 zMf~JM7QadR7VaQqs6_9(D13im`Ri|xVSCPahwKw@F6FHEnhso9Z533?+v)a2D?+tYUt~6Nu2Iqz^R7QYdK9$)`9LWEd zML`dBw=Q!*;=vUXTi%f#$^(NpuIxJBC;Ynq%#oMPGCY%1C;$XfuLxc$fn;P?6N6Ji z$=}F@p3hWWM*OZk_gjRv#+bL;q&`_dZF+15mFIZ%DuxTuuYOobGF^o>J^<62wkY3W z>H*O5y~6gU!d+2-zQ%2N1>=<}3Nb*r$z> zhCbx{T!6?%%>5rJz}13oj9*wJ_z{hXCYwBW$5$t)^LG`@?SHi?P|`M1ryi9+RBl*& z)~9SB=}qEt>9h1P{|ESl5f71P2`PrhEz@)KMaacWfqvPbr`&3ve>qCtxf1nXi%K=yLGIy zNJsuBwj^2gR=0f5*uk0V0B}D*J<*uj`$2?2H@DaLFlA3jwYd(O?bu`4I^^?>c=0=v zo04BDP49OsNJuxT$z|vX@EF1t@!~v^x==6qrsW)oC+O9R-BWp6VBqPHPGSp3!?PFk za4DH+7@izf@-nAX*j59MAx_THL7s{*R-?a_%i%c((GR$-9l8lA){1M+f)}8M$_~-% zEz5Y+)X9>Cv0E0&lzzv{9~-x7FK6pQn{0zjk}#9}HPUA9F=jpbO~5Tu5`lV$ax}Ok z+axs@3nEbCXoPS!=RdYsHptIsLk`};{D|5MuHBMMUQ=ir5}7BUQC9w1zy$BhB*;Q@ zs;Dm~J7D%ux>ZlT3CilmMB<2JUj{QWJ)HlhO-PajSaoMquBzl*Ls&0$S1;iC5>RH;3kzy8j-LFrT0MgU 
zMz=LIH|nd@axAR2ThW8dN5A0Vt0zi?x_|rzd3aHGUFE>E8%R9Zyb~Cp5j!#Zt;2XqmZU7jKtO%myAV>&wJE#eY~oW5fI~UcdniKIz{gg`$j+4 z!N_j=KAPsrD>HOtx)sugbI)sJGVFmb|GSyP4bHi5kLqb0oc+;i7othRc?9i<7wH5Z zxzrd9nU3^R^ehv|Y}rZH+3~KY&h%zy_jCKg!*~BLmd-tp>HZJ<-x-GFP!u_gD5jHB zY)%_)=ge89laSMm%*tWH5S5${k!DuRF`@{mkk}|DR6-iVLdZG7{d|7U^ZbiHnC=sBIsvg?>}F3kzbQhqn>XNUZ)8J2HzUN*maMV zSNwE4yrcs$Em$kNE+?4RMh+hplI0GUGz%VPgcqsJy);f+*#!*jX3_Gb8nN-_#zq<8 z?9ofV7kb^W*@X^6pS5s&iu95`m^Gh5sXg2eJQn}&C5{Fl|ARMjoNm|%k6XdV>MuTd z>mosIb;VPH7mV9FkC}P3^?m{><31A-TXxHtesM4NaYg%aqOtd}214t6KI9x~g>+}K zNQb8G4Of`T-*?CzX-wuOi%b_aJ^FPQZ`_z3V(~i(N?PD}G-r-_3PrT5A3if+(Z$7w-lH21@U6e}Lz~zXMCV;l(fOW*5RRbNPW! zjz_3P$hlS7s7twv$fQ?bR7Qr$a`nbDnR;6%&gL(l4lEOXHjsL7+twm9SA^Um{_Oa@ zZKbbcg~AS-W<@~-h71zq{@1M2)yklFuHMW}vg%cG_Q|Nr`7#I${bVEeg<`;a^-^pxvWeg2oQD-V8{hNaxzU^5B+1#CU6w$;U^13GZ zLguwatrPz4Z5UaiMM<3v;H8q8oLaSeXaBYaUjKdC;ACbDaQOe;zS?r?cUiGOv;)g1 zM+VnkK=yp=3pdf_@TSoXbG)RkHby@w>(I`7-})+zgA9*JIq%98mJ%bWFXAMtigRFd5s!$oe#!4Cao;NC-TDJA=0mBdByOm!{=83Y z4d~SHNTC5NeeWj~84*KYT`5_`Qg_FcMv!$2LdqUdDNvku45W#?BP$-so9y4_ekmSF z&887?0x9+Q*?%n|OzDpA^5FUe`BF!Ey1?oXs(Y@30bjg^~uw~{~+%qh+B?oCE_g~Zh>HJ(xVRyiAu3u(4qm|MXeV3 z^{R1>n!DgfosX=)G|5_{2_$joLRuY)%Ab+})UPSjt^aaz|ID96xDr zIR|yy_EBo@4#t{vS232jK&i5^ z{FHrkY7~3>x9z2W_AFCkM-M^d+j%^r)Q>IU(U<)BR{}RZr_FZOX4M8C41K^2Eu5__ zVH|$Y){R{{76#;B$X{GRO1MsLj>7HQz4w4yx9&+6opgQW0s-`yK_9 z^f`y~f#?l@x+IkFYCaKE6 zT>89d(49e-9b>c46_*%ar3j0=F?1JfOC93RIQwrEHMSE=GAgZ zX9)3Kjt|n$T5&q!nSMWIeqdy**xgZc$`by@Su^{);$QF;@HkC?tOzIHALv`#Rk9KI zS6b;2260E?rMMH2lT+Q3z^ZIz{ny{7OV4*eiMmg=~QFy?eKChq+?aI5qc~rT#M_(OXXd5?#)TC7uA=f6;^;?oKFPFajq8Wdn}6k^w(F85IJO|AK;h}qs6 zvW26*J%CdQ@ryPaBzVth37@dV2jq>GE91eL4_LM+cFWt1A&Q*z9C>1dpy#R852ZUJ5EX?hV{|-z zucM>OMtK{2psZ57(TmBxp+I%svghd$Z}XZTbRkmfZ|sv0|59lyZrT)%XrMzD6j73{ z{Gcb*FZ$w5Fq_QU%7)GvbCZJ9Nd%n8L~inGv0Ni9N(;9qc@;%yQn;`e94?0M3OxLiQRz+&S$0%~7H3>*e%N%jlYWi7Fc8PH(S{tem{E1>mscERt z=!ixdU+Zc~HZNC#_P}V6_IJ`A65}4u-oBspC4L~@At&(L_ly~w$e5vT5q!`ACnY!Y z^V|)(eNmR9=v9DOUm9C|Y}K~tIKNiP*q4>U29C3PC@Og{powFs_CDk8pR>G&?)Y|E 
zPLma`m6V;jYnRVG35|fU8b5Wd-w0d zZqzQP(H0$gg1wKb@(=yZ8r+F_SBy*VhAL$jo53CF0b&~taI9W(Wp)oji8a^}U^X9^>c z@6}#ADpyF!N9~0UoTbY{9Dvg z^OTGI_Z2*2A0-;earMsXiG3$4zqx=)H}Oa@h7L^j@#b1S2r)A_?~06wDsKqB^TAv* zRrlGN#D;V?)xvbqw$4M>tzist1HjY&S`6QMiA7D>tyUI6?4^wOdFN`5deVVL1 z@wVEpV3g=ONr%yiEIIxko#g!=S@2E!YuHJr^~nAJJbug<_QV?@6kS(^f~%~So*WJ4 zLjz#oenilta`S}}I(oGTEvL$(D2?t#SyP9~i6iK3lr2J*^Me&- z{uOyM^FF=9+~^7UkXUSg?)5%<3|8xqx)zS#8G!K#48&?NTCSWpkAoFUag!X4F578? z;2Gvg!OJA*y*Op5$GS3>B@ehBm0Bj<;F+>L&rP|;VzgoT&muWhZ+p!AFj=XaI+#El zp<~rX%R11)zhN|?=Dio{QaFw{gkg$8LV;9Smz)_o=s1i6Pz|I0kh6X&ziEn{#18z( z!_Z_gte$7B5aQ~91e9b6FPA*5xV>ectS29Zs|;lpKO`%Nfb=aCu<%l+)S!0f!hcYe z^YL*leLG!@c9zzaF+Qii_3S9ae{m&!wKR4xApKMbXa(3HmFyxo6r~#Z?4L7boksn(-D;?Br4r=(m`|#4f=!5 zC*8DVhk!OJ*}UZ;LI@tI(5vj3xYa3WDc*Vw&pSo6#E?oZ&gsmJH&%cvruI=9xx}x=w zdcN!LYcS@L22MDSgFjMya@~?SRl$I@`(rc+dtG1PHkgjei0S!3ry?^M{;(1c? zz`2@Ejpl*=yjjkJAX~`IwfDlnVwxRc$2aGIms3Z|;31)`H!jETiY9E(!uV}=4QJF>YkCf?9tH4!?g}ewr>kXbG zE(=5(`bFNn4i$}Vos322>Cs}P7*WY11y37}|6{tac9eYNhR(sM3pu@_jb%Ykmai#9 z4RjYexP{f!cf5`m;nDzO5^APa(?% z3sULX>#T0->(3f(J&`dqM%S8Tv~fcrb1Tgax_jzF973DVfhoH> zgJ;!4Tf+8mi!=Em=6bPs`Y=6p7D^`Aq>_XPv$%`mbGS)12*(FUF#0o`;Ba*5&2@=~ z%D=;DyiRvEb5UuRiPq7cIZXM)WJ10@=cQ2{1lv4x*#b`X(9Uvl2BxCn;&tN8k~JxoHFW**lT>~| zdZ&3ru>u=hGJ@n2Q}?(Nt&3{-v!u|Sdm&XEhM%IY7gi#>l^@(i<6}Lh@1RO&%+&!W z+!|IiXGu&>LV)maIDIxs!zAT9hCY&2caz3Xft!b~YFEcAWo-`RiHzFANl7m5S{YOg ze2Ub%vi*>(JPq^+8oo#(z^l}==4~ij_VD!`-;#y(CuCHcaAqw$Wa6pOLseu9X5O{v zZNs_Lu_u+b<(8x7MTjPv5Y`~bISM6JYam=mBAgdM0EejHiCPiwew-v(>7_qTz);`5 z-RE*if;%45N<*NodB7oow#S~*o6r5tqKYQXm<#?onHKJ)@}1-(4pQTGNAo|G8=SPe zl^@h6a~(cbm|!k5LbwW!T$F-)?e<8Tb)jSWg$u`3qdheKglW&=QZ#P#4kD`@9;Qq` zW*no4(rA11ZkebWv~N7xbmBhhzFqToG5zgWNr}72;N@dRe!O30qdv7F`s{g3YKVTa#J!?O*jQyi`)`ZNl&?#wc~pW1=29yx zK@IV2lLfBx!z;Bzh!Bf<3ni%&7ZfV7Yd}6I^snb1IQRhMPTG{$ON}-PTtgb~7Yr?6 zXu>n&nZtZ3xzj<7rB5T3To}gjYm3&!(rJrfWs`B=fwTX*-_r5*3iTaX{A@DWu;O~1 zX8Y7N%PnU-UgszzXID-;#@8lG0tp^b1@>P@W&LPxk4W~wQfzD2T5%gx+Y9QTLaz8b$#E^2y{6Qugqis;T<;%7^4_oU8Rh6q0+-URCXFW+FWhI 
zXw}s)dd_nd%N>5woZJQXSyWNzB!a>Ad?QJojenI`8L1ab~}XCc=f+ zQ?5tzJ#U0=vUmrivmLz_rQu@PaiwKgi2B+^2>ROJcckf7NM~{0o$f~VZVrEe%9a*9 zEqh>PT7!qIIhp*Fu8~D?z3ot4h$v9_SheB$^hC|R&8JP*q z`??4DrZ94Db)h`*@vnE|isC-ex&jW&&p4)Z+(7`|D_-#|rc`0Nv@v8k@YqK9w!hNy ze-I4JUD}@An+eA2w>&JHd#p(%@M4aUKrPqgUEhU~wtrL&89VNDVkd+e7 zUA-tqNcRu$$hm|#&p&&H*arIa$bMR6W9h9=slEm`8E%lYODtbFHbv(P(N87gl!oqi zl})GElCDGD^b3!7RAFCV`nQKru?cSsQ_j}CT3)#xB5-9~<&WJT=PwQB@b$>ThcXGE z^X^%|V9ASVG4v9EF#Xdtrg2RmIYP|`%U6Ci%J_B|MxW4FSnNt%~naI@pW<2 zHCPgN%^L@H~!~4nv2N#q?@BY=_t!TWN?9`&yxvqd{Q9Oz(8*~q(djQ3ayjH zXK?7Lv-!}&hYy1#)Tm=EZb3hCvwBSWyR{M^(tvX}2@C!ulvJ#f?F!5Tt$efWT`i0C z<<@TJ^ zj8wVjOP+7&c25lGQ#gOcON71rLv7bE#*XXEHoqnet;r8w+?{`*bL2T}Rml)*?e*_r zG4a^{plh9?Y+s&VL6*ZGAE(XPhA(5oo2^#MTGP|L&hnAun&3;4hr7?lnVzCU2>+DY zw2cP&I?bCX@|)IK6h~M~>c{-36Fhx;Z*eG%e_pxT?zHASR0-yv4#l=>aHR^Mf1SSa z@AaXl#+;*M6@e11#1nOsq`&_=RXtv6BKi&m@aYvT+qnZQo0&bpk2(6PksIvWA`>cs-g(;;SFm89vBpv$#~Gp@OO|_n!>F0Xsaj% zdoCdU5bHgoAz`<>~9`BCUC>HjTrCaG)RQWxhiS-NCcJ%MVP5ENIpPurpOap9M&2spUN za{f4p!ejAQoXkUrNl;3|%^f}st{CobqK!}oEohO_iE) zx+vzFWPyIoqD_EwDm-Tm`eb0{ZRWvHhU3Id5-sN3goMf_J#OEVa2G~8If~C(OyTy9 zTB4QG+ysQ9P&n?_r4H0Z$`S=cxur}5H&^hjoqi*hvM6kVY%H^l#&7WN;Xp!%&Yp+I zS-H9_K{nFP(~1lFn?RQ!wGXM!dnAx zosz}>^zmFx!=nUf%AI?J58?1L|9^W@QK_RWtpz83_z)^mq-=Ku^Okaq)1ur-VKY9uO*0MaCI{1Au zt(iqM=OiD3bU?pYY(B&?wJ^pBNr==HL+ZBL!sXmj?L1DY1oTacri}6&5(>+Ed(Pm2 z5>0Mqh)GN9CLh`p=7i@>oL?17LQ<&UZnh-rB0zbUMhWW>@FSnyM{YAtXiQ^N822?l75As}UM@DMH z9un`J9EEo-7{$}ifTW^9*rNTDLQLKU^2^gsdy(7V0MiUW-&-~mV3r)XmC})jWt%Ra zcGrvDxPZK}TQBmVb;ttv5EjSE$0RStM1-H#4imG)PYLWk)yj|eIH0_*Y8G`~>e)x$ z_~q-nEr6S-QTRQZ$ISr;mbt}E=CMNKRDS_Wq$&|C0e}mOua?)=1R0cGjAd>mK_;1~ z_LGLpQZn01gB>dIbILU>JjCv$xxcixZF?k7q;Mb4i-;s$jk&&{5$#e0v_yUP>_pQ^Zts5*gwFukuP2a@c%K9@P zT-Fj=c6wOXa%jgb-9||?b7ZMa8y49sH}k|EuU7NdQ&@n!Rfg<+sQNm^%EukeDB?{`Lq@;uM$j$YFKxUc!e82R)z)A*!yCV#Mq|BI&3lZ>CD*LUAo@P^w9 za2Js?TQPbMJwi7>N81ws?OjAi>IeoFBaaEk&W}BRaO$MRy|>GU zrGF{X*|@Kl_P`;24XiUL5;5I}xZ>aJrSr}%B%=0q}|ZxOtt9ers-K 
zf*0|Fu{jyI18{EgU_nKN&(TuQC%><==>s*MZX;ow{X!@u=5y@4tJs7XXtB9=$)x*z zIeN9?aULXXWn4Hu+LG)4nL_NZ@a*pNKVJv?r==+cmaUr6^`1Grmk44<|#kJBm?-F4lzw}S=spLHf z=roiF0cE|{KDXL##W7geP+&dx%3Kd5v`jHozptW2jf2*Z_ilTYsE#(q9Dao zJ2$-G98O4JtdZs5sQw>hkw(2ExvzXwgp-_#J(RujFH$Z90H)87BA-tRBD)`(K6ImZ z8Hi5d1mpO%@U3!oM%3(hC4)6Oqo2HAcB6>5A;6JC^@Ax>su4lHu;7T+iImcn4Z560 zD@|2y1&8>kXWyENVbBpuygo`Ld-X#vX!5kTXhWmiS_yc*piHV(YI}b!_7u+WZy#j% zlVcdHisa@4xg;)P#%_)hvsM1+t`ZMO$UqRM;Y%(-R!i0~K9$g;D7~Nx{V+TEX%4jS z^y!FlNJ6OSBNJz&@I5 z%%bvP6-4%0G;+QUlj*Uon`N+$nsB(pZ=wtay(jjJctI6QA%qWo^yg>MYCNbDI@Ocl zXnW4mD_SEyl`B#E?!z)$#Q%dyx8}#9C7OVH0;j;{6kMrF&65&y<3C8ru{eo-*0-0C ze-G_}{iX!;@pz|9Z~U*_eg-X>RSLIAquriM3nEqJ>!A0^^5-}+rshox{w8f9 zdWHVUjVfsD<(7W3(s0uRRlq)cI?huF;}M-b8W+6|WNiL+Va-JO#B}AP*3O}I&%!Uo zQ0O&mfx$CoNHEmZHceKuLMa{B`MVCZ$j-+C)pmIZ8n>Yq^l*2HJEWzj`(q9VTjBUg zK;EC0mCrrI|*5W`-;4t}RGC;Pp|F z(**SL{+E&mr}oUaJ^1iSGU7_mLcH|R7t$_vlJq^=eT>QZ;}w$G>%fqhdm*Kty?VIZ zANKvQWfwv&t*O$9DS5!KwMHdWPt?CO;?3>LuBK3gd^XG_z#AK`u_f+Um6Tf39sLxQ z)NmduBZ=cN=ftWVfkS=^)ehLD!47-%jy)3i|) z_VVCXfFDXRDNd$u;>Dz+xw)pH+4+~R?Or5MXg4|2Y!9;L>lfE=PS*0pGLUX>#70HG zl-p>I`bQ2fT;~53^-I|Mnd!TFNMm-NHC9r)!2kF*M;oRGl1!-+`5A?$!roxeKv8yT zkMiq1FX2fy7lCaJ#y$tDI4*3+iYHb7f+-9xpTtPwIe*_zka_&!ABcAQ#?jKJ1-+#) z9gtKyGuB63cmtrl89%0XzkJ3bvsoAP6YG69f_*2Zz|O> z@6tt#B^qVfNuHXjM;^g}2xN~BF7Eq#mbZqnTxO~tQ|^e^$j=^L^OK4BN&^McmnX3w zu1jO)SjYnHcdXr}uKysfU^V;V{dKA6X>w5}-WL+6XckjtmC_C3li{(eY$u77Zz|F= z_!A=yZy<9gA<9aA?v%a7WmQx&jO3na#i-X_)+7D^6?^A{jc56dvgIRpp@CPA7uQNw z29#!Pp_3X+B}39Gtx07sYal5k`J|QIM<;7e!H2rlX);8cWb1xOg<54vLcRvwo-2W3~PNz?{7X2_YHJ1Og7VQu+M6ySG8BQ&A?}g#)q{=7?u~Q$RfK7fKH9#yfrL; zR7>fPAM(+r$OE7Nfu6_-=GU<0&FGJdDuWS+vqR8rCETPll;kiPVls75cEEVX8uSIF zhIwtw+fX4jtvTCH9H`DThy!GKv~Mv7AOYepAXum0)8@WFDJFO9sixLRo@BVKZ*TNf ziap;|aoNczy}=SOlBZFQszaX;6?OZf1j48eX4n1hrMjpm-sYy#kvnXE@6CG*J7Zff zS#ccV!NDi}J7bl$Lhpa0Tr8QVl0o;6O(r1~MWha9>h{#d8P=NA*oR}?pc`Hf5whMC z{-cb)=gK}am6Y;teRCXLZR01andBARd8y{nXowX$uY?UWx{M28$ET4Z4oK{qQbgQGA0{haMiO77zo-My##XkV82c&=1shK=xC-q8N< 
zb`F+B$>~ssEA`6(%$1B+llFXfOhlD_mn9|sR1?1RPW`!{M9dzi2fGHb#{*?Zsb-Ll zC9|ZnM~2@iPHo6{8rgjFdccakl{n%t@bEk>KX&9z=4T4(a1?GZob1Ic{58!WzvCG- zQ^Y@guJFO=9E$Sc+#V&M6-}MHW8S(dU8=0ek+}jNNP6O}Jsu2#o?|wt!5XXDqvG4h zYX7{ZVA2HTWN~j87hGxFTr+gt_ftHTq$;={a2A=9I!WU(mKzn)pp(-0RtB15#=cs=JF zFHFAI+K6q3yNo|jEq?EnPMWULoWFh=!+Wb06siy;c|Qfgc}g9nykwJ1_P|pIVL}%I z+T~H78u;mE3b#IAdTS*xkAuIJ$-hSzFb>LomQ39IZ1pJOf64o=>y?56d-8VFW@|iz z(p_r)h0mP*rP3x|%P|1ar@|1!pFTr+1GQa7^}>a2P)xoQ(K+bF(BOQ|WwisFy&AZ^ zi3ZQKR|KxER%ZOed#>ygp^GNO)NzvDc&QfA_h91Dp$aPKk<5%c++FxELZV8O&(Z40 zE0O79cPWmtPpX>aZR2B|>;;3w#Ux1AMg=bZtbB6Uxg2t8-2SLfwKXUV@}2K$U6p`J z4VxVc9RPJ_O0{c{c=YxPxC%j{HS#4HLs6f(hBJ0X@6yHa;S$g|1KXR9Rx$V;g=&)1 z_|w-k5?4$Dr^;sh36|WO^2fb+P1HgEWOl|+U*@RjI8H=#~OLY%WqHNBec-tYA?7^{b2UHL*jBHU(qxnS0axIN>X*-Xml&k9x& z7No&d2C&@)nFxJL`MSQJ#j0(XPCyk|9NSbjNH*ch0L)16W%4BQUH+)7d-7JlU{TE4 zwY23@l(#$CN98dM0ivg_En|Uf~!W?km7Z=Spl}t+5-oYNhci{0Bv9_lpoa^yB=3&&4Kl!A&kG{_Akbb*i!@b3*v~Pjw5Rh0i(4@^MN7>%C$R*M(1}d{~ofQF}@g5$FX6<(Zdg zjv>n2LfA}7=J`PY0N9^qLQgt>5!+Qepz{i%Z$6Nm*oJ1*$#Zur9CbR}x@{2jm}#I!!?a=Y zs5{y#N~)XltgrmJ^J3}nf7$_5Z{%;>pZ*~TtkVKR_nec#&|hz|@?OjL0|rdilK3CT zAd{Fc?toS@@EM5zQgz|`rm5GRW8YdjA6uP>8!u+{9WswJh?*hJCqbWtGIf4F`d6H) zzbjFECLU6XoJ?Y9{klH-f^Vbr{!DFZO9A8$*GF!88@91#Y;T}30o3Ui?{m+H3^6&F zw(?(7KKC|cGruRQLQUg>6}WNfioy5kXAN9T1nrXql~*NTQge3abtnS@YQg5BZ}g7v zJCAA76Qi?X&DRyfB^p#D*tE4W(9w#Pm>+WLfs{YOIQjRk-9_fH2ZChFIvh^*`ogdD zUTQ&2lz#}Tf0sB>u07u;uXgyGHY_UW+oS4(1Cl!1)vhnZn<^cwsH!|fg^@+qDbuj) zZ?IoW_of10pn{PP2b|K2|eU4n0c5D|2=OAQL&7_r{O{?;1 zaG+1=!J_^UCII+Ssv{X4M~yERTAN_@f{h?K0Z@#aB!95}+GFQn0KxXCv8jNS-_!cH zjvcaBfq{!rgCRR8kzgm~E^Ah`XwM?3kp_o{ADJwu@YjhsmpqYoK6X4rf1Spk=%Jrh zb<*z4im^)dXtZ|GF!|6~IYI5;h|=+VKNUIOu31$W{uftjwP#N|&rbQH`ieHm|sW@FWoF`7BX4-Aq%?4-UhIDY%x;&-wWRr9oICI@?2cR?O#q6 zf;EX)^gozBvU$WI`#OFs`@M2x4V%+nWmcd5{>RN%_WVSRUWiez*o%=={-lR-8mY3V ziC>|D!%xyCSX~tswz7h}JczY@4V$s5qZZuJh8?c;?AyIDF=DFD`0}JpBSVU*?4!cl z7lzD*!`EoMvqqQI>MDNzZFZrMjb~Ez2E49lL~wvNSfs^<#o@K`spj3}5C{Vwu2l8% 
zHbfZj#IW9+mlC6fg@_J31I;63#WrDL4_Y>jZcw>TK&j9zVRCttC&Y>-XaO%{8pg2t zsiS+D&|;EvvZh>b(a=r$kfd%LUIZZlJ0zQ|+D;N#?Jp&qXk*ZAlJ~Z{gWtYg;d@+^ zx*24CmKZEzrvD5PKj&nrglo~mTm>X)&(J1W8lXB!HJC1?uM&@SZ#AKJTM`U(!ZBF8 z8P^kHdk3syWpBmLfwt-qWrDKQ2i@Mu)cBhJ*{Hb&YR!2fdLqZ%yuDpj?kQu8#(v~D z1Za<&>sWMc_3>(5sBnAMtqbp}xDCD0`|`ByvW~+j;ryvDWEqsvcgHr26Nbr0; zuhh4zkk`U0{d=dGR>nq|3If?nmKQN*%Z31iEtT)En3OzWEaye0I0RRDsXySXC09vVU#skH@w#Waz0#ms_#Ny9 z3J&BPSmN|%UmN-Rq_LuE@B5xYjkj@6 zc?}UPFh=W`@r1HPW9epGt z-mHG~n$i zYZCT^!Z%x=a0kz^C!Iu4mL-xZ<*ndE%rujGl^Tb$9 zOab~={F0QHjxkLgvGoa%8jo5*%?l6rxE6C=dokO~XNFoYBi5+jXq25~aWNLCuoCmT zYgn|jOpDrmWsY2oHOxKOmj~BjIl1fNhm2i8dZ^t?599qi_;T^HjDq}~zCQl=XT$qL zmJY9V*vEW-DcxkKHCEO6tN6w!)-BW5^~Q=ASSctGgyFzj=)&MF662;UT#A@t`1?AJ z6288H(AW6St@IIWSq3-hS<*~iw!4s-QVXXC(mtlnTPC$ps-`YWjm}PHi$Qa(aJrrj zm%tR#0@sYbM0a{^Dpr2Am5Tq(AnQdRraW5_34e_9urL(FEOtNw<^zS#u-qB@=SBF^ zYSgUyA0hQodpv&p>j*&myn^8H>4|hB0dAVVyK)FWw!pA^5j`^klx*QR(;Gj{6m2Qx z-CO-=W%wXfblfCU|3M^CE3IN*w$AT+~EQ93Q8f@s2jR1)>(l z`!8CH6B%wz8V+}}cAR7Wti*s7zXsVFW*ccgu8xe%IdgMDJ)y%QgD~FZyf)7d@=)2h zV|iC^1ZJ(IQae*xfb(R!WK?Sd;HCn2a}lH06xVC-cyq`zBT0il$b>kJ{_;!Z*T&4j z4izUlUBC{9NA<|4g0k^ca+KPQgiDrVrt3`hzyXANFu!ppz^r{u4{IiRAGhq1>MyBSH}~WB00!uesAi*kRKTF$PIhI3dPyyhfI_ z7!_C29|xv>zh4rPM$~!~w2$OnIkR2q`2b6+JZa>p%2V+=I%d%d7Ns`4>Bv?drAEj` zpU!XJxXstKwPfsGESzW*54g5|Pqbszh{8rm;wEl;me0>lWSbOPx_luy35p(_|?TJflr6uUnVGu5Jm zmy(53RUu*GMx;kUeIskU=d{R=GmhMXCuANCn}O8-pgR)s8u_MVOqVvq-jvabo$nF; z!-s}s2SOrB4niD6p+5kBEZ5}UpgL+Z4^kz#cW+!@M%~)5;60OB%cB)Z(+}+bfn~I@H zcoGyTo}A7fE~Wk1gvC%^O1?=Swit9wfP>@cDo!AL^PCuQ#?H9O?jt12rrHY6D=mba z<3L=gZIq?-&*+CVel{r|6}~$!0rTqx^`i2Fqss)lW4*-r4)u7d-8fFLS9KUADs9Mj zbb=&n&oj^A$u~bojW-Z7!H26HO`oI!7F(|F)8;_-sU`WS1kCH2@b76fngFw{)n4n* zfZYd{v?0;%c-|&)5e&s^<;JXkkQaf5t(P2v&Dp~+&RBHXKg(}eew!-<>zXBWK)Tnl z{c({fy6m$v96Bsw3k)>WW0?p2WK(x@JTSk*(%zFkpER_a!i%8!?$k=a9xGcB5<0p*U-B;1oTc-|6)FEF6s`c(gj#^rbCnas z$Pjm9)b6FKi;&joqrA6-(}V4*V|L8R@-!W1Vj9|q`X;5n00X3%&iIyt6&d$l6-V+Gx!3k)81 znhC166Xa}t4(#8D5I;5RoAYsUwRF`o-z9=C5%F?>4zT2K;ct;gKz`jysC4-7A0 
zop3_PFek>B+$pT$#mOCiO)FMui}oKW!StHe-U!+r7O(65rffBe7pxbDe)@oK=z)}6 zY!wT2^5WHa9nA}UF8Xf_iXN;)Um-!IvNjD0v48rrhpePP3_&ej6t=)J+(Y2$cUIPyz6_4SUvJ$zysXSk#0hgT@8O7udqB#+FD z2>tEjLwBhSE%^O06p7KZ{3>1!oVavRX;wd3I2f#X*RrsqqomREIKhm?dCSmcl4C|J z1t2o$MPO0U5EjS&`GI29yCu!)=*Rm0Xu7QS!0yuE?Qvf?;pEh}-mcsq@s^iAuT0~X z3SDHQbM8#%pm~#WPCc~Td=3~Yf2rengpmC^zY!}Py1&$s&0z;X^^wD+f0Dg;)UM1+ zvCic#)Ak(g{f@Vt{1-ruYcsq$mg-;4b~1>&r_cRhY-_~+al+P#2GZ>LcGkPzhq4sc z(%I21B+jXfVwmsj9f#r|pSyHp?=deaUt33~}bd$T} zZ3Z^Qrc)yoA;m)m`;B&qnOH?9FHZ$9ERR;}Ig&4w0rRJIk}yze?cxN$4o6s=V9%-l zW$Yln*(gH(CAK^QY`Qm1y67qEO_tD!xJaDIutq4$*Y$KY#>PeQsee>H;&RUO86C12 zLF5~^_G-<|*&%>-jA)%1ew1nsQ(g}CGakh-F|(-PVA!rR^ymdyYtyM+K9p%rnxBZ{hZ*YU z`%1~70eato!|GTW{`^i3){Idz&PXFU25Vl(8Yb_LX_zy^7GCL3f}Y0~OX-v4QcM+x z-lKR)pF4e@C?CMU>s^J;Wrogongi4{#mcFdIGmnI>Tk3U4=A*@9Q8Tk@@A99D~(WI zvT1bZJYKYPBCtgwg&c4DD8#*~#R;_XyGKU$i-}(8&*6C?KQuX=P6}m#7gnqyQO+++ zEauRq)U$-kqR>Q6mUyC)d?{##d`XLFFdMz~?Qsk^6P6pEJG1hnS|T=+5#_J2CM_PC_Z z|9=pKykQmKmT#55 ze~Eohdmp!5e7h*3#2gl!9UGHi&VL*FeVoIs3{TkXt0bF0=d<1oGzeDlzGn|NdSs)2 zyAbPWhWo<|zo&!54Me_3KgmRL@_s9kP3Jj2wpf7tEF~z%CqzFUeLMkmK|6JDO7|W7 zevU7Fd0M*C2b3DG*ruVdp}h(hYXg*<0dqV+MP(c`)P870C-o-yx$1nUme%w}3yI!*Ka%Np+d{BwC zTQ@zGMJx1Oh^lqSN9E2!=e6sc_jCNZviVbW2Qf!A=zWz;k+khq66DIwrqi(>>i>cN zb=_w(1oR;q%2|fBzpCW@!P{Hh5+m}&n0JbE$wGfvywZz5sje>oq0r{waQ%F>iBysg zaKHQ`*pU)a<`$YgPo^$goMGtoWZYyqr0!iwihmu^`-3g(dCU&kJ=Gh1?PqP`+FqbN zuzAi3`NcjVjG9>b#x%Xas}Dp~BzVJW3S=S%-Uxpi^C7{p6E6rjFd`A_c%m?yfi5-=P`9-&K&*K>?YExdqEZvi)Ge=30PCkW0!u?h73paYEhE<8F-_4Kr)dkbFz2Uz?wQgxn$GST zcnAw@12cfA=l*XxFrno??ZnO1-Rb;KF(doTe59N)af=0UA0fcF~zcU=3ku- zhVIh_)y$YrcCi%bh2n52QtoZ{@eF`x`nz8^I+*i_yduPKLxbSAo2IMa)mfqM$N2fo z;FmIi5Ie5{c9!W6d`h8_%KY=Tw%rtIh~NXq=2E?M}Z z1Ju!(9PvyG`McZ$10OO50+2{h$KE0B0ep(gp`hH-D>6}E{|~B7k(-c6JFhN?Zm!N2 z^qkPN?x(5)9fh~O6S(nFh-0c%d(%REoVjD(amlGc;%H^oBM1*{RzG}El;X^90c%P< zf`6^iU)YqF+%sr5s`OpxLHiAZa!o7F!0}lFSIm^BpTkY^Ey-GCCA%?3t3IyR(bH0D6k#&OqT0;#n0dt z_V0S*2vDg5bfkSRx$5ZXC^Sf^+N+;6B|q 
zU9HsR4Al>yB_LVw<+y@;ex|nHSn)P7<&p90g`8JsvMFg}DGV%~>AnZ|!$wtvlPX z=2d(dpAhu&v}#mf4_!XV!Y!GTNV15uT@}zFV(49M5)qjBBRnyg?#DMhmfLQ-E-V>R zd?Ez(0lJHA!pQlQA?-Cr^eWt|Y+gN-3TkW1^y_j?&1(10g@i=XY8XWcfo{Kc!2?_h z@}2uI_b8^TwAA=LOG}JEp;GG)b-|3&;_g*?(mot@CHv8UO}bSY8LU|H=7NNp2^1TU zR8oB{EUG{>?0=U4X`q6|c)Us}4ga^%Cvf?lRzF2+#eT*`0EM49@_m?ix%75wqx;n;c=mRgL|9HDudtl2PpnaU-8xI#0_D#zqsjp$={JlDDvmu>?|;@u%n%g z2u%4yP%_X3KLG)}#3WVO zq?0TH*aIIX6a7`$xv`MMhIQK$g?m|geNg`Y+UJ!ZbGeyl>FY0{zP2#pXUQ$J6yJV{ ztE!-QSUu}_$fNhzfiaP_C2;ByYK^(G?^oR*h06H%8EN@YMv`N1&tm!iXmi%Gr0KG1f>&5 z`bYWHjFFO*Bdxv*yn9`%LcAX=-_O5(Hp71=`|Fv6Fcll=4pQ5=I$eV&F>7Dg|U2hV>Es+Eym%Be*FsFh4>{?Ln zTFs;wu3=14C8OOs8D?*CuTFx|f_zMgL4oG2P{f}Mk87CjZ1Z%T-6w|1&^cEh$0LT6 zdQeoZYzt{Y+*ErL;RGKj5!bFkD#O~n0L+l;?0%722PapL6S)3R85ru8{kGtqJ9}C8 zS|%{^uFn#wBDy1i=>*Mm(srd8RIe9$ZcI<1s=aNtAcxRNUfHvl<=o_)B!U!cI10Q+ z{4wTh&pBRj_2_7MB9p8^sp(MBH@bY(bTw-|_D0;nYwRpa>XUx4OAEAldN2!BSK|#f z)0+g?w++JT=c?{=9>#}FQUlnI!a6zo9&Q@qphOY2r${LjqmY7G0ejX1&``O!o8K)n zf8z6qLgksYY!m2N8Z;+D>_P3ndHe9#~@jLX#Fv?1i-8xp$!}p#nSV4 zzTfGm@DNz_MZKKsejLK>4d)eYWuhq7N8U=(R3X{4zL*m4mzIM}f(V^8S3PV`5UpIu zdu*H7G0Kjh2Zky?AO|FVVGj?{ybV#FdC8eqr+2oJt}Kl{+vDeyuYiQByA z0USbbYema1P$Yv$?Yygx7Nb9uj@&zCNKr*&TgjC7-q4QTy7sm=RU0b9J%rumU!sq_ zW~VX3K?yqkI3YYlqWe_&1m)iaw4|1p-OL1LXG8`D7E|GOcz-?A{Cgq7hGJYjZT3iz25A-I$Y+Z8V)S8X&O;sauJov3gZKr5NV|%c(9}N6Ocfx)Aw_!XOnX)T5{)l}tQ%d~?2!%7(o_mQ zI)Kp;RL+bd<7f%ApbSZM!1be&+yoIQG-wcQaH=xLXSeGOOAF*?&8Gs#v#7a0_NpP87=dQ-nb|G{(l%=J#)}UR7!S<@ z{I52Callz0wJL3kGpE!xSx|`@lD1iUh2@Zq2g$yM3Lg;n+$EgU93DNxHM23-N$%bz zkT!$7MR*Z+g!q%V>qq*+!rIiR0VK-(xNVpr;yMk?GRsw_keuuuNdmn?^tOlV1*IFP zd!+?`-65&=ltYKl)qL2s)_c%>aVGuW$Mn@t(%XQZrz&^^L`&!-T;$nKY(ne;@L2T^ zaDXrV(haR?1j+LEe#+8$;la`W)OA98mBy8ds~{X_$DUEsLN{KG23hQ`4Qg?2Jt7W& z_#f~IuRlO0PyQQo*u4?h_TL>t)9lmae(L+PJUZIL(X267`7hsLR#?GFG!PnBmNrn} zeT1<~lI1nmUU2&9?ajbX%bDY}e)sUwQJ91&ii8w-O96`*&%z1Dc)x6qx}HZz>YG}^ zR-PGW-}?3?wMS%tpI>~nIY~#Hln-Cr62B$f zK^BhtJs@-noY3sZBwC_(N>a1~Ul`3aPSPcbKk1P8WYLzNyAMM-EC6hLe`-%;TLy`ajk=Fs50(vWjKM}-SP(nli81v|M^@4Q$ 
z`+avj&V9Pnb$n8`FvWuClJ`S-pqtoCv>Ty)HB?o_`K7n#DHyWc*cVWN9@XH_qr>9O z_dufGj>eqg`gem?3D(2&bydzBno^!nvMeZbb27X_z@}tF6Qu$PyY`;snuct literal 0 HcmV?d00001 diff --git a/yolox-burn/src/lib.rs b/yolox-burn/src/lib.rs new file mode 100644 index 0000000..c85bcc1 --- /dev/null +++ b/yolox-burn/src/lib.rs @@ -0,0 +1,3 @@ +#![cfg_attr(not(feature = "std"), no_std)] +pub mod model; +extern crate alloc; diff --git a/yolox-burn/src/model/blocks.rs b/yolox-burn/src/model/blocks.rs new file mode 100644 index 0000000..320284c --- /dev/null +++ b/yolox-burn/src/model/blocks.rs @@ -0,0 +1,271 @@ +use alloc::vec; +use burn::{ + config::Config, + module::Module, + nn::{ + conv::{Conv2d, Conv2dConfig}, + BatchNorm, BatchNormConfig, PaddingConfig2d, + }, + tensor::{activation::silu, backend::Backend, Device, Tensor}, +}; + +/// Compute the number of channels based on the provided factor. +pub fn expand(num_channels: usize, factor: f64) -> usize { + (num_channels as f64 * factor).floor() as usize +} + +/// A base convolution block. +/// Allows to switch between regular and depthwise separable convolution blocks based on the +/// architecture. +#[derive(Module, Debug)] +pub enum Conv { + /// Basic convolution block used for all variants. + BaseConv(BaseConv), + /// Depthwise separable convolution block, used for some blocks by YOLOX-Nano. + DwsConv(DwsConv), +} + +impl Conv { + pub fn forward(&self, x: Tensor) -> Tensor { + match self { + Self::BaseConv(conv) => conv.forward(x), + Self::DwsConv(conv) => conv.forward(x), + } + } +} + +#[derive(Config)] +pub struct ConvConfig { + in_channels: usize, + out_channels: usize, + kernel_size: usize, + stride: usize, + depthwise: bool, +} + +impl ConvConfig { + /// Initialize a new [convolution block](Conv) module. 
+ pub fn init(&self, device: &Device) -> Conv { + if self.depthwise { + Conv::DwsConv( + DwsConvConfig::new( + self.in_channels, + self.out_channels, + self.kernel_size, + self.stride, + ) + .init(device), + ) + } else { + Conv::BaseConv( + BaseConvConfig::new( + self.in_channels, + self.out_channels, + self.kernel_size, + self.stride, + 1, + ) + .init(device), + ) + } + } +} + +/// A Conv2d -> BatchNorm -> activation block. +#[derive(Module, Debug)] +pub struct BaseConv { + conv: Conv2d, + bn: BatchNorm, +} + +impl BaseConv { + pub fn forward(&self, x: Tensor) -> Tensor { + let x = self.conv.forward(x); + let x = self.bn.forward(x); + + silu(x) + } +} + +/// [Base convolution block](BaseConv) configuration. +pub struct BaseConvConfig { + conv: Conv2dConfig, + bn: BatchNormConfig, +} + +impl BaseConvConfig { + /// Create a new instance of the base convolution block [config](BaseConvConfig). + pub fn new( + in_channels: usize, + out_channels: usize, + kernel_size: usize, + stride: usize, + groups: usize, + ) -> Self { + // Same padding + let pad = (kernel_size - 1) / 2; + + let conv = Conv2dConfig::new([in_channels, out_channels], [kernel_size, kernel_size]) + .with_stride([stride, stride]) + .with_padding(PaddingConfig2d::Explicit(pad, pad)) + .with_groups(groups) + .with_bias(false); + let bn = BatchNormConfig::new(out_channels) + .with_epsilon(1e-3) + .with_momentum(0.03); + + Self { conv, bn } + } + + /// Initialize a new [base convolution block](BaseConv) module. + pub fn init(&self, device: &Device) -> BaseConv { + BaseConv { + conv: self.conv.init(device), + bn: self.bn.init(device), + } + } +} + +/// A [depthwise separable convolution](https://paperswithcode.com/method/depthwise-separable-convolution) +/// block. Both depthwise and pointwise blocks consist of a Conv2d -> BatchNorm -> activation block. 
+#[derive(Module, Debug)] +pub struct DwsConv { + dconv: BaseConv, + pconv: BaseConv, +} + +impl DwsConv { + pub fn forward(&self, x: Tensor) -> Tensor { + let x = self.dconv.forward(x); + self.pconv.forward(x) + } +} + +/// [Depthwise separable convolution block](DwsConv) configuration. +pub struct DwsConvConfig { + dconv: BaseConvConfig, + pconv: BaseConvConfig, +} + +impl DwsConvConfig { + /// Create a new instance of the depthwise separable convolution block [config](DwsConvConfig). + pub fn new(in_channels: usize, out_channels: usize, kernel_size: usize, stride: usize) -> Self { + // Depthwise conv + let dconv = BaseConvConfig::new(in_channels, in_channels, kernel_size, stride, in_channels); + // Pointwise conv + let pconv = BaseConvConfig::new(in_channels, out_channels, 1, 1, 1); + + Self { dconv, pconv } + } + + /// Initialize a new [depthwise separable convolution block](DwsConv) module. + pub fn init(&self, device: &Device) -> DwsConv { + DwsConv { + dconv: self.dconv.init(device), + pconv: self.pconv.init(device), + } + } +} + +/// Focus width and height information into channel space. 
+#[derive(Module, Debug)] +pub struct Focus { + conv: BaseConv, +} + +impl Focus { + pub fn forward(&self, x: Tensor) -> Tensor { + let device = x.device(); + let [_, _, h, w] = x.dims(); + + // Indexing + let top_idx = Tensor::arange_step(0..h as i64, 2, &device); + let bottom_idx = Tensor::arange_step(1..h as i64, 2, &device); + let left_idx = Tensor::arange_step(0..w as i64, 2, &device); + let right_idx = Tensor::arange_step(1..w as i64, 2, &device); + + // patch_top_left = x[..., ::2, ::2] + let patch_top_left = x + .clone() + .select(2, top_idx.clone()) + .select(3, left_idx.clone()); + // patch_top_right = x[..., ::2, 1::2] + let patch_top_right = x.clone().select(2, top_idx).select(3, right_idx.clone()); + // patch_bot_left = x[..., 1::2, ::2] + let patch_bottom_left = x.clone().select(2, bottom_idx.clone()).select(3, left_idx); + // patch_bot_right = x[..., 1::2, 1::2] + let patch_bottom_right = x.select(2, bottom_idx).select(3, right_idx); + + // Shape (b,c,w,h) -> y(b,4c,w/2,h/2) + let x = Tensor::cat( + vec![ + patch_top_left, + patch_bottom_left, + patch_top_right, + patch_bottom_right, + ], + 1, + ); + + self.conv.forward(x) + } +} + +/// [Focus block](Focus) configuration. +pub struct FocusConfig { + conv: BaseConvConfig, +} + +impl FocusConfig { + /// Create a new instance of the focus block [config](FocusConfig). + pub fn new(in_channels: usize, out_channels: usize, kernel_size: usize, stride: usize) -> Self { + let conv = BaseConvConfig::new(in_channels * 4, out_channels, kernel_size, stride, 1); + + Self { conv } + } + + /// Initialize a new [focus block](Focus) module. + pub fn init(&self, device: &Device) -> Focus { + Focus { + conv: self.conv.init(device), + } + } +} + +/// Dual convolution block used for feature extraction in the prediction head. 
+#[derive(Module, Debug)] +pub struct ConvBlock { + conv0: Conv, + conv1: Conv, +} + +impl ConvBlock { + pub fn forward(&self, x: Tensor) -> Tensor { + let x = self.conv0.forward(x); + self.conv1.forward(x) + } +} + +/// [Dual convolution block](ConvBlock) configuration. +pub struct ConvBlockConfig { + conv0: ConvConfig, + conv1: ConvConfig, +} + +impl ConvBlockConfig { + /// Create a new instance of the dual convolution block [config](ConvBlockConfig). + pub fn new(channels: usize, kernel_size: usize, stride: usize, depthwise: bool) -> Self { + let conv0 = ConvConfig::new(channels, channels, kernel_size, stride, depthwise); + let conv1 = ConvConfig::new(channels, channels, kernel_size, stride, depthwise); + + Self { conv0, conv1 } + } + + /// Initialize a new [dual convolution block](ConvBlock) module. + pub fn init(&self, device: &Device) -> ConvBlock { + ConvBlock { + conv0: self.conv0.init(device), + conv1: self.conv1.init(device), + } + } +} diff --git a/yolox-burn/src/model/bottleneck.rs b/yolox-burn/src/model/bottleneck.rs new file mode 100644 index 0000000..1b738cd --- /dev/null +++ b/yolox-burn/src/model/bottleneck.rs @@ -0,0 +1,206 @@ +use alloc::{vec, vec::Vec}; +use burn::{ + module::Module, + nn::pool::{MaxPool2d, MaxPool2dConfig}, + tensor::{backend::Backend, Device, Tensor}, +}; + +use super::blocks::{expand, BaseConv, BaseConvConfig, Conv, ConvConfig}; + +pub(crate) const SPP_POOLING: [usize; 3] = [5, 9, 13]; + +/// Standard bottleneck block. +#[derive(Module, Debug)] +pub struct Bottleneck { + conv1: BaseConv, + conv2: Conv, + shortcut: bool, +} + +impl Bottleneck { + pub fn forward(&self, x: Tensor) -> Tensor { + let identity = x.clone(); + + let x = self.conv1.forward(x); + let mut x = self.conv2.forward(x); + + if self.shortcut { + x = x + identity; + } + + x + } +} + +/// [Bottleneck block](Bottleneck) configuration. 
+struct BottleneckConfig { + conv1: BaseConvConfig, + conv2: ConvConfig, + shortcut: bool, +} + +impl BottleneckConfig { + /// Create a new instance of the bottleneck block [config](BottleneckConfig). + pub fn new(in_channels: usize, out_channels: usize, shortcut: bool, depthwise: bool) -> Self { + // In practice, expansion = 1.0 and no shortcut connection is used + let hidden_channels = out_channels; + + let conv1 = BaseConvConfig::new(in_channels, hidden_channels, 1, 1, 1); + let conv2 = ConvConfig::new(hidden_channels, out_channels, 3, 1, depthwise); + + Self { + conv1, + conv2, + shortcut, + } + } + + /// Initialize a new [bottleneck block](Bottleneck) module. + pub fn init(&self, device: &Device) -> Bottleneck { + Bottleneck { + conv1: self.conv1.init(device), + conv2: self.conv2.init(device), + shortcut: self.shortcut, + } + } +} + +/// Spatial pyramid pooling layer used in YOLOv3-SPP. +#[derive(Module, Debug)] +pub struct SppBottleneck { + conv1: BaseConv, + conv2: BaseConv, + m: Vec, +} + +impl SppBottleneck { + pub fn forward(&self, x: Tensor) -> Tensor { + if self.m.is_empty() { + panic!("No MaxPool2d modules found"); + } + + let x = self.conv1.forward(x); + + let x: Vec<_> = vec![x.clone()] + .into_iter() + .chain(self.m.iter().map(|pool| pool.forward(x.clone()))) + .collect(); + let x = Tensor::cat(x, 1); + + self.conv2.forward(x) + } +} + +/// [SppBottleneck block](SppBottleneck) configuration. +pub struct SppBottleneckConfig { + conv1: BaseConvConfig, + conv2: BaseConvConfig, + m: Vec, +} + +impl SppBottleneckConfig { + /// Create a new instance of the bottleneck block [config](SppBottleneckConfig). 
+ pub fn new(in_channels: usize, out_channels: usize) -> Self { + let hidden_channels = in_channels / 2; + let conv2_channels = hidden_channels * 4; // conv1 output + maxpool (3x) + + let conv1 = BaseConvConfig::new(in_channels, hidden_channels, 1, 1, 1); + let conv2 = BaseConvConfig::new(conv2_channels, out_channels, 1, 1, 1); + let m: Vec<_> = SPP_POOLING + .into_iter() + .map(|k| { + let pad = k / 2; + MaxPool2dConfig::new([k, k]) + .with_padding(burn::nn::PaddingConfig2d::Explicit(pad, pad)) + }) + .collect(); + + Self { conv1, conv2, m } + } + + /// Initialize a new [bottleneck block](SppBottleneck) module. + pub fn init(&self, device: &Device) -> SppBottleneck { + SppBottleneck { + conv1: self.conv1.init(device), + conv2: self.conv2.init(device), + m: self.m.iter().map(|m| m.init()).collect(), + } + } +} + +/// Simplified Cross Stage Partial bottleneck with 3 convolutional layers. +/// Equivalent to C3 in YOLOv5. +#[derive(Module, Debug)] +pub struct CspBottleneck { + conv1: BaseConv, + conv2: BaseConv, + conv3: BaseConv, + m: Vec>, +} + +impl CspBottleneck { + pub fn forward(&self, x: Tensor) -> Tensor { + let x1 = self.conv1.forward(x.clone()); + let x2 = self.conv2.forward(x); + + let x1 = self + .m + .iter() + .fold(x1, |x_i, bottleneck| bottleneck.forward(x_i)); + + let x = Tensor::cat(vec![x1, x2], 1); + + self.conv3.forward(x) + } +} + +/// [CspBottleneck block](CspBottleneck) configuration. +pub struct CspBottleneckConfig { + conv1: BaseConvConfig, + conv2: BaseConvConfig, + conv3: BaseConvConfig, + m: Vec, +} + +impl CspBottleneckConfig { + /// Create a new instance of the bottleneck block [config](CspBottleneckConfig). 
+ pub fn new( + in_channels: usize, + out_channels: usize, + num_blocks: usize, + expansion: f64, + shortcut: bool, + depthwise: bool, + ) -> Self { + assert!( + expansion > 0.0 && expansion <= 1.0, + "expansion should be in range (0, 1]" + ); + + let hidden_channels = expand(out_channels, expansion); + + let conv1 = BaseConvConfig::new(in_channels, hidden_channels, 1, 1, 1); + let conv2 = BaseConvConfig::new(in_channels, hidden_channels, 1, 1, 1); + let conv3 = BaseConvConfig::new(2 * hidden_channels, out_channels, 1, 1, 1); + let m = (0..num_blocks) + .map(|_| BottleneckConfig::new(hidden_channels, hidden_channels, shortcut, depthwise)) + .collect(); + + Self { + conv1, + conv2, + conv3, + m, + } + } + + /// Initialize a new [bottleneck block](CspBottleneck) module. + pub fn init(&self, device: &Device) -> CspBottleneck { + CspBottleneck { + conv1: self.conv1.init(device), + conv2: self.conv2.init(device), + conv3: self.conv3.init(device), + m: self.m.iter().map(|b| b.init(device)).collect(), + } + } +} diff --git a/yolox-burn/src/model/boxes.rs b/yolox-burn/src/model/boxes.rs new file mode 100644 index 0000000..3823108 --- /dev/null +++ b/yolox-burn/src/model/boxes.rs @@ -0,0 +1,138 @@ +use burn::tensor::{backend::Backend, ElementConversion, Tensor}; +use itertools::Itertools; + +pub struct BoundingBox { + pub xmin: f32, + pub ymin: f32, + pub xmax: f32, + pub ymax: f32, + pub confidence: f32, +} + +/// Non-maximum suppression (NMS) filters overlapping bounding boxes that have an intersection-over- +/// union (IoU) greater or equal than the specified `iou_threshold` with previously selected boxes. +/// +/// Boxes are filtered based on `score_threshold` and ranked based on their score. As such, lower +/// scoring boxes are removed when overlapping with another (higher scoring) box. +/// +/// # Arguments +/// +/// * `boxes`: Bounding box coordinates. Shape: `[batch_size, num_boxes, 4]`. +/// * `scores` - Classification scores for each box. 
Shape: `[batch_size, num_boxes, num_classes]`. +/// * `iou_threshold` - Scalar threshold for IoU. +/// * `score_threshold` - Scalar threshold for scores. +/// +/// # Returns +/// +/// Vector of bounding boxes grouped by class for each batch. The boxes are sorted in decreasing +/// order of scores for each class. +pub fn nms( + boxes: Tensor, + scores: Tensor, + iou_threshold: f32, + score_threshold: f32, +) -> Vec>> { + let [batch_size, num_boxes, num_classes] = scores.dims(); + + // Bounding boxes grouped by batch and by (maximum) class index + let mut bboxes = boxes + .iter_dim(0) + .zip(scores.iter_dim(0)) + .enumerate() + // Per-batch + .map(|(_, (candidate_boxes, candidate_scores))| { + // Keep max scoring boxes only ([num_boxes, 1], [num_boxes, 1]) + let (cls_score, cls_idx) = candidate_scores.squeeze::<2>(0).max_dim_with_indices(1); + let cls_score: Vec<_> = cls_score + .into_data() + .value + .iter() + .map(|v| v.elem::()) + .collect(); + let cls_idx: Vec<_> = cls_idx + .into_data() + .value + .iter() + .map(|v| v.elem::() as usize) + .collect(); + + // [num_boxes, 4] + let candidate_boxes: Vec<_> = candidate_boxes + .into_data() + .value + .iter() + .map(|v| v.elem::()) + .collect(); + + // Per-class filtering based on score + (0..num_classes) + .map(|cls_id| { + // [num_boxes, 1] + (0..num_boxes) + .filter_map(|box_idx| { + let box_cls_idx = cls_idx[box_idx]; + if box_cls_idx != cls_id { + return None; + } + let box_cls_score = cls_score[box_idx]; + if box_cls_score >= score_threshold { + let bbox = &candidate_boxes[box_idx * 4..box_idx * 4 + 4]; + Some(BoundingBox { + xmin: bbox[0] - bbox[2] / 2., + ymin: bbox[1] - bbox[3] / 2., + xmax: bbox[0] + bbox[2] / 2., + ymax: bbox[1] + bbox[3] / 2., + confidence: box_cls_score, + }) + } else { + None + } + }) + .sorted_unstable_by(|a, b| a.confidence.partial_cmp(&b.confidence).unwrap()) + .collect::>() + }) + .collect::>() + }) + .collect::>(); + + for batch_bboxes in bboxes.iter_mut().take(batch_size) { + 
non_maximum_suppression(batch_bboxes, iou_threshold); + } + + bboxes +} + +/// Intersection over union of two bounding boxes. +pub fn iou(b1: &BoundingBox, b2: &BoundingBox) -> f32 { + let b1_area = (b1.xmax - b1.xmin + 1.) * (b1.ymax - b1.ymin + 1.); + let b2_area = (b2.xmax - b2.xmin + 1.) * (b2.ymax - b2.ymin + 1.); + let i_xmin = b1.xmin.max(b2.xmin); + let i_xmax = b1.xmax.min(b2.xmax); + let i_ymin = b1.ymin.max(b2.ymin); + let i_ymax = b1.ymax.min(b2.ymax); + let i_area = (i_xmax - i_xmin + 1.).max(0.) * (i_ymax - i_ymin + 1.).max(0.); + i_area / (b1_area + b2_area - i_area) +} + +/// Perform non-maximum suppression over boxes of the same class. +pub fn non_maximum_suppression(bboxes: &mut [Vec], threshold: f32) { + for bboxes_for_class in bboxes.iter_mut() { + bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap()); + let mut current_index = 0; + for index in 0..bboxes_for_class.len() { + let mut drop = false; + for prev_index in 0..current_index { + let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]); + if iou > threshold { + drop = true; + break; + } + } + if !drop { + bboxes_for_class.swap(current_index, index); + current_index += 1; + } + } + bboxes_for_class.truncate(current_index); + } +} diff --git a/yolox-burn/src/model/darknet.rs b/yolox-burn/src/model/darknet.rs new file mode 100644 index 0000000..5cf8e29 --- /dev/null +++ b/yolox-burn/src/model/darknet.rs @@ -0,0 +1,172 @@ +use core::cmp::max; + +use crate::model::blocks::expand; + +use super::{ + blocks::{Conv, ConvConfig, Focus, FocusConfig}, + bottleneck::{CspBottleneck, CspBottleneckConfig, SppBottleneck, SppBottleneckConfig}, +}; +use burn::{ + module::Module, + tensor::{backend::Backend, Device, Tensor}, +}; + +/// Darknet backbone feature maps. +pub struct DarknetFeatures(pub Tensor, pub Tensor, pub Tensor); + +/// [CSPDarknet-53](https://paperswithcode.com/method/cspdarknet53) backbone. 
+#[derive(Module, Debug)] +pub struct CspDarknet { + stem: Focus, + dark2: CspBlock, + dark3: CspBlock, + dark4: CspBlock, + dark5: CspBlock, +} + +impl CspDarknet { + pub fn forward(&self, x: Tensor) -> DarknetFeatures { + let x = self.stem.forward(x); + let x = self.dark2.forward(x); + let f1 = self.dark3.forward(x); + let f2 = self.dark4.forward(f1.clone()); + let f3 = self.dark5.forward(f2.clone()); + + DarknetFeatures(f1, f2, f3) + } +} + +/// [CSPDarknet-53](CspDarknet) configuration. +pub struct CspDarknetConfig { + stem: FocusConfig, + dark2: CspBlockConfig, + dark3: CspBlockConfig, + dark4: CspBlockConfig, + dark5: CspBlockConfig, +} + +impl CspDarknetConfig { + /// Create a new instance of the CSPDarknet-53 [config](CspDarknetConfig). + pub fn new(depth: f64, width: f64, depthwise: bool) -> Self { + assert!( + [0.33, 0.67, 1.0, 1.33].contains(&depth), + "invalid depth value {depth}" + ); + + assert!( + [0.25, 0.375, 0.5, 0.75, 1.0, 1.25].contains(&width), + "invalid width value {width}" + ); + + let base_channels = expand(64, width); + let base_depth = max((depth * 3_f64).round() as usize, 1); + + let stem = FocusConfig::new(3, base_channels, 3, 1); + let dark2 = CspBlockConfig::new( + base_channels, + base_channels * 2, + base_depth, + false, + depthwise, + ); + let dark3 = CspBlockConfig::new( + base_channels * 2, + base_channels * 4, + base_depth * 3, + false, + depthwise, + ); + let dark4 = CspBlockConfig::new( + base_channels * 4, + base_channels * 8, + base_depth * 3, + false, + depthwise, + ); + let dark5 = CspBlockConfig::new( + base_channels * 8, + base_channels * 16, + base_depth, + true, + depthwise, + ); + + Self { + stem, + dark2, + dark3, + dark4, + dark5, + } + } + + /// Initialize a new [CspDarknet](CspDarknet) module. 
+ pub fn init(&self, device: &Device) -> CspDarknet { + CspDarknet { + stem: self.stem.init(device), + dark2: self.dark2.init(device), + dark3: self.dark3.init(device), + dark4: self.dark4.init(device), + dark5: self.dark5.init(device), + } + } +} + +/// A BaseConv -> CspBottleneck block. +/// The SppBottleneck layer is only used in the last block of [CSPDarknet-53](CspDarknet). +#[derive(Module, Debug)] +pub struct CspBlock { + conv: Conv, + c3: CspBottleneck, + spp: Option>, +} + +impl CspBlock { + pub fn forward(&self, x: Tensor) -> Tensor { + let mut x = self.conv.forward(x); + + if let Some(spp) = &self.spp { + x = spp.forward(x); + } + + self.c3.forward(x) + } +} + +/// [CSP block](CspBlock) configuration. +pub struct CspBlockConfig { + conv: ConvConfig, + c3: CspBottleneckConfig, + spp: Option, +} + +impl CspBlockConfig { + /// Create a new instance of the CSP block [config](CspBlockConfig). + pub fn new( + in_channels: usize, + out_channels: usize, + depth: usize, + spp: bool, + depthwise: bool, + ) -> Self { + let conv = ConvConfig::new(in_channels, out_channels, 3, 2, depthwise); + let c3 = CspBottleneckConfig::new(out_channels, out_channels, depth, 0.5, !spp, depthwise); + + let spp = if spp { + Some(SppBottleneckConfig::new(out_channels, out_channels)) + } else { + None + }; + + Self { conv, c3, spp } + } + + /// Initialize a new [CSP block](CspBlock) module. 
+ pub fn init(&self, device: &Device) -> CspBlock { + CspBlock { + conv: self.conv.init(device), + c3: self.c3.init(device), + spp: self.spp.as_ref().map(|spp| spp.init(device)), + } + } +} diff --git a/yolox-burn/src/model/head.rs b/yolox-burn/src/model/head.rs new file mode 100644 index 0000000..63e691c --- /dev/null +++ b/yolox-burn/src/model/head.rs @@ -0,0 +1,192 @@ +use alloc::{vec, vec::Vec}; +use burn::{ + module::Module, + nn::{ + conv::{Conv2d, Conv2dConfig}, + Initializer, PaddingConfig2d, + }, + tensor::{activation::sigmoid, backend::Backend, Device, Int, Shape, Tensor}, +}; +use itertools::{izip, multiunzip}; + +use super::{ + blocks::{expand, BaseConv, BaseConvConfig, ConvBlock, ConvBlockConfig}, + pafpn::FpnFeatures, +}; + +const STRIDES: [usize; 3] = [8, 16, 32]; +const IN_CHANNELS: [usize; 3] = [256, 512, 1024]; +const PRIOR_PROB: f64 = 1e-2; + +/// Create a 2D coordinate grid for the specified dimensions. +/// Similar to [`numpy.indices`](https://numpy.org/doc/stable/reference/generated/numpy.indices.html) +/// but specific to two dimensions. +fn create_2d_grid(x: usize, y: usize, device: &Device) -> Tensor { + let y_idx = Tensor::arange(0..y as i64, device) + .reshape(Shape::new([y, 1])) + .repeat(1, x) + .reshape(Shape::new([y, x])); + let x_idx = Tensor::arange(0..x as i64, device) + .reshape(Shape::new([1, x])) // can only repeat with dim=1 + .repeat(0, y) + .reshape(Shape::new([y, x])); + + Tensor::stack(vec![x_idx, y_idx], 2) +} + +/// YOLOX head. 
#[derive(Module, Debug)]
pub struct Head<B: Backend> {
    stems: Vec<BaseConv<B>>,
    cls_convs: Vec<ConvBlock<B>>,
    reg_convs: Vec<ConvBlock<B>>,
    cls_preds: Vec<Conv2d<B>>,
    reg_preds: Vec<Conv2d<B>>,
    obj_preds: Vec<Conv2d<B>>,
}

impl<B: Backend> Head<B> {
    /// Compute decoded detections [B, num_anchors_total, 5 + num_classes] from FPN features.
    pub fn forward(&self, x: FpnFeatures<B>) -> Tensor<B, 3> {
        let features: [Tensor<B, 4>; 3] = [x.0, x.1, x.2];

        // Outputs for each feature map
        let (outputs, shapes): (Vec<Tensor<B, 3>>, Vec<(usize, usize)>) = izip!(
            features,
            &self.stems,
            &self.cls_convs,
            &self.cls_preds,
            &self.reg_convs,
            &self.reg_preds,
            &self.obj_preds,
            &STRIDES
        )
        .map(
            |(feat, stem, cls_conv, cls_pred, reg_conv, reg_pred, obj_pred, _stride)| {
                let feat = stem.forward(feat);

                let cls_feat = cls_conv.forward(feat.clone());
                let cls_out = cls_pred.forward(cls_feat);

                let reg_feat = reg_conv.forward(feat);
                let reg_out = reg_pred.forward(reg_feat.clone());

                let obj_out = obj_pred.forward(reg_feat);

                // Output [B, 5 + num_classes, num_anchors]
                let out = Tensor::cat(vec![reg_out, sigmoid(obj_out), sigmoid(cls_out)], 1);
                let [_, _, h, w] = out.dims();
                (out.flatten(2, 3), (h, w))
            },
        )
        .unzip();

        // 1. Concat all regression outputs
        // 2. Permute shape to [B, num_anchors_total, 5 + num_classes]
        // 3. Decode absolute bounding box values
        self.decode(Tensor::cat(outputs, 2).swap_dims(2, 1), shapes.as_ref())
    }

    /// Decode bounding box absolute values from regression output offsets.
    fn decode(&self, outputs: Tensor<B, 3>, shapes: &[(usize, usize)]) -> Tensor<B, 3> {
        let device = outputs.device();
        let [b, num_anchors, num_outputs] = outputs.dims();

        let (grids, strides): (Vec<_>, Vec<_>) = shapes
            .iter()
            .zip(STRIDES)
            .map(|((h, w), stride)| {
                // Grid (x, y) coordinates
                let num_anchors = w * h;
                let grid =
                    create_2d_grid::<B>(*w, *h, &device).reshape(Shape::new([1, num_anchors, 2]));
                let strides: Tensor<B, 3, Int> =
                    Tensor::full(Shape::new([1, num_anchors, 1]), stride as i64, &device);

                (grid, strides)
            })
            .unzip();

        let grids = Tensor::cat(grids, 1).float();
        let strides = Tensor::cat(strides, 1).float();

        Tensor::cat(
            vec![
                // Add grid offset to center coordinates and scale to image dimensions
                (outputs.clone().slice([0..b, 0..num_anchors, 0..2]) + grids) * strides.clone(),
                // Decode `log` encoded boxes with `exp` and scale to image dimensions
                outputs.clone().slice([0..b, 0..num_anchors, 2..4]).exp() * strides,
                // Classification outputs
                outputs.slice([0..b, 0..num_anchors, 4..num_outputs]),
            ],
            2,
        )
    }
}

/// [YOLOX head](Head) configuration.
pub struct HeadConfig {
    stems: Vec<BaseConvConfig>,
    cls_convs: Vec<ConvBlockConfig>,
    reg_convs: Vec<ConvBlockConfig>,
    cls_preds: Vec<Conv2dConfig>,
    reg_preds: Vec<Conv2dConfig>,
    obj_preds: Vec<Conv2dConfig>,
}

impl HeadConfig {
    /// Create a new instance of the YOLOX head [config](HeadConfig).
+ pub fn new(num_classes: usize, width: f64, depthwise: bool) -> Self { + let hidden_channels: usize = 256; + // Initialize conv2d biases for classification and objectness heads + let bias = -f64::ln((1.0 - PRIOR_PROB) / PRIOR_PROB); + + let (stems, cls_convs, reg_convs, cls_preds, reg_preds, obj_preds) = + multiunzip(IN_CHANNELS.into_iter().map(|in_channels| { + let stem = BaseConvConfig::new( + expand(in_channels, width), + expand(hidden_channels, width), + 1, + 1, + 1, + ); + + let cls_conv = + ConvBlockConfig::new(expand(hidden_channels, width), 3, 1, depthwise); + let reg_conv = + ConvBlockConfig::new(expand(hidden_channels, width), 3, 1, depthwise); + + let cls_pred = + Conv2dConfig::new([expand(hidden_channels, width), num_classes], [1, 1]) + .with_padding(PaddingConfig2d::Explicit(0, 0)) + .with_initializer(Initializer::Constant { value: bias }); + let reg_pred = Conv2dConfig::new([expand(hidden_channels, width), 4], [1, 1]) + .with_padding(PaddingConfig2d::Explicit(0, 0)); + let obj_pred = Conv2dConfig::new([expand(hidden_channels, width), 1], [1, 1]) + .with_padding(PaddingConfig2d::Explicit(0, 0)) + .with_initializer(Initializer::Constant { value: bias }); + + (stem, cls_conv, reg_conv, cls_pred, reg_pred, obj_pred) + })); + + Self { + stems, + cls_convs, + reg_convs, + cls_preds, + reg_preds, + obj_preds, + } + } + + /// Initialize a new [YOLOX head](Head) module. 
+ pub fn init(&self, device: &Device) -> Head { + Head { + stems: self.stems.iter().map(|m| m.init(device)).collect(), + cls_convs: self.cls_convs.iter().map(|m| m.init(device)).collect(), + reg_convs: self.reg_convs.iter().map(|m| m.init(device)).collect(), + cls_preds: self.cls_preds.iter().map(|m| m.init(device)).collect(), + reg_preds: self.reg_preds.iter().map(|m| m.init(device)).collect(), + obj_preds: self.obj_preds.iter().map(|m| m.init(device)).collect(), + } + } +} diff --git a/yolox-burn/src/model/mod.rs b/yolox-burn/src/model/mod.rs new file mode 100644 index 0000000..ed5731d --- /dev/null +++ b/yolox-burn/src/model/mod.rs @@ -0,0 +1,10 @@ +mod blocks; +mod bottleneck; +pub mod boxes; +mod darknet; +mod head; +mod pafpn; +pub mod weights; +pub mod yolox; + +pub use boxes::BoundingBox; diff --git a/yolox-burn/src/model/pafpn.rs b/yolox-burn/src/model/pafpn.rs new file mode 100644 index 0000000..c238d0c --- /dev/null +++ b/yolox-burn/src/model/pafpn.rs @@ -0,0 +1,177 @@ +use alloc::vec; +use burn::{ + module::Module, + tensor::{ + backend::Backend, + module::interpolate, + ops::{InterpolateMode, InterpolateOptions}, + Device, Tensor, + }, +}; + +use super::{ + blocks::{expand, BaseConv, BaseConvConfig, Conv, ConvConfig}, + bottleneck::{CspBottleneck, CspBottleneckConfig}, + darknet::{CspDarknet, CspDarknetConfig}, +}; + +pub struct FpnFeatures(pub Tensor, pub Tensor, pub Tensor); + +/// [PAFPN](https://paperswithcode.com/method/pafpn) is the feature pyramid module used in +/// [Path Aggregation Network](https://arxiv.org/abs/1803.01534) that combines FPNs with +/// bottom-up path augmentation. 
+#[derive(Module, Debug)] +pub struct Pafpn { + backbone: CspDarknet, + lateral_conv0: BaseConv, + c3_n3: CspBottleneck, + c3_n4: CspBottleneck, + c3_p3: CspBottleneck, + c3_p4: CspBottleneck, + reduce_conv1: BaseConv, + bu_conv1: Conv, // bottom-up conv + bu_conv2: Conv, // bottom-up conv +} + +impl Pafpn { + pub fn forward(&self, x: Tensor) -> FpnFeatures { + fn upsample(x_in: Tensor, scale: usize) -> Tensor { + let [_, _, h, w] = x_in.dims(); + interpolate( + x_in, + [h * scale, w * scale], + InterpolateOptions::new(InterpolateMode::Nearest), + ) + } + + // Backbone features + let features = self.backbone.forward(x); + + let fpn_out0 = self.lateral_conv0.forward(features.2); + let f_out0 = upsample(fpn_out0.clone(), 2); + let f_out0 = Tensor::cat(vec![f_out0, features.1], 1); + let f_out0 = self.c3_p4.forward(f_out0); + + let fpn_out1 = self.reduce_conv1.forward(f_out0); + let f_out1 = upsample(fpn_out1.clone(), 2); + let f_out1 = Tensor::cat(vec![f_out1, features.0], 1); + let pan_out2 = self.c3_p3.forward(f_out1); + + let p_out1 = self.bu_conv2.forward(pan_out2.clone()); + let p_out1 = Tensor::cat(vec![p_out1, fpn_out1], 1); + let pan_out1 = self.c3_n3.forward(p_out1); + + let p_out0 = self.bu_conv1.forward(pan_out1.clone()); + let p_out0 = Tensor::cat(vec![p_out0, fpn_out0], 1); + let pan_out0 = self.c3_n4.forward(p_out0); + + FpnFeatures(pan_out2, pan_out1, pan_out0) + } +} + +/// [PAFPN block](Pafpn) configuration. +pub struct PafpnConfig { + backbone: CspDarknetConfig, + lateral_conv0: BaseConvConfig, + c3_n3: CspBottleneckConfig, + c3_n4: CspBottleneckConfig, + c3_p3: CspBottleneckConfig, + c3_p4: CspBottleneckConfig, + reduce_conv1: BaseConvConfig, + bu_conv1: ConvConfig, // bottom-up conv + bu_conv2: ConvConfig, // bottom-up conv +} + +impl PafpnConfig { + /// Create a new instance of the PAFPN [config](PafpnConfig). 
+ pub fn new(depth: f64, width: f64, depthwise: bool) -> Self { + assert!( + [0.33, 0.67, 1.0, 1.33].contains(&depth), + "invalid depth value {depth}" + ); + assert!( + [0.25, 0.375, 0.5, 0.75, 1.0, 1.25].contains(&width), + "invalid width value {width}" + ); + + let in_channels: [usize; 3] = [256, 512, 1024]; + let hidden_channels: [usize; 2] = [ + expand(2 * in_channels[0], width), + expand(2 * in_channels[1], width), + ]; + let in_channels: [usize; 3] = [ + expand(in_channels[0], width), + expand(in_channels[1], width), + expand(in_channels[2], width), + ]; + let num_blocks = (3_f64 * depth).round() as usize; + + let backbone = CspDarknetConfig::new(depth, width, depthwise); + let lateral_conv0 = BaseConvConfig::new(in_channels[2], in_channels[1], 1, 1, 1); + let c3_p4 = CspBottleneckConfig::new( + hidden_channels[1], + in_channels[1], + num_blocks, + 0.5, + false, + depthwise, + ); + + let reduce_conv1 = BaseConvConfig::new(in_channels[1], in_channels[0], 1, 1, 1); + let c3_p3 = CspBottleneckConfig::new( + hidden_channels[0], + in_channels[0], + num_blocks, + 0.5, + false, + depthwise, + ); + + let bu_conv2 = ConvConfig::new(in_channels[0], in_channels[0], 3, 2, depthwise); + let c3_n3 = CspBottleneckConfig::new( + hidden_channels[0], + in_channels[1], + num_blocks, + 0.5, + false, + depthwise, + ); + + let bu_conv1 = ConvConfig::new(in_channels[1], in_channels[1], 3, 2, depthwise); + let c3_n4 = CspBottleneckConfig::new( + hidden_channels[1], + in_channels[2], + num_blocks, + 0.5, + false, + depthwise, + ); + + Self { + backbone, + lateral_conv0, + c3_n3, + c3_n4, + c3_p3, + c3_p4, + reduce_conv1, + bu_conv1, + bu_conv2, + } + } + + /// Initialize a new [PAFPN](Pafpn) module. 
+ pub fn init(&self, device: &Device) -> Pafpn { + Pafpn { + backbone: self.backbone.init(device), + lateral_conv0: self.lateral_conv0.init(device), + c3_n3: self.c3_n3.init(device), + c3_n4: self.c3_n4.init(device), + c3_p3: self.c3_p3.init(device), + c3_p4: self.c3_p4.init(device), + reduce_conv1: self.reduce_conv1.init(device), + bu_conv1: self.bu_conv1.init(device), + bu_conv2: self.bu_conv2.init(device), + } + } +} diff --git a/yolox-burn/src/model/weights.rs b/yolox-burn/src/model/weights.rs new file mode 100644 index 0000000..6d5f268 --- /dev/null +++ b/yolox-burn/src/model/weights.rs @@ -0,0 +1,143 @@ +/// Pre-trained weights metadata. +pub struct Weights { + pub(super) url: &'static str, + pub(super) num_classes: usize, +} + +#[cfg(feature = "pretrained")] +mod downloader { + use super::*; + use burn::data::network::downloader; + use std::fs::{create_dir_all, File}; + use std::io::Write; + use std::path::PathBuf; + + impl Weights { + /// Download the pre-trained weights to the local cache directory. + pub fn download(&self) -> Result { + // Model cache directory + let model_dir = dirs::home_dir() + .expect("Should be able to get home directory") + .join(".cache") + .join("yolox-burn"); + + if !model_dir.exists() { + create_dir_all(&model_dir)?; + } + + let file_base_name = self.url.rsplit_once('/').unwrap().1; + let file_name = model_dir.join(file_base_name); + if !file_name.exists() { + // Download file content + let bytes = downloader::download_file_as_bytes(self.url, file_base_name); + + // Write content to file + let mut output_file = File::create(&file_name)?; + let bytes_written = output_file.write(&bytes)?; + + if bytes_written != bytes.len() { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to write the whole model weights file.", + )); + } + } + + Ok(file_name) + } + } +} + +pub trait WeightsMeta { + fn weights(&self) -> Weights; +} + +/// YOLOX-Nano pre-trained weights. 
pub enum YoloxNano {
    /// These weights were released after the original paper implementation with slightly better results.
    /// mAP (val2017): 25.8
    Coco,
}
impl WeightsMeta for YoloxNano {
    fn weights(&self) -> Weights {
        Weights {
            url: "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano.pth",
            num_classes: 80,
        }
    }
}

/// YOLOX-Tiny pre-trained weights.
pub enum YoloxTiny {
    /// These weights were released after the original paper implementation with slightly better results.
    /// mAP (val2017): 32.8
    Coco,
}
impl WeightsMeta for YoloxTiny {
    fn weights(&self) -> Weights {
        Weights {
            url: "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny.pth",
            num_classes: 80,
        }
    }
}

/// YOLOX-S pre-trained weights.
pub enum YoloxS {
    /// These weights were released after the original paper implementation with slightly better results.
    /// mAP (test2017): 40.5
    Coco,
}
impl WeightsMeta for YoloxS {
    fn weights(&self) -> Weights {
        Weights {
            url: "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s.pth",
            num_classes: 80,
        }
    }
}

/// YOLOX-M pre-trained weights.
pub enum YoloxM {
    /// These weights were released after the original paper implementation with slightly better results.
    /// mAP (test2017): 47.2
    Coco,
}
impl WeightsMeta for YoloxM {
    fn weights(&self) -> Weights {
        Weights {
            url: "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m.pth",
            num_classes: 80,
        }
    }
}

/// YOLOX-L pre-trained weights.
pub enum YoloxL {
    /// These weights were released after the original paper implementation with slightly better results.
    /// mAP (test2017): 50.1
    Coco,
}
impl WeightsMeta for YoloxL {
    fn weights(&self) -> Weights {
        Weights {
            url: "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l.pth",
            num_classes: 80,
        }
    }
}

/// YOLOX-X pre-trained weights.
pub enum YoloxX {
    /// These weights were released after the original paper implementation with slightly better results.
    /// mAP (test2017): 51.5
    Coco,
}
impl WeightsMeta for YoloxX {
    fn weights(&self) -> Weights {
        Weights {
            url: "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x.pth",
            num_classes: 80,
        }
    }
}
diff --git a/yolox-burn/src/model/yolox.rs b/yolox-burn/src/model/yolox.rs
new file mode 100644
index 0000000..8c551a7
--- /dev/null
+++ b/yolox-burn/src/model/yolox.rs
@@ -0,0 +1,329 @@
use burn::{
    module::{ConstantRecord, Module},
    tensor::{backend::Backend, Device, Tensor},
};

use crate::model::bottleneck::SPP_POOLING;

use super::{
    head::{Head, HeadConfig},
    pafpn::{Pafpn, PafpnConfig},
};

#[cfg(feature = "pretrained")]
use {
    super::weights::{self, WeightsMeta},
    burn::record::{FullPrecisionSettings, Recorder, RecorderError},
    burn_import::pytorch::{LoadArgs, PyTorchFileRecorder},
};

/// [YOLOX](https://paperswithcode.com/method/yolox) object detection architecture.
#[derive(Module, Debug)]
pub struct Yolox<B: Backend> {
    backbone: Pafpn<B>,
    head: Head<B>,
}

impl<B: Backend> Yolox<B> {
    /// Run the detector: PAFPN features followed by the detection head.
    pub fn forward(&self, x: Tensor<B, 4>) -> Tensor<B, 3> {
        let features = self.backbone.forward(x);
        self.head.forward(features)
    }

    /// YOLOX-Nano from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430).
    ///
    /// # Arguments
    ///
    /// * `num_classes`: Number of output classes of the model.
    /// * `device` - Device to create the module on.
    ///
    /// # Returns
    ///
    /// A YOLOX-Nano module.
+ pub fn yolox_nano(num_classes: usize, device: &Device) -> Self { + YoloxConfig::new(0.33, 0.25, num_classes, true).init(device) + } + + /// YOLOX-Nano from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430) + /// with pre-trained weights. + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-Nano module with pre-trained weights. + #[cfg(feature = "pretrained")] + pub fn yolox_nano_pretrained( + weights: weights::YoloxNano, + device: &Device, + ) -> Result { + let weights = weights.weights(); + let record = Self::load_weights_record(&weights, device)?; + + let model = Self::yolox_nano(weights.num_classes, device).load_record(record); + + Ok(model) + } + + /// YOLOX-Tiny from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430). + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-Tiny module. + pub fn yolox_tiny(num_classes: usize, device: &Device) -> Self { + YoloxConfig::new(0.33, 0.375, num_classes, false).init(device) + } + + /// YOLOX-Tiny from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430) + /// with pre-trained weights. + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-Tiny module with pre-trained weights. 
+ #[cfg(feature = "pretrained")] + pub fn yolox_tiny_pretrained( + weights: weights::YoloxTiny, + device: &Device, + ) -> Result { + let weights = weights.weights(); + let record = Self::load_weights_record(&weights, device)?; + + let model = Self::yolox_tiny(weights.num_classes, device).load_record(record); + + Ok(model) + } + + /// YOLOX-S from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430). + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-S module. + pub fn yolox_s(num_classes: usize, device: &Device) -> Self { + YoloxConfig::new(0.33, 0.50, num_classes, false).init(device) + } + + /// YOLOX-S from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430) + /// with pre-trained weights. + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-S module with pre-trained weights. + #[cfg(feature = "pretrained")] + pub fn yolox_s_pretrained( + weights: weights::YoloxS, + device: &Device, + ) -> Result { + let weights = weights.weights(); + let record = Self::load_weights_record(&weights, device)?; + + let model = Self::yolox_s(weights.num_classes, device).load_record(record); + + Ok(model) + } + + /// YOLOX-M from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430). + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-M module. + pub fn yolox_m(num_classes: usize, device: &Device) -> Self { + YoloxConfig::new(0.67, 0.75, num_classes, false).init(device) + } + + /// YOLOX-M from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430) + /// with pre-trained weights. 
+ /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-M module with pre-trained weights. + #[cfg(feature = "pretrained")] + pub fn yolox_m_pretrained( + weights: weights::YoloxM, + device: &Device, + ) -> Result { + let weights = weights.weights(); + let record = Self::load_weights_record(&weights, device)?; + + let model = Self::yolox_m(weights.num_classes, device).load_record(record); + + Ok(model) + } + + /// YOLOX-L from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430). + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-L module. + pub fn yolox_l(num_classes: usize, device: &Device) -> Self { + YoloxConfig::new(1., 1., num_classes, false).init(device) + } + + /// YOLOX-L from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430) + /// with pre-trained weights. + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-L module with pre-trained weights. + #[cfg(feature = "pretrained")] + pub fn yolox_l_pretrained( + weights: weights::YoloxL, + device: &Device, + ) -> Result { + let weights = weights.weights(); + let record = Self::load_weights_record(&weights, device)?; + + let model = Self::yolox_l(weights.num_classes, device).load_record(record); + + Ok(model) + } + + /// YOLOX-X from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430). + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-X module. 
+ pub fn yolox_x(num_classes: usize, device: &Device) -> Self { + YoloxConfig::new(1.33, 1.25, num_classes, false).init(device) + } + + /// YOLOX-X from [`YOLOX: Exceeding YOLO Series in 2021`](https://arxiv.org/abs/2107.08430) + /// with pre-trained weights. + /// + /// # Arguments + /// + /// * `num_classes`: Number of output classes of the model. + /// * `device` - Device to create the module on. + /// + /// # Returns + /// + /// A YOLOX-X module with pre-trained weights. + #[cfg(feature = "pretrained")] + pub fn yolox_x_pretrained( + weights: weights::YoloxX, + device: &Device, + ) -> Result { + let weights = weights.weights(); + let record = Self::load_weights_record(&weights, device)?; + + let model = Self::yolox_x(weights.num_classes, device).load_record(record); + + Ok(model) + } + + /// Load specified pre-trained PyTorch weights as a record. + fn load_weights_record( + weights: &weights::Weights, + device: &Device, + ) -> Result, RecorderError> { + // Download torch weights + let torch_weights = weights.download().map_err(|err| { + RecorderError::Unknown(format!("Could not download weights.\nError: {err}")) + })?; + + // Load weights from torch state_dict + let load_args = LoadArgs::new(torch_weights) + // State dict contains "model", "amp", "optimizer", "start_epoch" + .with_top_level_key("model") + // Map backbone.C3_* -> backbone.c3_* + .with_key_remap("backbone\\.C3_(.+)", "backbone.c3_$1") + // Map backbone.backbone.dark[i].0.* -> backbone.backbone.dark[i].conv.* + .with_key_remap("(backbone\\.backbone\\.dark[2-5])\\.0\\.(.+)", "$1.conv.$2") + // Map backbone.backbone.dark[i].1.* -> backbone.backbone.dark[i].c3.* + .with_key_remap("(backbone\\.backbone\\.dark[2-4])\\.1\\.(.+)", "$1.c3.$2") + // Map backbone.backbone.dark5.1.* -> backbone.backbone.dark5.spp.* + .with_key_remap("(backbone\\.backbone\\.dark5)\\.1\\.(.+)", "$1.spp.$2") + // Map backbone.backbone.dark5.2.* -> backbone.backbone.dark5.c3.* + 
.with_key_remap("(backbone\\.backbone\\.dark5)\\.2\\.(.+)", "$1.c3.$2") + // Map head.{cls | reg}_convs.x.[i].* -> head.{cls | reg}_convs.x.conv[i].* + .with_key_remap( + "(head\\.(cls|reg)_convs\\.[0-9]+)\\.([0-9]+)\\.(.+)", + "$1.conv$3.$4", + ); + + let mut record: YoloxRecord = + PyTorchFileRecorder::::new().load(load_args, device)?; + + if let Some(ref mut spp) = record.backbone.backbone.dark5.spp { + // Handle the initialization for Vec, which has no parameters. + // Without this, the vector would be initialized as empty and thus no MaxPool2d + // layers would be applied, which is incorrect. + if spp.m.is_empty() { + spp.m = vec![ConstantRecord; SPP_POOLING.len()]; + } + } + + Ok(record) + } +} + +/// [YOLOX detector](Yolox) configuration. +pub struct YoloxConfig { + backbone: PafpnConfig, + head: HeadConfig, +} + +impl YoloxConfig { + /// Create a new instance of the YOLOX detector [config](YoloxConfig). + pub fn new(depth: f64, width: f64, num_classes: usize, depthwise: bool) -> Self { + let backbone = PafpnConfig::new(depth, width, depthwise); + let head = HeadConfig::new(num_classes, width, depthwise); + + Self { backbone, head } + } + + /// Initialize a new [YOLOX detector](Yolox) module. + pub fn init(&self, device: &Device) -> Yolox { + Yolox { + backbone: self.backbone.init(device), + head: self.head.init(device), + } + } +}