From 26901e4aa0a51a1863a796999e0476a4b72e5bd5 Mon Sep 17 00:00:00 2001
From: Chang Liu <9713593+chang-l@users.noreply.github.com>
Date: Fri, 13 Feb 2026 14:11:11 -0800
Subject: [PATCH] [TRTLLM-10612][feat] Initial support of AIGV models in TRTLLM (#11462)

Signed-off-by: Chang Liu (Enterprise Products)
Signed-off-by: Chang Liu <9713593+chang-l@users.noreply.github.com>
Signed-off-by: Zhenhua Wang
Co-authored-by: Freddy Qi
Co-authored-by: Claude Opus 4.6
Co-authored-by: Zhenhua Wang
---
 README.md | 3 -
 examples/visual_gen/README.md | 172 +
 examples/visual_gen/cat_piano.png | Bin 0 -> 455692 bytes
 examples/visual_gen/hf_examples.sh | 128 +
 examples/visual_gen/hf_wan.py | 141 +
 examples/visual_gen/output_handler.py | 237 ++
 examples/visual_gen/serve/README.md | 322 ++
 examples/visual_gen/serve/async_video_gen.py | 238 ++
 examples/visual_gen/serve/configs/wan.yml | 8 +
 examples/visual_gen/serve/delete_video.py | 151 +
 .../media/woman_skyline_original_720p.jpeg | Bin 0 -> 178461 bytes
 examples/visual_gen/serve/sync_image_gen.py | 91 +
 examples/visual_gen/serve/sync_video_gen.py | 224 ++
 examples/visual_gen/visual_gen_examples.sh | 238 ++
 examples/visual_gen/visual_gen_wan_i2v.py | 226 ++
 examples/visual_gen/visual_gen_wan_t2v.py | 228 ++
 requirements.txt | 1 +
 tensorrt_llm/_torch/distributed/__init__.py | 5 +-
 tensorrt_llm/_torch/distributed/ops.py | 123 +
 tensorrt_llm/_torch/modules/linear.py | 34 +
 tensorrt_llm/_torch/visual_gen/__init__.py | 45 +
 .../visual_gen/attention_backend/__init__.py | 37 +
 .../visual_gen/attention_backend/interface.py | 33 +
 .../visual_gen/attention_backend/parallel.py | 162 +
 .../visual_gen/attention_backend/trtllm.py | 244 ++
 .../visual_gen/attention_backend/utils.py | 118 +
 .../visual_gen/attention_backend/vanilla.py | 126 +
 .../_torch/visual_gen/checkpoints/__init__.py | 7 +
 .../visual_gen/checkpoints/weight_loader.py | 152 +
 tensorrt_llm/_torch/visual_gen/config.py | 565 +++
 tensorrt_llm/_torch/visual_gen/executor.py | 246 ++
 .../_torch/visual_gen/models/__init__.py | 30 +
 .../_torch/visual_gen/models/wan/__init__.py | 5 +
 .../visual_gen/models/wan/pipeline_wan.py | 521 +++
 .../visual_gen/models/wan/pipeline_wan_i2v.py | 736 ++++
 .../visual_gen/models/wan/transformer_wan.py | 756 ++++
 .../_torch/visual_gen/modules/__init__.py | 26 +
 .../_torch/visual_gen/modules/attention.py | 284 ++
 tensorrt_llm/_torch/visual_gen/output.py | 29 +
 tensorrt_llm/_torch/visual_gen/parallelism.py | 100 +
 tensorrt_llm/_torch/visual_gen/pipeline.py | 544 +++
 .../_torch/visual_gen/pipeline_loader.py | 228 ++
 .../_torch/visual_gen/pipeline_registry.py | 94 +
 .../visual_gen/quantization/__init__.py | 15 +
 .../_torch/visual_gen/quantization/loader.py | 197 ++
 .../_torch/visual_gen/quantization/ops.py | 98 +
 tensorrt_llm/_torch/visual_gen/teacache.py | 409 +++
 tensorrt_llm/_torch/visual_gen/utils.py | 39 +
 tensorrt_llm/commands/serve.py | 240 +-
 tensorrt_llm/commands/utils.py | 132 +
 tensorrt_llm/executor/ipc.py | 28 +-
 tensorrt_llm/inputs/data.py | 79 +-
 tensorrt_llm/llmapi/__init__.py | 3 +
 tensorrt_llm/llmapi/disagg_utils.py | 1 +
 tensorrt_llm/llmapi/utils.py | 22 +-
 tensorrt_llm/llmapi/visual_gen.py | 544 +++
 tensorrt_llm/ray_stub.py | 13 +-
 tensorrt_llm/serve/media_storage.py | 426 +++
 tensorrt_llm/serve/openai_protocol.py | 215 ++
 tensorrt_llm/serve/openai_server.py | 757 +++-
 tensorrt_llm/serve/visual_gen_utils.py | 112 +
 .../defs/examples/test_visual_gen.py | 288 ++
 .../test_lists/test-db/l0_b200.yml | 12 +
 .../test_lists/test-db/l0_dgx_b200.yml | 6 +
 .../_torch/visual_gen/multi_gpu/__init__.py | 1 +
 .../multi_gpu/test_ulysses_attention.py | 505 +++
 .../visual_gen/test_attention_integration.py | 540 +++
 .../_torch/visual_gen/test_attention_perf.py | 622 ++++
 .../_torch/visual_gen/test_fused_qkv.py | 126 +
 .../_torch/visual_gen/test_model_loader.py | 494 +++
 .../_torch/visual_gen/test_quant_ops.py | 120 +
 .../visual_gen/test_trtllm_serve_e2e.py | 398 +++
 .../visual_gen/test_trtllm_serve_endpoints.py | 876 +++++
 tests/unittest/_torch/visual_gen/test_wan.py | 3094 +++++++++++++++++
 .../_torch/visual_gen/test_wan_i2v.py | 1491 ++++++++
 75 files changed, 19366 insertions(+), 195 deletions(-)
 create mode 100644 examples/visual_gen/README.md
 create mode 100644 examples/visual_gen/cat_piano.png
 create mode 100755 examples/visual_gen/hf_examples.sh
 create mode 100755 examples/visual_gen/hf_wan.py
 create mode 100644 examples/visual_gen/output_handler.py
 create mode 100644 examples/visual_gen/serve/README.md
 create mode 100755 examples/visual_gen/serve/async_video_gen.py
 create mode 100644 examples/visual_gen/serve/configs/wan.yml
 create mode 100755 examples/visual_gen/serve/delete_video.py
 create mode 100644 examples/visual_gen/serve/media/woman_skyline_original_720p.jpeg
 create mode 100755 examples/visual_gen/serve/sync_image_gen.py
 create mode 100755 examples/visual_gen/serve/sync_video_gen.py
 create mode 100755 examples/visual_gen/visual_gen_examples.sh
 create mode 100644 examples/visual_gen/visual_gen_wan_i2v.py
 create mode 100755 examples/visual_gen/visual_gen_wan_t2v.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/__init__.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/attention_backend/__init__.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/attention_backend/interface.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/attention_backend/parallel.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/attention_backend/trtllm.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/attention_backend/utils.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/attention_backend/vanilla.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/checkpoints/__init__.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/checkpoints/weight_loader.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/config.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/executor.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/models/__init__.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/models/wan/__init__.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan_i2v.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/models/wan/transformer_wan.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/modules/__init__.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/modules/attention.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/output.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/parallelism.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/pipeline.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/pipeline_loader.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/pipeline_registry.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/quantization/__init__.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/quantization/loader.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/quantization/ops.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/teacache.py
 create mode 100644 tensorrt_llm/_torch/visual_gen/utils.py
 create mode 100644 tensorrt_llm/commands/utils.py
 create mode 100644 tensorrt_llm/llmapi/visual_gen.py
 create mode 100644 tensorrt_llm/serve/media_storage.py
 create mode 100644 tensorrt_llm/serve/visual_gen_utils.py
 create mode 100644 tests/integration/defs/examples/test_visual_gen.py
 create mode 100644 tests/unittest/_torch/visual_gen/multi_gpu/__init__.py
 create mode 100644 tests/unittest/_torch/visual_gen/multi_gpu/test_ulysses_attention.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_attention_integration.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_attention_perf.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_fused_qkv.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_model_loader.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_quant_ops.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_trtllm_serve_e2e.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_trtllm_serve_endpoints.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_wan.py
 create mode 100644 tests/unittest/_torch/visual_gen/test_wan_i2v.py

diff --git a/README.md b/README.md
index 31ecc45440..25533f6552 100644
--- a/README.md
+++ b/README.md
@@ -5,9 +5,6 @@ TensorRT LLM

TensorRT LLM provides users with an easy-to-use Python API to define Large Language Models (LLMs) and supports state-of-the-art optimizations to perform inference efficiently on NVIDIA GPUs.

-🌟 TensorRT LLM is experimenting with Image&Video Generation models in [TensorRT-LLM/feat/visual_gen](https://github.com/NVIDIA/TensorRT-LLM/tree/feat/visual_gen/tensorrt_llm/visual_gen) branch.
-This branch is a prototype and not stable for production use. PRs are not accepted.
-
 [![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](https://nvidia.github.io/TensorRT-LLM/)
 [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/NVIDIA/TensorRT-LLM)
 [![python](https://img.shields.io/badge/python-3.12-green)](https://www.python.org/downloads/release/python-3123/)
diff --git a/examples/visual_gen/README.md b/examples/visual_gen/README.md
new file mode 100644
index 0000000000..4dfd5e07e0
--- /dev/null
+++ b/examples/visual_gen/README.md
@@ -0,0 +1,172 @@
+# Visual Generation Examples
+
+Quick reference for running visual generation models (WAN).
+
+## Prerequisites
+
+```bash
+# Install dependencies (from repository root)
+pip install -r requirements-dev.txt
+pip install git+https://github.com/huggingface/diffusers.git
+pip install av
+```
+
+## Quick Start
+
+```bash
+# Set MODEL_ROOT to your model directory (required for examples)
+export MODEL_ROOT=/llm-models
+# Optional: PROJECT_ROOT defaults to repo root when run from examples/visual_gen
+
+# Run all examples (auto-detects GPUs)
+cd examples/visual_gen
+./visual_gen_examples.sh
+```
+
+## Environment Variables
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `PROJECT_ROOT` | Auto-detected | Path to repository root (set when running from `examples/visual_gen`) |
+| `MODEL_ROOT` | `/llm-models` | Path to model directory |
+| `TLLM_LOG_LEVEL` | `INFO` | Logging level |
+
+---
+
+## WAN (Text-to-Video)
+
+### Basic Usage
+
+**Single GPU:**
+```bash
+python visual_gen_wan_t2v.py \
+    --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers \
+    --prompt "A cute cat playing piano" \
+    --height 480 --width 832 --num_frames 33 \
+    --output_path output.mp4
+```
+
+**With TeaCache:**
+```bash
+python visual_gen_wan_t2v.py \
+    --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers \
+    --prompt "A cute cat playing piano" \
+    --height 480 --width 832 --num_frames 33 \
+    --enable_teacache \
+    --output_path output.mp4
+```
+
+### Multi-GPU Parallelism
+
+WAN supports two parallelism modes that can be combined:
+- **CFG Parallelism**: Split positive/negative prompts across GPUs
+- **Ulysses Parallelism**: Split sequence across GPUs for longer sequences
+
+**Ulysses Only (2 GPUs):**
+```bash
+python visual_gen_wan_t2v.py \
+    --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers \
+    --prompt "A cute cat playing piano" \
+    --height 480 --width 832 --num_frames 33 \
+    --attention_backend TRTLLM \
+    --cfg_size 1 --ulysses_size 2 \
+    --output_path output.mp4
+```
+GPU Layout: GPU 0-1 share sequence (6 heads each)
+
+**CFG Only (2 GPUs):**
+```bash
+python visual_gen_wan_t2v.py \
+    --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers \
+    --prompt "A cute cat playing piano" \
+    --height 480 --width 832 --num_frames 33 \
+    --attention_backend TRTLLM \
+    --cfg_size 2 --ulysses_size 1 \
+    --output_path output.mp4
+```
+GPU Layout: GPU 0 (positive) | GPU 1 (negative)
+
+**CFG + Ulysses (4 GPUs):**
+```bash
+python visual_gen_wan_t2v.py \
+    --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers \
+    --prompt "A cute cat playing piano" \
+    --height 480 --width 832 --num_frames 33 \
+    --attention_backend TRTLLM \
+    --cfg_size 2 --ulysses_size 2 \
+    --output_path output.mp4
+```
+GPU Layout: GPU 0-1 (positive, Ulysses) | GPU 2-3 (negative, Ulysses)
+
+**Large-Scale (8 GPUs):**
+```bash
+python visual_gen_wan_t2v.py \
+    --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers \
+    --prompt "A cute cat playing piano" \
+    --height 480 --width 832 --num_frames 33 \
+    --attention_backend TRTLLM \
+    --cfg_size 2 --ulysses_size 4 \
+    --output_path output.mp4
+```
+GPU Layout: GPU 0-3 (positive) | GPU 4-7 (negative)
+
+---
+
+## Common Arguments
+
+| Argument | WAN | Default | Description |
+|----------|-----|---------|-------------|
+| `--height` | ✓ | 720 | Output height |
+| `--width` | ✓ | 1280 | Output width |
+| `--num_frames` | ✓ | 81 | Number of frames |
+| `--steps` | ✓ | 50 | Denoising steps |
+| `--guidance_scale` | ✓ | 5.0 | CFG guidance strength |
+| `--seed` | ✓ | 42 | Random seed |
+| `--enable_teacache` | ✓ | False | Cache optimization |
+| `--teacache_thresh` | ✓ | 0.2 | TeaCache similarity threshold |
+| `--attention_backend` | ✓ | VANILLA | VANILLA or TRTLLM |
+| `--cfg_size` | ✓ | 1 | CFG parallelism |
+| `--ulysses_size` | ✓ | 1 | Sequence parallelism |
+| `--linear_type` | ✓ | default | Quantization type |
+
+## Troubleshooting
+
+**Out of Memory:**
+- Use quantization: `--linear_type trtllm-fp8-blockwise`
+- Reduce resolution or frames
+- Enable TeaCache: `--enable_teacache`
+- Use Ulysses parallelism with more GPUs
+
+**Slow Inference:**
+- Enable TeaCache: `--enable_teacache`
+- Use the TRTLLM backend: `--attention_backend TRTLLM`
+- Use multi-GPU: `--cfg_size 2` or `--ulysses_size 2`
+
+**Ulysses Errors:**
+- `ulysses_size` must divide 12 (the number of WAN attention heads)
+- Total GPUs = `cfg_size × ulysses_size`
+- Sequence length must be divisible by `ulysses_size`
+- See the configuration-check sketch at the end of this README
+
+**Import Errors:**
+- Run from the repository root
+- Install the necessary dependencies, e.g., `pip install -r requirements-dev.txt`
+
+## Output Formats
+
+- **WAN**: `.mp4` (video), `.gif` (animated), `.png` (single frame)
+
+## Baseline Validation
+
+Compare against the official HuggingFace Diffusers implementation:
+
+```bash
+# Run HuggingFace baselines
+./hf_examples.sh
+
+# Or run individual models
+python hf_wan.py --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers
+```
+
+Run both implementations with the same seed and compare the outputs to verify correctness.
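+
+A minimal comparison sketch, assuming both runs produced same-shape `.mp4` files; it uses PyAV (the `av` package from Prerequisites) and NumPy, and the baseline file name `hf_output.mp4` is illustrative:
+
+```python
+# Sketch: decode two videos and report a pixel-level PSNR.
+import av
+import numpy as np
+
+def read_frames(path: str) -> np.ndarray:
+    """Decode every frame of a video into a (frames, H, W, 3) uint8 array."""
+    with av.open(path) as container:
+        return np.stack([frame.to_ndarray(format="rgb24")
+                         for frame in container.decode(video=0)])
+
+a = read_frames("output.mp4")     # TensorRT LLM run
+b = read_frames("hf_output.mp4")  # HuggingFace baseline (illustrative name)
+assert a.shape == b.shape, f"shape mismatch: {a.shape} vs {b.shape}"
+
+mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
+psnr = float("inf") if mse == 0 else 10 * np.log10(255.0 ** 2 / mse)
+print(f"MSE={mse:.4f}  PSNR={psnr:.2f} dB")
+```
+
+Bit-exact equality is not guaranteed across implementations, so a high PSNR (rather than zero MSE) is the realistic pass criterion.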
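+
+## Parallelism Configuration Check
+
+A minimal sketch of the Ulysses constraints listed under Troubleshooting, assuming WAN's 12 attention heads; `check_parallel_config` is an illustrative helper, not an API of this repository:
+
+```python
+# Sketch: validate a (cfg_size, ulysses_size) launch before spending GPU time.
+WAN_NUM_HEADS = 12  # WAN attention heads (see the Ulysses notes above)
+
+def check_parallel_config(cfg_size: int, ulysses_size: int,
+                          num_gpus: int, seq_len: int) -> None:
+    if cfg_size * ulysses_size != num_gpus:
+        raise ValueError(f"cfg_size x ulysses_size must equal the GPU count: "
+                         f"{cfg_size} x {ulysses_size} != {num_gpus}")
+    if WAN_NUM_HEADS % ulysses_size != 0:
+        raise ValueError(f"ulysses_size={ulysses_size} must divide "
+                         f"{WAN_NUM_HEADS} attention heads")
+    if seq_len % ulysses_size != 0:
+        raise ValueError(f"sequence length {seq_len} is not divisible by "
+                         f"ulysses_size={ulysses_size}")
+
+# The 4-GPU layout above: each Ulysses rank holds 12 / 2 = 6 heads.
+# (The seq_len value here is illustrative.)
+check_parallel_config(cfg_size=2, ulysses_size=2, num_gpus=4, seq_len=75600)
+```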
diff --git a/examples/visual_gen/cat_piano.png b/examples/visual_gen/cat_piano.png
new file mode 100644
index 0000000000000000000000000000000000000000..3b60bf25be154e6788bed0302c38ec7677ad0169
GIT binary patch
literal 455692
[455692 bytes of base85-encoded PNG data omitted]
z>8!hRf#`>dFo==@3P!+0A+p9{oN}dr^W)PQX;6r1m?$O)VI0Q+;Ba`5TDOu#ij)dS z0Z~;_j4=%tnk7TNtWyfp=@1A+Wgcm{tp#)(q7bF&@cx=3(?Ehr*4J;scpM@{T%ioZ zxNLfREfhl)6)It#hH;7^4fEmoBfk()r}Wg$6<&Ve);_K z$K~?d>ubq1L`9J01~KV4D?&`nG>_}^7Qy{`}F*NxtzJIx7+QOLm&!RE2}7i2!gU8f~cAJ z)AEH3WTUMOi5WI(c{YlA=aATua{{W=i{iAQ%XtG^&2RI zP)#yyU)Q{4U>N5CBS=wZMxs(A5pG)sA_6ESPxC-T+nR@I06;Ev7y<>#nN=hWal36S zvTYIrg%p^9Si_(n@9*vY-IRt>>utRf#x#Ud>NpP6So{z|>mkb(2)S@eDGLfTgqUI^ znum0LJUyQupFf=shhw4$%GcL#w~w#e@}ayD=UPiu0j^xKR0$vzs@2qK^t1q>ifH52 zye_A7r%^$b+xZk%!46XFa2nyD2(8(<>nX=`clW}ZTa6aZ@hVV=(6|#ftfnW$lz2CZ z{(65>Pl0)SA`P zo(X4u_|9K;gA4Z@CpdlJPsZIYyX1C@+=N5H>iJpVm~hWW->t84eeJi@t8lbFN!0d{ z{OuMiLuY$It&9kQf+e%7-x}A>@%cA_uYvnLW`Mz+U%Vsnz-bcQn@M0_RwJANL_kCg zZi(SJZIUejfWaFwD5*;@7=$*`%0Y|)?A;3&weGTNna=7Q5x`yhcGC~iB;A1_nRr5n zbe*;}Q;p&cG3^Lg8q^)g{tigFoFaDB;CpG?zsHq*B#o5oT{#Vqs}CGnFkI1sv<^|6 zelpe8&XM>RprKIL4&B#+KL7r+Q;r7k)viDrf-$6Z_kJIOM}pN?$5yf**d~*%e(y;) z?SFB!KdW|2X#Ts=g0z*ErCRV2j)OIW?u%Jx)XFw(h|? zTPP|Z3#dYf1BM}lk;X%YK*KoGfa5eA568zdOTb8R3JTQPe#J52JPvwLRo+TVb|ygK zDk-0zACC{m>u(^n=9143rz)Vp$28`pTy8~2Iea>uaQOP4f4i8cf6ty)BkV-_g^2f*f;dC62Q^|`;%{9*tV=l|K z41A?&7*3~Y%NYOAr6#Yo}V6{ zjt__9d^k+=u-$HXz0~zXrAD+=Bt-yb(Mq6*Rtf+C5nDiw4Ci>xiyPKm^WQnsXlBAa z-^Smd^fS2w5@#m9i#*-I1KizWBduH`bmP{^dbh1=%Eoy@^@xb}$Dx7}l9-lNyi<2A z4HNcz2HK1i*gYp#;yY>R14pbPcO|KenA3h~KmnlT@VPGZ1#)F7}|~KK6!N`g8+^-0|4~W zRB#{G0#s}_*ZNjD;bI1_qi{Dg#_jl`H+Q1{^zbxz_LJw@w{2+UeWAmS9XdEK*NK}Z zXgxCLA@_FU!JEHzG=ZodgKb^8RRkLwZKxapfe@U8wM*2^-bA%SWCb;}VPi)V&K?=( zX-m|PknkSaGfvdmm|aRbnzrVyjiGH<8NBd^CpOe-?OBM_ zg0QMB5Ni@+CcAfAGJW2Xxd@ssh)QnvO@hczAtE4%M|In*o&2$Xqwm#D*!gF1-y;_& z*9GLXllCXLZovH%!hZg{SlZuww=9dKD+xnFA`PqWK|m9ST_PLA>Ol$bDTb)78#<;# zcOw8=!fC6+*CO1;oJkiCAF=cYNawf#%^UN~1_O3rgLBXfzP4a2op<&H&$a+p+a~8{k+%iyBqHGcX!T9q^<_UVfQV5sPzV$wQXGc?he_iQ zr&%C~LQJvb0ze^+!xYR%1_}WKQc4qma1}xrbjYemIE~YBo;9xkT5BEW`Ia>d;V{pj z$Uv{}%QzhlpFWqbT5fMFq7ZIZX3Caj0}j)W)-4Om`E*bb82GuKKfZn( z4(Ib@eSLdl<`fAuh$y0R>x|<@V2q!C`lLbs{O$MY2szi5@4T*6R8ksWzrG)j!pfN7;WRJT41+Gq zx~=+1M+CXvR-z-N7$^)Wt#bi{VLZQodp|$CtjoHr*IH#55fPCHBViFiP;8e@V44ON zX?2QA@5-<-MH;5GEjf;1dKli{%Xl1POj1NNZ@C~Mg$Syp)Lg4%j*(au5SW=FQH*8d z5J*{SWkiTEH1cMg(%~?U!}$30cs?GV&yUkIjzg?lxxKw}x#snXT8Bs>jdg>1W2LGQ zU{z4T0KiC!6qVT^j@ftuiySD}9Z)}s(2#?*v*m9!*q~j0-OGCWR9c%;Gcg>W$QnxnyYu{M|bcq4Oh|NiWUS`jDN&}~E+uY7L zjB3`9um|W{^q_qgA>0)$G5;c}_M}d5J5LHqppK&Mw!|q&uVV^3x^p(J&ByaDJH+x> zo?jDHk4XDpKs+Mp)*J1EWare1kq7`;qMtQkhkfCUaq}ANPR(~v(YY5xcLtdi0L3o< zI0!+b>>P({U*{dobvbqC!M7Ou4(%}B25g7KxUeP+4x{awG&*+g?g(_yZn^($b??3( zO#9p=?Y3=mgl2Hqw!;UDe(Zf$?1OEDF-2i$w^>6Vq5ude z+{hzgh=GO_xIz%ATLBsX$7tT=2!TJkgn)J>MHZW;zEnYRpSo*-ea zQg1Aj13`+hRGr3@#+j>RtpfG7t>?!Ff^d1e1Vjo1pfOPmtRh@_9EZRD{7bIoKmYOT zva(hVk#Z?b1jYkZ$&n*M$ZHYiVVG`t{q)0`D7<~Ufa>8i5z4ld%hwBqaJgP7(DZP) zU6=J^jYE*igm^q2ie#AS=bt{OA&$q$B-gh~%@C)!mLj!~Vi8skrMVCTdapbdfxSem znr8GgVfQm3mpUed7(PDUfBpMEVhF=D%#h~e0RcJJr_+lvT$jt&FJE$IRjFB+rA7%v z!$i6Y|D%?tGa$tgfDXfyUcbF^*&qca7SUWnNaOK*SU#3o;rY|ce4L8r`kOGz=YmLyS!;?iELyP7no9 z1^|aUO-Htlr!AeNJ)kj^qKYWuaHT7BLuO6I3aE9FF%>BWT+OE3*}Sgc?3M;Lx}yQ- zrnlV7Mz_?yM0d`qg7bNxeKi&A60b*5I$ky;p>VHqRjqe+&mQv`HK7my+B0MNB<}2+ z2l1dsz0KTW#~6;1>>22ufvOP)ptwsHI2GtNaVKRQ=zj;>oT-663`ZVrW^Iw-9%D2r zs+Ik^WA=P|oy|lHt<$r(0rd~5-BN+x*U8}+LUg=kqR=e9-81!_a2Chi9YnahW9a+b z`f_5wLveRw0GiHJ?Rj7g8Qhi3K?D&ZRP7TsQ*1Vp)Pv%du!$CDMO3?t={H#IN-#8c zrjd^=qs~y9=>{@S0`8hbxYFn~7kw4m#9Kmu=O37WV^6a}v;NrOoTYq;Enp9!Lf=B! 
z-@b!BFqC8%2N4vkae~@$X~NMuD6}@U_qwWKiViw_&xs850D!1hCj><>rxySL1pV4k zb3OXv;eMjJ=Vo(n9Z33FGe9L?I}r9;s1Bh`@=*5lTSsdSqy1kF?C-*tb~_B*UL$}a zY4d|!r?f_1YA?8;nb3OtyNwi~1!oW);DUWY-wyYEZG$>WY+*emiikmgC<;D zG}6F%QvjMD$MeI(pTB+^55qW5fJ8CW!WaW$$ZL*?ML7-gzx?tS7XI}ge_ocYA*50= z5(1GDfYRYGQzQk50|JDcOG>Gr3bQE36qjXP*W1J684z!49_KL-VQXUwFr9`O!{u_7 zET4b+R7FwnFF)h|_x3-e3K1O-;dTXOWkU9t06K@$tO?rDFy}|Dy1%&vF1jt5hD5d0 za=png3>&ZYR+q~XhxGZ=%eHbH(#xk8`bKpvukx*I44#_4mHhTi4l|d;<1i3~TuO?= zIF+P#iJVOj|80O>QseG!p%O&4dN-$1C8ip9ldR;*P2ci&! zRTYtx2G9zsA}CUoGgHE09>T1y1LO^ZAjPw_12B!#JcdthWtXmKz{K2!yp%3OG&& zE|my|AxdFZjWLYlKtvA@$HVFHczSp~KR!G?q&N)2AY49{>w3AyAmcd3c+6|g^}23F z*9^LWRL&f!S$iZ=AQb^*f!08r6%cyg`li2)b+WFM+Px>EzqDU>>e*e|V;gP*T@t!aXu7i7BeDc&_l;o(UJ77$MXla~cihGXO^ih}sAa*O?qD0tQk8(H z))d`S@Cd+alrkczBDWHT&PQ}VxG6DfLSgY(^yiZ=Ya`MOXf_+-zNn?fo7r>aZ}_d* z0}qgT<{Nl|daJ2uD?=71bLivRutkkYQI+kAfoXiTT)M_85vqV%eN!JCHbieC;R7`* z-0Vw-<;JZT1&7@$F!s{afRGo?B?t}T6Oz@2zH27gkO?Bm4rtxbm>fW-7)+wjEUa|l zv-Kx!Or&BfC2M4kvCR&pzzH2khHL4&{M1)pO@|xCFR|(Y!9}mFAbt{hjEo zYoQO-*9aXf`|6tV_0KAF<_|o{<+}ms-R$-+wdpk9$j_=P5(Cz@eqGF=wVJjE^|Er% zW4bo--GFG{P2U}7o^9lf0Q8kdR7GKvr8ExHd|WwJN)X2B>3Mp5rgTuC5C)17V?>Io z5K=%4NTIdsQ4k9QTiGZC3L&K=z=y;5^XH$J>sq+x$|?v*wHC@@7}M=`3xwl% z9LBM-rbye$({VnH>GjXI^|r<^1-do&Z60GTb-U$Z9!}4v+vO%A0Ye<7$MgAd**2*Rz%&FFi7Lyoagp;A6v#suYvr$h ze9h|$5LA`9Le6=LA;8DiS3n%oFiuHD^IBtwpMUuLp5i;c6Dq~f!VWPJv$AL;OfeGR zwrx2VL;{UvDayc=BL=-?B1$R77@0Y*3!&1O6g6)QpcHUhYD}%R8xhes3<`iLh7f9D z1qd<35Qbqm&WD$mr}NX}G)%|C9K$fgD5aFTD#$QTLsDi1kzq>8Ny=7(h=67egaRrX zw+`egL?Y0dD7dpnR@H~CY_A=3cY=4168yk{D+z`jlJ^}!*peq9El?_^DZcq~N6y$~usFQn6 zd2~9>h>RV*fCpY0pmfOJK1CfrsuLD&Htb!FcXqk(*8xFWuW-ZQh7r4!(_Y=-&LA0y z+y^X<7n_jzSM6UG!Q50(01=_kqS_7SJHck;mNborNC2&~yTNPoLJV1}ADF)AYEVK< zGHkQWbX%MhA*jP|XdP4=>Y!F6LXn`g2esDeX2QLQw(;GdAc&+WNJy3|V_?Yok#*0* zk(C|a{u}gYnnB_{Hjn+xA~=<2a{N!p4c>w54w$&Z)Ez^A8Ul1Jw5K-gJhGqT9T&DI zw1Z_Tkaq6VlnB}v_PXC5B{sI%4++@WYYQ(nX{UnijUM#hL9>A=i>KqAZfi8TgG=)U z9i`!Z3_JtZso92HJKcrQ*UyPC=)f0S#;w$bcvfBnn9tn2p2^-qOzJRj*o0f;DGE+5PSEUHRGEF!8Q#7J~wsazXT zO09FHC|Ic}RRv@qLS~@|MGA90tha~rsjTJo+qdJxP`_5m^7DWBA(uiFpPrtS<>UQ5 z1jvO$s05gEy}n;VNXL^f*W2|LBhIHIpmNc}L%dyXDa0YAWhqD$F&xH;IReegZ5gNI z&fPh5>s3D|we-0F+71=U4V3eji@Ys=s zz0$2M0swNq-SAza6gGa)3SWb#QQbM`diP9M`QVvkZrBuiF~0@@+AqCUK>r|OE4rbI zh+?j^ja=16s~s3M5=5aT)is8_U92-$s~FVEI$#H0W_-c#iujdH)s7{N$gttxu{)L{ zb0?Durap|iBLo%8Yga>ly2Cq16m5Na3>!P%Rn&$V7ElU>v(BUXzrRxmTgLVOiNQl zY?p;C1yY?S^C#cQMSGpPQ1v}%Qq@@&xN}#)Q84;e-2zd zM6~M(z5>3niao;MsM#I34jgv`-b}rDOkn+R8lXl{6va3st|1J^Fdbo>b(|zjg=1Yy z(Cf^YQsRxHXdDJ4X~9!wR#5^{fe<6b*h*(301#=dT%}|#OF@B%fkNoeLu6gA@1-ab zF56a%Jb!*EdHeYCTZlhD|NOHkNnVZzB$D;IZnsTDu`Yl9_Q$emm4b*Eh+sIL&zILV zvk->E@vvOpD9|*IpME?|$C<`hYlV#i)iI?~a=xz28YsLxofW8-`ucHg2fvoG3Jf#E zfpXcF+eJa40tQ8t6bA@cr6S=pA1=2Wm;C;EnTB~drBWEN9uD(%%Uj*Hyp3^0*7dRx z;SZml{=o1&J#Opr_WE8*7UmEL7=Hfomyg5g&u@R+t~XVzTq_G96|Ns9Lu<6%`cGT$zv*_4WNtlwO{nD>4#sskPQWfBSYk9MWkxK8&JVfoonv2$%PD zn$j@iR?Elxd(CZN({efvhapXoiwNit)9qt{Go~>JS1xIl97B*8AD$kDC4T**#x!C; z6;?tn8YhA(EZR6fWkC``U6)O`9!`hm3J1b?n2~V1Ew$8Ic$@#(@h)Ah<*>3A~(NdJRBD`K!(yFx}vua@$ zWx$9aQE~-fMu>z#K>?*=D;b|(sgsywq8T3Nz=FzD8r@cGlorXl$ zLdJ)6&57WZ?!moe)5~tAu&X`GS8qVhnl1sl88IzKmN4I}2v}CUs}U;#;Rd^zaRKlf z0gj^X2JEzgws&;J=>W{F6k^MR>IKHhXNBHj(@Jc1ZUNj!?N0_b_f1?qD`Ty%Tdjhmvii}!?30JTfzEmhpAySc$e6XUiAVR?*)$S$k6ZXU3Q$c&fXw!OL zzs1ygTEc@p?iG@ zVAtgATldUpwcEF-0zjkytVkh3Okp}5rh}#f#nX6x3e!x(R5@(9s7k2-02Hw@ODQCQ zNGYWlfLUYDQ$>%aZ=fB%=SU%v9P3{#q8iSc%MyG+yZ(`UKd^5HNF;P^0|&ZlcGrKEX0 z9uJ4D2*sF&I85W^a=Tn}S`}0-A8&w62r81Rj#FBeg;M9N#JK;PeP!b%hXRV1$q z#Hb;V6d(#!B-1udV+dg>ye8 z^YO55T;wl5|MbV7zyJF8KbFe^0oG;v{L>Hr+t2^U>Gy|U|NghFWW(< 
z0!o#L5Gg7E5|JtkH^rg?qKxGC@iE z?kL7V1%z9CNzxEOQG`=WF~n&aPv^%^PcO&QahisdVl72Mwsospm0HTSRIb~y#)!08tpQ#j@|5Cbz?ObbEXW0I>hY z(y_WT){I|Q^E-9XZM7LuHeE-} zq$8r3W(E}hYKH|)xp+pM+4zo-`n$+gFd``k0=dpLovf-m66gd;GjR79&=wOWTF#GhUW})G`6p~$R3UD$OQL1A8>%B(5VFIM5dcf6m`m} z7jHvFpk~WjjsF%3@n-NlZ0Zl$L(C4S#j*$yw1pO}41q;^-6*z^h%hvU(9u;3_{mx#-F5iaa+d*-}=0U%1 z#ldl&|ETWpLfh)?Xs2z4_WFJOj3V`t(-_)FJ)Gt%4#8an10?1(p^K3p&>dp$V%m~X z2}rGhecJ}w#Z2y&aYx#wNZeind@oD?>km4fYyax;Jp+J_iCsp~C?S((8~>gU@9W$^ zG3}?>k)3_cf8mU)F|kc0pa}%*s?ebc?vr(I#oR#S-dZg)*xf(fx1{TV4!mstJNUA% zG+uD0|8^kMv9T!vbbX8cRX}i87QkFI=;S=8U*3Z5v-UhqxZ{a!D_i&;jGu$=N>JK{ zSph^vOB?8mYgi85F{Q4A6qP^`6UG##8EDGkaCrP2PmgO2B~9UQo*qs}Y0P=MT`LO` z#uy=B%jZy4WvPl7i3lko1!{NWn-Uim6%po=t5yM4Lgmw5P`DqT%J8`bNy{1>wW^%FO)>4sG7>9>K3O(zf*-_rCiI3xkcEC_Z> z-qKXe%pueuiEtk2pNpR!9pEuI=NbF%SfwCo4y#UUyTF+RIkE; zsp~CDXD%p6WgNeYSE%v?3t8Wkx789Hg>%%|-z-8!Gi4UF$QFRcF-gR#y#k=txCcYA z@E`t2dApHqu(1jHdz8QXg7TU-tE)JVHb^Y$g2HA7|3H$!L$`fkXsG<{8HXJp?U+R=#A(XY}2!N*FqZ+N^-5Rp4 z00^Z93=)VC88L-7_*SK=`Y7cxgoXJ9P=FA#A`dCFc!`14_`G2NDaFc4>A@S{POM3& zY)Hz|+`)1hHZ%r++OW1t|CVlVE{lO_0N9v00AuEAhtmrFHW;Xb*y81evC84mghD+f z)@j5v-3AfC0t5h+&$CkHm5~y#X1(=GDy{A&FQI%uWfJWan24%pEKt2##7zgIF0x8w z^({&Qsewolt2}Ny4yGZs({8EhR}dkyrK(5;qxL4heU~azK*}H>)q4i^tKN7hll&&I>zz! z_2KF6=l}GNr}t^TUS8+53Elqq@lPK=fBx$_&l$;OVNB=(an92@`p`DJcAD1f+cn47 zwk~Gg-R%frx-MON7(VV1^1MvTdcD4%rt6$z9CpY5{7?UYj(_|599Le}NS$-Raf<6q zl93RSVDN!lvn(qB^=->6h%Tm#)5H;d<1xV!nfA2X4Ovnp%8g|K*RbKmPQm=jZ1?e*I&buPIAr zCc>OE00fWD`0cwcj;Bl0-U>DJ~+MVp?!@L<|hj7sL*}0m*F}#uTT^OoV;cF*Bi%^PFX! zk}!t01?0?`GdGQ6pq!J$+;m=;*C}Es*BQZik1Sb)O7{)`q!7H%m_#H-4#9;cIKY&W z_l^XC!3T2ANrt8gcaL}XclUkM`fR}_h0VWZF1W4?tG}lH(6h8(K;lTd}X+8hbq;zv`8V=)ECr1yeSVbO(u0T ztT839co`xT&H&LBHi#_OR$WyKxr%8k%alT^zf^oklTuT=vbnE@+NxrO>V%sUTM?f9 zH&!iY>K7`{ocN0_}t{70;Uc717K;hZWDwpg+a72UFs(bwgdpU|_8lc_9;6f&4y>UWpjh?4QoT<(5tDWg09XNwttV}NRcLPr z;X6EuvPHxc^6F$!LwPJvHeT#Q_6*rfa2ZM&7wCDljYa#3?G$bx8^Lg@ZJ#LqFNllE zm30 zpKM1ERD~G4(&rdR=lT=<6yAt_X{uP~0yhX~3x!v#4^Vs^%l|OAt>J0)Ph<-t+Y)WG zqE6W;RkR4ddUawQn)QqJ-&hl006=cD5C8z6)LSYpmw;%qHI*OSJRy@AF}F=h??N4# zLS-UE>*I5~IA#Epmjq4qwS0wQ5K2GI(xxV5SysU05!6gLl%kKol3x zoEgv+U&J}*9az*G1SE@a7R;QPCFhj$vT|BMvT%ZwfdPPki8B&`_dcU@U1&nvQa7&4 z%gehHH;&Ws@o;#2SkwCT-+m2${f8eue`5G?jw#Obn&O;!*Y({1z`UfWmG^@mx|RXQ z@rnZX554pIdCAA2bBza~-H}B~$Ai?wA*DV?$%$iJnA79a2f+Vy8L#hgY8|B*7Xfk( zQv?BlOw{6@1Xwb~7zuHA*zI=xw-=blnDGJt$NNLu`ke9_({4YUPZIzQhi;xD3X$MC zNm{rG+z-Q_fBJLg^!9ved}!A6{QCU+ufP2C^FMt0_`|Zq{?LTbg1Fb`uk$jUFPEH! zP#fNhh;t4Ra%OG9CRj^a4oEDulj`&~cK{{EQP56idb zG>(u`4&JAnr*ZYJ0g#lIOne>39$;N#@Udw<31kt8kQZjm)3l^Hx~4hocjRH5z<+u#{_gP($W|!j6qB;Vk{r2dP3tldV&^=10QTqw2#JJa6abVgiCyXA zkQq`EVPGUgAT30J2!wz_TKZMtfT08cSNM`@?@cKP+~y3KUM;}_F_*eJXZmr`A6f}< z^@j@$hJuyHHw327tXjC>d#J=e;*HjYTC-E@8>okmTK5VReU(d8h```f!5K0zWoNTP zu=qfAi|ZXbD3(o~TeT4aU{~rlOlFTX5OiZ!?7L;&V)2sdP8%jwJ#6Vl)f}05FsHn1 zlBNTf5`CiG-77ppDWN2_2sRdlAUD*D<-Hm6R}94lEsP=A7VgIOD=nZg7B-HcvL6*F zioq0{J6Hs%BE4$HG(u`vt=Pm0tVO?BL;R&BX0es^mDLX0BLEP!Lx#2pA}BYdOTE?d zIU9sz7OkanN*OQ3m33O z_ig|(fo1^N#AP$!QgSwL+#VIoEq|h3DdM6p}#DYN_{;o&qb`Eiih3jJ3s++VI3DhT*9hF<} zP;77_c8?O#=tkQY0I(d0O5xYRRDKfm*P4vXxPe;Z3iaTXAcfutlWlyqMg&mGWngL9 zqq+t_O1@fAy+!D`tRC8-Q$pKWRT%&fD_CR)15JHd;JxY*6&;;|`PsVuWqnGOsgjtg z#er>m&2vB&1VRTcCP*>G@hSNI5w`q<8a4-(=;cBx38zu zY3hbyx_p%7TJHOv|DW6WKuYWwR%R+$UFy+L|>vcva>zYB33N=Vg zJGAaR=A6;t;cm!D)@4Ou${8U$_KjHkhXb5Wmr{iw z#v~%nyVAc>EqZ20M2-M}ne1*F3P|L9Jzvtg?uWz2j~~w0>$Hx3Y#ihL!!hM$iOW2H zNr^$S08dE{2ZsUk!fReLM<3kvbPYbVeRnxccaM8VzKemKi)&_r&f^**MHx?H%F^!! 
zBw3aT9eH%?TJChS5O^PiBrP#UChtLzNHT*%q~LNAmW&9@;E==#Fp?0*OwJ*ZClD`9 z+XNT-z8QA?es}!v>CuN~jR_E($LTsP)11~-Ag{}u*9a&%XCgx2IWJT?lu0fPY`pW% zyK=<=F%u?$RWPC^fuw3Vp%{3*Q=tjd2q<Ap_Qm zp2eiF++A7J!HeKq=b+OMa80O^LHgc*SVzJG2y~2@hzDuTV?n zo&aIBdI%!MM`&oX*f=S92#8W{T>+qg7X#{IGdIz<2^&9Q#F!$ay4sa*#4QIC&1pc9 zZIWWaMA3WEaPttr+HXt%5XlB0NEM}_{M~NV;VY46Zz*^V?Aj_AZWBV_3h)dh8fd7# zQC$!dp>oX_4SJT#8D%f(hi+0h2*EzP@lQs;8IVH*J=!0=oQjHO%$G80FgR70ZSyQQ z%F;;VZB1lzcfi`xm7ka6#}+q$Dz!E~MNu^-Yrs)A;0C~h)+M(}zP95C*4_fqjwt>e zqN+sWZPQ#dx=utL6suY!0>Z3?k)|(m{`dd; zmtTL`9q&Ip{?P9B!*RE}+u@17efu_^Cq!To$Jp=MwrNAx^}D_E4w&|L1B)aE=iK}I zn{&9o>p8{cI_1pmUDq8ET8`88e0h0j+K-PPKHU8=UVdFx$jmH&M1;(VaZj!Z^Rl#U z)A$C2a%KddGp^GbCHMPb9!CJYe>hCj>XS!=>o~D+@Sz!eTtQ^Y?Ap-fobw6~k57MD z;<7H&w6sK*@%-iM?;nmIhh3jI&hN8#ezzNdSQ4#ko~Em0L4=%BQJ}6Awx*J-TpAEy zN;$5%1mekou%wuCp5}3ld3|_#`nP}i+oz}11^@VX*ZB7C?*8s7iJZrw1u z^0Lg+xA*IHO6!_Z6v=G}P?BLvbpl6{MN%dN?_64AUQ-}oPM8*nb4oMvOpr*=>_%=> zrT_qdP1$lOIHc~RdDZo>7TskQ)-)Pg_7vV|YcQGt3@j*%W5F){ny6{B^b$b?awJlU zEm||s?UfbfYyb>Y(^M@81}}kt7z9xqYRfVLC}rsNbLy#zb-1bNC|EuCicGbBDCnz7 z+@PvzsI3NW%!!hL)fGoTuwuMYl`Vn*k%blP59wO7J+RsojY|}~hTbQaT@kST|h)sgl#$T->*AV#~tgYlx;N5ZNn_OMEXd3vDjn9BFtE~Oo4FanQlm@?P_aMTeG>xqf zh=&Rq@H@%}l-SiEp_0i~Ktb&=7D|RBX8~i}1VC%yut6?RY7~tqLR}g#u&r5-)qzl| zhNXHI6;ziKXp)S#Ny`UD zT=(8mDfJe-RrHm5XjM#Xg>^=H{3~$O4Yi+?H|e^gm1Z(LX=$ViP_gRJ!bT$?bDfi= zVkrD(DDm;a@G+F;J4I+&YbF^0DiWj)wdye=j{&sSqXIj@x*|GTG;OUWLSqKVToQ>% z1T!EyVsIG{!MPB86Pm6i?+}5AP@8)*XW^Kj3lI_5YB-AkQMuJB%56hkPT`CMoFk{4 zbK=MjT^2w`j(yG%$rD11l4M<{w8SwYQris2UFe3;58ZCp2u6-+%`h#~+ju=qFMoV> zzJGjr9FF_8-~IISU)PinA?3&d!TB}>YFu2T?;C17iy+aiap%|TVd$H#nXc0^<>|Tx zf|!#J)Vn_M^8Rvh^ewpIPai(d>+<`zZ#jZ+S|hjpKpvJgx@KuQw=9upA(xsEBuLYS ze%Iy1;J_iC-!FaNBGPyn9pM^Rha|0ZediH|zMH4$L+JWu8Y4CB;r{UXhfkNwxGpoV z`L|#G;8i}gpAbM2%;P%j`k0n^<>TQH(-JwBU{72l5;|vRmZ02qXuEb<=adpk*5Xhs z;4wueg17fG5yk7`(0%&x!|#u z9k7t|J~%|=C^=={DJ0SF{Uf|zrT$szjAb4u&Fl5ik~bxCn1NwO|ZR)Q!w8-pbvLJn9E z834F2UKuiKXL3EXC3=8*GAtKXs@^kjU?`<(doX52&4>UP5x}C(d-Y(AhS~IFjo+4| z=dctWwz%C!+=%+?7KYNX^9D>2(aNxy@dPmD%t9T;?;sAwE14xX zOjv250SKjrLqSn>oy`WTu$Q|57Q$D1SssEQLe?$p2K$N?J*b1SBT$+tYEBf_fU+)u z2ncvvoQiV4!^!IL;0A*$h%$1?dUutAsYZn(fFOx*B{I-9tdO4D0k>bHy;aPi(f65) zrq*0%2w)*dFrHMwk+LUsCdK&@+alNy!{%gwf-VcImCy1mj1#-LTJg3qmnvFVySD1{ z5Wnm6qDQ0R{0g~DUFgbg$FFXd!KxdHm+ykimFufSsr`z;a9vqff+Cm|+o;xLutC&a zu1l@>S>Ps&3t;zx3}|wF5t5BwwdckN8p;%~TwFUjpy>m*8!S~N=xprvYbl>wGbd$3 zoFKwhU(PtxvLmGgh>0w2?5mwe1Y+&Wj3P`_H%kPN87x%*$Pu|l$O8r+dI(*6xNCNI z-SH04ONn`qbB>Y-l#Y-bkqAW6T0+N&&Ot^j_jHJ)6q;s`EFuCK2uVDWcjAzViJT-S zoP{Z|utV`q1c{(&AOr`V)_GZ03dyy5-wb{0_bfsVukV-3bWKUtm|tEm?cqReSp!Fx#adU|_~qg|LLcHSYrI!Z(mBK|8-f2&^cVDRTxBES|hpOUDJdB8AQ0>wZMV_+Sa|lzPDYs zu2DpG`vDtSmepNpnwP$7bI1(rLwo;tn5L1)hY)sMmr_E=lH=>;rG0B0hsL|;Vd&Zv z`(=p_$3x=v<@qHgCS^iPr!_!it)(ms0F7@JT8sYX!jz;^9f{*A=l2U7@b&Ha@zXzuErnVffS#AkQh??@zd>7Zs_{M@vy%?H~{cK55L#6KKS6A_aF?8*Ck%AYnK>}t~xgeU8P|+G`7zhAlLl&yPHl?hw4m1YL z&T>`URsTY*&TkN7(->O2Tg=KBg<%>M%+PMP6t}3~_rEo*Vlj-ON*L_YR57$S&`)1v zdcGR7%32lg!NO-2rGbh+j6+c!h&Pr(zY8F0MnNiMG0Eco}2#hV=fwSYC0K>Bb|#BA?UP*}dpU`wv1Q49YmCL}PB07fX?$cPY-R?XZ> z@1nMg6w~|_7b<{|qTKYRk~n>=lb6TwMaNXkqz|2U|I+j3np=78_i9S=J3E?L*jt4-HG{*7`6Xpg?|PZ zx4#-OMx^~t5RBnN5MTrcAi_xWcQYo#!4}HA#mQh5z*Rh;z-=R7u|Rsb{k-D#Z3?D7 zpl)BqhlUaL(l~CoT=kveP1|~bdi>O#RTPaIKCe%;znB8m9I_(jcCzZ*Yyx&5^qb~Q z0$5B0Kn!H53N3=G+pP(J3f6^*umpNa$&a#_8oL0oB2Nm;5m9O*<>DzKu~w(5A$}VM zmzz_lo(7Ttfe;JjDAf6Ge`pT()E&G1olq!+uu9wC(j&s7pd%2FtQwpEg{su*?u&s5 z5hjNSF0B!ek*IjgDT0f_SyBda6ncWzWpuLee2wVIIZ115T)U<`=Sy58zmHT3NHh*9 z^zCuM$PM;w2aNH$d_AxIerVgy1y4i*m{z%-XCg%4@pR590%2McB4*%j-+OWt2!&t1 
zoj5M@WlYPwJ2t`lw9MD(Ivm%wIsW|PpU>m;_I}A(95p#+B91Y|n7h8sh}~{Lhigu2 zOv{*Zmb?4#@^U`j?^4R42|%*Wk>=5Lh)Byg0UsRGXMekLfR5~0(2yTLA>+c`>qSe5zHQo;bDpjX^Cjg) z7^ysqWx7mI?k?$729BJ`LopHFIZl~K00m+qV!_0+IEX|UOF?85VC{Sg0GtIN6Oy)v zGVNDb@bZ&Q2g-L4$vVeC)iCwo-#|UNHPQ-A>P>tAEUlu=tW+DP-vy}2WW^^XyD@6U zUHwC;?(U}Ds*f#N0KrT)-YPd`Evh9|e8qJ-w-)F&{p#o=f(d#ugpc%0g zswsA>6j`I%>ISvcNLkgJDo$#zKsf-pStsK^b-C1piv?uNuofh+@vl0H>${7Jbp#*BM>oybLP`kMr)^1Vh$toAVup>&h zR;+EbDrtV3RyG6;W~pVx+kjdnfDI~3#bBEUQJoTFKn$O&Y@6?~ok^(xNI_chZVbi& zz-{SP*zmN`?^vj9i-zfLRHvc9P`#+DhXgEmR|MG_#OaZ%K3@r7lYugurcr>Xm#xi- zlG4?s_zeYmi^))hK?d6D$*dQfVWXn7Sev?DIwI9Bw@hY%wEAN*VK6DNI6xIMsSV~G*6J|rH?NYs2HhEzJ0pp(dxeo4<;ZRzDE?T7g>Ifh%T4fjp!VD2mwQvDP-qZc#e@g0AweQ z5ebkPI3p1fIz;uB0f0P-)M`P*ECA>{**+DcC*+Vbf%hC}bxn*AZ!THAJ%$(VIBrhrFd0K_}@^;P)!@h4CKVIk8=Sdg{ z-Tv6`kH>k6yQW*_IHxJJew~-;I<`JM+~50-mTBS44C!(`5BsizE|8D&!Z{N_6Z~Pn ze}8|A7&CL}waow?Q5c5fp=m>ku?b-uufFMW=C~$8EO#CBRzIQ$zr4J(fAZrre*N`dO2D_$p0fiy2Aeoyc0HPx*%?yHfNQ4ZW*?Z>*30>p6 zq3?%bKMeihaOnGxN5G zR?0Hs?IK_)AXS!vG{|gzxM{u($89fDxLZOSC28C&pqN2d&09fG`7l<*DBpdi5{F>c zTmVpAblVX&m;@Fw(s|jUm+97LDcpdXU~HAM%3rjlp-t35&4jA}a>KX?H9}(e0k*8Z z4HX*((*M#xXBku3T&W+y`VCO~YLclIS*lev=yO{~MQ(^c`yOmNCpOkXD4CSf<_p-y znyoLVrc#E63v}36ZlUT<8eyhbt+s|V3|0vF`pJzS5&;GEfCM>L&~&qIQr)Hvjsh63 zrbj>xt%8Mm(rU^b0wsi5H&>WVU)b6jz($Y~4&63S0kx5cr~z-V6alH;lnu9_ z1ci5+{H64a5=AWFz=ARwx!+E@3B7Jk%?VJkrczzT-c3?hObjSN6%X^e$_cfR-ndMF zVBN}alT8$Bw}RUss^4S0rcT!YqG=QT09SmbBe1H;+jnh+Rnzgt#co=#!btO&0qk2_ z`b9m9H==KP^9G?wRj*}*%g(Y0>Gi9Uc~c-|Sy-vK6F{%N7l~t#l3U7^>IVX#Qs|eQ z1Lsnvj9$be`4llv%s9!Mo3?k}JMVoHh|p0fN)I`5X;{xJ&NoB^fZz}bSYLvu?fC=& za%N4YE?4#dlkg&IBfM`LB8f2*697)rl9RZGmYI>rcg-9rp4TWZ-QE4CpFZUbZ*P~J zl8AhpNAFs{Hk`5Td6};LzHhrWgx0q-&Fh!vHvl-^_q%?#t`R%|3o!)O0zyjZI!()E zy1t)sj$vp*->laur3Kc+Al(PHVVUQq zZ9DFdcfG3*WuhU4*WynRwGBd;^A`kZum(xfA zXkFqQySB}leQ?B_Qj${gmynQ_d3||#8!t=KG++LZeZPG8^f-<2@&4iGpMU-Bn# zX+<@xW7BJq1zw!UDpcZe(00r z^YapyInEQuv>Q4h&WYE!@|Xw^i8&<%5Xmt{VsXT2T|qJ;04G_Z7QRIG4gm_)3ZJ!$cLF+>6) zkZ&?^5KzkP!t!rL=_tG<%H3#1Kp-2Lp_|HRs6o@t0jiY^jzY~d(;ByxIWt8L00<7S z-~jV6m9a7henXQrI8{v!u+(^mSOWyO)smnJkPx8UZ8WQ-Xuj(DmE&B z?V^nU0J9#u`WCU^z5;kq{9FF8A*wPRu@~QbOr#bT(gX$IZ9hP((1EzRn17P~u4TI@|8s^t*lJc#xscb2+ zc7*Egh(SbC0btp4#&ilGYqx2m4sU5V+hg2jaw{jsqE%^vn;DlLyM~yVx%y#Sg2Cpp zRa))lm8M~<2VW6(IjS1|Hc=7tXbq-ARr-ZWwIb&=K2qjIkOcuViwFW?W^jQ7h+IS7 zk#nvY#J6tfGqw~0IS-*B@4#1;S75z*A|={I&S@L~5DPb_Bh~bpa?J=GNCwU+#g#yO z@a@n|-mP<*k~F)f*?BLxE^+Uhfm7ss1cxn6iQ_`9X`3d*RsQ{deA({@$nbVP?GA%O zhd~hd@^W4K)rIi>7G2XU(;U3ZoDifB{qwJL7T6yL1YE{7#e_u8`^HmFiJ9*{JPH5? 
z0Fm?aJI4tnj@R=t&u!P{6qjX=dFk4_|MfrrFY|x-KcC+&E@O&O7zJdVm$q{$#?&Qa^N=^W}2cA9nLJA9lkWr|W5Qo*tg=-d_Pw z*0}VcyYM6oP1j^zbDmk;{y1cgAh~PWWn3TbpT=>zoKG>~x`5(E89Ghkf7nF~+#2xFiH5 zkJCK9KED!BN^+Uz@pMiRfp9qXSB^2qW${GR@46V{beVl<*EF;6GSBDt%RH{mk#h(v z({*)3eNUb|l8G3C59Ga4j{{yQDVaQs;wFsVkz#e*+sX62bk4VrLKVyFb;3a zyQ=P|X`bx|s@sk3Q~hn9*gywnO)|RSKh!3T`5TVpku1#Mxjq8S|0Sy(YNiBI>Y7JO3-!0mn=1*mKz0HO58tti{jDp+wS9lI*|x@)yW4Qzj;{zo>uYy5>_ z7(@*j7ig!>f8l&fxm|>kcW+?jmXI>I30&sP39t?c7IdYj+CkNaa7cDvnP0I3OG z*D)dlAE#+NozjxuUtd4{_|&%Tm*3t6pzE7?oSgT^yB#%+@4O=y*C+@?Ae{4>rqea$ z+#d#z=6X7>@sy{P*Azm#K$^~@bIopl|G)ju|LcGGfBxJ1IEyfPO;vOOfgr`4Vs;45 zAvihg2Sm5T*t9-$eq9o$I8O@z3_FQ=nogrb$_yV40~3!l=aieC&|%xP!@i%#`FOW~ zdw&NJN&NnLy1&2wI6N)O^!j}AA^q^_F)@tOWgJH@D1kGJL&yo@nqFUDGp8NwfwAxV z)9LKJLqb8^?S}C>Mqpq8U>`gou1g*-(|8@d3ol<^pFTXYxWn=I^x<({mw8%{fKyHa z({+CS_PpC2nugY8d4Ing4u^S(r`IzhynH>~9l6=}PK2{~@4G&%DaJKQW*0mW5>d`s zGW2~bC@FE%IFSqq%UxOoLWX|VG;Q04_Tm1p+wYHupRT zO+zWiI3*O0c?DqPEIAX1CrIV*2+UD9gG7;toE=AmB%GL&hmwq)1+*|m!Bd%4rx_aq z#wdvWt@^`yw5_;rDVT|yvuey)wY#t>7PU=wg38s6qVB|il{#;mpC@WTib)nD_HB@+ z)kULE@|~V9&93W*FMAXkFM ziXn&;Kq)ow1r-cf&QdkHmhWv+vq7@&;^1i8hL*O73OaS~N^Mlp)={<{+83%67f^eG zczab*9M;;j#TO9(Vip!Ar}twN0^v4J!&-_}xh7cFi+jMOg=j_M#^P3xSa!#VQ~?2F zD2oj#)#Ctsr8%{yhCWl=4gU!yZ3c0S16Rzc7~K**D@g^+AR=0-8S7p^t=C$gZuq^% zylXw6iX&^(S_C9(XxNN*5xiR{UB)I`su?0=Hb_){UlF$My}->BH>i(wkoC(qKhtMIfe0E+u4i28^`3e-i-6dxH@>&*t1SUk1&4K4 zlxfqvGBMpGuvP1k+r6$x2tX{A80?AF8*aFc zl!I^kp5E5q;cEUuZXC{{HKnJCK1DpQ} zRU;!5Tvat~30G~-uxa*kp4IeJ?P_@)`lza&s?&&@k&zkR z06pMt4LWHdYVG#(a zb?wW_h=M%jjBKI4Q|<_$t>Tq#vZQY`C22H4-K&l7yNQ9=D%#kpJm_nyAp)>M6_wLb zU|5#L?90|W9V>ANQqslrWom;KQllc43Z~u+J+3Ff=7gSNXtplk(mPG)|GLwxAYra%@V+~i6VNknbjs~~UqJw&1 z1OT-auG_dvDuR!8ssPYh+M2Uy%A*LUd1+wsSD}I+A#nA6Zdft2%m%DwcI}|ln;`hT zN-Fg%6n$2;MF5spE7}92-gUy;8CNmY`sj5GU@Tc#Qu6{;%&8`UnT|t%4N{_ZGSNg= zgS}-3zH?sjMnWnyHX2tql|>v1Xx}x;m`m%vQhYY&f^&&ngCrRpVL%^1Jd1Z>ptcnZ z*z`yZxHbuKlzrPkAWvR|g)@qnJEzx92?z-UQMfd#P~pP5K1d9iMbIM%=9OLWoO#&q zfB5TP=4oaL*fgQ*++_(q0MZhtbeqF6``7C{#+=6M zI9)IEymTQTl5e{G@#s7J_Vwld;rR1kem*`NQ{tDGGv{)yGYVbKm(cp5@8fk%oJ4Zl zwEkfS0O!~9BJ6kV@zaN|fBW0p%2Sep7tLO``tXx*Xz=GAG#(tzR<%yVAC|0)0KL1!M~p_$A{4mJm;pIS!sHutE5jNS^Au+}KJ0*mQ}!+l`+i!k5~nHVU5VCCO7Corrt{<&Kj_*CZOis8wqFcuC!mx zdESz&0k`24V4_LEHlWRmGtvwz$}&4*dLAl~2C_Xx%;u}_t!r9S>j2B_0KjV1n~G&5 zGG}$;r3Bn&`l?0Cs(G|xsKV2_jG}F+H3TU)W#kH_U|j^jGLSU&umBe_Xj@ss%>xHT zxQqzQB3LSWT8W44g9gA98WwvXTO_SWgJ?V&ZR|!*V8sJc%i!s{G9UtL+g5#|N-1Kw zL$Hki5utR`A+Z=KgIM!4u7X`AP$dG*dnh!5IR#b#1yJ1)qI%b-LUsTs9RviV=G0Yl zk5GD{0#*oI1B7PKOJyvR0d<@-e#e^y)fR(gqEb#JZ^XCGgKcBB=ilsR-72Y&v7Sx= zEmd|ywLsEC_zDb4l#ZD9`Ueq}uW*q?vLKe&kT;rWm1x67B`exEMqWtpdC6o7F| zq46T<@7%{fefs>TA0M9{563&CrU~x-{rqqL{s+gk@B8E9F*FT1Kh5ji5BDhi@_fcF zJRa^t-v~&YmiL!eKsX%x5Zu~IUhkL7<#L+t9}XTD-(i$A&*z87`@gP#c{}}=loLuI zgq$--j%&P5qeIUuYh2oy(K-C@n~a=EO_ zoYM--jGTF$FLPd3km$)dPl7C&fu(r7fdY^V0hyCT#4NHhGK(;&D!H}#V(Hj5$;R}k z>A1@K;1;dG%K6g`0aTPeZ5n1%H&q|q{3%>dY3&PU@=HRGAknR%QJrqy;;j}dKdvdy zTS9MzcS_)Gj}p9bwn2@C=|_beYG%yf*b~)>L^NBr>D!Gzs3Wf28&nh`Qi26#4Gj?} z+ADyuzP;>5$yWi5!GI$W0NKjfnYPq2sGvKn`Pn*gy|`BIoEcYCW+>W7tab>h9F-~R zVyv)eJ1g?EnL@F}#~UcDV=GqFHbD7|I@E0qw;h7pWv|i!tY%cPf#}$z1{3XT)q7w| zhO-Y9aI2ujXdI}4OsEFFSgATKs@9Xq+H7w;(arcbyuC4_+X5k=w+|c5Q~S>9=XA@s zv^Oo>SUI8=P*yyqy-1M@dts@lWrLL<&nXd;@KOBXyl znq`4CdS4t4)C_k!{7_fD96@EZwksDT#bqhGV>NyOpwNL} zJoXKk-u{Rh0`*33)v^7`)b8f{>#^g46n*BTfe0c1ju|);Hi24C;lr;kTOOEwh-M%KA7vt$7*-jyQQb%}&H zUM~Og-~NAJe)}y7`@Y@Z-Q67z=)54h;FoEI4E@jv3JSH<3UHK^*32m~aMySDAMTo_ zVGd6ppLWNG`=|Te@zC$Ob&d15jN|y_d_C?DfBlEQbZrMBF~xbFzJ7VmIp06tuPJ@} 
z{L~Hm)9Jj%2upnX_9Bw|ed~jp-mkn!oaOX3=Xjm2^ZwAhy`6nI-}l}5bVW#^Z$cOD?)&rUdbm3t?hnhFE|*J6(V^QNhGkt*U|r(*eC~I-c)UCM;9^cWu_Ph#eb-In9M=rMA~KH)kvm<_4e`l0DP zA+qbZtxIB+E2T^LYYEnWLmEDaW)r1V)C15y^W3Cq$kkm;3R{5(&W}iGX(mfCPk0 zoD*{KhzeJ3+0bThrK+Kuv}sdu6%wg)CR+@lq)IFMU}i?JRA8$1_!i-qB{7mrMA>@t zpf+v1VFjh=zjL#1!+F()D+;|)JEC5xHNiA0sFYm=&4CeZ_P*();?`Q%gc_yQXR1!t zz*ns;r?CVxB7knKaw=(-e%7}or=tQ-CM#QRosc#@Rfl;fMYSv$c zq@*}+HU|VsZ9;XM^Cm%PMY}*@naSM^z$mt-3|k|1$Wnw?VH8v$SF^a`8z^=T2t-Pu zcoCq|Tf3knBtj4_60#v9tWPrvtO%;FlNZbH()HDumaGL0Y%6pyi>=aA;A%!_v z7!4-|$qgLWgKVfkDsbBtLIq6Mw+0q%CrtYF?Mi;?ze@#Il@x1w4#E}T7kGqvauLn% zsROgYiO991arXYj` z9T1@)m-p-C`Tgy;ug{n3G#rNZ`L*9QUDq|C0{~{~u+O`tzaMtp z0V4Ruw{4$#>Gy}6IA?B#_HcJ>LKoLWA>=53`|rQT6q)n>u>bL|Km7F{{>)KM@8{pY z{rdj?emzgX{Q1wHKmPRN+xs=RVZEkrzyFO0J;5?g`@;~1?)rAUp0CsS8mG}Ydivop zPHV=s*>^jT<$9gQad+HzyXG%H|K;s+dYLW(QDR=!6hfBB6O4|?2cIFuw7j3s4<8>o z*IqA|X}SVKD{a?@xbiZsL?A+k{qAtToAdJUa5u$O1jg$HSTb{7xof-O(4nW-=l=I! zenXJM!@#dOjydPN#zlmO{r*$);qpG8Uq?(_cEG4y@t#uVak`?T8<1@D4)jcc3_<2V&u*-gLd@}sHsxJMrel}s?Tg^T44$(btZ;eVEbd} z?H0gc^)LZM6=qSiHC}Pc!O)FH+9)$sAgZRyFce*x=V;FBrW7rvRCtZz;40@~0fHNv zs?_h;uo#%x0#GN`xXTjjD9GaWJsYoS;QXmBW-C#8^Qp*Res(@r`=}=6#g48WCtip`NL(pERP$ih5BwE`lL*WRha%KkoL^G1g80{E{NWlSY z$jZn!5Gb*E08mIS0KjCo2Z{{Q-wCO1UPDG5M0Hhn>pxc1L-<4a`PS*ZC8^-NY3sFmj`+%Zy0f$9x>Gc~l4TQ*0rsVe7ZeiDFQ z-PhY*fF<`!3@9uA%3OYfP)@v*8k7nFjVIIh)|?Ns5nI9~g54ds(GVcQTQaP{Z0*8O zqWl7YqO35LHD^Rd2acFrVu}gFt|4)m0{NDjo-<`e?*ox*+NNv(A&}i!7XZnaQINm^ z0s#^dx^2Dn2o+rj=r9{wtfsp0645zyzR4-?cE{u0)8h~S_bUJ`#f!Up*gF@*2Xr350s5v3k!2ZE(=!pjy}h@)@VLJ_-XBBPyr0g0 z`~Uo3?=PoioH+5Y-+lVw^XEVR@P4`c*Z=zeIHcWS2tIuL@zFbXeZTB>9jEyE@*0Ot#I8bPgcWN4RFooc}_s;M3 z!~5&odA#>@M;-G?|!9`ew)m-BdipK@ON{jO=E zWN1RqPWJm>B613&D*Otjlt`oSL@lL;rYxoagcM_D()b)65wVI3;l+2$&O( zmn#B3+~22^n`6u8ky+X{tgUCxISV-yKw*6S^15sLl<$T3@I%jE)5F~%Va{vHFbV*$ zq?Fc}L};GYw97dIBy9Vp9h%IEh3B&z@AfHkC$!Eh zYuf#Bx8DzU`{Toh`@`YTHH{-TUFY#~etCN(k-PmNe0WNU*L7XjHK+A_J|R-@PI695 z4xXHcd5R%80O2go5qeBH2MW#L1tcwzIb{)#Byo|W=)T8FeDlPy4 zbWmUxTKL1*D?v?F1~mh!V5&9MsvlLgn?`cX)}s?_Z(!pFl-aO^AW^H0inoeM0t9AM zo&$)sJ*{O;@wQdZZKN8EHK_k@1f60|6ENMt+R#B$_3?J!Z!35~{hcnCfCvx@3zXZn zrS5?Or42cWO%=fw695Y7sOw9mKPdumwuRJwK1xJGEq!kg-q4DG-fu+$6lQQ#vE1gh zptf(Xl@kh3&>R~CloCSSHrURXZd-LHwm_a}(rGCJXr12-*w)aV{-YwhDluTBi!Cpk zSw$^E@gFkkp?WGBc&b*rGB6sYENf&SU3<_L2CLj4Fg%Wc8SLQMdkaQURE0J5LWPHU z!!C~WZU`c>DBV?$4A|rh+M73~!VZX#aVs@<1B*m=NWD5s(v%8iM9+i>2ryfTyQ+4i zus_PNm2h@xpj-Jv)qe$5iup1KrL<#ZZ1sP@v|@KOEjvH2;+i1f0KIR!CUg|M3r*8{7aWxSeOarKxBw!;z|0I$7z73I(grq_s;c?A3pe|N6yFnA?Lg<^Un|WzQ@b?{I}nJy}VzRaS9an!|>Ce z|8)Q1zU%w*`{}>@fB!2{`16lH?2fwxKJ;K%_2GmwXN9{^~791#6- zUUG~SeA~A#@26#+L*rwN({w$b&#*fjj>n%H_cuTwPB|~v6`d1FISCPfqnvrVj%{fB zp(TXFaUYij1%q!B#tbYB&H)0XoUijW0ke}38iy`~=ELLTc)mo5%e0Chb6)0E8d#Qj zS!Q~Y1j#u^1ef!OOu|Tyx*rGzV@U%T*14kAG-Kq!!-XQHlNmNu;T;&b6)_8S&lh_X1 zdTVnqg%UR(S3_o6)s@#4_UISdhF+6V*%%9Rfg5;L`B`wIG^4Mr6>Z6VaH8=c898kjZk ztK1vg?Ti{e7i+UxL6_LJfr)`wEq*PjVhdaeQo@Z5y4jT~Cz}XuETEnPTV?z7Eu*Zv zVq7#7DOQZZr5d#XQ?Mg})m1A05IY#9*AffYiqzIFwl?sn1bh_;QLJ5Cb=5jHB2v>w zksw_@itfa!u@uQ>W(08ByT*a} zwPGL*t>p_Klxl(o>WZw8Ikd%nM=AHzokzgL2(EEW2P=v%q2-`^m|HO)gCSOC03g!AQuAtO49>^t9v{pSy#GRW)8 zdyMP;utV1@b8H%i;v9PByj;fQmihF4Zkwh%w9|DQ`onHNeE#zf|NhG_iPzxhbzEJKEOLH3JMf3y@!{cd zmv-;(W15$Ih|lYrWC@;tkaOn5EYSDP2Y0wY&hZ5uEA;pt8}r8O>*1khuOkpNr}a^5*2kKQ?t)HFUNiHVE)z)T)o7D>!r za!#{HArQ%|OaRs>fEg@SV*bX5$AY8K(l9oyt=}@&QuTvuIzr6EAQD)Y8UXtas39?S zq#6!3gI(~5L8VgU66-UI9=q*HZ8RK!nyq4&D=SuXh}w<{F2vfC7qp;dG>hp~qFyzx zXx)aQ{s~HzHS0u#h<5i=7YM6Ktc;_5Qt<`>Xqf_4fQV+@uvU(>9?(VQ7Hp^`Ta_=^ 
zcF+KmfDv_uTGUZiC8i1BVAB%?4{UvGm573}Dq8wL-+=&Z=G@*j*&w0z6ZN6TJ19(nZa+pD)*%tl+`i0^`OoTVL(+PZowkUt45VF)i3e z82@51IQ4DpQ0e+@y6(ZMPL+EMc5m913(roi+)oMZbK`I+iygw z2neBgHsIx-cC-ye4+Att&oWI z=PXj(%DT`ZP-t#zE+Gh|1d!ruk(;70#nna4T)8DZZ^9X+5aSJbYrCm*^tN{8YgLt* z;2GUqm9dJM%~WAGnRwS16&oAbm%Q1m z>%*YUE(*7pV#>@2K;!^Wgg7NYbWKyLDbCZHQq01^w}<`i$3K7aO(!hA^}cH%3lt1KR!NmN1r)P=OrBaW(dCZnK-8O{Qkm;9r}O# z%YWMKc9&`Vw}1Vb=fykMbiUtpu5FtxNWl3#UY=i=$keyX>E6X zhs{HJj7xfZeY>2-ZWq>=mU%rMu1y!>n)+QY4jT!F!)}esG%w2>StbXx-w*r4{yL5U zoAG+dkwfF>aZb!R@%jB6KP`vjXAJxE>GIG2?Oz-;ZRn3q3du9uuj%bXHn zf*99%p1ZC?LJIEK?xtl)YX*kX>nV^Q9$e=L9RRxH;r{;a_?li3+^!$)kB?6eA9lko zP=o9cAaNYuFPF*dM}Cx*85$Kx)z);U5TK;)ENaKkXf7-L+q>j8yO2#8!c zd!?WRwVr&T5hSyZ(d7`@JVi(ugyXtG&JM+UDhz~i2&Vbe`MY8K^`zFQNM%z1HRQBK zNX!pY3w)~)*%XSMalMIVirQ3BH9`7Jgzt(X>BlNSEXVOihj04OTIy{K-R!czzucjrAzgT722uB ztbr8-xLyB^JE%6Hw)Lt&tLW0=WtY{cfY>6!Xmp%$ikmSk(*c93W^2Lrv>IIiwd}^o zg4%5{TOc+yG4V3fC2YJ~T|9koE|*qo%o4!zm~NR!l{qq@P^eb}oSM_N#RO4NvBu>@ zLwqE_<)*Db4IYXC@y+Dp<|=HfA)>%XNhme`Tdd9lY#k*;lgf2kqFDgN&Xr;$sJBoJ zme(~!E$9#B4uu(UfZOW)cCKt|b>qZr`Tt)Du4`XERsK~?dhPRy))}j;g_z3FL0W2Q zSymPSz3*avjGhJ+*`i_$d)=@>qJ^!EtF|@xyJN2Ta3xv61QAJCtVo+`DIfv8mrB)* zsDQQXVnJ#Jxf$MKM9QTkb9rCU6n3HvPzivBRdwAgb*-W$u()>Z$7&luR^boLW2-Fm zwoA68-~Z7e>1b{)d{nvrPAWid+kcVhKc%RKtva>=#Bx>Elp`QOwiIbL2^af1l-{Pc z;wBRafJB%Dkch=GJ7{}=hS9rzpA)Af0OUfK%MtM2lb4*<1fEEki3z#H<5@Bz15%ch zbIuw^U`EuYS$c^@gL_Hch}`i{uO7-QtjjDVr>&byovLT-8={659?<@ppSAj<@KdQIpd#+1eg zoV^bb_-%Rn`ua7+*oR&Bao6@e1|Rw+j_do|n?Sx^E@@nsc^>+<_02p@hr^w7_c5-3 zxoMhtx=!n5Ni(DsFpu*@YYuJip}pH3-;bBeWm>0r-0vITOmpf6pK(90t8)mMr|Z1m z9eoS^ZujBoqjMzRUdSP=X3&-T!4m?4W<8FU{ ze^>uOy1)XQ_3l(HK*LT_HH*kJly-0aT-I@ zA%+0H2@NC|`fg6^bQ#wek!YOfUw`=xgm%Nub%7H*azo#Cp__OVkBAi4Bzpnmby=2W z>D$2*&GXWA?c4QqJ)eOXNJ@&vJS|@Q!~KWPpMH!nwLUyOJ%0N56nr!EUE8;iWg4gJ zI9}e*ahYO_ZSec!Fwe{QHl62nnPwLNzztoGD3Y+-H6ViEyRPNLfY1bAPPRiVjm0>o zHLXjI$joU@aiJ_um~xVwK@0p!2F@`DN1hzBh+Rr2GgO;5TEl1sTd=AJDBQDRJSDe^ zYg#yx$|rGFU{M2HP)(5BL}oUtT%F3&kXf)c*+i&`&J~f^e;dz-#^_YNqyAt0OjW2w zA)7_gI-aH=)k8&tL4%k+3!|0lXVfavw zQT^a;*tasqw(p|a3JN$=*|8nJ{irp*j7rns!wQXt|r#&QND5(tt2g5(6uh*^(e z>$(ui?l>GD594}WE{h8eNqAv!biCXD@bUAo>n_*v z?aL`g_6?D6=`<@5C{W)X{`ybjLy59m>~IGAD8UW zVb|V09dk;5TYt$>P>`A5UQcc4!cX2ecgvInCAenjT85O#l`{2-^E@rnb?uwzcpa~+ zYy9;*&X)l5Kn%apAqsYE;?cOQ=S`s40Z4<062Rixya{vzY-iz-CgnI9pxe@9YuW4yY<|02Ih98ltGS zSZ)YEtj`fLja#N?w)Ir~v_ezc204GBZw#uOK3y{*t zz)XM%kO_o6Q!2dKtgNn;;Y{5CHJfD@w(XQU=i3$?b-Z#Q4bp0)SpW>4m4%{QNGYr(`E<3) zfUHbkc{!SMfz|MX-pZBA$$gg(u{l`!y$T=7|5~T`;(p+^pvA5e779zf9kF{MfFe)= z2Z{pgp_WozvE-9!?Pk%IYW2Pn7;RlsG@zbO9ZmT^y@LalVHS%d>M<~dRWVTA1=u>h zN(r$k4Bsfm+w8-7EmQOkP|*oBak%KTT$-C{gJ(3~47PPOP*v2RhI*}Wz>V*96L^*F za1{xIn)jyerxCXrk`U1*y&Ktv^+V?2fv%C>x&@1Qpq@--E;rOb4DHp`zft%i3`F)1 zP=N!=f`BeFfq3ltlzb9LJ`j2aniIGEjsd_q=e#h+B_cZ4G|o9H$p*j#%$x}UfpddjXf(`+($}WO2?3LC%7R>$);Cxq8&yG| z+xa?u{L_#B`~UI3A0D1!%%`{4_xJaCT~6HTsZ=lP0G;uzb;)3H6Buysvb^D<9O*8}7b967ol9suy)fBSu& zm*e3W+Hk#20(g8F*2MEQuJh9Nejc-Lr0M;5&7VGeXhXBC$6tQ^l>$LZ)8+j9?e&LG zAAKKkl*4|vd|T2q1&2)&F7KD)r#tfQ;qfjEp=&RvAK#{Be0zHXfVeI%FE5|}bb0#x zsff2 z=3&=MW@2>CA>c6Vcm3ggewpSu?-Hl@{ADDPWnC}>5u=mPw*9ULMoxL2^I^AhJ;x=t zjaz8RG4+8MIp^2}7uPwbTxvZslQ0QmV#x)D2|GY986Z375SbAnA*7g6^hK)`Jy+UH zf(S^?wHB~&+?P~0#szTwKvsL`AQqH^vIZ6twUR;!tFJJ+WO}he^Dy}LW z00;@TLgMwJgPC*HFyB8%c`vZ8sZdy}s-Cd)zR`(VEU^+o`X8!*MKlVa##;>C6-A8* z^@e;=qgGSR0x||`zDGxjQ%c`bv(CC=N@$o}FdotFUDnk{DrH=uq9st-bBGjc4O(?oS zNDAXh#Re)hhLy0hv$63eg1$-vLal>Wa7&R6F$gJjyGfJ2rEpX#6(zx6SHzwHfOtzM zs@PaC&LZXCA88ewo3*F}ycuAtMpJP^seB_~T~<-mLUqY%b^-t*k~J``BBu+Asvc?u zAhKh)9iAJ{9joCW^8)}7G_Hp>Iw{@d%4yle0d;O=v=!=U1r0&KQZ>2s 
zGqa~{mA|3JP8F)^VwOsimU~*}tq8Lo!jeO~T~YzW8^;LhB+c!?qHb`Y{>O9{PX^2+KuE2djMk)RHbUhWgs`G(0pH4>z0GPQ5NdfMzSO69m z!3lWhJ9a)Xpt#TvDM!xIv<(4emL`x;DS8YsMnP$UccsV?m!t_6PJqJ9%&By6D-^zD z5y2Q&B2V70aq-Rz5E7v)O^kq9a?Ux6us9+|z8hK}{D@22X7ZTU6xa9+H*IJ`H#BSH zw5;>8G#vxL8sl!?#5w)?<&U%`WB|uP9`^SB#^Vb>>NhvY=wU*5m| z@w-Rnn1*BP(d8vCb4&|^P+F$o36X)L^Bsr~NQQ1A=o%M95JnqZ8*` z)AucrW5}L-a11#O!)|}r4ZH5Q-+n)zE}?7ZX}pf(@$PQCF1tgs>-%|_!w!i^0KKDS zTDqYXggmE@pFaKmP18O+JWbP?QyS0L9GBoj z-#gCfd^)c&&EuHY-1N?qANqEl^x zYcZmU6Q_ur0R>Ye$-oJKy+hy(!bAwHneX_WCIQi&inkaIRDIbEFfa^dzf!GQ_=(N_ zfdRa#3^i>TL0wQO_@H`hcH%Xk2{n)bU|P2*2r}<+(|A}BW_=clft4GZZC36UD{b1} z*c`PtsN}$AWz3SQ=_|&W(FTom=C(L>ga#4RH#U_Co9BLm3}8cFHPK2zU-=NT5*yVl zGqdRBw#u&npcrW0=m|sw6tluxLO|iBicsa)J+4~#s_#yg94a!DTl_)U%`P! zIutyiJkcG&Q~Ew2<7yDmT82;L3iD68u$Aw-BMbp^%_JL$<@P;oL3E zDq!Xuj5{>6i)f}A@ihPdfUKCvcPvvm2}s3*5tYQv3siy_?6BLqD@LmW+o)#I@LA>SZvJAB4=X3E z)%8S#_3BXN&jfG-dF6#x#JiBXifnKE11aAD0x0)770{KMNTMen5jDHHP>Fhgwi8!* zdh-#%oK%&2K(Lx{MHL_xm)E=wP@YmzTjkiym5{9ui|xN21B;WWLefk488*LIWd^Ab zwG8CQF-u}ZbPU9pV@buczzl)dd%MiE$1M3CjL)clYtbVgEqXhPG)!IKN*IU1({UalgO&%Re1O zd^hY|+wuwo0(;Dn0fmuYzP-@0tn)Zs*J0P+{j@I?%N#n_kf3XuAe_!`m(M?b5R@1* zAT}P}-`<|Te(f6X9Gs@Pb@cxFc0Hd{Ow-#0NtTHdWfpWkH{C9;>qJYJ^8T><>(4*s ze@+q;bxuUuHYDL7&q4VkT`>$U1_DfPYlYjYdCC{X6gU8vB2y*v#&o3A*%h@{RCqNNhNV;$*1!Oot+^o!g$Sjx zF!d*m6Pq-Zy_qAz^2NBZY!*K#|pd^ zpki1Jq2^VCjoD=H7I~B5q>|oalAvhYq*M$bBG%<9`>6YB2}oQ&4~BcU>zZ<7!t6nS zH4Ilvo#^FJR^Cz1kP@g+ZZ3m>lwPO>3zs<{La9yiJ)Sb|M3Eg@syG5L8`1-FAL=)i zu(h^YW%gF)&SGok2i+h#^A&Ur{{t0Yhjrqw=myEgyKfOS6x*RyFxNnC=IZsh5(3gLECz`4NNm= z)KvgkNNqfurDDI^gT)S!WW#ctZy2=Subt-6m%rrirj^l=# z5kb>WQNeLt9OZ)xF{cBmn}%AMQB2W;K3x$wIRY#}Ply==C?`ZB2_19;K@fH;qXRFE zCvwvgiF{@t24*1gF3rmU_i3u=^a7u~A3+9-n>-5XN{V`syPd_~U^p`*F?(Ql0d0t(>#BhJO zJ3QV2coGi;DX(dbEP0uy_ivZ^y8ia-Hxk0&c86W(J3(~bdl!5NX~_@w4-VmSxw_9@ zgrlU!L+ibBSPss*tn;|;_eViw$jrP<)BAi~*X8lUVHwBY{{0Wgd_65WF8$8u1k)w8 z$FOFUEc0dRcQ{Q`x9^eiJYR$N!FNCY@UsZ7#HaW3Wgfe(%@HVsrfJhGc>&3klX#+> zc)uS!`Lx8(pFU@X*O%waJT2q5FVA;}biBWBhMpRHeLatt75#Ys@F1L~(;WJ~+jVY% z$EU}x?U3J>Wz9LCPH(U8&&T8O{{9I_Fu>KfDUZm{Q~=E?ql>w)Nn>YjTF6J5JZtO>65L1X$;_ zAKGc1&zCa`Pu_*#9k}^A9q#u;nC2`D>%6vImpI?;dZgX{aJZabuGb4f=Ny7Sj2Rpq z9uDL&OAfAEmblCdWXVYwFf`ty=OpMTI7a{^+#oniDJF*)h{!8f4838vys69r@mYL}pgCtiV#cPL+w25~a)_+D=p3t4cMA#sd|#BsUgNo2BZ{ z3+O7UX)|}4Sh783E9?a~x(_v)X0CoAl)wlCvmIi(P`MA0EOl^LKT_sbR6XA)c(V#1 zHABgK>8jfWq%M9@yA^svg-QRx_113}aAu@W?TcNFuHs~y4PzZn6{FfM0qZW5aT~lW zuN1K$c(%?Ky7i`hOSz7+QyNg;UX8F3J8HS1f)#@$)l`8YK_i%M#2XA&8#!i`a%_kYwX35Jz*f$Np+Z-W-UcI$w5mAB;D*w2 zTm-}_)@Vrx<&s4JwDw`en@UUCwCQGPRRC>KjTtq-T<1 z?}o!Kee_sx+rDkBR1w@Td{PfX`J*nRbuUKqB@vMlaZQmtQx(ov#AR#jcN1RhmIddY1d0Llc<`_9Ij~{otWApLz zryu_MQ-64H6yh91;}9EUVBlq5Go@u&=5Zk>(>$)z>{}NjU!KRV^D*UL|M}lyj&q7# z8vv;5`}@25p&xRF-TtsU24**eHtalnOS@Y z?a=o7esL+ZK2V4wP?T_cDhac`AABad6Y<&|#+xnqxrkGRX10r6p z)8T%%E^%`6{o_O9n$R{B++i4AzFqoVyWbBl-`)}Z@i3gvBP99u+e>=F`=|SmbAM=1 z+F{o>JO9hSa^@H_BjG$PoS|u(w5BG6tcwJ}CMCke-`ejWX+sCJm zrR!9rnB7fRfRX2tdkh>jA|%kR(LtJj663J4Zwa z?2)551R{(v5)l$ZF74`L0zqLECT5h3kQGXqDJn{U5D_6;Mi>ij4zVR&GgQC@+iy&- zSiZ5j;x~04jinLV+(^YYY$TjgP^wERA8Snf~rC)L<0aJ zLS_PBAg#hlR45Fn?(wGTZump7h?5~ob5l#dMD5BTh^7CY82T+0JPng-dQ>S^z^3U` z^J|x<%IoNQo3*Iv*H)0Cl#nfsI9MuPMPg7jsb#t-JXRf#W;dWl_qNy<3Y5;aq+=*S z2Biu_N^5oF|IE^DSP)8<-YqgULUh9-7G)7E*v72?2IDMkuH@P{A`oHC`zqiG5v1s7 zF=Py|^+pFoE5xF-K{dd)$V9p%C0e*QB|EZ`kmc8Dyy=*r2{12Kc)uxXPlr z9lFB?gax2ZySfJDlUuzvlLIBgH8-xJ{(7J*VAa+p6(?_}`Q}?yTiv``46|A#p&B&+ z6G%YNG`s2sZD<_SMY4T1zXr9PYk{UZJ1JRo07e~eiy(TqR6iR2-YSg?l*?2##Z4?D zK=~m{{@qaPR&1{bj!~G~0k&V4MQ0JhoBb^s?M58x7FK}-J*)Fl z4KsW15FxIsB*-y(=Q8A!lQ1G@0C7$L8W3=Vh>plHqCOAd(8 
zQV3x;4DT;vTvrfDDMHGp^OzDJ9`^GzhNd|j?~<(N@wM-Vwr!auNsddLFPCK+iG0p+ zo)_=EciFMG|!jw+uP~+HKo-frYVuf>AK9*iq8M`Z@=Du znD3vSa-QDb<-t)Cy!YXU51&)afBXFxVLGHc?*hl%>^gA-4R=IqX=S{ruw}zyINga;6-mYyB$u{C@4*KKBiXG@WOF^}4QWet0_g;2HA$!-0iQ zrx%B`=7cGuq?d0myKZ-PfAmfJ@OT{7A&xoam`>*ubJ3%3FRy*q54+ya)P^xbR4uIsY+UFder$MwS>!)_d>c#4uTKz8JobzS2c8rL*oyv#4pZ$sDg!_YM7 z(Pbb<6aoTDOerT`Ck~#-K_Ku1#0fb`P5?~6C=9@oGl3wMHd%s-W>nRgkXCNYxb)JM zsEDyW!Wns0%8uWg-46lEHI;l;;`JXkj--5*fk5bnki6f3Bh_|%C;b6Tn8uwEvi?? zsTcN*rvYU*i>56(w&nk%RPGh2S!pHtM5^fkGj%r(ztM>bGb*UPMH}_obtP}0Z$(pJ z@VYF6NCi~|oZNn?Xmr#66$vTzW%r8#ESDNUv{Z4$MP?T(h*X?aM=M4FY2VFidf>+F z7Q{?Y5rTjPqR=|lZ__M~D7KG{8!DCx!R)jZ6){w$%c6Eo4W-D&Ygo3Z7$P;}epb^)j~}p>qG|cBAUJm5(e!Ra_;$*-ryx0AN&b z73Yn;lbb!R#cvDAtG%??K98zXDjx>wA(5iS0!|PCD1!h3L9ULAwvj~<0cV+vh#)}L z*W)_`3s`dS$ebfxPt-S-RdZC)8(F=*>eOwSm3BaG2-k8+#NbsuHl?gbS+06xU+YHK zT$V!9!i^(h`4(C-su%$anwp&e&8nut#(~z=6JRKf;)RJ4Ll%d=h2XL2vul=>8Gs#% zxD2^zoC9n^*zfl~G$4SE+NMJwa_E{y5+g#}hV00@(&QVoZw)}qNdz6n3=9Y?;0XaJ zGb^j zA8)U39Mifk?a=S;AD?zdKo?i`E`-L1t`D70IW1#!h~o0HBuy!h?6giG*D<{N*41lzADi)0$Rs(x3+v zY+$@h0nZ+{8n=M$_H~

  • !KkqZ8)w#SQMsjIP3-xS_BA0l)D80w5AW$I%BC)Tg-kjJXEcGF+hYU)&rwhZA5K) zj}^m|Xs-sFYq+dZb!azL4g1%@-9(v00byB2yTnxprnPW$-i@so=5DyY&&9XHd}9t%FiF3N(nRky8z3ZjKQ^bp$Mt2uyzA?;xpd zqAutS{buh~@1}5573~zrealJ04FoiDb>omhw*(E^>4!EbYy+?Buz8tqy8w1f?d7+x zD40f}GgR?4s07=C5QRZdtQt&pN(>i@c7{)v6@98Yt!TgK8D-VMj(Bxa;f5&}*hJj?Gbn5zh_;$2ks5QzY~M|62?@yM z(usjZKpYXeo&{Eh#K8|kc1>veoMpPs*oG!FtvEv0?%S>(3h~Z@DM}MctO30z^j1nDfK^!y0qWOHOH= z=dWMBG+mchd4D|}_Pb?C-Ox48o!+nK^LW^Ik#`SIcfWt@mN9}zT;rPNak(^odw0C! zjG^rxj*rj7vv+=)=5=0k&d$5w{IaB+k^uO&apW?KZ-^Y--5uV~XW-m4?RdF(!U(i3 zyv$2V$&qh-$bez!SO9}u#?v$}d0kgt$+=}&+z3tR=5=m|@cjICJnVrpB0KLLATw~z zi83`|Xm@RBrezG?XAtMy!3RJJZAeSxn1^maU<%YWtwZYjfDAtL1k5>eOawwC2<(VS zm?23{4#j~W3L}GL#Eb}3G((Y3y}+br(vp76xh3L_G_bUoVu>r^B<8o7MikW}A^>g! z3&ww->L@+_3VuLAplPZ4!2h6Y|Ne2`8=(^Rt-3=Hv`~_oIL%inxx^Ls8LMQOaA)kUbhi*a#W z48(RDNWq6mzN!GU8Z9NlEu}s^D**$Ui?>&~&!Na4 z;@jJ!s%TWXEXc!fl>y$GLtp}5)sbc>=s=`Gs8U|=2F9@c%Ca~{_yCg1At6L8RZXD?IDtUUnOQ)9NzGx&Oq2pJQ8H%`DK$t+k7X8tBq=2pCV`aW zx~|Ll4AAU$$N!hCKkKq($!!3^epQm1O&JSSprcN<{9hYv8|`Ds=ei9AIo!l39}a1OVW zobz#r0>TBHs-irW80UC$&H*7Iaba>m0Lx<;;tU{PfBE%(`5NN*hd=%Cx6i*{ zu5XvibsCsUnV;5iK958E`5&XuTymP{@&EaM|9|`s|KtDgFaP?lzkmMaG=KQ{r;nsE zj2;~-C=jYZAdnaWprd#?Jv}e$=dar^xXbO4)--d@Srkq$A6{}QIWN-`$zxVQR7W_* z^XR7wU6|#5d0ej7oXfJNaY=FTPcP@|>+6S4AAX2G^7Q`Y zNmTF4>I0qT@qXE!KMp@kAO8BUzY=N|Esr$}fsn>|a46Ty^87?$^jpqS6xDrr{&;=6 zz2=MpM7pl4g1{LcE71j?&Zo=u8pywUH^wkHg|uytZ8*vpsQ{j@9 z;>O?tDd{*z-h={9F{)}w1srTk8is%<%IwL-IS_~krm_)&Q}&?f0aSnmNIU=_n!ahV zJFOwq?#r1q!Fq$fo>Eo{cfJXUZ{O{YfOZr`+a1IHJ}|nY zF0RF}p_7n3(+k?^wUkB+hZ)5PeHv_p$))dLEw81Z zL`dGnP*@YAg7a>qX~w0j8N)cpmRa2}#iGJRM4mNK%yqDU?h2EMnzd!p|(y{wNjP>!Yz>zY7z+!Lr}r>zPx>Y{p~M* zB~MS&2X}IrGa#UdE>QbD2yd$b3KT8dwjm*?Q&9>yjuQ~1oYN{q#RUKakp|&8DGRGg zNn2WzNFfi15JNbh{IYJB*URtAlJk~xVHP4Ha6SxS2*W&h=PSQmHg=v;O3Sit%L-Cb z-m-8m=;Gj_Kb@X)*23GmtRf{&!42b=Z@-^EKI0b}{T5E6bHo4XAOFve-0!cSUtfO< z9?$dBFhn4TBZ11cB~TCrMm(KH^myJ*_jSErFOoSSr*A8>dZgR!Hjne^bSl7WS_xoX zN`90v#t$FfUAN16Plfa2@mQAoFwd9EWgJEy!n==8*X7J5Kc7yw>+Ne6B0_NI)7euP z;;5>5Om$Nc(V-`VI1J8tE@ioI(}|}DEJ~iXWx1l@LkNNtK0S@9Pp8-U{&=k0Dhx!P z3a^i(g1`jIr_-7IP&=(%3h=DwTvT#;lP}Q7SNOt=^kh2(xywQUI%1X~?Up4toXw^v86Xqw|2>w6|bM)p{*RVr-3RDpU6j zM)VM&O~r_bs`jMQeW1|C(y@t|ls(I>UT*~KEhw6TS8Md%!AHXg?Y+HB0)krm@$WAL zfOUfavqgsy9RQLc1Y-p{$nF|e_hGfU)IkxDKv${_AcD>1Fl%k`9MkjF61A{+y%N_` zzxK8PN8r`Dzb;B9KZw$BXa8`s7Y$w5+^~;(pN&GR0;_G$u_a7`75Ih5u2!&KA41St zEi`m&liG5fU#s9e?p^XjQ?w!ryZSByI0V$DePDbTIOFb zUeiPcsIP*rMz9X6_Jap(PDWh|Q7cylP29*hENsPvt;&XN*bdLtwotLraut=dqF;9K z_BCuPq85Y3eu&%O+h5JfLkH=)YnC>}=I6DL9q#@O0VuUwJlZgGr|$Y$QBZ2#+1kJj zs8x8}R@(&Y5FBVTBW$E)b7kt_dRNe{xprrv*26KEuyc^mnt>h|FKU?2DRs)a!ga&b z+Os|pEl17xZL|}G%~LS!i8lGpgm>|*&mvMbRnV5Ko>j}wKv3(l*PB4Y(H-gDw{2v5 z5J2zDBt@4rCPq?6V{l1|(){$~ zhX8=fnwBLIk%}*@!w}+W$cY`fBD`&Bj1d&J%#O$hA_Qe{-qo%qLik+oX`0&p%WD#`%BERK-H&NK25=;^~Wy_}wZdSL}A;JxEiSXc@R zgGd&o;D}WSUGPp36^M{Z%szTh96T~}z6+rvuOci8-aGR6c%-~;!V*T0Bq` zZFwxdr2x@s2%_raIK9vOeyoG%joYCO;r)l}<+g5{ zf&!^RSy%O*GH`pO;5@WMpxq0RaU(A}VSYMX9FEDn@okx}Hwy?lM9%17`CY zp6N#x0i?C*gJwG0S#1%@4$r!}teAhpPNk2UvQ1FfsoG8VC0y|zeIW^`u?Q7TMoQokp`SMz4)qEmAK0N|0J1!OA} zP^v9GY;7BKFg6l)JWbYzseS_OEp^HYh`z!T>S=VihvaeRgW5yeBlb#u-Qm!>F$!vT z9uON0SCiVxnl`a$F3yga`a0CgTYX(@IroVvn5~AL0Y2FLof6x{#T3N;s+ttmii5Q8 z_5W!>L+LuH-6~Sh%3#Cpy;&sQoP+)bgUtH{_#J`S4ISy!T4icXcz4)TWwQY;=wOj* z7HrMasyfc8nx+H0Lb1DUZA(qY(e9(59T;nlW{26t-CQiw2GrQ^rn$dk?G6gEOEo~x zY%n5y2c`?6j}`ze7Z za1?zAB#bOVqRzSK2MJJ0i7|Q~STMxsz2wX)us&8P#e3(R7bz(L5fY*nMs&{AdLhtS zgotvjjE%KQNTo`(O+b}dRDr0eR#oM~!j%(KsVI+yi!!rehWhg8PH1f7=Z@=Hu zw((ZBbpg~E0+SX2R0KDi&!-q7pvyTeZ?~Me@Rm=maL4R zTDF|#hhgv^-`y{_ZOOIbQyBahLMcjs<2*SC 
zr}G%6u-@|&0t4r?{eEAH$mmcVh!jskq{xExq7o{)mdg&%ZcFcav1;ska_doSWEY@C zV6YY80Mjt7aa7}k+PUras6C&WU8tV_QAc;i2LUX9V-I#Hpk?f+Qu9tbmC_V}dF4%^ zn66XAiZVMk=y95+C)M;<`-zpTuN~jbBSmd@|LfXly8zntYgf7ltJI8DSIcc#(Hdwr zGht?3_pw1AiS=)-tFAUZ-Jgp_&Q-l`OJ|@+Z2N$~Dz=z~sW_;r`?~de5_}g1u(>pe z=s)RZ*sMm??%LY8ocJ^O~X4xu3c1dy6DXLWNMi&`;Y--Whm3LU03 zBVqJrm*<`LHPWsFu?kx%C))YEofm}fZWh;-q?Q(-Y6*;c*Kh5}w@I(Pxz$Y8JvV8O zS@i|AHE-XtX7=5@t2Q>yc)gm%S#R9ffob#CnnO_Y<*~0(4KOud1Ob`Fj*+w))3tTC zF(-#(tLBVB=a6Ba6(ZW566{fDyP>mFGYYM8G+oWoT_R#gowyOD& z2FrVD2-URI13`Bfb*t}&<^-X6uNIy_?aQov{kxyh*#GZX)4Zu>&eateFPvI?E6 zZmj`OLxxoV6l|Ud0HQ#377hIMgg;w(?Ca3PS^7G6%-p{t&`yyav0p2pr=||=(QRj{ z!fKh3(wV%zll^S>LtNFBHT$(UwZo_gNFWI0RImt&U>46I;y7v2Wz7LeRopN{3dks`U2oOm3O5L84Ri33ppDdf<52Z|1f93i4IgCne0 z4MBml5*w<@%!IY-n{wtVFsR@>GK)nkL|8x}#DL`H`T6C;$Ls4QOv7+GLvZSl&=FGE zP$6&Y!dd3?G>kr{l9rq{QN>|$;GitU2e+;3^81Deid<4oYY`PBElDIZ4hX2*eamT6 z;dNa@a6m3^*~RembdE7H!}2IMURA+69|8$*DoH7)E$21y#>&jf>As+d2tV!%NCD<` z+n5DNeF(!el#-Kd$S}{r0X^Q9r|0{d!tHJS_|toIF%M)-Oo( z?)~%aw$0O2*yqUODO9rL>BBkP<2(;J=f8r!-7YFwMc>}8r}N~fcyzDdtkPyoHgJfj6Co7%8mF^84rCCG+|DoO4;$9H;T^+eK9f;CfvZ zcwN?YO3%+PX-#noYe|_CD{l!Nm&eTl7G9}G4D?CvX?7BFwGT?pM{5tgPzJ^Z;tF$8Ob1=V2UVF;}~eT@t#fZHiF z!!gy=gF+prLQ9CS`B<_MRmTfk@k=oKfEG%#pH%HAjZNsuVWwr#;BMy%8#2VEwOX_a zj0>^xb-J;~)NBfZs#?*?-ZrYOycLrIBj>P>E0!FW>Ko)Y|)66T& ztEzg4^CGq;^VRkpqNU#xJw`6HkYK~3225LMwRRby8mq(>TeI&psTQ#GsU4tpYgWxG z8Y5yT01lDBb_EFg?sptjWseNT)zSO*a0L)$v4J)bXeEFZL8Q3^t!sv%&(`UZj0IOv zAhd26?Eo7{YJ1ZJ-gv&Fk^t=BvoCAkblZ5t?_Iv@7kA8YpoGAVO82Qu%mB6a*ZS^C z!#<|ND74MJdmg<;w%Rif-)n+fKj01@M}w5uLGyRGqqh+5xn)%M+$70fV8^;AOt_vf zYY>zAp@rs+>>h?KPQ6P}Pkm+9dpAHh;3qUq-971kS>*sMk4{Cuzuv>jde{v+4}J9x zj{<8tft^*=?>zX!?)HklO+M;INd14QU9g=J2?nN6C@=-*M?%&uZ(CszVjac-9Hx{} zk%)YZfFy{{J4cW*d*^{XpbD$1q>{%Nz&Qn#Qj;N73#VKoA>bT&@n2}aK61BSZSQ9*1bIv6fRTYKPI6gf+BRC(z`Sc9l2OrZtZ`;P4wrwk0DkYt! 
z)ATey*7bHN4rx3^@7?vbe);Vo%)ui#u2f=5TUs{(Q3d8A%=JbpFPhfkh$x5@38O2S z^OlEU@WFE~OJ-6E-iHtYb<64XZCM^$VG(A|xfBKx^u!y3Bi39}LGl0qDn1}U$!ikN zs($q2>HVkjjzW<#O!Qdq zq73S$VH{$V5kcKgKmOtRdXK}G%hy-u3d*UKfwiFipNyAVPMKmPD>Nsnbs%33yd2tZCza>^+uR(8>os1zaZ zhdDrj<(|^A1wS}H5K7@3hj4l~Ip?4d7cM0`bklj5&m$)&TYB7Xc}sB|=F^Ez@VG)+ zC>QoZtSqb^QJs<_RaLH?lbfaQR;C`X-JiCO+<3^ZXxiTpidlmSIvS~R(5ec64y|OY z9sce#cB7zRY5mtZW0SxJMyP>tZ|U<_hDk%K*B0-}xrAH8ItS<~u`Iiq($SJaq*tF5d@#fYr?| z+uH7m^k6T$_BK0$#zN{Y09E4}x-}TocD!{T{R*(#}L+2LKTTC=PkDs1+`08|@VcR4zYg9_GdBllY@yZqsf1X>ie zGi_v5XpFb(_`%lma9p~hqb+)=bU{Th3D-qGx73ve-!7L(I_hdhq$G~1= zSBZ#PgMb$E#_obNBd(~1SZz`IAUg21^A8AnA&h!z+Dvr!LTs3{gD3l&w(w(n8Z-~7 z{j?)9Y{gpiVEXNRH9ooRN@qITqSxP7#0~bu{h;-eg1A4rNv>3BGd5pET33RuSg?;6 zLGe(Hx_PlR=fjS+0-&`aXu^R8fBRo{rPvn_ZD2duiD;(@dtAQ_vd6wn$agmhEJslL zWeZ!i9)Y$6wZhN_vuZUd8;~BBy55=S$&@_}RsjwpsrfFQ(yYmZ z#2LaQ>Pu!IWD#^Oj6>S;x+U*jPKtmb1|oMlpHyJWB}P|DA#$g20334K@>aOW3VEJq zN5mzAilRzU%>__FB`0y-5hwzA@9M2NLL@*{5n-uGGzdi$RFHsK5NZYukVA4pEaY4q zCQ!Owu0(LXJvis*ckk#4$6-7@omoUg$zkEL+}6jkq(_3xA-ZL`3lvp;yj_>avaU-h z1yGT6UAD9pAgDfb$r%90^C%2}5MvYpADnk#9*6icggIjCP+eoYd0o=7EbEe(RT+e3 zUACMHkSYp@sFWooDdNe6nIuEGWkgWX{75)DPl`eVkuWZ|ZTjH1ZTa=T{B3%9dVc?O z`*w@N>EloD(^j6|zx>;O`)`%tFQ5PN`!Ap0zkK9@71R&$al4P>oO6nUcYrPo-uplN z;pa!XzFpp!a}j;q@AG_GmX*N8aUh__vJGS0vVQvM(+^)iu4z@#`(sII+tzgYFvr<1 zZ%fXlq*B(RD(i9qq&S9)yPlp-A^3IM3a|&-*0gNP%gdRZ`}1G_`gq*eHLFO7At!$Q zdd*{*#_^gZ7ya>%17qUMpPna00vJEMd@QAW`TXUvG9c!X0Wq!V?jGy1IC4<*bUHcj zh-i~7FXj1ZjbmZJhL|5V7Tt)&3dKM7v2DHgkxEwU0I(p(Ya%QgtU9>qXFr)*T{2MY3QFToVfmCRW!2 z8f(>x-0EJozr#Fh5vpAzY-I4A=_Tz=KMXQ-ov@$pKE&hEY$v9p_x-E5rx@7zsgm7r zhMMQ2-@OU;Gt*YEy+;+S8=PSj#OjI~jcTf*0me?A^&`>TzxKK2Ni~#Zb2J#*7({7I z>G%8E2(g2&*-namM!OePSIZ9Oz9ijQB13%jxOZIFIvKQ4)yicD9BOY@JE-=M)}utb z54h8taQx4n_twALPDj(U9mIDvtQ{VYDWL4F5$s!OUI_DgTDsWck2oSw@N)X1tS@M>+koTTBmmG$9LL%iwSQH_bqRx?X zDoCmrNEoG*y7nb!P(YBJ!4r`~P;f-J=loRyOzNFh5hbiG8QK;Lw(W&gV%Ooe%RoIq#5=nb+GRZKay@E#+-Zsbm1N zKuo`7b?C@}s20iS)RPk}fTRVsT!aD1g)xjn)Xk$Pb3$@}1RjPVJ|63KDLJbT;BR*q z95^>F%j5BI6rNsqUZ>L(6pAqY(;xnM-R}ST?JpCD6Fq(YdOOXtGKT2Ixh$DnAdjT} z>G}CQK6&t4*#NLG=aPqESeJF22Xv%a{fPJ+f1;m%|NP~bzx@IN2ztLiu9wR+Ohepq zPQlSU4=&>N;8gPRczi<)Lx?$WDJnA0V_tJf#Si}X-+sT}(&h4CPR>DMMW+a`uA4)g z&x1NZf^A#Nng%~Js~`N!yV*w)l>6N$CsduzqpZxA2LcKp34nlr zpa`gfv+}jZK4JIsy6;H{y~b5N*a-VE-tm;#yH=*VD}%13OaYr}?k?<6=XbEN=S6jG ziN=R?wce{L;XcByZFYV|!IXH@wEZy_g6w(6f|h>Om^h0i9kcfiiVW?U4V<)6cHh%o zy9qqLyBV?XrrlS{Jblxn06WWpM!_}Exqk#Yduh~Qr}ot9Q`pkhx7TzRgSIWlPHF_R zuUN;3T_x{^9b0R~9RxO8dYnMR7LM-3XHT%|0;y{I zNPXf>aQCI$ccEG$!!}^dQC;5Zg*2=rmUM(@gC_lMGDj1Cc$YDEj!3ixp6cw{6}M)W z37ZYXcBJZgY;Xy^9jl610ST0`cCM>QJ4OPxX<$2!V2IedlbSE3yD>I@1^b{4ko5oU zu%PV}fLhO1sDMvZ6p?zvO)JL$ZQI&=TH90eMgWYr+@V90hE9Wb8E8A#;s~Pc_KhJl zBjlUls)?5|U;4eoGysoKuVbM0xhsk?;vRaa$4TrWh1Mv39b_%4B-(1p86#!t@~|m- zlwGM8s;x1|F|V~_bzA%vzwfcigHwpuVyB1*%~z_$E*f{;(l6T2p=KvYQwP$OY?GOm ziD$>pbf}4WKXvA&>=mARSj6yTS2Xr7qa3w?9t$MRYukX~`i*M{ZftX_|$Tu;PGRIHeLM0e}z#b79d^3NsU;bEpE|kq=&wR2T?F zScM5tl?$^66xIUj$eI0ULZQOdV-o=>g@|%tM?^jlVp)-8P-3qYhISu`{nDm+qY|3HuXv#9e8wfyINzN}kT= z`FZ%i{&)ZO{#f4LzDYqs{i%8knAw8d-fByN?ub+9lZ9Y)i)(7DE z>3JL{L|Rkg%yAl5*?#=+@zcAHpWnWwoVRsT*8AJ_>FH^h;^m^lIL6=~d;`_{wPTO_y-udWN$&G{apoOom_ibIbB>{+(k{-*Ti0V=56~DBbe{P|Zrl71cBR2SA zWJFc+mce_-SZUfFsi7Y0NwC|?{;5`_@{q*V&a;3e;(mwW4w$ETp7qV&IniB-8HU+8 zD=@QBB^5iStYxR{W?%hY$3fOQ>F8lv+F5&bUm4t0v(2&hSHK;Jm;vmLD(qUWf4al3 zN?|m6-l(9q1K-cFA%cb`EFq_vCFpTNv(3<$ECUtYN~jqlsEvxCrM$GfG}F@62G(Tj zJ~7>m)V{o6`?&8@1H=dHi#;-58K!+|gwWb)mr zx(n9lqn(m%zqCWKL+`HI+IqAo;o&d4`_jjYhiJI*-`YQ7%op@+X=n5RQk&p6@31LE 
zVDt~6P1PF4NCX1_b=g8s%GZS@EYws@0Iv?>zybwkw9B07`(qC?WVTos%Ek{Nxz7!lkePgel6JQ!3-A(>xauVe#I( z)8HHu&=^PWgO0>p0LVwj%kMBfX)#TiXah6L6uU`O%bl_ zHkaJ?2Z0N#9Jo(Kt%bitm4Hy4Lqy=5Q(BnUv@JO;PcQG{NFjP6|I43$yZrtJ%>VMA z{S>$qxYt(^{6tqZIIQoKExe%Lgp7+`m22 zMxxH44?aW`(VR014)f$(D7n17-Pg5XzLAf^Ohj5VtIJ6eCE8xg`7|L%r^EJ0kGJ1W z=lT3{0`h5DH*z|{w)ij(it2-Ve*gU8?bFw9Z>ui#I$ci5d5`Gg;HCB;6^V2B_|yBB zzdZl``ZeWreSOP6r}*&$IYq)SgpVIT{-=NW&!uR|j719Z?RJ?@ag3u1zP(*h-8hfn z*att&r(8B9Kyac(g-{6{6H?leJWZtT_upUFQ~K$TKZye8vWX($>HX^tK#ngWT)<{Pg@=DYA&pdCA3CKHtxOLLC1@*>t5^{ zx?WK*_6`~lI!v!3PE(<~F)}3#2lNJ@EusPqGwnJRbZ5WJx~U~H%BOKtUbZClOxzRF9^f*&&p{t6U&{~b` zTZJ`4u3{1+H396JYb8Ef^R#B&0P0U_CD#u9br%LeM4<*GYYwV5^aNmhs<<`z`Hq zFW{kV2wE|{3a|EJ*tGAunfBxTN~@0C8NfsCgB@n*o7k7#fFpncA!#MypqE=l)xCt9 zno~kHy(Xp=irPLxM*@br8#~x!7D3fINoW;dyE~1ooZpUJED&$UvOnZ-a+)rxf{5+l z^(pQ`Yceeey_UC$^Zv}1bx~JMrBzPRW9%EWmdC8m6lnqtmCS?V)B1Y!6)`}sEoOSq zm=^9d*l5SH^P30NVhe^^x#jLr?mp8#1_Y?>2hHQF=dJDnb>`p3USB9?Eb4z{*SSrP z)Rukh+fgFmU^G>)O2w?yf0AUtF5D{wPrK%v@)HFFLiA7bC zfMAG0S?-r>$_uCU%kN*-Es5|rp61iJl%l0fXIIuV7MzB0y+0U49p09-E#>?)FK^|} zn*fgEG@smfo+x-E3K(19$QK>IwWHX7m9{hP^f}9e0uZY)gmmmK$ueSw-miz7cw*L5UK0Qw_|LyHqj0n>Z@Aqx+!w(-nUaxON%pyR5L?tEfDY%e5-!J!5a6SZL?_CVw z9-NoFY&ii5DWpen=$x-mp6a{-DW&x7^V_?RfQtD|&d-BrNOz9G-S5k`iT6&jXaXM* z5F{f@d907&8c(y_9;>j-Pa~@^^Uptj(#b=@k;ZA5L%6J^C;>RlrKFNd9^Ss(#&P)Y zaoExpy%*HHN_-w3kA)nOfTu9dBd92$BZs1448uI6jcd1U$wJY4)azv}%z0UWIHgq9 zZT0}lu1FxQP0wos`+l6OUf=KO9|-ko6M?##sM)=+M~n8z9qdWQ{i*Fs-_d3b7h;Qk z)Zcd!&Y;BsG(n3t8V?4Bz)aa{Lqi6J>eJ)^(U8R)RNdn_u*1ey8&z%Md(TQUQa#~Y zD}p@S-l~C=eYn-*HtTy>5aTj>I?8U9Yb2yqUf3sSR<@0&&U7bMY{7f%$BbUnlc?Hn zfnnu@sCBKyR^P6^MSARaA3314eu8chY$MSwK6dZE73$IsO?q|{0JQRT)wUo}HzT-j zL7R=?QtfXvq}T+iwE$_$SNj-sF|aQhVW`p#ZQGyS89*?sw(onxP!0Uq0&Cq+ONMXE zMFXi-O7;{ZY`{v}B|or8GXR<#LMJQRVWS2k714=U6i%DJgaF=1;2N`oL&T~X%GztB zLrS$%Q^9C6_T40#M1oaX#WwvT%l=)-x)#z^6NL&UcGTVcqy~4}c|BlU*f;iYwp%2w zor-;5`q{AQG$}S1*W|ud{FfeVHkAMd-}^uI*|!(j0@TgvZ2!T_0X(i|hy2wQFzt!wL8%!A`iZn^aFwUp*JWhij2PCQ$R>_g`RJOcg znO|na__yCaU$2*v^AKo04=&)kJZ^7yM2#^3f&gX_PDO;#xiAD&RduW&MYk>Gbp@p9 z^!(w|Pp79BP!V0PZ?DVk)e*Z$LvX2-BI10Q&l4aD=iBuGh~p^|(DmCAoL2`xe%sP= zTVH?w8lNT+l9cYc{PRElW6JCQvi^H2Bt=tROD6Ry$e=DQ8?m56KlqRS!{u?i-|k<( zeM^+U>utH;9}lm-q+D`Q5$Bwb{^j{uK>%0Fy-WFv+r2FIX>#x5*ogV8; z@&Ttf3X==&=bwMvwq?r+SR`wavOUsDt2;RbSW_B&07i5ieLxf!2>`Ywhv!)9IQihc zbLo*%;;>@$&JVN#XU=8Wu9vH-g6OiQ?e?IQ0*mD03eOJ00EkH5sTLJft=&ZIw3^BT zup3hd22GA(rh1I`)82V_=vo!JYS3;h3?BTQ3h%ZOdN4~(CG0s$_OydSFjK1lJHSNi zw+UuLx?kNCnS#cqp!5v zj;41-rM6J-%dNHmT`NM1pM1A0U^CqzntjEZarLkiT8B*NaPz>;wIB$XJfLZCyL<-P z?E-{^wTUtkcs z;jX>~xIYTb+}f7yTXh652kxtxH|#d}IHMMQ0~6eR9XjpTP$O3Aj1a*?3&|m~eWlHU zTBR71zWyfMk49%$3^I0y=_rpC4DE~8K8r|}DbZ*ou|-j``#UE*bp}3!=>vRJGOnsXMY`S*z&V5EbktKXw%Jos3}@an-(^wvFxm z1{%#pG5psT6O3zWD#y~}>_iAQ{t-L!Ld%)3rLjZR9%Chi_mhL&P1~PgtAt1Cvy%cQ z_X=%0`VUQr4~wSMak*tj_rdHzRc)ZzAZpK>tTMJo+_j&V&rLYVG2{VHN zp{SsM^Q0n3RP_V_iWE>45mY52VQyVP>|%)us-+0k`t=AgI%V0GhYwyk z->wTClOh%3!yUe`gXrwugki;oZkKOKfj#cpVL|>j4li+1ga?& z$?PIM)=e{P>r#Y2eEJ|+ub035{+tO=TB!hj9l`%tdIM44lqrpU;gEnPe1-yUNa!i!T^;xq^E z)@6+@fHKFTS;yy50in3}=a;X~=ll8)6(K1pb51g`4QR!(LVS)gV@YMhl5oM zbY+PaR;ph!RjEyHo40z1|? 
znKo;x-Aq^mMtcCU{p&lEpr>8_2bPF?|Xuc9BP}_#cAK_ zj`I`&z}54HN}cf9WurA;GDfZ0b=_st7Pqf=d+olkZIj@Dt_&3IsO0bWZ-*?lbGzpR zYHjABX`>FZ5BRKoa}Q7&lhozoJJQYs9Q(s|_dvUrJFaR6t!P*uu=|3BL)+gJTAre| zw#uzXT_Z1TzU?X6Gu2E@^}zB$0V;xmc351`g!V6hReLeoZwIXwlka-)?~X>hE74b~ zd!+~MR9CH+b3*N?4xk1_L;%StBKZi21$`D#X6F!*axQ=XjsTQ1OVH_b8m9?_>PigL zAR;-ls0?F>G3H#hEh8cdq=kd`%$!qmkWdg%3oDme__P93H8l+g$RZ#GQ30fuHxcIV zkbG@HYKK!*KvYCiD&A2X2S7|kf{()(-@be;gBH&ic^oH^ zb-mv~B@8Z1P8iafM1+7md2|Hi3UgXY7{;{a`}L91hR(l#|KX>f{&0GJe%$Zt@_7CF z<=f}qIVI=GQHX(57(jd^Abi}{X_(v;0C0Yqe*5M3?U4r`mbIw5yySG-a?0-H9Khwv z+dNOtAI|g3w8=>j*4q||-fp)TCLe~|Z5fAQK2Jy(qX(gk+slWS=eKuX-@a|P6+8%V zNrhF%gKaNBYW5d*sDCveMUk>`1u$7NfC5AS~b84BAlkXvG@7Q`=ZI%zo~mdDn-L$o{YX ziT21AdW1%b9CYYGK^@RZ(^q;_<}Ej_71TUH0b7^Dxc1-ZKF5|3)N{hX9&J%k8!uwJ zFSgrQyE};r+MvS@zm7>K0G*qt&$4x`V7wEkW_bbN&~AYCNnxL2AFZl(JY2^j3N7-p za}xc*EyUdMq9L>0TDPC5CAXWC z`;86IHLTIZsN&&9s!JnPRi!r@HDFbH(;5eDu&;8n)+iGYWv`LZUZ@CEL)vQ3u^uL1 znRPA4PWvL67{ zs^pox_oMouWMNfZ6OBISk`l)moby~%L4EM5I*uU>Ls~O92dt7cYxX`ADv0EqBDqqC zi;{1>vzUt%t_Ft?9f2})ZMjepE`l)vVaOTX;VOGm&OT6v_dNp6j5RE4r4?X z6%bT#j^;TKP1`yykM;4mzkT~Q#0fo?lpR4zWt;*~xLxnM@N&<&z=uyio`uw!XZ7TDO$}e)#dzm(SlQIv2<3Jjdw2e*PLy?)2__d%a3=!w(;))iE}6p|Rm36A@8-+rw~>a?PvY5!aDJMB01?M=0M?RW z`V{lZKm7E=$B)l{{_QV-Ak5!>e|;LC$Dohf=Dm{)qmMDTM=pw*3zy7c5EaPG=rDLM zs+?+(WI^XdRI&~+!nZot*NMhlxr_lk|0QLJ6R zO`BmW;;`e*S}PB+mD&f38ttG4Q925&8hZzAW_K+iyT()wywL%A8XU3`nb5P(tHx`I zBE9uOFLAOX7K^LwESi$;t0XGW?v^4bIOq--foQSWJ-(;*!xlj|4$x+@H!<3Y48xfFD{R>hKL9w$ zIrLiE0KVGzo!+p>7uW*Z`dL`fuw@I(p>dT}ltCNVtjo~P0(Pl79-;bu+>t<|B5)^W zXy3OP^=)V)mU{Q7&&}8-L#1}K01SW|V1#zZ zqV;^S{Wkp9PF+(%un%d^H?-q^fY8;CI1p6*Dk0s;U;sw7H9 zox13TlMlEoDX-h$0vAa+Yo@3eqA$6mlo3ntAp#8Z2!zawZ! z76oAStxDvU%p*pQkIf$>vDVj^5yZkOG%!bFd$I$F{iB*8HX`t zE-KSJZn@;b?_b`(zP;wm@7})@Vg+(TK*g8y^ZkDN_P4k6C?9|R5YYV}|KY#5K)?LU zU(fSNpHQ+Y>U=&EyVLWuZe_VGfGWx!ybFlR-qXi-?_zWm=rle#pfD1m#xVjD68V7R zIC~l#QkHys+*J#C5G?a~%q9Ei3u`F?pwe>LS(5-dp|U=fjhsvjs@s}w*SjY-P7w&# zWg8rla{vN?YU#iHNE_($`x7Gk{@drL`RO`e)3S_lLc!@gjZZ_$>wUR%eyD0LrCwE8 zZyP)By!X)y$o=+ER#f-)_5SYp2}l)WTN91r?J7vR-ZoKTE)F#%0p;-&xzM(3dFAC= zAgL$shvzAHPla77{M2dt$0Ibd4d)Y|~-Y!S0;yz|#7uR=C;F;f}WuS`n5uSg{x&Lf!c9ji{+{ zG!WQqO@*_yMnU6ub~9k8vH@siF54D+N+l-3F zw0ShOA9L$W3WjRyx}h;v&_obg#~6zeHV>&)Uu)vrtE%q6zq=*}kW=ZwrXB1kwza1w z^km}SJiqVc#H0-OuhxTF^8(R4uPWaixq$+b)l6!d&*GuDhc#9Dku$KTmff<0M?|aW z19+HR+cG`)4Rsl7{Y5-fcxji~>&iiYLSLeevh6&ZibnuFgu$EM2WegQ;3#X&7c(SH z`$_`|601uE5|%iCA7*4&m+kR-aW1;xa>^hWhKL9ymk{FeSclUTJb?m+fG&wbDaHHX zUD(!*GsiK880xG`W^$+^s_Lw=HKIxu1wl|P44?%R3#$l%uoPhd70}w*q^Y*LD&#>0 zRi)nkMim83j|~8&$XW`Z=8`pYaQO1!`DuPRz54*p-!IopqPnCtUvH1#$pv~I&hyK7 zx!&?sL?o?gdW!DM+k;;|zFY46zyHVobh|AMWqO|e>7W1U#~*%ryIsC~{gT%;Z7FB= z6hm;Ra(lfZxM7?zQs%;IVJ^9_3n7G9SXh+&^zq}Tk3akfP}axovD`10x4bL}LPS*A zRF9-CU~mx;&SnfDa$e_mI%TlCF=p z{HK5Zhxb3f``5qz>YUevDe==Y0b2U)nz#FUmr_zH@1CD7*CL{s)0VTUHZL5BS%^>yERPk50O4}I z#$g217~(JujvOkDbGY3f*Vik64nthmRTa3vWTPwV=K!2`**O?|;XG#oYmif@0Na^>s!FnUB2d6VqUD zKmk2RdG{&-pebs@nYEgJ>u#ydD0H(31f*2|;(!|O*NBf+|E^h(J#o!4{JO?#)<&(i z8$$oE+DuK|*DP^c0O&Ti&V&#xMDpEz)%vu`KK90>aPXPC8mE>PXD}MkR8`NsqHde) z3urS?0Iex0J^86dSJ5IcWQN;frgg^cb)5-AXg#%|;XK<*Y>R`~QQ6*k)-D~Qh}7Yt zp*jTsX}Co#BfRx&Z2d-#7hdXD)WoSH=!R=)Ux>EEWDm3@SF>Ddkx-IOIU5qIH~?EB zfdLk^I&eE3*d@1d7HW|?MDW_b%c_8_HwYeN#Lf*iINu&e*hgol%5EMQ98rS{4Mf)2 zStAx3OOuHGGv*uAN7jJ@0Gb=prcyDxp-IE`3SeNfeG>Yg4l%nh7_#4oY~xe|HDEC1 z@LijVVZ~lhWp}XJGXFiUGJO5rs0__@iH2GoUD1TT4zKZ~Ek0Ryrj~tfBC3Y#_XaiQ zIrc>0{XZRywox^iX?x$Y=+u_CLMl}N6zC;s62;3+DQY8T-A5;S~ZAG?aox{n8F~8l{#3AxHpQNaO3X2;@BqBoR zh?xm%e^MHzA(dPKa~yomaZ6iD8-m6dQOOY@0h5-L2-Ue(xrGRnrTQBptSq1mB1nQL 
[git binary patch payload (base85-encoded image data) omitted]
zp-#&`M%Xvc3~cs79qZLiatC6ki=bv_u|hB%T?dG8jHpNRP8k?mM<=lD!qDJgL!b>$ zG%>Z85N-qGqRmeKAek=`)$uq&%f{>5hZ9Jl4PZB7*00l_r)7-w<<)&-@B}&Myk51^ z&}*-PdVX@SsMv+zL{Qz&#L;Gc9`QavdbzPXDCx$CA;Rcee-h3(@#6rpGn)>^CfcZ- zgZLWv9L$-WKY=L(oES``M$$KY9 z0YJAfsF-Mo4uHtm3{*`^qfbC8Ff&IA-W|_J^!$80OR6z0waSq*JLjqdcC^s*xx2v2 z>Z_Kd<(ZC9O@lM$xW<|*SS{&DM{$m0T(nA61IAJ+nh}Ahm86Quo=d4UR~LL_X9nug zu`}YhEQn|XghV|dn7|kvFOk5kmSR9Fp%py@dG*JU4Rl%R?sH06Kpj(X^xQdDY3&>g zwP-6~WP(t$AUK& zR3tNA_&DzAnU!$yt;4Lk=zU^P(khBlQrW-VT>v#Wo{88}$|xpUOFlloeQGVls%nlL zkyEtjgZGqc{rUa<%dbDPcUtv$B)0@(6e*4r8R8n;;%YACneO*H0uYmd8E8Jlpn?Yz z0MW2`T70faOc9L9sbDQ7R=FS($+HT3w=KS?pn_ZI`7CA{WBB^dx`cI!TP^Z@rqBET z$hH0-|LyZyOt*vWfe-2GQw^=dlQ7i+2BBc`f=$9?N)tXC9(*5nB zr8*@uRqt?M2O~8IpP??NMt_YA3{yIl_OKoklb}JX1g3j>Z1HkkZqMVPz1&sHjc*pJ zHOOvn8?HH`w#J<>eA;0@phwlFt$DE#&;oh*GR*-&z^q^X8rLgFmQ^=3ZS+kSk3dw1 zeKXWTp6unu<&cyy&dEvQu)L=~ zV4!FygV1R`#|Ana+ufT^y?lHAA9fPXW?r6Z11buwL+ofl-qWKH=H(OX{ac4rcbVv9 z+w=q`bei_QyEX^}I(rd}UQ%sahCKG7bi>&8I&)^dlAo#J=pEI|BzB?DQOsq$e-K(7 zdX3vKZY=cVGy8{6)ck@U2lAg%IE4QNQd!4mHXk#$9ALBc(M;qVBeKaGpI~n8JG_kHMD290mH}9+ zV*vv~H_$v5xKHdwRB%4BT|sY`u3Kl6Ug2Y6wlkV=o{baFji_o{u;vyH^1mzWQ>Tph zKEl{SoPNwJ&k7S1x9Mn0=}=SawRqI9h#Fu8$O;uO1MCWg2#AfCeO%YuvTPwPfJBUl zC@Q5?^oZWMz-^0{B|7IUnHmri6R+Efj8auGZd*|&cxs+1NeiW^R!N(YKDLG|AV8SALq*9UynsZ#LnrN*m zRoMd=0&yxzVubqJk=89%toQw6y)4^y`}cqUpUgzym*C!ix-82YV{mt_hoafCdFw@_ z@t^6)DKjxKsHl`$OFmLcN6E(yRzQpx0j(6NIfE7FSu!BPx`yE5_3xVx(QgYPMEx-;fe^E_SuK)Ot zf4p61ZRzv`cEI5%-MYK`TOCpqM7aKxyH-7T=wI0poi{+!;j-Z1T`Rl79z9} zJjB=cHoG;w{h{qgT-9DfsPm?3_w4)SH*7Zu=Xb!m$!`Hu1VA7g^Rsamp;wvhEv0P4 zm&OCe1-tRCZU|anaeNHBu^6*?HT<&}Xj`NI1PESxZ4b)<0KIE@qYc}8o0r)uj@lUj z9U;y$qwy6Vju-Y|C(P9E_S$K#dPr@od;=KgO_vkz_sPNtBX)#r2-E|=Q;>~b{?VIp zHO{EtRh{>&=V}kQcyhGeVd|?r5f+@`!jA3-nCZddrh(ndLICvp9|K}xPiDg5YGA9@ z*0Zt($iT6q9TL4p<)<6cK&m#O>>)y$mom^Mf7TqD9{_%TSF1nQox~R=9AUaq0Q20o zeI94E`$=P*9m7q)a?Zjg3HRd7%rExe^_;L1!`ZnK1E}{)zmq3BJAw7()zKrNV_6%X z(fCX+fjzvaA8_eeY49bJ6o;$QS;;>4A2rF`>X$Rn^=rq=XK6W`KzCd|g^_bOw6N(= zYV&P`DhBNmmT0O{3=gqq)nt5#W&k3O$f}ylp$N5BVnkv>6frfUeMhFi{CqsO+Zvo- z)@5C{>-D;<3$u5`rIcDK6RC;;)LL>*L;!@_y0+zRZY94{C5galRN>Qg={=zc%Sx9@Si{KUL0p;YkE{qgN9Bc$U?3kF@Js-!itn3&Va;hGTh?oEo_UE(7 z9ub*$rs!86#Ho1C>3ERa2r=xb?8wFWlygvBg3qy50|iZ~q^va;3)H$#AwnfZW`QaO zvfKT62jqplBXUa!!Amh8cw6K3vaQ>KYD~^h2>_WLLaFtBe`=}3rd7;D!OBt03{a&S zszlF-25Rh`sS?q$dQT34ml&62$ytHn8p7jI_RoyB*bGc-w7EJ|MAa%zg^;g|L^}btNZ)y7p-Q(mxjQWjT}8b zp3b9Itx16pP?60A_v^p?^!?lC*RLN2wuHdm<$R>QIP&1sM_QN5^KmfR8lp10EpT+d z{`zGJONd^Q95t6ceSCcWpZ~A__dkCB;yb=_7%wVX1OFq)$f7#x~P zVMHP#P&RQS9xNa*^p*p58W$KYaM#@c6L}31WgK_xzF@nAQ~-#O9XmBu5x~KhykuDn zqjBbqLl3-?8dOVFi{s!pAaK<@bbj|vn^o@W+HfkU;U`a)XyjjE_cXhX9spoO$?(+= zZ!uZzS2xK_p2vNSmnB|P-W$8t8#MF?j&&2KfGzj{GsS2a7#k>Jq=TQD_ry7;C8e37 z-EXiCCa;lFI=|lm$7tASy~)_g*g$Jog2NzUyHz~kDIgFMjJ^>ar}R{^)>*9I)~-vpoiBw6I$)ApZb(w2WX`OwS0S^(YP~oUIP=*B{ir&RVIwTIh; z=e?@Z{Z~B z=DQ|TJEiyaL{25wSN|l*x~~KMv~&Ut#@z)N2)VUKJ2egfwH90h_zmuCKGxt=y(i^~ z!^?VsmgnEvo0^PG!K8NW949?Ku=SavH($}c6VvWs^_@guB<-#qmMXcL7^bR^tdFb@ zvE9&!NToaR&$B4YHF~sPd*DCwxy*MAd-ya}BklDHVJaz?OEGiWZ2nie` zGZPb$V<)xLnhiu%%|HYRfrP9va*TjrsyXEb2kH_^sA&xu7HrmSU0_?{k+WzG zOIf1NCFc?!k0iAQA4Ij9dUnyv;Yd_-0YMNER1}daEgWA6%(NCb41qoo445f;|LNWD z4g-5a^p23IiV%X<0t8Y`vH&TKWB^14FjPY%$0!+z$uml=&iT0d%NCiUXBR?X@7GIA zMgREydyMPb&u?M~CL-BfELy(({>^G-re)oJ`In!4Sib%Koy@N97Z+mKV$KzcrL+Us zvcz(v<4D8`rPf?D=l%QVaqQRIH3TjRkE8tc$Jg82ugmqq>+3sY`_2h^?rZUBctJZ3G7+bj_1>baQ$-eM77f5w{=+vT3=nbe}0yf ze*5Fk|M5Tn=l}d)zdt??Lkvqq#v;PRG5YA)d#@t<<1+^KA%u2U+WpYmp4@b%vJXgbG8R)GcgqC-nT8v8ukhsy_F`nZ_EVK3?@L^l zxa{fKOkV2{kJ#^!!iZt^WjgyK!6Xyrt4|MogbK$iCu?%%cJ*=CnGgM2p2Fp#JJM4SJEEK3Ou 
zr!DcawK#>>l*;yZ8aptUSHnq;y)I4nljbZZsOh56`PFd+q=6F~jH8=4fEqD2;M3Sw zJ0%Pz<2ivzBYVf1zu=C6xjNJvt2*Qz5TJL=J;#m+Hflol<3zK3nswOsg78{P8hs>TPNZ9E`FJun)tB7!F8(^H{qf(CrIj zO{Sk?{UlE|7Dd|JsO2;(U{Os1*)SU&)r8oLRFNHtX)WT-xs5}Jj=l4)t(*6rkbLmo z1tMP7&66{;n#(;`0MS-00nCTsy<>9DQOQ+uQPmbkWOhC<0s#>dAyNZgIc3w@Kwd+~ z`*VLh?qK!#cplG$s8Xsa1Q)AQDVj?z>9Id|?@~>*9O<%b%lqnxh}Z=@9(#zMh(h!T z7-C$vjTp@|EUp}N*`f|A7a%NiI_rK^w_0tX_; z>=?vK&Qe8+c@M~dM7309PmrB=Q3?UpB4UtB%|#-+gG%tOiqu+jskLe?l1fEl0}(_t zG7?2aWHoKqxt?hc=viHXL`?cH_Ut{QD0X)P*drl3@2DYaPg>0}`9O{s%@)t>xfWY| z2n#d1pMQR%;3O+BD;OiFB09rJEO;EBhN)r&_$<4u+xpAD{cU~Q9P$0nZ_n?aVO>Sk zYO$wTm6D2rv8Vb>qQx$k7b#|y#7_s=D+ zNXWr^?{-s<(M1o4`N+Ti@}7?T@$mpd_C9bhFs%~E-`=mwb$#CV{5XW+`gTE~qVnaJ zFWV9r2oz#;$8-PueEjqG-~Pw{=ih(->*Mo&05JmPoY6V&{Ia+u26kRbQBcRgM$uF7 z96S*lks$&|WoMo=fI2Yj8Wo2-J17Jj%3;(qeQ{)8ROD%ch8^nPRx@aAAw;ADiqUg< zV!=~kokpX*fB;pkstB9+vVF3Cj#i!cxu||*ALY< z?1BLxu~|*|h+zpXrjn%TUl?Y5AgU1vo9=E;scYJ`Qot zLk2T5a32DWoSgY*Vjvg*8b_Cf^Gg%Q{V;1l==WrYG3-|H|L0G)E4`-sAa~%jivzvc zBz9gow>CgS9NF?_g8+V^BcR)PQAI%F^Wtqw8fz%< z0qBGaoq?OA^W3GAS^9Agrxe=!paDQnu77bJPW(J`T2C>tO;wu^h?*V+Q7)@TM8C3QlA=UN%JTL(^JWj_dXx>asTVsi&Uus%NDoWZN1)p z`uoq@<-&^J|NNFx-riOpA_gP2vOi0$fC@l*NU4Vln#=L{yqi|Z$xOHFg5;m~{n+n> zux^WA7Vms1iBY$o-j;2VoN`XcH7xY;J&B}s%0CTp!)Io^}qdZ z{~BnCaecqMKb~pX%DO~EeLnB7I}@$7u3J1xHU#e-fK@GV;r;XRkKh0J`1}NwWeqU~ zzeFuk_MI8G>*AaP&^0Wp3uwj;0P*A7=bwN6@$dim$3K7n{rmUFu~%s)SF7`GVSl-7 zF$P9okF1u6ENrU}3kr=L3&(CGx8-))9) z-h%=)AtWRMla?Th-C7LZ1x6N|P4&|G2Lx)B^hJsxJNDi!1|m{)WL&*4TkxWNPupfa zZ)s9e05GnRomg}X_hk+GSdC_Oi)c+H1s8&YT=$-|11~0iAdi*_Jb9pLl;(iwq+1Y1 zc9(UC0K?)@PgXHA(B2Z>?1#{G-<=6V@CAG~^!5T;MsHAnVc3SMZ}y}AW(LGwWuS#d z!Co!G%mq5ZJDkCv0k4PY1Hhh$WvT*7Hf|kKcmE7^>R&E1&J1=baErts)5IEXM z?SH|zJ-2TPTF35lRbWWQiREzs2m?SQKplyAHpLT8%RYjTI!)(*m9sW=r%p}}=S9wE zmZ42+4kz-0Ziah67NKXCPXY|g8i1RRq_GLjhQIg+eJ3VHnk$0C!I<=V%ic#s>>;@h zW3kt}XgC&!Jk0APCpoGea}9uj^OQX&fJ6f>hbM)LS`y&cZ;C;B8qK&1|N%Xie3AABkB~!l-nuk zRf@4QktaJjZke9m;mhwDJE_ePoN^0&VA$lkRc4=2dwYSuZ7w#HaMQo738ey7QBli^ zkHa2G@5%0mJ&I*RCFU5yx-FN@hv3*F;j*qF_+?!}@W-B{C=&uAJ5DJvxmx)+j=090 zvbBa&;NEZ1u|q@!C4f>&PNY-~w7CRGU`5kWiCImxb?{UGP;%(lceR7IZCiF$K2kZ9 zfY=eL1EiF*nves?)p=aOt0^LS$02$myno+O6c~v3dW*RfG`wA}m$zGUcb7FtD##2b z$D?R9M^wy8s#*;Vjy*MLRxnYlpAYY|4-2C_zCYJ3F6;7qKA0Jm_WiLefshIL_I_KI z$mCWprlHGAsA!DD;r@cVl|h1eEa&{MYpFSbltwZt+$w;&)@(3x7*wG?fuK;{YFg0 zq^UeUANlzNhkoJwd3=6+hQ*iV{j<8q|uDOPK_l9Bry#Mjf zZ|`5eyc2zX{~)EDa;??K93v>?XF8rodhD9>moI;-rF{SEqdxPWfBf;k{nx((P*~%( zU3_o~h7K9w^ZrBwAih6#Q@CB%BcNicpSN^B7$M9HO6q+mg{wkaba>b=jZ+b zpx15LVpwC0&J$|0eu%(`p1`99REIM3-LT^}Tr=qT8rU67YkBeAQnp63GpI0l7o6G} zVJL;s0H)m&gwuwe|HZ+C^ncL^77Z5`KuxMBb8zI*uqCe22t>_GZqVtaSy*dO4KOq_ z63nFjSIjo--DD*uENJke87I}63wReCg(`KDQ;k1p?;TYZ&M4;SJp~>062O@fgH4|| zXwr%X0}LG2C)X3PI#n~Gcysmus5;q}0e#N6@zDPY({fESG;MLm=?&?01BMN~PLpBN zL^QD2gOH7JXlti!qbC`IXWn+trGrsIp~cy-?~?X8wjPgNFxtY}U~~`&MFG)xQncX9;#8mZJ4UB?ZbZrx6#9?VW$1^;T&afqU=1~OfLh^(GJH3Pe*CKWU_@LT77QXS$-ML`1d#fG{@cY4b89=6|*966i5~J3ZGf- zMUM&v2-yX+>0~lBg!4avIC0umVT#h3hDdkcm zAEo4yaxO)NQ^`bpx!!JXH)7||;#(+!C6&jX3~&np(9)4ARa`=P9x(<-ptUyd+e8s* z+twrPYW3~97LhDkTU1oU43Id45JFVVrRHT_49t=9-YdZMx~y?S$K=AYIMbTafn>+7 zQVk6~GhY`~DOpM`xl~8^<$bN9Dv+xHBOpjMRY{kUQ&p>CDh9_h)mmC}Xq9TF24p6H zSc@T241Bq)(FgDR^?kcuZfjha**hQCC9nfx?;H^mV+_Fu2MAJYNjV?c(0m{OxLi<0 z?)L|I4i}H$^z#Xh(tbC@QiOi@ArpWSZXPnmg}-V6976#Auj83+1^$Fe0=OBmG|q)jDP;|S8)FF<+|t0tVJ_I zab9DD=z+YEY6$ed{p+s|WQ}2sB)KMiJRkS#bt5oIvP)i8ce$=ws}wZ?w*>SiwX7Gv z@4KRahjP#Caq(W&*2NP-NyWPeVxV@ptk*S~3J{eefBXFW_|2~8D1i5&xa0&hm3)Q&FMsf|y? 
zJmVNs3AD-}!_TwfHV@D6RI)H26An7h6p@<_8Ab@2fg3*hBq|2<82=nYLI5jrsQK1_ z^;NG9MSG@$yej)KxS{lMD$Aj@`_v~-@WYn?hIb5LprRQocozVzN(MLwL#Jm8FdEmq zX!uu(5JnJb>Rp>;0*)nVmD+otqYo8cU>pqlgwSpw45%~~a4>T)vknJmIM8t))j6*I zZ<|Z=Vm4m>+Qma(2VR!a< zJmUZ`N&rGA~~m0i=}dVUzQjFnH-r25rz=hvkRUKi8(GY`ViKI z9qsABd{{Lxb&d$AGng&2RTT+L0TdM#l+4sehejeo&VylKCO|V)1w=yTZQUG^_n7N) zsrmkK-ycsjbKV0U-ivNhs}gctm&@B52uAPF@lo>rxz~~nD0-hD?Fk&`x7&4pY6uQV z^RsHnTB{Z%a*hS8dgWSuEu~5=xt3b07?2Cj0~>jWmt|SDxPU%wxB6bLbfZM(j0fdfY`Dwi8mz$*Fs zUmx$ccT)WN?N1Oz!pHZ|;dY7sDCM#-g`5i<&GYkYgZ1>|}Z z?>!?tkI&zK|L1@F_ScTC%}`^l%GJ0v-;K<)iCxq61o2`0TPt)^ zLh5m*6BWMr;zLiqLXhFvc1(Z&S*$MUwG$9pymGMqlr-)Gx8A zvBEgIo%UsC9pynrxbY>b^NKJB*VXsn2YSsw5xLiU#WO&Lt>_!#h%f2J_} z#`j;f;Wgy=GXBQZj)^p6K~ro7hxdX%CQRxQ2d8Bmtj83rZv4j?YgtnWG$P(JU#v%v z30qU$Go(GpH0+&s&D>` zD>R2beI_s%Ug+t3bkQ69*vqmEp*)e&)R zX4~WQI7}UiLvrcNerr3{5vz_#b-@@B0yG0`kr`7P0)whnR8zH-tte)(y~4BFp&;xL zgAXgm=$!Y?$7MmlgMBF(sFB;CD36Z^m=RK_*@qBS1AFg15usx+(`w8P%)sN~9b#*E zN}wtww|Oz6QR6bP9)biWCQ_GeVdues9f6q|G9sAfY=G?7Xk?sS&dwCQ_s;uVGCN;# zF10F~3yyrOS()hZ*i%YYtJLh*H7<*p9Vs(lypUK{!DS(>Sw+|pA&5Xg#6ndG*#Xo& zYpsZi&Up?*!~nQ$muZjd+60N2nHDr=29=7M z;~K>5*mK&G=NMcog@Md0YFo5YrPRtyE5}mBOc&?vYQUCLeSRLb7AD)`Qi`aEA$spx zKeEdj*KHwGLS%Hz+!7hdc`yK?W*Ec5wF0(GkD@{N>9}zusDZ$G_;`dE|Lxy?S(nZG7?+5~QjMM8uCaoA{&5Gv=eN&~kGl_4t7<8x;6N%@ zDF;ZtY@QjG6>m5G^8WMN8jhMh;~JewL8zv(AJ1hCfR^?mspg_ZN_5^Yu~tDq7yKW; z|MU5=TQM|Pwgmx1Riv(nYw+*y+ZH^6ZCe1sk8fZ9{Ns=R`0XFR{r1N*mv+z8FEPfr zT`$YBF}Ygg^XuoIAAj!0lMvpv@a4At{N;TKfjuLFi8d9)jA)H@QvyL0?EUn*sy}th zRQDqs2d6CsG*DCMZNJ-hd*U_XNY9>*^57?IRF68XQA@WuwM(~fuRrN_tVcN7xX$1J zAk0Rfqo+`_I5^tJcSY8LQ-48c61025uUc=q$HTx6%|3!&J(;T13>>3hay@h;F&i{I zG2YXIF>U1dVjpe%6y}IvejmD#93d$k`dZ=41!_~88AG$J6OL@AiE3ZBcA`u;E$IYH z!)SGjHv!G)QEBbASTj31GL?Rq>{zH~5r%D^2DIZSKr+&`f71ds zGd{X>bXl;yDZNIQ_j zfor(03<(5a*lyMWywEr>u0Hqhn(He&@B|L%HDz}o+t*|T><{`+91r$a zbvQppK1TrOO25e5N!*PX|JaRF+WOeKJYm0x+3%$exI9oXqP3O-0AM3 z?CXU9BQu~w7Mr7j0cj=@Isw<%faZJAyxh|{;gLJ?qL)r|JI1R$Esf^6?`UbPf5&9K z^;>(;;DP&h`kr<&e>}Z?xL5C+J7osc9uC(1i}TEPMLxvQI#xEIQSG|H1w`xq3aWyL ziCGoM#j@Bl={?&nkbyIR04qRIO*wx)J}XMBf!b9^V98PrsYzq>4!KIIAd+e=RjY`P z22c$|?Ev!5Eo%rq#&va$q>6WhXep&Bp*1N|AAX2rK(*SkM(;f$v!`0Bsv-drN(KUl z4n@fk0Fe?oR%P&BO|)NgouU8`Y{Ps~(U6-I1?%$8g z8t)(XC59NBspM20VX0C@!Jy`f2;d=!hNHqB+$NUAt#WNVYJjF=08B zJt#^Qy{+*QqqP1FS_#m5tY!%8Lm=Q7f{(#-STC1lSwf6HtVrIJeDKV!l!6HRGZW+W z%Y_I_6;K1L&N;`?ObM{$oR3|re17iav6S-o{%|goa(w&#b+0)kLDBWH{a^muf4{x| z?Rt3+i~sud>({^j{O3P@|MRy$vdWQ;kB?992mwni+Y%6HasL1KzyI$!i}Ok%dOuV& z1h177n3`SIZ3+C>KYu3_ty!vEF5z+CANS8vkK=hS$Keqva%5hZH}BSe`Q_u|?hvlG zC6}6x!vz=PvOjiHAn$^AKBAb3mgmR5KjnIhQj?G2?WecL^SSwzJf{87s%!+TK6)}T zsbTdsm&fz*=bwN4@%vx<^8t+OC0uUa1@HahU3BcL!pFz`+t;t(Kfe{J9{G9=fB)(I zUw--J{kA#p(5#AqsY4?q1Z5xwK#``|6enP?A8cedxpwNdfox7cv{PyJ>UY}t+d*#G zP~bhXh1m0(;U!pkasj7VvBAS3j*N~5H#&vZ8K|Lzhrj)rPtq$QO*1*INK=@E6HbAh z;HAF?hvn}7oA|-Hrmvb2gX56Pp#anw9)M=5Po`>o^%bw#`ICvDs7J9zDmDN>>pTlh z!NN41&0N_qPow?705?5XO@~SP5hRGXoj4nAxEL4vA)FvCMFIcr+# zF}eY9V4~AjDna2j6|!SG1XVy7xmt~Hgt4)6&jzUIcD4iJ9=V#5*p^}fJ%q+aDU4)p zqK+?LDce_QGDwP31Yk%A_Ff`gS|5OL47oi=%|~2R(^7Xys`~bPUy>2-0Bo5H zEbdG-^JFF&;R_an*<+6x3gCe-e|r^&rdVH=bkj2De1@wHJtTYA2*31vUh z1rP~%d<+9LPqcaxeJ4&HYwUDN;R38D$Zt=_QwGjKhKnx%01drCRj+ELf;eV_Y>*8P z^<6=L9hsMPS+@{kh%BP^*xjLR6kXP|-PRC7N-4(Zeyg>%T!*;C5aP0?5Sa*Kh_A2r z?RHCPK?hR}fl8@IJ*27>L9iGoBz8Bg2GCB1P$aCinmGp!N?H|=Vhrxc9F~<0tmf)W zHDXxU$;1URW%X*VI%=4^JLu=fXRQhVQtEQMZ|glG?vGC^bxDy{DmgPng9^@oY~WH$ zu9d-Sb*&zdw{;ECf(S7m$5Hp=$l26I9e*GoY}-a5B2Y>T0EQUezP*6kU*13Ox8TC1Cy%94U(G*fkNfGh-(T1%}Wf{3m_Op$5%7Ln1- ziRjo%2oVrP)YQNrM0$;swn!mGUYBKw%aWEYrWn(b)-_N>M50K69em>#Ghc54;BgJi 
z6pB7S4^eRiOc-Je3nAzaDa>dDrRII#!@}SH{^O5-{L|n6{{5&@O1Zz@Uw-}7Fz(O& z^Zk3Sm#}A!%g0^8}y0*k=A1`WQSS+}xyZOwiCfps+ zVY;Z%1vr|6{qXMowGR&Bj9?CpH!|o46HiBXw#;Sm*DkXJ(_XykE0rc_e3D*cwsOR;LwAl*s+G6 z7Gc1GPL(V=X1)wS_J5k@3s-N1GNDQo9XL7q?s8+1Z&w!fJA*7{X$a={MV_= zhTg^~&j1si0kR1)2Y@;);*3_G;m&jN(9ajn{taV)lFBe?tKWgzSqI|+tOpTzf|+hs zeU8V8LWZ=R!gk`CQ-A>Mg5nikMyibmYMbMKPwy=pNn@vkPM|ue@U{S{Zy#wh zIjd~9q3CXBU%@nRJhUA^2=iR~sh5Z3UR7iFcL1AH0VBWg6ks@;ex^$FmK(GK4aV77 z;AWtv<{$#4ng%Y5FxmJ;S6qRCT#MI6~O-n$B{do{$6_u(Y z(hh_$wGo3_btR_S+?c25XKNvjKQ5k2q7gR z;w5TzLdRPI>mGc(vALDQ2^9dmVu%FpF-8ccsI`C=uU7Z{`Q!b^$H&6~48VoK?DO&4 z-+ue6xM~&$|Ly&^zkmPX;GZ9lW3O6`7?+Sl{g);E^bg+*VLxPjdl9g||Mop5t~pB; zM!IeB`FO^_X^XvowzYDO}=pK;cLlbJ#zKs-qU!sI}Chqwm8osAN=C5dU!G~ z-KKkM`Z<`hXAg$%*>q7pCu#G zZM)Tc02tlL)_Uwa6VnDCA0G%|SyImX^C521yrd$PiBd`&7|BZ!sRH23wy3ICK?YLd zM8sf@3}9|4A|dg8Z6d-%2B5BH$T6gVOze*CP_w!#qA&+U0uTdWj4`3AB7hVHbwzh~ z0^q<11g>cb;40OasO+Vd%7KyL^Zi+m3}*M&?O*@psw%%Af1?|w1hzy0;&{r!jJeSdxc zc%*RKc-z)*Z?AiKEW{x&GL}PRFS$IwfB#KXz$p^PEi6k~m-Tjk34xd#5mV%lqKE@n zi16|L9X?~kkSVYCE!`r50osor2NGhWV?SzEQ{PLWz#)K^x}}$mV$jMKxtAhls%nT7 z6B{}rCJqaFLLe7*rLnQUsf~Wd_#phd?jLkS#~y==x#Wxw2elJGt$Jz;lMbXXG0-$u z?f|3A#5E{zQLx=U&1|xPT249vCPjs_H7yII8|JGS7;1Dly-lut=(yRB^9N@`PNuIr z@8>m+Et^HnhgQGD{D!p~rn(QpghtrHtPOrmc4l7CP2a==Jt8!MmeiTe>zJV1sF^W> zX8pGTGltO_*leKKJ{f1<&u*TUv5Ugso(2#{A6;w@X~gO{m4%BJ zCwQFZY%*M3Oc44m)LA`laDG#O9N=Pb`%sQxJDvm62d~MdnG-^1rp_gBo$^7B!haKE zy!?7HWOKwXUrb-+GP-k?p)IQ+yr<>%FW45wj*89(0`w5-RhoVDdQQWngmEmCGok7d zP@7F2eLYcXUk8AWU)JNCDmW7f5C@E%K%;wdZ7xojGAHe9$y~N`phr_rV)Q&RtApg> z05&v?;|YG*eR)BsII!Gs2rp4fXii5zO%HvBWxcIdIH%1A{2QE`xnnEaIDp#)ArI$o z+}tQ`>wRyzanzeS96`7!f{CA}+_|4MYjA+|DgIL~r?Uc$L+UL;Yvh|ZHG~eJW=FB3 zS_5#!v6vM{XOnzD;JB=%6pp+uD

    `3=9DGm)re*V`KnOdpY*!+8?ow)-~(CvzY$Kvy>h69^njb@Z@=)q^eHUUI3W6g8>2nmLOiqeIO{4a_F; z_dop|nU^IY5}|MFa(h`*if*{9(ani)yKgDR<0yu}Na(z6(b1(kVMuSQyBtS#PzNh} zEh^yNvT&j(7;p$N#kd6aSj|i65Z;q}{V-tFZcG2r*_-ye^sdrgUBU~pJ(+kM-XB?V>z1T~SuDZISgKmPb$ zrJCvUbHA^*wVpBOIQN!kNuDT`mg`- zpa08$`QvZTVj0|w9Kl@_0fERJ(FvFd1N(gqKfi4M@XO1uKfkWa2Iy6)J3Su)j z$^nPAd2)b&k@14V&F-4!beu1S_%wFi;>@DHE!)2Nt*3_b5zy(LZF}b#8NK3zq2q>% zcN|aX&zeXF!A8HXiOgpXzW2D;pt~keaR6*>MgXT_z;J}BaE{xIia5AiWAaWiau9wt zm3vq11HYaC%s?~0t%4q1#nVU)Gv28GhAVZe`*1e%>u<9{J1k$4Gx>{9nTemgF za9GA$TGn)bePQN$*-!QXK(| z)J=-00Fr#}3dn(>s_f;+STm$#Q~StBt?Eu~sjGY5AJ zAw&iyLN|0qwG=s+GD={hj;ES?X?9wCc}2ZOl~s7fdzm>3EU1dIbnQVh3m1+Q}mT^a`>*TYND(L-kK@9wu=G z&?%eIoT2;M(Sf~5&}`PkA265#=wo{^>Uf&PX{yGk4*)XsHQ^v3dY_tujpwd~6A4{g zH%-9o9Qd#ZglI4=z-3@B(0p#_gJCN>uQ|InzEX397CP@qQ=@K4hw^~)T(c4H>YT^q zCp%Uf2;*ev0AOqyn9uayvChi$;k8*);>3gl!%jx5e|65JUmi7^gVZ4{;`PLiw#U!s zaLk7e`JJ@p^ng0g+9Yf4*ZsoVU5K1X!H!q@B#iweUT|F8xveow9xe|aD-%x&=DgAPNA6)S32XjHSp(VsiYS)$F>TAk}WWZRxGdBF?${F<@UySTHytLWf31+ao zO)t)X=D3*X44K2=$+B{&*x)V69S4=Q#LUDMJnxz;OvN#f3P%{q$|S-Q0{KJD`-ZYzu&kh`FaF zny8x7!U=p?m%6RcBjH+eJ@&FL3lacWJ){vd$6mNxBF{IYuvUN*ANrOWC|R) z*)?}ZhIXC^5o08kT6ZzEb!8E)r3Qjye-eTmrPk~O<;X`aCD&SO%~E!08^`M*MvBa) zf*YZNh;y`kFMF;o1`Zs(ot;UHOANK9C=CMr?-2&B~u6?Y%g2nux<&Ntf(R!$04<|4vNj3I@9fr!AFF%ql+SH=iVVA%KZ z4f8w>X(sTC_8t0an9GazedU1Q3kI0UDpQeVWNv zPywdm?UX0rrN5n08l?zkwFDSAU=V>9XxvT+#ku2k*uksm8QR>zfr#3(&MN!VzwBsk zGB*QPHN)rUpMWDs>a*WH!Cr}ay?r3)Sk^DC>)~B&xo^EUUvY8AOlKtRY93D`G(Nl} zm*Mufgk7+h4=jY}K5A*5dFMUt&b@8txD ztbs;h@}jei!)D{K%>9mG*8B47hpGXwS*AW4b0SCGL$M>tPzo5(aexNB3G@qZTaQrZ-4&NU;gpe zf4ArSAO7S2{@1^L&!qr!&N))34*8Kwc`sQ3P=2e7w64oj;M;an$ti~W%ch8l0>{7{ z+HPC|G<2y3uCn zLcB3;ff7I>kLc7k!QqHz;DBYrVhp7?BN#)=oVw}^M-2Vf6r*=tKGZ4>g*dd^B%e+X z8UY92@J0Wh9M*6_;RJ1`@;~iDw;@BZp@YxE?T4Nm=HV722mi}@HCY$uqo z`7Qti(2+0GwfV(JorvNLV2-^4qc7cN!F3iU$i{j66SfL`gQ7x!R)lua7XuSaI_=EO z>EBK}dqu!LnSyhW2GkhC*=%>G&U)y4ni}wy|2W?>47Gbd>vKGFauyA*ooLOW*WNcc zJ3p$>oj=o(w!w5aL^|hr_zBRe6;4WJ@-HWw)#vCMUvBq%gH;YK_UiyXjt^-<(sqip zsdu=5x-ohu;{(upJ+{u^7>AS1Yu8+49nUy(b8VXFA;LuVe(3=~51|fOKjjXGA2EsZ zQ|8Y_+@&316gWBY+XTe(nU{d|`ND?%XHpxE+9~t#r`DJ|A}^9M3M|5Kv(6&DeO2|Z z=AKRL*$_yV_l|5ZhmjP5v-=GNVHfM(hbuO`P=GB!PJYRO^Ff6JOz+LlL#P~Pp5i&d zZPGf^)`>E3oZf(HuV&XLeay~Wy%$~`CVHD*?-cnFgCEj0wI_~Zfqi!SOtte}h)~rX zK+UUou6`8FV$ZD4>__!nT^$_}h&aZugt(-*q?lriaak63Af{zmLX2sN-a>1lAq02h z0EBL4M7XZY?e!KyT$bf_zqz_pMK%^g_YhfX4UzJmQClG;PQ3W4fwAOl5Nau9FDZr) z85~L}fQYC|O6o2q?yh3QBw3IU6u@D-Z{^4wP?XAnA_RsQ2o|)k6w2aSat22xUP25h z{rK&%e|};JTiBkTkLSL>y}p$_XLvrJhgAz(cz=I?eBkYVJC3}q3vMoEYD$FQjO4U1 z(t_^HkPmS4aHEnbF$01c)T-1@fOTJQ+soU_lEQXhZ?|o^t%&666eBq!!tM6r;6;km z8jt}1%+1`?5jcj}G!CLttbiUu1+A)ztlPSRyQ&bQo908@kpfAsR^6*(sHqso6c;55 zgy;w+j*O-b=(T{AN(7QCA`pUTcC83XWRLeJxSL5Sg%Hyc(iYY9s7GK1hbrn0rY-;o z>>%I>jMdCkk=z;F5ZpKbI!V<)_J3#jBqXj2umed}F6Oz-g2 zy*@OXbxqnzd7a7~U~1E;4b748cOLwikTH3L8L*sehn@cH)QMkE;^pA49=quqCQc`N z=zPcS2{*+(4*zz*dBD}s%#-mZcdM8V~F@d>RPp*BUV8QFeD?HMJrncUVI(_RL>Yu4dPC#`zc)A|ODt z4sM_i*ac=`V~;x|^C6i76kT&DTL+OEsPENnuVqB~G8!;-dLx%O=DEyH;U4kYFN+Dr z4c8EH11Kjfo;@5-!t>Jk3K|jYtM-_KKLxK-%Kl|J_@L7Vgr1AJy8-TV0$L7dZ)7`~ zY0U1K12|IwPt@EcX^7V)OL3?R@G))YICsW>fU36GVeTdF?=_p^f-O^5+kTnRH6NnW z+i+@_Aq0IGFF*~?$@t>>jSsIb#9Sop<0d-pprT?_M2mWX-n@bSKf zg^rKx)l&*fj1U4K9=iY-xdjd(MYC$k1T`>}Ohg!kOUZ`t6AOAKx0T`CI|!sh9)LiT1Av4GPBomnABQ}N=cE@lJ2_Ztky#- zlp=@7gct%lf;${l_gbI36j6hoCq)4OC@{y!jKod^zA)ZHxRYj$nj!TX2$Tvr;+?l-e1a&M)=keZpQQr_(yW)2P?@ zP(SeraIg*I5@?wE%P7qE03bM;^)x%P3vD(=4o3v=g4E`y;xugIb#tEP-aGienBWuO zba-)IGWeiQ%_7i9GHU}dHnlhcp6P*|vue{dpaud=IO44U#XuaR!HtbW$Lk=Cd6`Vz 
zmqG9KIlL2IC(jInOlii=z(=u#*4hsK{A8dS&0!}PoqQHtrf~c*s2Z3&r;h}-^S(29 zYSUThDDE6ApS_w(phI@L&Q|E_0Xxfzh`-w`(Q8LcY&Y6I0NGcen0VOFZcQ+ahoCeX|5y=q-@kVA~?ZCSSM6w zVu*1KU7`$fEfA0;U_~a5DUlNK0t8mI9TUw22B1|;t%#TdGkeCsaamU8mh!`{&H(Cq zJc^1KsG8+m0RagD6A_A-sG>28I%9}oNlQC0fMXfrJ_SbkAp|Nhs{_fIo7RS&S}0^%>P_w}}h z6yg>P%*_JRvTh*|k!t}l56No*VvQ-9TEK+C)Q!|)iU6Qyfder)xQmjbRttnniYX)j zP!;5;RX|kLv??;h6eAK5sF*r(vp;~$2uMgk?8Fq7#7-imRs-bdCSocC=u#Ar92AMb zI1(YEilvo-10cJTsu5z~pk6oyK+exyM2{bP&Do@?>9Ozb6;R7jwMZ$ost}N()gsFh zii9m~f#|SeNRFPpB4G&8+=ze^NHz}Wh_2{j;Ev=ea$p4Wzy0~IQq@Z4`N$Ukk%Yve6>L}xb|#XGttXi*>zq8Tl#IE zd(Vm@q(uju zz<@!FIgDp_6W!uPb2}Cc4A_E|gATY_<`FU(DaiiCzik-=Fm2_~#(Hi+o1H-iqh5x1 z>t)CY)SRXXAo*Op?am>bfoVw3>7_g}|X>7Kb zpsd^eHZ?H0*y-b(4Cu%|I9cA)2?_mKW5+;8ozwGZl66T|8<)eZ<_Uh!yUxD!5W;Lne~JUcW2 zXm420-o$fyXAkBOAfA)y-~eWz;OedphTwn(D1b$w80^)uf&v*55Vjz-f)Qhk>E-2h zy)EESY7wb`XbQ(Oo4}G5SZGZtEMyJ>s8Y>TDx?^eWu;cRf?P}d5DmYQmg#^T>x-d)Azss_W5`aQmu7;U4ya~d3}Ao z-ES}VdzBI)m7GnDZa0pRh*Sh2#N<%Df)bJehR`WcUY6U- z8dDIhpPwHw2I9y^K|};X=ClML5HTVoB8*H#Zf0uG;{^RE=nw*^NfnNPn57hFV`f{( z+{h7~;w^Go)FJ{{ab(1e*X>3aIFOsyqo`Cs3d}iYt!CAvn2M@YMoKZ&s`(H-OtnS^ z5jgS=j-rN(r^rA3{B!!_f0d8j%*Bj|G&q?7vx*`^Xu0|TwFrJ94P1A8&c zNB!8#b1%iLoqYz5NXUfDEqxpUVM4ye@RGvY5?&+S88-5O#^CNoI8|bg4A4Me9k~EN ztEbkEyBe`M9|0r+9Q6mf8Xafz^c0=0>A$GUp*$LB@S&~-Oc~1Si|(C}Z^U#QCWmn) zflgF0vcWFK(T6IX51kORY0-(WaAKdKG%wMlkvTIAMEi1LS2H^!Ar6CQ7)tdt2(3*% z2Y&#|+1l%!{b_^G8>hlwcpn=LJ{7zBwAyp-U=$}9*mi8rIB6FgR;=NIY4aNZ8sX2e ztEMlT8S}irXgoPr9Ok-D^q2^r@V@U{hADa2TMv`z>4S5D$N-}z*X+$@4AH*glRjeL zI6{qZIi|uoa%(L_7y;j5*%p#_am>`vXkQ?u1Aukd-P%il?OR+QwRYGohYW1h@cixDe79D z*}WLp{&^r++4J!%&1^m%4^tEId>mD3&Z4Ho^y9OtDq#r3x7TgiZl3k0Uw*pZ?kUA0 zhns(Xf0)&8-`-x{USnccLq!NA*+q;AoSD>kMNxrfq6l4m*#Z-pIRY))61Nlr)lz&dlZ)9B6P9w50@^}EQ*InK2ks>zV>AxcauKFF$D=&;()13pc zos?0d@kY|kG%~{;;K-0W4}k731Ozmg8x+3i{psj-71=qQuR*Vw^>xvty%jAx5T>3! z%Ls5c9JA~|k8=3vLfbRp?Kt_l0d_E6)3NWSyAx4{PGd+AdX#tY76{O8AEo=cUF#Lj zChEmunNFPDYLxPram04?wp*t+JF_}XVXv;>XnoSr!11)j=gc^GuQ)UChD{R&J^|vC z&&y$;F!9O&2II00mi>}~M`j9$vzUW@0c{iMw%1wDyKKy*b$Oq+jJd!m)8`p+=T4VN zW8)C}IV~Mu4ZzS>gr4Zo8(;3uVh_7QxBP`?0+FF#!rhCE`I&nq76zSV`+zTk@4 z$ICdKc;icR0Nl|>8I1w8o7fGs3InX4)AzgPp7{NXb2V_~K*Sx6bdRJBSzmqA7itP` zF7*qBoEolQKN|>eX=I<~&hk!J2P1AedNSZz`|9HU0-~AWdH0Z;sp?=*W$t6!PiG%5 zI{19Je;u>gXgnc&SExQvqYw1mj#>h60}$0LdKAr7bMd`sR<8<0tU4MgFk##Lh=c^H zBBp_{hbi1zd9AcA%aUSBjT9h85#^9x-(poUQ$*OyZe&bc%P#FW z2jY}sU|g0^YY8cUTgmyBmR9j3Z3_asIg+|SR$(TJAq3*Ux7)3yVz^oMtRQBVkE3ci zK90a723pP3-rql-kEgliqm*16y{a~j7|e-6id&4ltn2;$qQ!3Cwq;$3@W>h5(J>#7 z+kH#x;%YUUt08JF70d#oB6$G{tkql*90G?JnL{8(S2F+(fe;C(9#y?K@J?a5ZMXY< zWwZiJ@#XdAi0-%~=D_YA61$m-0Z@n$+^hlt7-)zA98+TRXtmZxX;X0UV6Nhga|J~r zW==6}Y1z;il|l?ilyffSP!&#Os_PnS&04dRVo=?*7AgC~OaRH;M7(CpxmGa~chJU% z80xabpMUyk`SYKj#{o`N)k`5JW)93DCXov0oLJ2Xkpt(dCD+IE`QQEb|M*Y;^zZ+_ z|DXT&<70R6Qst;cXXYHVy$@Yq5(0V;(SQRon1YC_yBQF2vmRz9s%9ot%njUI9QMdZ zZF1BA{O4t3tCp1yBk8q{gg9H7Ng&tMDyc;}zmRd&X!C;I3O_L1T?aP%d4 z$tQ~SFOBbi4x2S*6Z@Wn$x+OEd@_I71)>4UPH+&OJd=O1q?epIKxoX|n8(&N?~v7} z9lmVi?372h&kc0?Y;y;N=T6}UUOJ&Q9zBP=;e;{yr`s@r0ygpYNq3yY@^vgGkU0~d zCWO688$chQi2?d3;RWIhxoe=jKjhM?`RbGH#3^eIh!?>dT+u+M=SIvEQJ{Z0%>Ik5 zA6Jdh>W=GNrKb?!RS4%2pJH>7wt*?nCE>l{GXeFSDeTy>#r4sB6y82{%H_dl7C#J` z?JH-bE_`9C5yk-qmnU2^;Rr8F1@VN;Xuu5Wh7OGFpbjEROWIy;xBE-vq+&T&Gp$wK+*I-*KmW39+ZtjF6a!O8tYT@2 z)wJx{1i+!zB8*Zk1x6wQG6Yp5AR|L!=%wilD~M}|!KxF5xI%tDfq<7lZAXzZAxbS2 zn29;?vL>RKmWW7FtAQNPd_14xqNP4RcPD}BF;K3@`}fbs@Fl7LUo`5_AwPID(l$l}*fSA_EAt9#i?Imto46)Q)OC^H+*g+l4 zj%q}%=+F00DY@1nH6vQi&(DufkwZYW7?_)Dh@>hWEsKLXCpD))^mf19xBG9u?ds}? 
zxs+U`h}@PHk=jl^LjVOzETa1(KaWE!|4;w@|GK{)fBe&5a;>FkE=A1Tp_Styu*N(Q zwRi?P7_|)n)Hwzsb1)>L*vM~p09C0~%2C~%m_U^X-Aufip;U>?OsJw1*ucR|MZJ}b zQZq0njw=E`RQ|eDE;&KGeGd!g&_wuu7LONGDjp&lu?S2dCDZU;?n^ z2A+C)T7qGnroL^4!~q5(<1lopt(V-tu>rxGaD+qqlqSm z-AEz!5$I4GM!*jzj%rOGA=F_Guc&X3VeMnWvxol0zR%0&;NvMC00Z>4XW7M;z}2?c z06N~$Y_~EkBcQ#3oE)4H9l`rH+irhtfq_7?H*6n-W17X9c zo_2l8UNfk}+ian?gp%` zM%+gkkqNf?vo1?>81gP9_+=^2S}d0c_!!eK!gBtM=^xBtzuATpHK$~XhYwhDLXn{| zV6=1MJa-!zwm20JT>p=K5luXP0^2XRaB7_9o^+Subce=d_W*_arEvhm3mLHea-+{{ z#)$c4yl_YnoO_Aq3w`uztX$}WImiASqBB=xV&V}{ZD|3&;t=1Dc_^ zGcz$JB3J;xOaQ=%Fa~yTM&cMS1g6Lwwsipqk1*jB6ErodYWc_|i;1*^p=Dh{Bs1eB zHXbg-0D!7uie_GNQM2Rm6fbe1<9RpXbW0xX`w5%~LDXk$!G8G_>M6M19pne<$5n@b$V5WgML~;P+ zDWIE$$Wle} zZdODyfFOF#M=8b4U0qe{QB;j10H6U#b*&~UcKDJOSpD_o<=_1J^Pm3ucg;>fMOCUC z_2UP92I5E|B#a>h4v%B6)$?Am9NV_M-hci7{ipx%@$sxxORdrmeCtY+2ndJ}IrPKf z3;+#V-ewC_nYk8YW-vsfw(n2HOccj9z;?QhsRE)K5FrvGk*U^PwKMJ>n2S`YH8KM! zAVF1dD7oZfrCP1Nq#|JIdgKgXMe1`e&s_H^wVkblM2t4-`4Isl1i%Qe1$tZKFZc8< zh1ZA+W5g@>*5ae>&sG(DxM)4CW2$K%fDH#`fXVG6g3XxL#CO+px35ouk)Y&L=MMda z^X{Rm{GxlWda`{#jRCamb01m%*woQP3^-yPc)51~zi!iT!X3cQKeneg)@K%%=+I|a z_Spu6js_at-q1l;u-*OrYNveU-i;?X_E>Wp|Hjm{FJ_%L?`jv#I`(ky(8w<=!WYGU z#+v**?S7iY%Z!{&>9?PTdTgE>^L&EoGbnW?9N}z2&!U=-$8kdgO!KWh3G94(M=dzs zHm0swKe{x|321hAHmkSHC)(_W zta&n&v-z#dEtZN1a5>4nLz;8r$QaUriG2Dbmu~idI%FD!vN5y)A5uO?cQC~5KiFZ# z@DMt@xzO=*GA}r!JS_go!70S=3b$|Qh$$C$_NK8p=XBC{%3INv94^BUF3iM3V!1>3Rf#En} z59`jo^9b0)&E0xEB;qk)eF)k}U)Z?W$5YUToBD+$y`*VOscOrb+}s?@P}S7DsvkwO z`d)3XcqkMHC00};AVQ9DyRF;p{_{^i-|s6DQe+}DrPo{9^Zwj37_gC6SyNfJHO9n5 zPAFxdYr|5-(GmqSiy@?xZZBI1DTLtQs>X~QupFvYI3iR8b3|vN6c<3yO8I#>fxFqK z6LUzx&0>n>aAYLnT57Yz`I)O0SABecP=LVU@#Du+pCxC}J#75K-%?D3D5VlJGL~GV zNVri|4IxTV08A-qk+`OiV%_tytsKa*{`m1DOL=>F{q@_=;_`g#S|zQKks>2Ps6`pD zR0W`AS&*3_Ah~L-MXOXs-1l5cK5{Yj6aq4@+d_!P^I7)HalOCz^Y1w=8!IUFM#Dq>5Lvn+#CdS~R$KwGpxM|KA z5t;GWb4*-vJsyv|KYv%qt>##qzWgDg|yQe446PsQ~r@@sEG{+290{6{;6T>#7r7d7TW#v7=Ei}r0vHgWi+~eGiYZuesfs}fK}9GA zWKwOlDNN0lh)g6!0~(T>s%ovK0b>^VcpNdrxTuIe_k3h|Ep$mQZq4qJL+JGYS|+s{tDVt|X0 z=z8)D2ja}X8k!DZnxGTjAoTM$hgE1f&$G<^RJlz9o>;=&fd&j3h_s`OVOxea?fT58 zV(W^1p5=IP;75-xe9Yj$5TKtuGnBIfdLz#ez=r99-(lOq_D!pBW(u~irj2XEW9H4v zZaZn9*La&Yst*7T$kG5F!pO8H7?`0$iaCree{mjo(3w)CY2oo=BPPA#aKT4#8JS@t zyr*roBVs4i=~fxNr|X@wK4ASoQ8QZA2C46NCLjlEowZRZurbinA`^AEe|b9M`2J$q z=0J^Psfivtlxl1ov?K!}>>~z)-|dosCt@2=sekArR61g&V+A0=T?>>}w zYI`^r7xrafwi%!t*R(4}A>R|#cUNqDI>L^4ip4nk}PiE&3nV7_Y- zP@S&CfnUz~z{~uc&qY8Owb5|2Z$j(=i8?ZCYp2i5RB3(A#%3lQ@O_Bdn2|Y7JzDNV z2YO!(b36AHnawz*Lr-KG-JG)3;on#>?U6J5e@Wjw{UyBM)}fTAsLiAlIL~grdTHls zgfW^!+y^Du!UV3+NMHnTRCQ6WV(r*daS+D}*tVcPi)A$c;8PYdtf4_Wr@pe37q1(tjn5~NJ!vlpoBmK&M39!T$!4xEUQK*h8~On z1(%|x7MQ>gnJ5Ag29F|@3x52lX$|fsRb0$eBxj`P0sy*hskwHP2*EMPvV{G;#1sr{ z|M>KXH@t1j_S4TlFU#s;i2B_3nyZ0T5n_g9j>cx*3KUZ+&z#nn)}*Bn)3QZ${P=vR zN#L-gB}@MP+q+7MDI^TU?h&iVh>JjqONt?IsJT`-az0E1$)y&NtXdh2BUPZ?nszIlv2$p5WBjAM-J#g z)ucKzgJOsb3Sz>ZGZ>lsj7W%)A#q4dt-qq`PHxuLS`+h@2We_51_gjpYALD$z+MIRy*!q@V9ZDTeCAxOswrp_zAgN=gqOq%dmy8hL)Ds!8w83^XI^uug)>vhsRepU zvtjtA=x{1fn8paZ4euj@-RxbMkkjbRplUDkVIu<7apRR`>_&RFYJi4o&w6#uspLoc z|M@<@DEHs#b;QQU;Iv8SJvah_-iP9xT!)sYbpa@IBz?T11>7$C2gbb2uI7m!aFfY* zDNx-rn2Qb2@ucC#Gfzl1o6z3=JH88MTQ0%B3%K(++;|Ck!+Fiu;Qcw8b6pLLFOw)!h5xa1Ka>aY-qJNPz*|)M~9-Ybm*kEGewp(vPsGuq|thvF&)|fT&bT z0f5NN04c^02mlCqf9A4SC>eob3U0v2?HpBb2nz=eRn%*7g}S!_WBA-l-471lwmt-A zioT@K!c!r#sMcBm;ql{hKX#5JkAea3+grjI*FZ?M+x-nQG3e#vCl_Gw*Jemet+Uaz})KxSQW!U(r=6EGs!^ zt)60{fa(SU2u=i24BZ2x6xXUqK*T1(gd8FT#t;w*9Sz+X%*Cb3p@4AIJ+2Whwe3-fmFrlu3mEWBtP9YDfy?jxBut=`oH{d 
z^nd=#Uw(ULv0{jTP3@zb0#Rcs5RuI7sQUfKyPKNp4A>C?0W5*;YrL;3I^PVFq7xw?il{pFcK0IqQlDkvV_+}ZwoJM zpi}R_rSw;iTDDz`#68-D7h5*;-n3MwD@X)p-Rw3VsMl*f9Z8>sS5Gy6sZ0SUZo6dH zoIgHY;I2Uj`GAwrYi7>jjFU_~-&H687$DlrAvqQQX$KuH+yFw)O&pycQOnbq;sAuh zI1IJYU?nzjF`r2=&Y$(l`vhw-aI3UUh_% z!Epo{Hf_Lw!S+FiSG}fmds0i3LIQ;D_I9pe0GM+&I&9?W}N9|-&&k>tw)UV*S1JgMuc)qwZrr6cv z;78kj3IK#xZDy(cz-V@=4Sg- znC<==2qDJc*p6{G)lzDC{CL)!B@3_gJPh^`}6VmP_22-0IqU7I1m&J2^8243O6Kc6%jc z1>cWHkzGrUfiX}iH6O(tIK-Hufv0K?E-Ii7gl@7wc2y&yJFl2MuFYUOc9BhiI^jMKYY-U!PQ%mep?!i=G-4yr81eR zeSUnFqipxotcq4Ov3e9BAPnxPrK-9i8d#PpCN5Nu`h4z81QdSx=_jl8r?=Pt*Z<>x ze*5<4|NPH?{M*MPi-|cCn|BlB*1GQvs?~ZhIypLnJ21eKXyNd(E-$xbTUS%vcPb*4 zwY4Bifqvr7>fDFUft25I+8dNu^Sp72kMbNL_}vKcSFaBVU1x6yai4Sk(|2$=}5-h z@q|_Gj*ejFgoxCY@!TGGI^jKAqZ^Bp6`Fo;3$)p(hG2lIK?-$PKtr$2CyX6v!)-n( zyuteT`PtN#lPMXmn(YKXzx{#@8+1F{|NCbS8`1Roz%wumU(?CfqN0}|+8hTM>UCyN zPrEjsKVSEyZ6_x+fMkcZ=V9q;K1jCyY#(0t8Am$jZCV%acmP`nwI~0~#tx=n@FG4t zJ!4JhFpLrNN%V~YK*W>pY_o}{O+Cl|_y3v5)q7j!NP4U8c8=0Xpqz$n4qiv}Gj+3j zC(WrF_+hkp#<4q$7|7@AG&(vqd@7BXEN36Gq^a&-25R7NW~=12h9QdfI;G z#*RKPSF*DU!4x)}%VxBJ+Bk-j37n}{Fbk@|SETa=o*QulXzYCdfcB$&&h>>p1^0~8x0xRK3fTKK7v20p=*n>dSR6ZK z;UoYj*zPXjRLGN9w!RsiO3^G<#E+_3tg07vLux*`IjEaEibxfCJ|63~y}rJ^yu7U2 z(v+EsfY}jpt)^quTS|T{Q0rqP0r4#_e1!4&6wx(rE zF`Ap2=FfdU_P9h628FQ1qh?V9CM&?m7@e4}E>}9>~4XZ&k@ut*H?+F;Npi zKs7@Ea6O*yQtKc7X(6_J9P5T@O+-Kl6wuv3JjCE;rB(oQSFK{T61%Iqsk;-029VLX z85=c60F^3LV+w(hmw4>C?78HE2yRkp2C#KYjz9n$x$LT1ks*Y|&2T|3oO2PfT#5f z#Ecri)BuQR3%o?SZ|g5_FZXTRwl&xM{cj&1k39lDvlsLj$lW13%vPTExr#KZ_5z`tp zltNSY4AumuS-L2ZQ>UMr7P0U8Yq|~lzPvMI_)OrqD5Dd&P4fZ6o=l|WCj=b2?xL$k z;uefZ%{25w-2q@fMXA5#Yc^X4cfH*+o6RnnIvu(q_Fm!u0R6wyh_ob@kzolP&5VH> z@X7~_o7C3;XnkAam=XYh0pn)KY=Fm3A5He97fl=1ZM+@E0AWwgn-+{FqejoKr-pWZpyncI&;z5x!es2&^)bmoWM3D<}%HI6MQ1!M3H`qRF@f_O&!VQBRk$Y z{W-oL!Eq$(c&3fO*DQkmxexh6M<*n+GX=0ys>338vFaIqU+1q$c*B(a^}}{{HnCkY z!qyFa!^kTgKy^qZ3{&nGX8zO=<{+_xqKP8YADS zBOFzXC+33|jO<@e{&V7)>81^_g0XrBY-w2Tf_*Ol^X$vyuLl5{LC(3PFIfe12S9h- z#+$!*kO*UW&n8k=2;;&N@?JSgpQF;}rZ4Fs0zUXN9Lj6X=|F%d6F2*O4UW5lOi9ms%8&%IXRNGYY5Qi!n?%K(R3Yspolni(N5L)i}mTJI|&aUj>e%yPM^Y7Vzu#}>N<=Y4HDP2`uW*24IYgz@D)lJtrfLShtTCo#yWPpD z)~fDaj6?YL%ePhmF5MSE;DW?d#SMZ40ANHQ(pD%gEg_^JSskn$NC0HZmP%Fw4=GZl z=W`E8aY@7g2q|tcZl&mtk3a3tM~JkfB@%;))~aHWqavA#JBm~lcfi0{s(KYLGc#fW z08;^^ygwv|^+uoXKTK@hs(*7;kEzBb<)aWW2V|yN6cNBxa&*8*zvVT8nsQ}*gOBSi9n3zkc0V5EW{W$g=Km)U?7ZIs)9GMbAhJCMM zULUoZ?0XhbM9|OAKmN(y-d+Qx^06b*Pq*8zKfQzn73ux^$B*AWi`Tz>|0ub9JWDS6 z?d|sO|J|>a%aaY<0Ion$zXOLQa$;JR_;$O$y{s>V2+Mm0t44tu@yMQku?FPCQRh!M1Vjhf~bb=2oDp{ zHiJkANRFV6A|4|-5(6PIB89+2hVHGfFq0?3*CqaZi$AUL7H}b>wy-;fYZk%#5z;V< zYT@J-99l({nddt(*YIbDcE_PD;Z$f-{ah66sZ{aAXM?*qBd5c1^cD?W2!pq3?ZIwY zPxU;|==m-abuS zWjNJ*2d2NX66bThgE#N0-N!$z-9B18I@_Vs?8p#-j=d%=dEdWMzc(5#vr@&n2YFfvMW~WU01?5ffhTR|zsck3R_hD}{GQOL} zjlRPDbLdA@Yzh&I#l8PeyE9hj`v|x&4BQ=5IZJt}lCp9t*z& z9mf;lTnuv6s!$E9xFaAFIz(a$VOiF+-nQFK z&5!-~csvm3<@J?QELkZMC~K8!We3*!WNEUZs#&wMtnUrqJ zvfh>y*IHGqh9$wHgxkUktNNon5J1ICEhfq-u=;XO36hj*pfwi+T($)O!BI?8463Ro z6j-YV#_eSbDSduC5a8G)1o-yz?c3`&0Q`JD-#>nsl$27sZA@HpHYotg5eSHkAclNo z0!=ZQi_{G6F)}Iuy1CU_R8$qr+=AkP{+oh&j8SDNyE%EzIReEk7@$?-z(gQ90~iHx zM>b$43W1qp3P>d42!@1aAR>sku1l%4lyW?Fs}%#qv~nUt!vmz$BbUJBuK9U9-#?@Z zCnf?8D0|&MpSk8-R1Vk>muT6Kvr$#TQx_1pdahhP8w zAO0~c@OT{0$K#KG`g?W#`P;XD{KKzO_aDE#qx!NC5xT***V`K7x-H9=$SEM^m-X0p zHPz&5RRuVO5JL;Bi|KREdCz%?42tN4?r1JC^u+k>7XNTte!a!F z1s8Avr>2<3aglz)oypL2FB*GP3`e4@Bf-ooYT5-mdOA&oL#ul{6HDBu!hr5vV%JOK zv?Cvebg)7*M|U{z({mi(UA*w=!Vdg0km&$3BfbGX6mi$O6WlpKJK*zFa;Fs-IB8g@ z@r#?cQ%ZdteFHQ5tZ|P0Fr|JMPTSprNr0DEoY(@Ux9tF}Z1yBz#&pf(EpzWd(4p+V 
z1d;J-raOl0qcXtgV8ezHzecCqMgpQ4#T*0O2`V3eVuV~9M4O?uIZZfh*p!>ow09Ur z!=!>w5@pyMM;MuWgY?1yTL!W@sU-9k5`<2g4a(%JwL4+$^&!w=tJnvj+1Si zZKFV;p9nLd%J03aF@e7Wp7R$x6+##I%O~@k;{p8V?TsnyzzeXS!7&i*ey3NaNUC3v=L3XgU>K7w}VOj-MG?{W4`ZC`K3xsGm{|9Xs|5n@6Q) zYM|O`_CgUWD!KZewN$T4rI-RZ5Fn)`rDaRodVhU=d3$?#c?~hd5Q&3<@A;@wm$aCH zi^mk15L-P5a5VvIWt}|6aJ#R}94MHmh_pq>v&J7102=E-9=@Drh+X9g*|rfee@s9OTHF z3kaHHNGnEgH(r<%g84(GRETl0D8%U1-R-`ui11kohX{xuo+7Wen}{F)A=WC(^0KA- zQR==w%W;(B*zRkL$rPT?hm=eJ%uYm&%$8cslo$a~iko8$E#3(X=;8#8BQqihk;si) zYS}+O{w|VZOz<0nb1R0HQrfl^5K1m7hIC`CWCBdQY)hgL(?T2&8Hf&f&o@u@^S2sT}myvsG1Wh79>>3<`P19JjyfgVGYPCuDOVc zssow=;USt!{d_!Qij4d~Pa%AK#*feUEp9LO+rnW@O%S%1w-+$l*5vl`hx-kr000sa za*E7M+ihWHVsfzkkskM?uAaSCQ+9|PQefs>tK^aseY`)mNa#z96eGI1s)?Bzf;$9? zfdLT+Du9`{?LM_uG{9z_)Q~6;sv#0csV#U=tBAQOxPh7hVHE`cGA8tp2nig#Eeu4E zDhSR55$PWIEz&*u!WPkK#7(+w8An8QwRmArI%es7Uvsltp^&S(Yx>7%?B#-yq1IJh z(_oXphY=uXX1nJLPSxM7+@!G@%EMEAjY}pwH5AZ5g@_jgh65ZX387}AhiBW#66_H<7@WcA$QZ+ZYqQbAeI8qWy_>?O(*1m=pUrHh zWEApk(Z4qQ)4+5EZsV@x0E5D4FPO-uo1ZI_Hp1^`id~yZ<^Twg791# zLtcBCl1q}i_wgG{=S&%d^MwG=!mLx|e3DXr8HZlZ@8lD^)c6o-L~tDS0sFIh-^bjw zhV{bE(2vp@PSnXB4~W;NWuO!|gSV6R9DwmmiNi5=KBRk$0{TceXuRmem}5|eguy6v z(-=kS!Z%2-89<$^>zM!fpaBzol!qR(a=G%tmJc%SR1IG+mILDDy*LA}Lzg(rb=*X0 z%yF0e3-jF+U#rnH5;9s|pkJOgCikMYPK5&fz?1&GQyO758walkXZP_kC&QB)=RNe6 zwv0D5gLZnos2Zq(sI}vWs;U5p<5A@h-xaDOrqm8MShwx%+qdm@e|>w2abaR&Mx+>G z3;_U4+8NN6ixPNSFs0V!7seRx+l`6Q{P5%B^L>9la?Ykwivc5=EBP}`MpfhubDX3yfX}P7x_qKDsK(PE#=sA0K+PwgDNY@jvwzj5x$;tERG=T+2j1T{F86SH9mgjqxPf+J?sy zg<;c1oq)QofEoM;m$*F{Parw(ZG(+)F<89; zG|L4{ns4~%7X|KTJ3C6~wh1mu{%q&u0eT?yywiPfJe|>L7y&vypxzGXwLHAb(|GK# zNE00RFe%=%a{BDsHEw-@(W5Oe_{*^s<~#wSmyR1KtxrxLr7k_a408MTcat?8kqMEz zY)*vTV`-<^Y`Dk}5uFTLXUvQ%05TG-QOuGn{Z(uZk+A~{;uYk4=mXpa7~Om zL03x&IVWJe3wjX^hq<7qlw5P~;lKGAm;0F4%ZLvV8dHOPF zAr&+5UJ4uyKuo0pI|oHFfMO;<6-)sP$<-`Z_am%J$#q>_#TblJT+@=)B}7I*a7$}6 z^;$}OJRZkhav>yUWDaRbZeB{RwbWXU6{GGjSFFc>gRVA$SO3S(eCw zV?^ds)veYd_xoz9?>{~fL2?a5;eN{py}!Rd9!EX0n^B@~udm;JdcD285(OftT1Bls zN(xLdGEgmrmSBfb;Ft)VYSAUFj^HAkoG>tP2+V;^O;uegqExAFwyleUM@~${1g=`M zYL%2y4AD*Z$B{pGIkFLM@dl2cAMYg}F>nklE>>ZQ(cH`|LFU+CD@FY?~fTz)W+Y5R9Pg0Kj&OV!?`sz`(rcxNSFldyOHe)_wO{1>1S)#L$Yt zGSw>RKp~hK5iR#6#x=%eKla31KAsMMM5UBk4lrX5U`l|ZDu^vxsM|~Onx$4p!{Z^x zqc9Ny1fsn-xVu;@AZ*%LAk0PE@uYjH_+hstB@VgfxBI5Q=>4`58Zo71sb)k<4CD|) z2!W6w5F$TpjTD_8nCUU@elJmRZUl3?TjMip?+GKthq(tqFr!g;=u3B!&H79 zfOL)iMc++krC)s_yWuJ}Yk^Y}o=n()AnkSCvmN1=34ksh@s$yodj8sJ99-MfpCehg z{ee9y(#(ZJ18dV{PT&5Fp27HIwDp2WPc?i#d0yV)bQ20+l=rkGeh$NU(QwH9ddswF zUt&J(-uB7lVMfk+--LYT;dPSe3r95oE6&jOoVrOOIE=s*Tx4WU9CQ+LvIstAVg!=9 z(f-0>H5hlI>a$H_syfv)%&|M*z$D&rYQGys-L97O=Fm3zba9$kbdFj>5jL>v#DACL z;IJ=?GlGuYVD_@$UjklVn9(mU=}gT9eZyEz-qs_wy|JDAo@3ZC-U-Kh2o(qFf{AG^ z!Z;QI^k4f1n6dsQfEeM%3H1=ZFjek=K$jyjCIoSx%N2Zj4+wzd*2&S6{+(r;CiCb% zh4N%z$K>Fkj{6%sLhY>hSRfAWqr%33blA}&4T%7@*q;2Y^JJr6NO0 zsg{}|5W}*hfN|ZHkfN!HimGT;H)sN&W+BqH#`V4}X>~IoFGpcc>+R+a0w$%FQe2Ik zLSiXJ#BR5>03F)WVWdT$(H#&%Ig&B638c z$Mfk`O)D;xZ~=h*_;@^?DMTjP_nny%2NNZP6oWM?$FUd5K;nRIs?~{F9u+&dnIWR7 zt17xXp}R4odyxw04n)sKhIeCT2Q=WfZ*R-D7ey(=GXM~yRX{`tXoUn!%)C0e7unYs?Z>{qm1ikG-rxW54?o?O+ZrQ*#)w)C znSfkWBOxN^qXGaCFnTL42nZC&4XhnWss;e6CROWEG@?ocfOe34N})j=BoK2(#sCf& zfp7^8)rH7}U}6*y+|__WK$oJ<0O|nlOA1B63@L##qXPzJMo>cpLLeq|Mh75B0k^>S zh(E3Ib!jKA0XEKl*ojff)q7p3Zrzb+Fl|GtwKJ1PQ3LPQL`GJVPenHZRU;OJ69OSX z-^)17mBH024o(ZFt{G5oKs21GQVpVAx1P!l1&@r zFxB#SGWPfmjJU*!V5XLzJLj%o%Fh&((OeiVxKBVjUNb?*Wk#lTg_DQ8yb%G$VOV|^ z%Q*9!V02?9MU97@v8>6)VK>Q_Hx5D-0Y*kN%$D`8U@L!p)?}MJQU}^sh&tlSlh3;h z&IBwc*c~Q&K<>#x_=TGB-iJn*QQK-dMtP($owC#?2BxKUhgMl=RG}Wh+4069UJc;2 
z9YUvgj7+#ogZ&yqz1VJmi^yGM2E8|$&eOCempv``9L>oA`&b@>BJzp&`*3%>*G;)Y z7-;((JRFeNM-hqv>jB`Ly{~fIuJRtT!~wS9yqWspCo|8rhe+qN!9>0=BBw(u4AHy@ zU!Pwez;85hM49I&7fjsy*;Lj^VD+Y-vNxYOFrwp-#wqXE0V+W|!l7%T-nDjRPg5Ae zaHcy#3xAAxf-$0SuC?=#T|fu^?mXyKKVG--cMcqKKqON3G?%1bTuR5QY#7)_s4r z3SJ?^bxSENfkRp&K#Tt>39d`pk0%1Ian(@6V)2fV)3>*`m-~yV|Ms`PO0A}b1aV0r zMiu@1lx5qjXcbA@mO?af=9p3_xuz5pxaNXPDRIpjQ$PkIJY3C*m;*fZOH+-~c7=U_3# zWn0Z08DdH)tw=%L*X;!nKHon=SdQnjR51f&W~L0qDUbU5m&aQ9VRB#f!)v?FomP4zCWIi{QmLSmgU!PuWzs0 zwniodbP^{Zh%U7Nf|=B|&qJ&w8xgVss#&dKZh@(Lx?q5ArijQyRYbvwRLuf2ArZP8 z68aig2~1rG1CyIF0igpr2UM|0q(s#`5Gm0TpvItT5CEwiO-o<^F!?Z#-RJhMKcO5w#{&pT|AIBzggvb=T~e^7FkJ|u!I*SGa`2(=5dE?zcaC5Wakq|~5MauuyBoB6RD)gXy>8_W z0MFizgt03R-2O`gb$LG_W=a+ej5yl^r`v-7j+ZvmU_4p_TRwpGA@p$rLYO$`?9h~i z4m-y~eIj8v;Q|6SJU9eu^lhZXVK2#hMdG7c%$Z|(is(=irzV-ZYJ8GXei2{%B6g?O zHKh$NO=>7}NW~dmh5;G}Yksb8KarKUoUOJCyS=yZg%{D7DgwvSIA?H3#YH>y0i7N& z4ssu-{^{O^UlvR2QOh~##BLYCu~}vU-pO-Mt=0F}c!QA!=FpGICgSEnn4_ABc@?kf zd#TTCd-cy<9@Y2id-20j5d@rxAcXa{y}aIUZ};2FZCO%?0Sp;Q)s|%mFRK<6QL&hx z{M1KCsn%oPcM(bDNyzKEF$F}Zt*|QNQ3@k%TZj}wh`AOK6%egrWP3wryLt zl|#I}-2+k11>8z5&;7}aQVkr}ZQ~f?l70qswIu~b-}k-b{qgxk^AIuJ7Gf|H)p~z@ zjf|z_`|Dev00_j)NcqUjF|1x|wSaLArn;<)qnBL7jXA6-1x9yIk(VU_K&@iph=7DK z26V{JBbP#fTp;HI2}=P)1l9L^fZ1z`$FW!OpWfc2YN-}u;1FYsS_=`l8=}UD9@G>} zK}Ct!P0b7m*a3+F-Q3OE5xr*Gij%cSD5Qn9EiEDKpN|jwp+$174#+XC%eI)t7?hL7 znzqFpktl_b(%R0kSYv8?O>WzE>{4n0@U#RXK=-_7t?G^lBn2cZ1dyt=ihxRXRdoQX z9yqMGTe9}0rWl#c%$X68OdS-L7yyt~ih=h%qX%Fx_grn?J+kk+u1hVVN6p{9-S6uX z1WjmJ;&aa}AaWcT$i>v$LgJ)u#91Ax05}I~mmz?ewUQs;?h(`+OQpyN7!WyNU@%8w za(6S2OffVi!xT_T;g%nV#(~@j6wRQ3lfx~s0vn*UeR4$3Obzo^14IRg#3^84ye;t_ zF%c|8i5xJC2*c@8c8xR?==i%`>;t0=q}0;8 zu9ZGVe)CYsQ+;16lEG+8S9MqG-%!cmGiZ_kc!^MsHc&TBv!=iZW44};%3c#9U6{7s zoQXbhm=kw*aO$@=Kw~ppc5;5+5uO8JFFk?nq>5>zW`Oom9d9DBX#nPUph4T$JdQLS zgE8-&w(WOep8qTJ3DfaLz(&S56y6w-ZYJ8kPUscRF2Dd0ziVcdHz$~iZE2Qt7o z{A8|&-l%ykj=6v_AY-cm_QXc-QZ=2~jw8?Zj)`*`lJ5P{XJN=wjC%_WTO?w1)dv+j zltjndEgMeN)m_A^LaCO;O0iP?ndP&{qxw^zIvF4kh0yH$ihx?hiq>302q`2)1j4i= zcb6&v7y`TcvMz+cjE{$vT#((=9IPBbfl^E{G7^Y55=xa)G%e}Qc-s~ts#^1r^O4Qm z5ZmbmDWzrI!0h?)FtsYR76F5o*P9~{>9(dSrRJimf&2b^Dp<|+^YLDuhdHE`k=TvO zQHVID6%j;qyT2@P$+g6kOdU`|N<;{*ZYrhLBE$iryQ#amds+e@_!As~5s4WAn5dYU zIZ!|}(^^UeB&h-*Qq=&712eyWd;<8kE+3C4A#E|`T+C%%Z=ANs!A+5)o14}UqPvQ^ zfdOI&!Bl20Cn7b=3JwGgj3y4&^S;Re2?$wo{jL5*YRSdG3iBVMi{iA_|s5L_!B63aV-jh;C#Cywoy`52V_DujWG`98VD`M(5mvf zgP9?*xg*%RL}+e3a65x5#;D>;KzAbo=1>g8(U35*69F;+hJYN15JRBI6aWGulSiT! 
zPXR~k2V0T%wcpUs``A7&3{zTY%7XZo`bd^D`g zc+fC#KDrE-H^KS3VWwxCrrD$U#tCS~4#!D|_#DacstZs?oJ}5M(tdTA!Q{b=Vjsl@ z1_m_obF9&E>|8wIIu6!pj1-JX8WVJ({k~Ymv&0EZO9pr;)jQzU=u^kO-GE|`ezvJ> zfxAAIK7isWJun!;&ZbOa)u$0Y(dtFgyR~gY(g)W4nhG^$WkR%$J^G6767FB-2>K`E zD6M`24rG7!bUXqPM_uDNjO{MyfNCxWpBM;dlX_Y$c6c+9`t)o%IKwZliNm;fa&dDN zK#NGuV~0mKt}A4MaX3@u&WDdpKyX=z7g%?Z=&>-+FQ2w!shW#JU=BEUjJ67PGtfE6 z=(#?z?fwEE?{fgWBzBC|?LKe0EZEVgF+tY>n7VKhhZD|D)C$8#vdKz9YqqzV?%wV; za07EN1#_sTMa>+=KmmanfyA8^UMcAz~od9TAvAKn4VJ50OeWX9$5~ z3~z5Q$8qG6UF>-74)DZh+7c243QWi`(Ep#Tzw2@($FW4g4S=MXB=_?Xk(pJud%EwM zS@ZvY(S5nyJ=0a46(8rgOJ+tAz&wC7DSBd6MS95Pk{OKzHa0c@&dv14=R=56;$C|_ z8W`8x_3M{!x7&4H7k9mX{$$2}95A{p>)ZP!m*Vcd_H`-Rx+#|Bve#{UJX+U%+j>0& ze0jToKv~3GYXhg8^5y*|i4$dE*^hl$mpIjF&2C<6g=z2I+&Bpn>e0;X*jo}Zb2F@$ zwY8o_T)1tA!^}0MybyY=5Mtq8d#$}>>CGVg_H}*SAKnc@OQM`HzZClZ2BC5ZcJXT;rIn3+9h^wr~`aBb!eVsb?@ zYrWOBi{!f9FK_SFy7vC|^{drfjjvyqko}RV*DN`a@W5>q=I)7D zGAjw2D{F5svz#R5d1paa@->&8RDz{|f%0$ehF(e~&VIeIhA}4ZZa&-?tIm}dazE)*`*;JK; zoT7?i$XO zAf{uBmUyR;oHl)CV}dGkU{oef#_>sjVG43imo**1>-Wz7Rq=rs@$^*Rzq$g#nWTwV z<4?`-qVowHj2A)6bQlw5geN+=iJ5>orF+lSFtb<3_5Ae-%APM`V!f&Sd;W&aM$i2D zID%$60Yngg{(Mm5SxkwX$MjM%x5&7j&}1M7d+G@1UlV2@HPAWf&O>qZLhMdJ%ugIS zA@b8vO#m?NY#1>*5iO>UZ9Mr1PR+~-9LwmS*~xFLq~UoB?o4chD4HDV$#2ZAEUg3;3&VNp2#(4*481eE2FZVPb^nm=cUIXKi46()O zZbww{m&+ZSX1tju*p8Np_B_BNP7FK96UYx#O=p6f%*@OUZc$!mZVH1NLCu?6Ggl`g z>I5S=s2hwam7MY-l2R&fU*6a2wOp25vYBpmuYY!7$#=a?Wj>Cr9=pSxydLect|euN zmnG*^T=nu+%=Lb62D?A@ecyX)=A8uPQs6F`dbNJELu)GK+pm9%qEheHP#wl1`(q=b zd|g`Y+M64+_PX~VxrjlIUOO=;aot)Br#&rsNz0HiL2-jK!Ei9;u!| zV^41j3j5QwxjAg)O(KyK8_BAHO$En;Mfli75|QKUuSLYQR~VS&*th$~AHVv)WBT-HGyAgqrPC8#9rUkbu1; zNyJRx==|YMpvKBbJPw?}h?$8jF`US(6P$@eh?oR?YL~T}JLSY)DKWW_c3QG@cM%}* z%0UYqN zSIDac21bF!`37>2Y2{B!XpH0t*J{1Xb@*g&{;VwDqjOfchwuyao+Inm2mAj$4deDbbr z;be`TdjltbGWL%>hiSZGo7foxLJX+lC(=0I^JLE^lpm?>c#^N7gCjtm!GnQ|?;D@z zlWl$l+2cXdUytI1+|jw@`2aCBXET%m4;W{2ph7qm^XF~8#tl57Cyj-A`RMui7?s}V zHi+{wfc-q=1C`Ae$L4+kjOX{xOMSVRfXyI`$a0+N*xTm;BATsYd@2U!r})`#dP2BY zjoR2?C;A#LH4{ zm+R$vY0b>k$x|Uy+iNwrcUSYAio55WSs4YrYfgzswny9U74X)yw;nAa=z}ho9E{mz%kBu{Zd;jJ8*YDN($Im~s>My_ia=Bhw zKgeONo0}j--3b9>b|wqlBEqN1!V!Tvg6~tuOhTmQZiqOWh}D#dv^x=lnV5U)@V%G3 z{S@|;Q#tmHNiH{V%5~p={`mcmfBxI!^M{*C&b_v(d+XJ@dpCkH*qZg$&CCoXF})Su zY1{TvWW8KyuXWo6LXu2T%5t<;t1=O%VNANIn!5*JBW)Nl9KoHs9DcB3Aoixg(Q)87_k&+>zyd& zXtN^3PDIS)W`+P0#td!F1O}L>WYN|E$QWY_VW?7X<^m^fhptNsiCKgJLQ;gyTURr9 zQzsId$xnuz^a(kz$h0l)s%)-n} zQc^*_5TMr0yY{Nu%{2P06Uc4ax+w@b**H4=%QU&ecTFkHEby9Bye2 zZ*W<|{66ztgHM{-rTE+666q{1b~+gkfBE;B$l@27Q+zc618AN&CVbynr2IPRAy)DX z{e3u)0YDvN`dWd_FVaMGkg8RG)P62{xWA)I1yWOV95=5y=Zon)f+(#p~vt(Z&nU zc1p)y1Ec|(=B_#kxaY)(^XNMw9ElJhV5dJH&9oIyMEX2w1H#WwdRhUW`*T9E*Eluq z@HuDpNf`S4pRqm9hJo_}!l_Kyd~Tuhq+$er26XkVZtm*djGFP@?a_Q!RJbxRv8ks; za>h>pjuDJ7v)N={ts;ZRe%zTZmXT5A^BYcuopvXrH?+H)zy z-c@Vus;a8(QMI*_)0g-6+xr`n{rHc6?vE`mSyEzP+cqIdm!j4IG9ZbVxAj&`y{o#R z?QP!=k!aF}yb8Lv*1dPhsa93Pl9RiFNODfvT`i?lijz=pnoGIfZucKQmvv3Tl;!^O z6ObjZ*UNP~_CJ684#&4|-)`6SbGyT${>Yh9q|n^FX;*WCFsnPkh#2NdU>0&ykE8;@ z%$b=8<_Zk*o}5gbm>kRiQ7}Bj1ZLD@C(!N7#jS2Xe`e-`QCt7`^N;QE^YhO?RC`%6 zpuP8ATbuzFcapjtZVIBrDP5%X%1MM1!N>ENh;gpP%{T|lBnk*r-eB) z+{_rN0Nh~W1c(}_ISC?-;8od)0b!A7=Elsz25%M};K|g4sKb}SW@N@WiB%(4B2SE! 
zB}i8#OB_lA)oQNJa9MK#NR8kMBcYNqGaCp_!puC_4u`n~4PZ|0YA`*sW!+{spcuq4 z^%oSajvYSQWWAWFDRc&0hc#2}k-;S<7D-H;(;}SIT5E@CBRCJdM^BSI6FkGl!B4L| z;T=sw9bR-M8!`O^O$#+XY*=2JMw$8al#x+BQG$)nr_*=YiIs-S9v&J#g)uXmhLIhc z*ygEPqYCu6C<$q-a&wu@eyTV+ExlVs^l>X{L zo=ZPU)|>(O?5_IKnqfkk>4<^nUH#?b2RI4fF$jg>cMwh5eUO|^k))W3q5PLX zMEd_*&sSz}(7^*U%xPWl>8j6xbU4$oPs{``4rWL=Hhn}qbbeddhjFO{jEQ(SaXLNs-wBV1X)1irEa?;2&YKxW z_j!iTgL;BaI>+P8jW@o|&$85){KXS+&hP%qf>LaO)0p{Ob8@(mjeaC-Zsue_>s~vW zWAC~(-@6~?$^b$_DwqqU}L%xge;8;qob?;`^>*d(@rbo(Rp{+MpGc&JyC4e)znlM>6I0;KVG)YqLxsGBYs~F}bk2c22^AdK|~&Q*QM5q>rC} zbnDy4&)zD$=R{=MkJh^aoRXL-3u#kREo(6|7UsNI$H$*Px|oQN4w|GC*?aA#=-!D+ zE@X7yb`VjIqA)p*3Luwas*RKl#3We?kRZ3~BFr%4BH@?1Qga7&@6gTS5KFhJs^%%N zn%CCdEG04Z#LR?Dnm7GhK?wZ|+*=BS|AqjET=E6iq1QHSm z10=#!XwLM6!(G*SS8u&HGm8$~7*%+~ZBrCrhL0FJH>W38HVhHN-Atk89U$&R%!!3$ zRft&&$3;)e_m|AZ8H|qb)d{2D+qAIm{*pNjNf4dkG7s;;k!^XRnotorvkVoQKd;~Q zm5Z8AV*dFlho55woM&<3tkw*J?PLB;=uyILI51P60U=*fW7GZ)$o52Or`(4ICWyh` zlX9DWgkK!W(<2NAG4F1AvD1G|elmRWsmz{k^e?a2@GEDKPgBQ7&mjN&>AWx?hRwcS zm``^cnXAc1o*;6vr?J<X}l~fkEnxADr<1c?f?bnI)ry8>EkmE z0v9)r0v&Q!Hx3l&u$ep!{#8M6=h2L5Jo<4!T<0BWY*&v!`N{J6%W}rIzi^5kP2FB{ z;!ncmiGe1}BR?5C)A4kDnDE%iX>9)4yk`Q~6K9QoCPpMorV1xt9Jp<=YfpUY&vQHB z&V-pK4j%Y=9N5V1Ju&I|qmONOZlrS>^2^)K4fG63{5$}3-bc{R!*CEi+2VOarzVjC zmq#9y&aE_Sy~l&3^Ij%j9KpZy1MhW{a~c8-{03u zA{KHg@7JzY_nOy|IhSsFX-&IU{rK^d+UDov5Z@<0WzIt~PBF@MDxZSS3Yiq_Nl8U)?Q|3hC){Qtb zxrVJF0U3GNlgOLW>u_!nMkK#PI9iYsKnPq%a97pVT-*Nm+_(FFYz%1YESv(psG6EE zcT;mnW(P!gy6vD~bTI&Fy@D$P(E~})()-7j|BF7k|i*W6(q_sJD$+>sk_pNt;iG|s5SreJD zdrFDediCDT!EkbcbOl3U3Bn^g17^xen24NmkvKrDynt`y7}Ea)l%qOyURMGBLooQOm? zv1G~wxT=~q(`wdKn_E}4P)jlP(MAlVv=g7l4I~mJB*lA)tty&qlO6Va8vbIXScXt_n3G8 zvWxg!xbZ*+EApb?gzG@k&pR3)al)Ti(P%k&LeBtgC-QxPahMd{SeBXoJO6)(0o*}u zvkG+rgL&uYE{lFV12ddiZL_mz#V3fK>Act|W;2qQz5eN}7@d>B##h)0M2IMQ95}{I zr}Nrqv`m~cKqFQd&~uQHCz%%qH~P5XsiS<>m^{A`X913%?;MYed^Dtf@_H{vW$tZ0 z#|s9BL1$2PZo6lJ&-fgF!sQoUaGtz**EIA!2BsLp_u7NepYdy)5H{Gk-{9v@fj8|X zil%3U5#a6t;6e+f>TYBjgMY>bS2Y8Tf!$0*qCjG~-Y#!%Z*O0|uD6Rwj${l0IWY@Z zXjw8qsYuTGr~KIWN84NPwW9Xc$>EGFIj1F7B$s@>UEF+o>=F0(+Cx2L-IiQZVlz)~ zx5!!tvqi#*Su!d0J;(tGc{GcgNkM?H>_@pL#SiC7ZJ+0>$wMOyOqxv?V`>CKO$)}viY>879e?d#i@ zR(FH_?QehIw=I{_&5n8yQO>0u+7%)tdUvx(w5J7vd$X9XF*tD&1_CvPbrlwON2VAs z<#1t%-Nh1)Lxr{1+G=$&Yw9B81UJ*(dIz8e=9Efikac%5PArZ~x~MS|AIGEV(fV$! zmV~-{VtjdDKR-5h7r==SAwS)`<%f$c=K! 
zOlXG-p__t9vIwa5rrlG@YN);St|XQy6-shJW-`-t5!G($UH$WD{j*Er+foRguhOc* zkc63dSmUY&1r8=kk`t$Z zbg`pYaFnr-L!$sDqqgQAdds)$bDDd@{%Wfu-!i+a;7O)24T=10 zM#4noP)u&n$IW|;vH`?K$V(uOs z(l~tKHA3zg7n`0G>&z|UgmHrn#z`HI>$Zt6&n(`+@GsFwAfQo;H8<_thVzNQ=akWb z>&F7(43nOO?fk(8xizu<%WRVqC%?ovcJg{p92)5Une?CgWHiY+&+Kc{zj;Qd?ZNY& zC}8fKzY+EXPh9z$7>%qdjEpeT&4a#--91A$BAa>J^B(=#*TDWUAU4*s!dLg(UNK9BuDQF0Oswj%8Wbr8uY7It$FHepZ;?ZdcClm&>B6 zpFckK$3wNRw=aMH+tzO`D9l8c$tTAjSBw$@Wh=G@2GM#w%YsPcF)3m+@cr*aF!TN_bDR@hoNGUA!Oe{>u5!UNz zQHO<(H&2H_v%eZyKJv^?lb9hQQyXE@1Z8JO;924kYL9^%q{Xv;TR#XIj1)wTNKUugZRwhFW+GuhqGqkt z#-RWtFl1S?t8tPf>;@(_1B??b1T1SVBA5%BLiw$>rpr#ZMM zyWYO6+Px58-!A~HI+;si2?PYlk}~u5*c4{)dQ@UcLfX5#r;^~V+5>a~zFe19Ypd!Y z5tc=I)63=Z`SZv2*#G{Ie}VDi@yUe8=fl))%hI*)+rH+d_X_Z`tY*FU?w|^B+%8EBN=F~g%2hv7at)DqlH5|5aM9PoPV!wnP5lFgcp`K;>j)|`md6;6_vQ-U#E zYdvb+o9<@jYF>L0R7X4Ny3ptSP&bEJHy1BSnCNI#NJ^2~y0CbPuuD^t;D+4X%repX zww97yn1c@N&d%0AZYi-4o1tV;n52BkskybIYVSr?mU1ZtZr(kS!#bthwoVQ;>s^no z>Xby7QYo2;o!lIct*uKuVa~azqbxc6I}0~6?PjXP1dVYCp0hL)R(!mfiH#du{k}D3lGpCf2P+pg{o4IM%+O?^f1|SRCZDIo&y6<672`mD5l!|!7 zs%D`&KSSb>1eI?_AKJ=r=O-lrNqbDpqIk0 zfo&T5Sn>E_n)yMg#1hg;VfaK|BP}=@TM#`-rr0wlwQw>u6JB^YA)k^OdYPc@nCm>* z2DhL*cz6$bUfTqr1Cvc~W^+}~PZ{{=iDo7$I-#D1n-JGr9f%QoaMcqyMH0R26ILu;6!enXiRbDscoAv(%A7fVg&#WH5*%H?*8+f#CPdQ$Tr29GCZtnY;RWD*%q;X!`-7B`OfZg6}I5g|7}>ZaOz+f7@fSG0FP86u#|nrqiYbX0}vlDN5nJ)p#1Yj8eMimIxS zHAyMuEK8OooQP7A+wH0j2G){Q4UC*p5|P3sC3BL*>qX7**tVk```(G^vR(@3>$R9R ztDVf$0ds=6+rI5bYhZ{|5iBL8R3ZVS%tF>01eCbnZadAW?+^;$Z%&tN~8t= zDHUzXL^=9jXYLMF+xGtBe*fHRGk$x!{NtBjuFFbp46ggmVyv~9CRbQxH*zC#4jaJ$ zL5Fsm`pM%29m0&j5s0iFjl2z9WJqG%+|1RF-VY)=h=fa$w3Z8S?b^H6+N-&lnZ)E| zfItEQCy6kw!baThIw1rM4rX^B6PIj!vp@5V?he<}4UR7!)rB)#a?%Y$ zU^<%u5=9B%GY|OU{}GhIsrm{}7WukSOEIwNAS8n_7^3(P@XU{mywh0B$=$rLkLSft zI7RgIP=hg`@lK!j{Uir{JfOHQBSX}goYUjcY%4&KodeTuoz1i5W zbF-*Z_&R4fF`UOfWOjI*9m%z=zwWiR2=?=_529n>gM{?lHq>Aa!#(_iXaXY-76k0jk{b z?3sc1Tz^Sx2HqSxvlD)gdZKZ^V}m@oH}*j_4$Od)lzu8x2S$s~#m;(xK`otZ6Cq&E zIgHOIIGOhn4`4Lxo+~nT4dy)z5M`K+mFO8g1Q%$SF&r=L;$t5l zWwWe$lG0FhbEtXiIGSzUw`yCptx+++Oq|?nZLz4rVy4W*2Sj)&IWf0d zQ_5>8#IjyibE~cA%t>(c+M6zyW#9I?9RPwnNU2<}$FgJ=S@Ke_^x9q3;7sYu_jj}Q z@#FsG>pL+X``-6$`}ok_F1Ih=|MvIW?XrD-{{H(PWxaj>+ZUq2y^+vzEA^^ns>|VEaqTu61QW4ok9}iiW7ZbHB!0$`27@tk zyorbju3e>MGjsJ^GT_>~xhKY`5*P-DQWA>spsksz_GYR8XOY%w65*74ZHd`|roALK zOXSqDn|tK&m?^JIQx!3_Kyk?&PQ2zsz>)+`mo+C&3@#<<(P8v}6TrZWl5WA~;DBQ&?vvGp626gz_RuJu!c|=Cve-Q$*|5!Q7i!5+WQ& z6G@4^L!&xpT}$t#)yy<8g9FAm5mHJ0AYnEl24&$|+jbm39{b*GS+8H-ZrAm)Mwk7IInQF=vpN8GyPYdXxa162YNrW@bu5&lCM>Fr8#rwV)$T zo@;dQA6|l|VdqI*gdNiemI)^w3X^XySri{40eYzt9qMg@MZIuftO1XV5RGG=!kggm zf(r1_pbH^k3AXGM>{P)ShtVJaa?4Ke)&YlU#ab1>=bhYfW^f=f+Kh z8Y5I)cuD=gj1Q$#O*Zis_(jq-*J7MNVv1cwJPw1;KrN0eCSJpYJts&G<1kJzp7lNh z0uJ{0gqY*+eFkgOyqpNgJ-X(MijqigV*Y9dCE(~J={_F#c|DQ3j^t}3cFi%u=m`b& z9K(Fx$a&z-yPOAN!sAm*a1x?)HkXXk@qD~-d*gJES7Jv6e`n@Vh1$Ed>aC$Gx|0)2 z%GbBIuiw6X{qpVY%h$3NN@5`IuGX#V*t-q_Clau5ViNAv7!YPvZN1mpTI~iW=EUvx z4>U**Hl` z#L_$5Az8Mg7RqoJ;#?9ZHU*POqFQS%g&4inUTet(X502)$g+^b>rsU{m*S4rnyZ@` zCjl_nF>@9%J%egQ+l3(EBE)tlGbM}`rsmE}AXVp5vZ|RY3ukzcD?~V&MyzM<+Fh6& z5pKCTdhc!yN^u!Nl9F`QR2GLr`{fd;A|j&2;Ovgvnr}xnH}llo)cmMcTW1y)&Phs2 ziN)OC-j-69vgA_MR@GpwTUA3hH;XAx+VyCi%!ujjcHzV~br^CMb`qzmNG#1Yt=zgX zz(Og>CFd?KMAwoM!Bks!i85G`Wy$8Mt_1=!xFr#7%8Zm4B0UM3_NH(zg=;hInh26b zp<$RDezf}W*t#L7)`Z6hz6n8Q za6Ie92CxGVvm;#*b48imp$0~XBZDKGwkLQxx7zlG9VAI|PGv1uV(Qk_wD#Rh&5Qv; zbWIU+(@`lC7K@08g`-wAu5FM_KCSgMk0&Y_9Zh}W0E@OwJ}H@}xQd4Se|Q+5OE5gH zkJ>;RmN`7%Nt6r{=0q#>baPK%H2kR#*yMh`^|b0f`L;8G^ECRC4Z>v126l-*pOo3S zY7e|M%Zkj;DP1RH5yy03g;V`KR16+uHt$P4oOw@>RCDWIj@NO~& 
zBOyEal}J;L|9(lJkx!&DA=lu9&QI|9AvRZ+M&k^^b2TuX3Ct;aRU61c4H5wfxidKo z0N5 zv3?lab~b;Q?cr=RbH!QHX|HYdX>6?*lJW$2m_j&8VLCC-)xZga5qXfG+?)?WII1-K zd<7H{!5KMCWx$h|_1VSGXEW=8I?jXWC+CFu6ejp^jJX~5(s+glh-nfu>vzKaL^x%R zD4mOXAR5&l!4A$Ibv_iQv367L7MB-GMU$zTd0wUQbpW|VAL?Nea9WUfYX|^+^oNcX zYjf*Pn2R90Bf<-va1=p@1ssrVY-D#I;oKzj&&LEf&3SNh|4hvKgtvjk=PJj|j;osA z5*c(RVM-#x%IqSND4FE6Ue?QHy{?zrdVO1$>y?>mYmfWm@wm6+a5YYhSw38n(B;aY zEVA!Ah%-rR7HvRX{n!uU+R64_zkK<=F4v{3X-TI2;g8z3zBP9h=29{k#@K7iqVCjM za~SQE5_dC?@kAPvg_tr+l4V)po^rX~u98aA+V2g{m+PwP$KIGsyJ<6!)Y`q72yt2S zvRtokX#J1vry0F{`Q>s+E_&=ukDgK@@%=dJQQdL5T$iON`tdjjsjLF1yOUF1GCR3Z zs|xa*OD@M^2_RO#EIFmrTPF}*vMK7(oJ?C!l=&iR*7m-v3zKU%icD*dDNZ={tyNue z0<8D$-OJ^|%(WhHoVj~5Q*(`csIm5HZqcFFh9q&s^+ZrJ0*I;ikrPriVoF8aF@@@+ z3Wf<&S2Hzcs$ETea&Kg&%~YF#AR-R958hfms#3^%4@%>@F5t56Js8uY>!tARx*WCD zss_~F*BldO$;?YhESy=GdCkk^vaaizmZBHaZmstYtDgsfm{419M^Bl%k*m`JVJ1}r zk&-YIdY0s^6(K^>+Lp{BOthH0s|#}i!4v{*)U`@tI3?Cf5SB}J?X5K;BDW5f!&FjA zLhXp~U&B~TfxQ7piAALL-g;_PO4!N0VTzx8 zn%SYW17bv>2lwXjD|4DSX4F5DMOuPjQwB4Dln-!5GbM)UPX}ywfG1~06zsqHslGm` zJ~y0AUC1X0Ju}aN`W!eLz%Xw^2pmka=un86M{-r}PQ>0UFhXppY+* zkM~(XV>1bzPB=WNFZj&YVk!py0wm^D$7@gU7mQ-+9aA7VP~DR=gAeibxNQfGBBXKo(}e*u!8;a$Xc)V>Q2>g95jtzO2g-GNQe5-y z$Muh7=JQ(&QDbbg5PXCpLx42ajOZ*W8b#eR5k9aS-*0*=ixne}lp{3O~2 zwhk2RQ{FJF%vggs@3XYk<}2g!Bls2wQlokmQCr$+?i_Q4@{>t#J}H({7LLQCmxt*6Zc6UM}TwDVIg^ zBkgJpt{`+(-K^esV!B@1ejF)DCSKPpSxPR=8!0iwi7v}h)+G=;b2^U0kD7C4PRp`1 zRXdECQj%IJElWG9cbAmPdSMoT>VDt%y)5N&y=L}pe-NZ?RdR}cw%R&@Z*NOZskhcz zC#Rfe!yk7}Q5j`KESW_GZmAS$&D2Rmge9f2u5c^s>WX7KRF#ONq;*a8sHU280=yfT zkaD)xOHO7kM&7{-dhcy-ODRO&o0=LsdRJl)xc9EzKp+uo-tBaIP_<#CiJf4*!;P4% zX>i}F-Q1UDB@#6=?L+`r*g<3#Bjusyy&F53nAeWzWZ|AAkl(TzLruoEorq1d=>j8yI(zTNjFd@Sh)LbhxKh{F zIie-tt$ z?@U5I+wve0vn(Z*?Aq&Yw!1b%v?duO`v|Q=)iWdVi8-c82tI-Wa0!SK5hQo&?luTp zjo*$!DubKpQ6FeK5sNH2r6f6}Rjr$A(;8Lm1b{f?1We2VQ=N7iFQCewcJ&O_&$u5W z14WaJA;PE^7@ZIz)9JJ8*l@BVw>a4kn(c5S+CNKqbc#<-GA(GrP+U%%Kk#l`=HxA= zqaDAa`Q`32@n`O;7WEuv7(YcLpFzCQ91H**Op8zMz@J)wpU>`@aEc|gmwF2dkfA#M zG=QLaK)K9qk!4_^YrKQ!k%Q$gj7Rq7wFAL3Vys{O1>w2ja$I!?#6=v5ZcnY z0RlZtWOTw)cL2kI`uL?!)@@)V3jWVP0zt|tQ4&r0RS-Ygn;~BcEttq!yov{>g`e&L$vFp84EGXAUrKiq5i$0e|N0Pr<{v zRe@1kJz<1LqbE3!hH)i7=Z|8DE}z7YGtnG!JXxIQ4f||M5qA;=jUz%h8K3zcA5|U^ zMVw*LsSfl>M-R??q$%fpNA);OBs*$giRir7S&}(!ocJZTjq$gGoTZng4^odYoej+_ ztxx89q!uP)hrfVV8X*r51^WXd&(r@rhw*u%*f-RUBVRy(M=z52R!5{i3@7Hq0Q4TP zU^JnQ!ofHbJ{2M7UX1@8g2nLMa1Pp!FbX=Nw#DJ*Y7Sx%@n+4v8YwZQB%C>ME}4mI zZT#4`t(uubTdiQO^7?T(z2uo#gr*P|IxD&<04$_g-vFtfp0YpU96 zhwn_3*7A1y`t6rrs~$gp{&?K)y?2-iv5*|?=+;tASabVq%evIo-EdtOE|iy|rk@|5 zsU$dA-wmEqZq3N611q3T>!s|EDxC90?%I}1p|mp5_PMz>gRvx%-0HqPy5t45v9RP+ z>;CvWTv2jjW+!y*a4&0)ZGSwP_MVsAYxmaEn#sx49VU_p1lYQ)&6icx~#il(U1hTln>y6FXGBwIlW$Gpo93cjo#CaXA>AOX{Yk ze$w7N!WXhW_q+=cB}wKErkuzf1z5~`S2bOb>(RTJyQ_kPIEgw+$wDcYWQ2XIFyUxn zn^+|0Qs@oZb$e`*SiQHSFNNyS`)-o?a$N`{iQ%f7E{Tm(JsL4ml3rV3Ia(Fw>#`hs zbpjc~i3ETUND>K&I8vf0D0f$DCW%|GrbbNGJDGRY9s#85z$&V4DqNVDgduXdl(+Z4 zegF3D?N-(Wq}7@#nL|kgf^H^6Nw^_|35h^C-xwFuYG|(ApyVba%}3`%QKB#6<7Yv=5Hs5NSwX0l`q#~D@Q&M*~QPUQE<-p=@ zBomqh?3jAB;V`G|GW7{ELtIQgn5q*djKXM~#@k1eQJuwS7_I8(UC)5r&uI!EqFGcp z?0+Dmz!Q$M#T9u-+Gp=3pYt)p_=e3UfT9_<8G5&FX6mhL^uepLzROmS)9tFCwn@yOat>xQPaR=!;jGv?^7g?c>tHVmJ=oc#2{jE z0+g(Iv!j8$_U_iZb~iQE=$}pmArfL{h=_#VQ^6|nNyE_OT%BiwzzLGr#e8C!*?%~l0Z+se za|+l~Gj_7mg9i-M8i-`R!C!O<1NmX{b~8vBA;cixrbce0Zbv2bRMZ8?4AAomn#Q-# z$?Aw?AKBPbOE`-yhb^NqHIL3KkDvYI-e|(6$=jXK=ox2VQlnFsIHQgk z$eiz=QD&%AXkz&j#>YQ{`5QoXoQ4Uv0_ja|?a7XkpAY3!viMxd2{+@HGXOeKFNm3E zv5%_PZr!Onbx>7nSug97T`esO=W<=SckOMHlp*Y9`+YOhREqVc z>c@|t?pBsX$edMf*RH+RO61F}fFOC%)_dzEFOqV%u2o$%Wn#j9udVl6;l!eDZpuRD 
z)>}`Kn8nqJLBbB_B-h&w!rBjFDW#bErp^w^Y+adHO-(K3Z0e?(M67G8?RveKSywH@ z1hn4G9F(**Rd++q8BV=7aw3vIZ>C1%fV9@bJu$_|YHzK%lL&P+xPjHpVzx|I)!`EW zHX><9b5{ooizL?WoH+VVTrP{VofA79q)tR8Lat6ZiYbv36B7};u|q+WEvqS!_tl%~ zkk}J}6NreCJA}Dw&#ADmFkddYECpZ%U{1`p>st4t9@Y;d;=;?aMReEo9c^>Y2XEF~xMrUWH#Q~s+A9zA2b4WG*jVvsU8rIM&9b+>MI02W{M z%yQV6F2FQed(A)+6W;Xj6iP&tn1o0YCl+>iLxX!@eK_&3wjn|yzZXLYJciyrini}lnAR0AbYW} zud)%Mr{T3RMT}09Ia^$vVivqwdrSihcMoNHS0BbxwX3)8UA4#1v8aX(CeZ(qMIFo* z0q}Div`?=-VfV1&K1rad4;00LgYh6IvzUDfdb?3 z4Cg*m$%93~3#CPKa}G5ZfY3Wl9CQvy8ua4ex8NKV=3|S_9QOF`u_e=qL0(+@P;pJ* zf4)7wo<^0%*y$A6U7w*8oe*TelW8jExx|<~H~(g82r$E@6Yjh~!m-7kEx8B9ZRe!C z$-{z(Mrb)B&H=n=0+)HAbaqgC7C$^!$+Ypk`ocN+91^G@$vJs9eb@uZHIYpNAIx?eX(|xn941{q}Zydw+YsU2osN{!;5c zdX3h7yFX$Z)uSa&yb|S9)|@4+*EQ#C-Ac(Rr%;Lrvj}SIx9j!#=U5;Aoy<2N)jZza?&FPAGBw)?iO%jI%4hSt{h&EVy- zs;PN{nQCvf?T?M98&R*VkJ3@9t--A<#mtCMmfU++rdmH$ZCx*>YTYEWrKGicJ(9#^ zq>d5#Lvlhmpp2;B$hjvv}H3LYE zl5;B1Zfc!HyppL(Vt1(alyk2~t(BP)CoEJS9R#6ls_RmkDjSv}AVG*z&77&#*puWj zT9=SwAfGHNP3Bj{myeUAml%v;@SyJwM<3$+MYwLu>l$cY>y{lxFB&JT{PEfNXh!G&m zDxAoydvwCH?&#JH#A-@_b|rUWNMcnRRU1iCE=SX4xgcFH>vFrULYiQi*gA_j6NHH2 znVH~j(Ir`}18lu@fO1KRB}qxCxUFW@tQyqa#Bo+PA~fR@^AK@p)TUJ22Pfx~u%)2< z(!z_Ao2!v&>pr#EFbXZE8A43!+SSyEqxm)wn3D*D2+mAQ3?Vn_iiVk^B^siW6NF)? z)J~FWsE>wuj{lrZ6^1nqbQw0+M|X(HR5=Zm+khP>c6j2nSEK7VCx@MIFdV#}0vm+Q zG|iZ?&zZ|+@k{BX8A+(F|napA1v4Dh{C;$de?CL zfhr={j#5pyxplR`YF$SnHF|~x-!>%kV;xUUc7EMh>aj56J@Bb@i+B19tek%|5ykW} zJka5U9@Fm)LIyFeZ2%+yPp5VIJbof+caP3z5F#RGiu!#L5q9~@H4Jvwhs%yZ?LM}4 z?3t#jBl&n93xdb6m{7$X1=f)0tCivI$flZ7%@H*27VA9}tFsZ0{Oiq?-Ec7hC3Apv-a^|4W0mIL~opWcO z4{sRk0ndTadTvrn@S$~xB8hp*;%i^fT>NeP|B$?4s>#N-3kyXagd(1uOPJgd)cts8 z+>1{LY@kHim!1lP_?k^+X805r84og|+9%kZW@08q z&+;(vJl(NuBh5Q7{BvDSUhTP@|MuicJLUD4F5q8U0si{2%Vo{w%llU%u#j0lw#R+@ynlQ|Rw5v&Y2UYpB-Yy9ky2{4 zw`NNznxyyk_Wtg~?P%V$)drSzy@1KB!)dvey;iO2YU*Z`N?DB58ONi396z<|Wm&2! 
[GIT binary patch literal data omitted — base85-encoded image payload for the binary files added by this patch]
z+{`o;mKeBcdz;4EJA!bxPg0Mbm`WEDEPlyJVd|3EH={mr8{Y{uM z`0@4=;&ucFRvtI;kDoqpoht=2Gh#$$R3PG}sA}_s(4h!c1Xd{><_UVUm~;>1 z4mA-Pm`GpNCMt$hOCe?hXiM*^T&Mfv@o?Dn-mllj%=eFXMELr0=_1Fwy_%>2pDv&v zpi0IBYG#Xw;r{O7yKlbz)8GE;mw)n$ckkaFk9#B%(P^4YWxX_0+aG4G(>%@l<8i+~ zlv>MF9Hi$YQvd{?ULSEs?}{DiT1tdY{$lr5aGn?yYwpwwBAS;;5@OEX4Xj3ln;&Fm z(ajwZl^_S?q&t)+F8p=JYX;s>kt`$xXeiKByb4>(Dc2F6GpC3MqUuJ(JWLjt9TAC8 zEgfjYNDYoYh#OAKUxWATH3BmRLe{RO6y}0}ZCz1}ypJlYM{}e}Z)@*Ox=8C@D8{8= zDJbG?_RI_oxF9I49f%RZv-3x7{@JIekFU?CcX#*8 zx)9?$&)3VPwcff^wRtzMpO)U6gHmGTT8zAil-5hD%ncf#F_R%FijxHH&o~d~s>uf< z0+MelA`+5CcQzy@pDw8oncZQ4v)DyF@`xznTrUDR!GT^Uq+*0@f=Z-5bPJi7N}Y(O z`FJSxSnBTo@PGR6|KczI8Yk=`-7^aT5v&vjB&s8dl1Z|0)Hk5ya%~pT(wi?tojfx& z6H}3-&%*2)c$-$U;Km3E(CbDixc2c+VV|quD+M>!BKmlRs`rW@EYT{9ynMkYXQ?*{ zk-jX;vRq$ZU%xy*uj}c{m*>x)K7Vnm+R77w-~*u-pc}U%Vg}m znF_?DRy05qFc2gn!7xMW4S+&jk9>Tw^nZi=2nq!;Nk$+*4VUi5b8_n1BD>L_7;GfA z4A=f9EV|KMfM7=C4M7oseMA9oP}00wNL4w#JKv~G#=cP_Zq^*@5RKc88c*~V3L9|o zKRN(_zaK^==7=q*;Se-W!p3o-;bb~}6;TLuzQ=NwV6ehwmLShCt`h^POf}1AAcj?> zOB{)p!)*It2A^<+o~0ML?F5b=(p}w9AO!ar26_NKBTugn`wq0(fR%~Sg4@jn9U^7{QiDJ>NHI3W z2=T--rtpTmIy@ubfGAn;o5W-6nn#K#ST6&}IZw{Or3j694kSH0T#^CsQmWj#0$^0! z2iG^~WlztEqk99*oM0L!D~`@cPv2haUSh&W0~pN&ZczN~g||P!kVC4waTG@yL_|z+ zb0OloBU<5$60JXJb7%uQV=RZS3vER{!Q6~b97(8tmxC;{ScvV`EnhtV^er3bwR28K z09*8qMbE@iE(Fa7mWR3t&64I~Z4J8tVOJyKc`C$Y2FtRxrE#71yWQPkZ=%=hGSAc9 zVK1WVr7_~}FpF7R<+?1F%lUdex3)Sy>DN|D-R<`G5BKlCdHepm4{zQ)&hrd_rpj0i ztV^5biHH<|u#|a<5sl=7<*7`B0HWS0Ly5@HK-3IW)KsNsiZo#*Mns0 zE{?7km?6gyCIthv!sJH8x7^!FxU)_O9`sJ|y=Bq(KVfmjSJk~B6i_#q;p3#2;D;$Q zWPF1|85)=u5_l9#)X)sGDT=qyjO>$<-UoT$ozLDH!XtWk9T5$fzzT!dG)-KJ_IA21 zNHk4Vz|;f*JaJK_x7I{@vj0-a0J)2>5)w(QUd%91)h-=_urcZ4rkM-9zP$7%cgKgf zZ{Pnf|KZ<*fT~{37eFN7bzPVcOkch{AMftl+NB8s1JKeJL@dmUEDYRQBLGGg6Xe3c zrUHg)pq>}$tz%T3KCmnxcFhPL>tQAWA`%aHsZ}OqP&GBJ#3IIwfs74A6p#Uwh`J~M zih=?PAeE_1^X_omA0CeP_s9FkX?M80f4IAUeEa_0et%!5J=F4_{{C;uR1wFN4?;99 z%tTzGe3Uc4EV`*dHsDPP5o2#L98I$jjtq@L%d#wMZ+==80Uc{vvN&$=GOyj85z>)5tO_1iLPRhM=?0HJxhbImWc@m^`))4@ z!va#8t~inz+prkSF|ZKc7`+>Z9HIv=ZhROTc(acQ5X)<3SDRzRL#hsgZLrBcGdhgCVm7i48NJ817A#mFo?kb5iQ8T z2_O)au+gRf>^?gycU4ubh-TP9!ea(Nu!307CdNuQv;UuJfkIFj3PNRM^wC?E=OpRe z0jFdC6BCWiwp(kQj*Q)IEW}8(%_~XlV?&JN=TUaJ#j63bk?#nL1_Doxb70N9qx=}S zY31j0NSxTPxQL6+2ofhKQll}0a$EMmTLU>Eq9(e}r_UxdHxMrYuMc~OE;|6jaBj@j z&|)rXj9c#JCdZcVOd@$CqvunN1)-bHGut7|6o}0wfXIhK+?WG&m=L>m z6Jul`F*DTYJc)sT&8am)M89}9uhw*K&A_&vp6SrU(MUffwPO?}Z<8<}&YF5A0vZUo z?H*8Tw;EEsj)0HQMQ=a<)4 zGkgE$Js5oa{BbwWtu<34Vy@+UzO<$Hu9tQB@ZnoS=u4Z9$7$x5lYkl{YgZ!!v}IkV zQW4SAm{`dpp$LS`q-un091}ClsW?PsA|YV!Zh*BG^8j}?q4afQ=4i>Qes;io@R*t@ z)CtwFiz3r}I6S=l=DT11`onkMzj^!N;o;rk?tXVXPSafKgv4B#v6NbX$a09gXQHJP zff;D`0?Cn^X7RxYZu=a*IhbSrJ7$nFd1@XKAtN&)mNL}ytwIYu6TLyq)8QiZZS>dm zyh0!69!iGLL%6tM&*8_0sH>`W>(YCZ>$>#bmt|R7dwqF5onD_`UO#>M^zqZ@pMU=N z`RU8k)6>h#dATkk-E_;~_WN-~rv2FpBO;5~H$!kjie7Q)Is4AG<8W#s zH6?131GPCDfbRWxJjKu`$7nem9j$W^LBi&N)3koyV}&uY>ntpSp+dBxG#}v|VLuG5 z3uI2EGPE+CL;NESslK9DqG2tmO_(BI!qks6%zn?)=U13g)q zy;wO*EI4wuVprlP&R|7|%x#iaayz_>xo7|urHEngL!iP17*87nO3(@z9109tG&ngV zP(}CFQ~@wkI)IQ+3<(GbZR7vgFNncmcy~1NIeLa@q)|lTQF%2WudOaQRJ;%tKtiZY z6JsTqm?j2Ca!gj4n4ofaHqrHVV?KR}vcF4!4N_C@J?ZPEiS?W`_al~QB;GUpuS^dZ zr~D@Jx&f8Miv~DGY31*Rzp>F{d5N?^;SAuA&J+;;<(OlbceDZ0ajzIbB^W#+CXf%f zlwq;x;$s6SpU)FQGhp06jiJa8Z?A{o5&p4WQDy{~-cO=8M+pGiTHEFo z45$jWWn94)W!rX6)7jnDB>BkA&$3(`Pu$kL3lG7G1n<*D6ts1d?rmI9M8%+Y6)-k4 z#WGheb+x3yXt*L&ZqP7uqVc4UFT_9*7fD(_4DUXpFVy* zT^3?Sfa|g_(DCkg*zdmi@b3Nl_jiZ;b(aX85>-BuWga5Ypp@nwCmx zdbwPPs7`fV*7XpE>}Oi)W*WWg~*0b|Y;qR&6|>_#RtKr}>V zWFlZfz(U?8U?_@EYs{o#qG&+kl(vE?KtbTC&eOYZzx%6y`LF)+U;NkK|H-fRclULg 
zkP0Gs9NhnpV{UHT*XG~BU|KBfA|fh8U4du9X<(`#*gR{qyGM|3M~s;w!PT+ny*bc| zuK|%gZ0fB;kcldec1HygYyY z{Q2Y0pMU)M=TA>hAAkP*<;(Nsa#@xpUCab-`KDNw?sp?WUU~RX!`Qp|06+t$S?5QI7K@sEgJ$Ka3Rqp>|# zl`XiIF%AXH0JAG$u*Cs6{YqOHYJ`B&0y|zKFbJ8o&Ct5(VdSv0`QVA~Ab%G7orM$W z)b%GLNUlQ8Mi>SA*WIi-}rW)l5aPt-Y)E4$?tX zn?eT!W(J@_)9`1W{rr(o;adp85x?dRQoe>fZ;9`6k7a$c6}_3n750RjjD4hc;}0WCoU6}Giof)#g|BIL?AJcoj$V5L+h5D`Noz%Jt1QIV`85_qmK zc0%po&G-u#n2Lcpj}yFw7by^9x*`Z>(HSlZ{1TJJ7Na4kDuTCERw@hz+P%!gS=AWB z8sV979>>@cX4k)nq}kO{)v&}2=TW5y;V8JoQt$wAfJ0Cmml6?}P+?M{Qc9UrWYx&47Rl46GQAhDT<5|ekfGbSeH z1T^G%OP(lA$m$La7Xu?CiSQXi=G$}?0PrSTqbgYqjTlR*RCZj-Zbwvz%5J~Ee|&iR z_M2b*=Fk4>U;b~t|MhR@!_NC<*=EZ&y<_oR1gL_$$g~w#3|^F@W{gm#ctKDe7O*NC zhHQ8)g;G*(oB|aYLAo@Hc9ZZDN(E!|Erf62jpBfbooFNCm`0Z)=_p5_YJ_X!<5f(g zcah8Gd_JAK!g5*9ua{4spT0al|MZ8Se*EdjpFV&7`R7khPhT#}DqVW-u{7QvXJmA_ zXO$6;VYV&ZP>6wosJ&a_VFUtDWEwuR_rn_jj5tvcBv_n>yfk<_49na{a}XDSFZZZD z(*!c5dt(QaZ19pyNE*-2v$K6^ZXl^~9aX52s+rmJk!Kh5d@z6L<3zj(LohmMrex6r z5B%8sDDN~S;5kc?7_&{L_oZ;f!qGbd2a@!`!8asJ0x~lLjVaq;J}K3eO{jtazc&uV z+03!VnL`=yWy5)vVAT;8;O&a{(=c1XdZDu@d3Mg7Dh4BFi8KST zK*+6MNb|9x+&C0m1hXRQ9r7HU)eUi%TM0o3<1G>j>9myEyn)>BB&Xv1kCO>0A{e+H zG877stmE(>!UM#?+XYh}BKx(2X!*PR)!-~47ia1;cfb#1G|L{^a|(tWaaAtIz&_lM)#x9{&B@4ew@1)Qb| zXo#gw)8V)WrRV48Ww}%;ZCTH4-QVx~(%N;oTrb3B-tQkD?q6SCuj^&1b(*H*!}0Cg zx8MEp`w#Emy?t}PKg_+WnE+Z}*L7Vz^3t2sTAlUj>$(!VzNNy^>|QkW+L$s<8-E!p z0lO$MA=xToYGk%_MMOmHs?w#e%^N6CVKP!t4{tJ)0cej9Jf-wyUy{Ua5K%R>Fwfxj zHl0_^y+tImuBt|;O_>9C=>qxm zHDAVYe~3h&NL0A)<{g)MU9PRIU8K|sk@=2b$V6C57gs^5YFMHcL#!L1inVq1&SC;) z>IDJjp+CPpX^H?mP1A1v`Qy(f<|T{st_mnu2C}xUU0N;We0{yUzyJ7h1^}+5b#dd{ zyKt$1434D^k$y}5gQEVY(WORe|!_s6@t{qDHGziYk0g2+sn&J$YjE9Hn`(Qz=OVUAz~ z0WAH^#C4nQ`bV1Gb!EIbBfI{sb9NG2HTV-t$w)HIO>(vr36o&yVn*a8V-Ar6p4oX! zNXSG{!J3>AVy*M;0IkdAdJ>dtzkYuH^uxy={_x|EKmPRb%a`Y6SsjA<;;2|w0tpy6 zARH&ahJ}Yg2}CEy;NCPG6Gua+>Br_0(?(=vf{kAW!(AWBQb00n7x0CKHbDUO&Mw*7 zAD}0p00!B=O;QzG?v@1RNQi=%@lFk{0prJR9lS?fLcqX{gK$b~Fc4B!ML`&nyxXyG zmUhh?%xUPe{EdefqKj8t%mBktwsDP6cxHg?@OoHx8(W0wx`f_Ise@#Ef}a~+MEV?o zV8e;IVW%vOoTW3AW_low@ijoqiuL`^y^R_dV5g)(5g;pcVpxlBhUev=iF3J(KnR4u zfR$)MbK;EvDx;58m>3wLuz68xM&Uzxaa)-{+3`?0gD(Lx1c^Fa0=UUZ^LLC8{kUHK zG%<@`a3nZdDwl|e`r-GbFRo5(P-hALVs$NO^Q4VL^{beziS zZs`gJ%7%bMRJ?1f+wjnB5m!&QJCGL=8YnTa6eD#?)y!}U+6>(t>U12;IDn})HyI3E zOw2q@bFI_)d|H;PbWt&4QUgNf3MRnlIIWu@6{ioKRWudrU4oKP6$R-E#Ol=$W~x?p z6$o3GJy$b*dU-CDh)BAc!Md)pde26?oUht-xm*wX!}ETBdOaWZg^^qDb>2}a?)WfM zl#zN#B>-|EDqgtYLnGBNh6*SEU@5$=D^djm#6nyE$%`JSn3`2)Rpr8tHy@Ip+U4J&oaDI~|XH)qj?xdoq zZs?J=8Mu-(YY$b^v76VnF?qQ0@b~u*33#|Xx~dd^x%i)a)i?C%8*M1yQO{gnZjH6E zsLqTb^e6wsJfs{WOpwK!a!l7H%EHv7#xV35flQ)+X>UY8rPa1VB~+LK(y+*tbHIqK zVpiQP3X>rqU5_jR3$37A*=z1^{!Ka>D*h>9J2$t$QPt;1A%J`dHWToepG?GZ zKFyEEe_VLgpOZz0Jh;Unx9A2zB16JoAW>x{Ot?hnzdpkeop~cDHyUQi{>DIpkDq$6 z5Q(n>MZ#Z_YG-%c@RPn~d^P`f`nPdR8m(^CLw`F3lc5wA3eFK;eN)oYl2ud1mqD$3 zfrDz4e`BpAK(7S&P;_`M@O0pE@;G7{G}si3+C$;$Sy+sv#G|n(v&_tL@!(1j^gwbR zDf!7-js**2bWG~E%k_{Orb`(Gx;&+Z7}=N@@;;n#h032XED8mN`&$xCH~LRffx>RI z@t4T>{%Y3oJ(Fap41M71*4%fkoE`q9Z$t@MdwYt%>}*_^U!tuHAb zKOi>{ljl%;ql!5>`m_D@afHOr`?oJ~)=c!OXLPR?j=o1Je?6vS z>GrECfzpl$WZPDf13nc(y1u@=g1(^CCEqKVbjYMO3#eO>yZjGca}5m6x=DTJ+nt^5 zkbj1U5A>qDpTDh*`F3|TXlHe{;-QI=nVT$&5Hm>J*6S3lLTp{9cilxmR<Mj0s2*Jx@8xgx7<7yXwSO`?u~0|V<0-eL*~ zJ&^1W*Rgr5+&9|zKc6+Wu;H+k5p3jIk7B9%ts#S{1OsLKd1!V3M)XQ|6d8!Ol#eVR z=~E_dX=ha>`0oXkd$v2w#PU5?&IdTW>bzy`oXaYuVfbEc+N>GL(a5+DE+`WnaG5qd90WWR6xAQOqBYvd{+KV=S&*2YK`6`x2+ z!KR#!N{urZ)XY-kEMLTuFDFsAV&<({c-r6wJg6@t{STA`42ooJ|?frpBq{(gTF=%xeq)9Y`5a1~fC zApUewNgFDXugZ2@M;P-=FKki$;|IY8o%M^-BD<5rf8yfP((|NHh>T1W-df@G>m8Nz 
zy#sXNDc?pu3!J7VZywuSV@aO1zIiklGfct9iq9fD2b-oXk?r?%HI~R8^EUlMdYMMt zQY!q7?tTWVdt>}>1u{^UvSf}RO9pz4`^mPrb-#Ssk>pgSHPv!kk0=OcFTx3euC@&( z57+sp<Nnd$~_i<(RJdNv0LQOLzbNS1l28 zxbRr?8=bxnr>-Lyqj95QyLAvg$yc?&X6kZ%a_qn;)_{E0u`PxHmodNhzQUdCu} zfCQ&?V8?or2-@%Ff$;2l+fLo=bsI|6alJ zKrOBk7@l#X5KG!}P)Hm+9$K`**&~E0RDH(5qxhbDZ zwXU}gMy0?*>ko`9@b2`VbW+d7i*qZuXYjWyuiRc8FykJ2qz#FBy4zVgHcucein5b~ z26F%VHn5YucxD=(<8^{uGMiH+NqX+OT)}W!%6sqS$H-KG*S+ELLFP)7bZ04}ycN+D zcPy$hTw4KXB6^M2R#VU2LPeDWnfUDHBUAn4PCPofY|n2u011{^{fxcZf9LZ=d$kX59l z_C^g=F@xHlgsv&@XVhYFcSV%KA_T_Xt}|DA*CxbRW{)-(tKCglFQz!ksOnHLnmQ|2 z@gDqro}paA|BEash%_2x9lcpYq>V*%K=5Ct5d-*qywnpLa>QDsI!}*8Nl&Vdno2BD z9#5`mp(6qN&GqSS=ZA7udxyQi9rDI641{u2rJ--aP0;h@<%35@XqKV{^AU;tMVvNf z3QFG_MpovXoFzk}K&=z3=qLnoJdz(K2$ErlmEmJDSoMVfsVJlRTxJ7|G5xvvS|=rp zopZUo0desX;@@k$>>b)}R-XARqm7Y*)krUe<2R&HD^Xrbz)QyvP!$QuBAWvK8}tWR z^&cN^vA*Dc+b`SGA))Q!n<`hL)BN;5Vj+_~g`2(qfL{C4@ykW=e+mETQ-~1enPW`V zN?jUb&JPXj%OoxCWGu}MNN2vRPO)6MQauNLG9aC!N;DS^*%No+<|FILbB)^kO!jF< zn(7?0dC&T0(DYsoZ{UXioOIx-C|+LuG8w04tTn@0cb+L?e8lvH7gVg2BP}Y z5zu=R3+c&IGuMR=jbJ)gf~pucRSb24NHWGCjN9uxzzSP)`Bh3W;FN>_hbxwws*}a{5)uBKM0-VWmFfIxKN7gz_w0xrwLWKjG_2 z{WNi2;oQ{phqJ~)FYIBBF?msBIF4#_!lQWtwy@M?OF-eRW2u7CqPSrd6|Dxoor8i6 z1#kJEu2G%@vEwX>R{^We2mC^V{JU1fR84;+-Om`zg{>z{`t+vxP%!1o~`+q00Y;chN$ZZVc;`dKqM$m~wI^$8SFFrrKn@4O24Ebn%wM03Z=NY`8 zOOlHTMV<^Y?+Q_ThtrBJn#STYQC%1VInHf zj2nRkC$fs-6CXoyhUYmo$K-YUjxiz=TV67^ZXPPjDzbX#DC?9l61dACln1RMHtkWT zyM%Nk55EmqCS?oX@Lc`LxXzZ+ho&lZa%FDj#$_e*@X(uIdFZ~f#!r4F2}=1dKd@X; z-neErf~$s2!XXvSg>g89@qIPRLY6)}@QH+xKP}52XbyBgFHA1jylp^ePFRo3$V5_W zCu9f7)6r9SA!^Jm9T+MU?HrBr=}v13lwCo4sGpQUJhZlSHLVjKw=$SgE@+bh@2Sq# zyotX)yZH8*s#ae4p7jO(3^vFBx)CO=Qwst!bed%9obsCFb7NPDMECrcb9*Y z1jf@RY=OXcUq^K42Wjz^gU=OgB~Q2~0Uj~-px$kDCOK@52tU0Az$<~KqbN-6^3R(-?MoVa(|vn!dn}Z zhp_abtTB;r<#^^&ALzJu)b(i+Imx;IEr5KF32S1i?;=WVOBC||{;k15jBpI47*Kwm zgTyMnp;wJe5BJW^^Sg2-?_KJ5>5oIaVUP zDLfbBfU`=oQ9!DVx)1jb4~ z(iQGG9Rc5jZVn|GehU4`KQ!n*JDOcc9&}G{>H`zM4O3d5cTi3i(BVO+g0_Nx?z2o5s=Pjfa>S1T!UU z4`_|q$)WmOogQ5sBAROJW;}y?`q{g?YA0Jwa(o0v&0MOuRi}Po*90M>fdLj4~upZ4zot4Zbp<`f%$MZvU(Jdf+>_bOYq zB-0)j8bOd@I9nXib_aWMiR|}R;mLd`iX5X3l8V{>hVDeq`&5ww^e_l%EJ~)CP03tF zmotncjyx(_s~+@iGNhK_Mykyu1{Z+emT9NFH!m%QD}imNp*FxmNGibZ#TT4{&& zK1RgvtCTDUm~dNJe~!5%`+IfAgc+)4tP_I_53?m*16j-3ezJK3%IS3hR3*`}RswX+ z!Q@U$;mv-#LG`t#i5$Q%9}Le~yzPs|=4hL!h7DH7-hH zJ*RPY+9Fo@~VZdOtVYB*+Hi?qP8UXFwPfk{Ttm&qCmzZHi?jm9DF%c zOGKMgo46}tc)U8=`<7zeRsyKQ(ejr#o!>&O6-I&&@Irc%v|_iy$RU6GVFb{XSDOy)am-et^?;l zU2rex+$J#R1E#?Ny=8Omw*&b3P(m2gvs$NXVs({`P|wyd6R}vV-0EhqbaS!2eW)gG zGE@0DHkMo{&T7s|oZX>|kWwDt^}x%=mC%$fypjbjJ~iXRXA9pqe{RRRV`5^)+LK*3 zH^uupL;XDO{&uWUA8sVEKS~%;^8zTRm&`D7KW7yqqnZ|p_gNii)M{I|=x)smC|Ax; z%edVEo;$A2RO?Im094ia!POwK(cS(xN1UQE!`tbx&v9|&7u*vBx3%H@(4-`O2y;*_6gTx2MIJy#E2>A z^V-!e3+JE1UNbHR&8rg3=b;B{?e5+42sniPC!@@HqdHSlfZK@GfD4T@Ogo0i=3lgg z(vAiR-34=ZXR7Tir?-kShxhoGEo!RwIxNa>JJ=Z4F>=79cw8UUg*hk!tetq4P93Dp zDV9a{N#0s7a{DoO$`YsSjo!ewswFj*NWQ!^h9+i2THyZB{ydDl!w$_PDDZ%^Ic&8RatGWEeQRUTkm*R8eGG;v zH?B=%=vN;7&tBA=qZwtNQ+4j$Yig=Cqh_Yc-JtULynFF-foo~dfcVl&RJQy4Re3>F ztsKHy^L=%7Kj6LDjJXdWzoa?6eMDhk9cbb}1Q5qM;nb(YTa+xAuZOi}>nHc;l`dkX zjjQ-BoSolG{ z&y5SCqRZcq>?7Ka#=r4U(t-Pvbc?rjy5XHG&K#So_)e4^}~b9jJjye>fIV;{58=X-O3gP1?q z*xtXt-&k|K9YZ(BeVy)KaQozk&ttw{D_Jy7#HVJ-U zaHkZabUnuVH2CN+F!)!Pesh^h4J-?MF~?rc0J2-|AWR`?0vsFQ2*`08jqtxi7EWj1 zeL5ky_{aFYnU2~uiwFgz#edX$g8-T=ssIiIc((<>_&fON`trJy6FXUqYx`47fwatP zvI`4=wy8-~l)PI@9)UNvR!(UMg~Yk#Q`WM7v~?1e@~>U8QSo}iq!DNV=gByp@x>*B z?86(cHTiGyAaUktK?O1P^Y>n_NeS|+xp-e|rBpZyzK|EJ za#E~SRTM|>{I0|>*twtm)NYyKo8K3HrsEK&W%@2`m9Ae?DO$8#2_=A#=2jry 
zy|?x>albeJh^lXpb81!5O6uO?GIY>?-6Jx^Y@K1LK{eRr7?vjdu2Ca0%Vj08>0JCb z>lcj8_Frm6Gg5y_>Z&nd#l92fOb{#376VKIEMp9G${#l@glFVea<6T4(xrOVW+DYH zia#S4Wg_l@JkCtba$?FB@J9Fd^z?}K)*^HoY0rSEc0bE-X6B)T+us-Y@~Q6NeZx+& z6>s~=o1l>Q4G0~XgN>eCAM3>>Ta5XAfc@qT%JW?iJ*(KTjx&@;T0&rf>h27e>x z|3M+l6d6np$lRnPfTW9Lx))jf>9)_r+hRuPBw8-Cpb(tU`vHSPQ>s4mWp7U+5Xs`t z7cVas1-{-nlCZ2rJ&i7QDroBF`=7ey@#BDg$Dm>PhkV@!y5t}cq_i+-CZdZjLuA@~ zHb?mp6Z-wPSKlJA3{=7KE_tjnnR_heaq`Bi*Hk>{@uV_YJf5E6%*U78uY`}av6_G| zLs9BJ0UMAp7dA2!;briV#)sb z4(X@_VW&zjbvYrj(z@9}zCsSQUVb_hmX}@pvAZEGdCk7d#lUgA7jz+W8R9r<%q0?UvI8;88;kG-rrAV0>!u( zaNGfmE8<@Hts)f?2jgEC`14z$grMj~=TQ+O&I+YGPg)M*l5&#kW&J58i~-` zru-t7{n)EdCL?j?j3M{aoR~1<&9CU3RdyO&$xVipP8TKuduB{Gdco+-*PQll*V z92Q()X}W~AiU!ViqI7wv&LW*57EB{SAS$*qiAZ?3S_Ty*jaSmbyh(8w+%YYbsDjIs z!lkf2+A+5m%%Md>Xlm|ok;tOxkA?Hejln&-9MYw3F#PekcUvN2TklKOJ3J6P0Jwqr z#PB?M)Q(srFQc>6shb{zRi!D&WfHFcK`vp5l!lYm+Zv9NP}mrKa!7+IJmX>J7bRk6 zd5l*cZ!X+ob$~!B&PB`@=M>8#v4g_~SCB4TwpiUE{f(?UHTZAMsF2?4|CA=2j;oM< zzNCVZt?|$gq&0D-C`BIUE;)p~xC=eumEGWMAwn%Y;sF-G8Pd^=8UDbX+R6oGvMdbO9{%@SHgUygsXpGH|bNS$2P(of1dcX@`RhtkP4ctvGp>3b7^PyUOl_>%oj~XhiZ?+yyMIC7v|6Gn^=7Z zYUv9nhh}Ob1pHqUYn|m}2d2Racnyn8pYc? zrQ~g~g;!RrP;LR4YGlXtXyQlvl_H`tdDU}zp`;y|F0XI)LY5gW#@N>3)G&KJ5DGFE zVANJ|HdzCN9)6*B=LaW`F@byrlLFa;T|Vun7zfYSvNB~TS@y2Z&L)e}LSY23?1|yu zIm#yieQ|@|o>5AwS5G^-3mR-{`ZQWkGY}64kG|Tbkc*eozYF-{&-Z4}Kbl&+@56cC z_v9Cv&BS~QWBW3$&9ZX@IM&ZkzjfdJ3jK#L>-Yb6a^YuoklhJkjGj5GewV7cDAPrr zhTgvINYqYWLx<9Iwwq{=XA^m(%n+o5sI{QXiOv>UJx-azXoqJmt_qN;@H3n*j|SDBT-p)U{*6)7~81m5w_ti+Vl8We5KE9TbENq9A^ND}kkQs||} zv6?|5FDH6f3)tUEKMz*^9X2;k+8^yeQuj!H^+ZNVWvc&<@7zU<7wh|bf68{K@D`S+ zKYVua{0BQVH?ywhnwRf&VG?~-Jv%}0D3`5VN0{tYAecPL|Gyh^1~O3OwrmobA81R8 z){DX#!8H@|4Mf3}7+KH$;kJPOQ<)Rva7C#M6PeZ%&vct76~?H%y%tKB#g?2jGT-pl z2}HY>(X8n$7f&n&t~r0Tq)|JQlHNf!XE3>a#xuUxPij zDNvfK%+H+@)XB3Nc?ihlDhH4{J>oe%3ftX`Exlxv1LEoQ!fl`PYflUj&g&?fErB+|5nr{2eX*ev(C+25YOKZLwsajf%w?l7l!GC}l zqTJ*MQS8~->4Cj(A23b60W@sFXPYNJ%|IB|W8K@2Kb9W6>sBmZ8kzzbvJqG!@-g6; z`a1hw06g*IJjA^&UFnq+*9i8T)z#^TIQfmo;VXRt1`yIoO^LH}%`q_>a~xQ??8~3q zX0;Ly;%v^;}+flh;qYv<}RZ0hTaWj;tWVD_7 zc+#T{YeG_xf5lh(KxIqpg?|{%x^;2@V`22==;2|6qVlTFV8Ge$uLh9xidB$ofYI_& zUH8=&N!hd#O^dVb@F?6?$Ns<|Z7F^Rj&ckJ_|KLvHU(Y=YTTLaXTP4d?+Rab8eX1a z#6`^-o~MO>F`Sp+e+JVMyMl6B!EGm^ID#4DU9DvNvjP*bix?>HBc@-&<{G482*-SQ5D~ z9_r}XrX0@X*W~;}74cmMV?90Ab>8T#hJq^hgC>f{9RPu7^ENRu>IEBV%aLS@>k6<- z%AlhPtgdy_X!1BB0(y-zuI{}8?k}$a@Jk`kX%Qkalyes@HD#AeBkFv;8=`wgvsGZ4n%p}^bKwnOF21pA`wx(y)>F7@wm5Gmp-uU zU{k$)t^Xd|ib8#(q0|BeBzjw8D?cu0O8uRlg|RDd!%WL-e#dq9tFgXQGx}nOo!8R82&L`FHnnHyy~S@_KBaC72=ZvqeB&33+mhY)`~V5j(U|{zRkRoYP4*_eON4|5|@Je_ec6eICtB!Nc_( z=J{Gmq}8B3?{8s~uARJE9@8+d`@~ngsHU5akT=yJ_lkjJhBw{2H)39aBvOL8gFdR4 z{7>l_`d6d)>v_9jfST4hfJJo+RwV7XKI; zJ+~NDWKm%{b;{l~=~@147@Jpd+AZGrJF(dWU+Jf{ENsE`t>^W0#ezCV&dxFz#JC{c z6fXEmechte@O+t3Oxu3~h}G^}5gneC%`Pt)X=@&S=v=)e?6o2pr+TlrnWAikrJIF1 zN_e>ZKoqkM!X%G6tzN#UhHUppa3-A=+YR=LCLC4DdWJ?5B_;w$C~E86>2ImDR7=Cf zEH~>j9nExQ+BPfwKaRs+UT^jGYmabp^3pd6)ENT`5c0%q`^}RIe_cTtWOb;d1d429 zUp-s!5#lC5$3Dc-VW<_Ek%AsX{0BEf_Tby;HPEKMKe{-jPG>;V_bV}RUCnXZ_Yc*t zKM68Cr9Df2>|Q!KJt-v@>j@0~cmDFS?e5#t5NcRy&sI+0o{z1S?|TK{?9YQHin_Z8o9T9XowZaS^ptz-7@~#+az;8AS4)oguGl{Uq1DiW|9r& z(ljRO;tkr$(x|b$_AvVNFf}S=fhK^mfV=T$g)cTL_1F{fDS{j{*VtiB)>&g~hpJTd zmyRc4_EE-i-GA{db0VZ!$HNwhi65k)A)e=WO$tx8PfJ#>VJ zY0!)?L8%I4$u_z7^1H&P4*cdwOHw64*;j|#*YA)i*$yi>X3_*bJLgKj!cBu?JGJr3 z_&m9$QV&^PC)G&{OZ1VYn#=OCs*II6gvlu)DprI*}G~y#+RDD2-G20vq59`-= zarlbzu>?wwL?R;`4%Wr2kU}ly(T#DzA>=J4MrN1@GmK2h?tzpuBw70FWNJ-zmwq8{UskqXMsmlN8?M zWX&1(lQF{;{IPV1H)VM~vWD8u&2m^TdScp9SvDJ8vbWpGGlFDv>>T2aNYQv}8t5UP 
z*iW{Go_=bht@CwPhYjF%+eT%Kq}eucZoLt8@6e2@)T2@JqI}gVfD)p2tn2f{a1X-v z&amCJ{XW(pY$I&U636zZBa}4sUpQtjadNUZVVD%1C5|Gz2A*JxRf25tt_H*xyCZt$ z$l1yVEctEC2oN)y0^l*;!*+8^tzXpmEcs@E+u#>%p;@6?L*?#;z5^1EhDk;%WVb#5 z{b^`Qo?$*i@LR=_;g?qZWt)>>Z3juNahWMo4fI|mJuW9Sw)p^8C13Hv6o)#TG=_A| z;&36t8@eWC!tA52ET5yg4<9j@8T$H6A@*W_&U~9|ojbXV$z#jzyiEouO+iVzl@$}l zkI_g!2X_fx94@(8W1o0JGHO$ad ztYb@q8@WrXYp@#U5vnJ?SRk6X4x6juS_2t4WTfomV0FF*BhFv0U7-KKVC1ySOzxQH z9&wtxHvXFy5)__mi&Cs*M2Ryr71&8*MB2#_Y+1ybqku$D7b}0}kw?Z3NWJg%e!cjt zo(;mPYNv0nTH<;ORJ;BAXB9^3<4?{`Z^y=Vz1~o5_5|KNZ=V9{7fLSg8o=W>JhBa@ z&m8hDaL7K^hQF;y!6hd5p(J<$!%)boT;7@ejB>W78Hj#v;|9<3tWymo!qo($?yr7J ze$L?WPdU*^8Td7dFD&F>7vQTsJ(U2Rme5FrE6c*++TjKo8C*~WqNB;KJnqWO!{w{{ zr-uMQyVlUaeF!LeMYtzh+Yh3ujTSgjcFaXY?~-DbZuV`Z5<<0GeUBWC08C5|$1)f7 zdt6q*P-YZ-e3HD{)Q0)A!M?a}>!K}{oEs2A23oOBZ(Wd&xqb;6E^sDxO8;)e!7 z{==>aU0$k%$-Gttv7x%3)AKO&v+IMlpLDC~Z{FMf-Vk&djk9v_bSsJ~?dB-@a5s-k z7o7eVof5NxwVLdJcgL0_!=)!`h|lHVd#LqpjYBDc>$iL!bt#6S6!UV(z;9PlsZ)}- z?^XY0->qSxQ4Ue7U=W9&v7#YuT_#($Oi*q&y+KR*@U+ktj>&S2_IY!2|AZhS<7z}M zQ(p#bB_J2$s7r<|g-I5ZZq?nWQG%UEm{TpO*qoY>sE;345UL9NAto=wRww zOU;k|zTzAhCV7ZgflHvu#McEAGW$HJ1 z+$_%lGhWGkh=uj#?(VZ6`0{xyax|2qU>a_|sbJ6QK4AC8o=r27RenRY6_64iA}ce+ zE=Ne$nk9&Pa5xsTP0vs&e46~sSTyw8)>!gk)y4o9qjS-o*-{Q|&%b|dx%x(zZDB<9 zkP!0ULOo9*7mEzjtu@}t8Q$3AJDYcfYAR)|oBwSPH8d=%o}6yi`@Y)JlrVHONrh!i zM9v2~vh%j}wX}BT-^P|NNeriSs7p$*In_8if|k1B2Y#ATRg0?=Q}BQglVBX^Yd1*^ zgXPWTdWO~%ta=s0O87LRVRqxGdzsLd5(EAU{CirpBqr>4dG)9vwcz8{)o zkluvP*z^xmT9CC-l^wmqv7xj?6#rfHVCsaf=jsj1w=+Fh@9I%7@VXD>U3<7qOMViH zrv0WjI?f4&I_W04D0Y)o@DFaB(2CzxXFcNuGj-bh_mok%CxhLfc(rY#YRAtUb13G_=9)iwyRYWLc zqH*cCs-*+3z0ceR7M6xd)~c+4YpNFQRHtyUL-j)^PfS>!8%3GFdT!YVIr!f8zL==6EuNU<^@9@R zd9jmqkU!*8*dFSF`Z&sp$u`|xOV9L<`c!It6gNqR%$VA#(<6deknC$N>@*9y_c`4! zr01=x|KZUfOTmrN&pqa6d}LoPjp?j9WcvspWP>HZ#N;p$MvxFROmaA-y^Nc<`rKvn z&^JJe!10K=l=3LMlq*z zsOY%>l3}J#Z=Oz|>&ea2s^P~9%`XC=xz@Vdhk2X)Nie8Hzd+C0 zZ<~}3-kFVxu2!Va|5Z#Xl$R-Hkm3H|Dj?#BADl*FHVu+-t_n8_MzR0GACngyLcU#h zVUdzDeYM#YacyI;3?%KQ4a<}FK>>gv1xP+FpPrsBuYu?2>i%Xu<4ZrwRg;yuo);+Q z{A@Su@*rPFVN6I~!IzT1X7_^6T1$daBGv*FX&`NGzNb)HJS)(~Lac|ZH?MDw9+h1g z8excYW!fh5IknV@@jWv%Rh-aTZ6h(4%TQw(o5Y6tGywT=ulRVGt&xMz@Ke}OH#UQ ztCEUMCPokJE#J6`zoGlqp&9Ky#?M&wXbKXWlfP0OJ>J7DlX1y%8dyn@l0U+9q9IlF z?}g3|WJ*7d7zyP$rhpGB${8CtsF0gekq^D!lkmDyx5uS=pN_aF$nW@cqa;5lsFRgb zkRZ^Ex|BtUKu&7ZvG478Tkt+5HVfEZe1&bDoaqF?nRCebUlU|Q$$`bMvX`JXR(N%o z|BPWG122PwwnWi$2w)6{Bw*DkJznm|!;1&ITKs_-omQrg$jxLc<7&*^*=LEY~K?b+}1x*>j~=|2~j)S z+KvtNY7&-hYSLLeUIm)0wtCw?2bnCDXZx^Wz$?nAXH%3oJ$c#m5icF=jvK$Ib)A9b zbIanP57+;r*I!)Zp7s40q1o*14MhhWrow6ffNNvp8At+XX*PQR@$9icreOjsmKWdl z*cpe1xInGLQZ>F~C*sX6Wq`DNn~0!7fwUWYt38G9kEsEa_>8!5ao^wT>bak3#YBe} zuY}v&i#je2(GEutBa7^PI2M6NekQi}Aef@m8*Z+X>1MqdfamBD66}8uEJU81oUBv> zl&+%U{XKaFh06#_OskqSQ7&)@NcAwlzZTT@Z|DZ)el|VNk;3@?>BxEwBKZdrreh3=#5>fnkvu-bsPN0FPva(@t29OaSjxTF6brv-G>bReO(fX&KTjk z$4#+<@tiol{+k-GL?J6W1@nfP8%ygWNH1q!9z-C#dP2r}y_+Y-47 zMj{gV9BX^rxJDBUZb*jBz^GlJ7`DO&4b$;h;ublkht(ik&{z)J{e?+B(&r>{!stJs z@raCfXu2(1!m!3S8j(Kq{THW%PM<1!T~>T1m=m?#g#O05Ds_Cj**!+slG-;;xOw!P zosR6IeGuPptVgQglsAq_pd%;IkF*oW7V{xE~g716Sr zVqf@k*2oS>xe9HS7*vv6K|#(FUgbF_2aeK5O;jINN0QXm=niZu13@{g3VgdoRBV|Y zdhaSLxqAf>&jBU}8y5Y)_!su?o7(yS1zpWlYwLeehK}-Hes87#uo2C#V85+cu1OCO zB^%@j#Qu5)ViM6Tf9CkEkh`L*7JHd5{T`6TpWZfjTx{o;mU7|MHCYy{$~c6@9do*J61Lo_$bwoId3>@U1`=7=X1`g~ z%B50PeJ!i&pUsB69q9p;@(T{>HReV;nOw-^E)>7<&OfML!bFmpk{F4noW`PqaVZ^2 z*`hJ1_UCagdqWMP*gnS&G{%$D5tmHdxYg=o-oIQxi>YYwx*vDB4}#I(tJ2*Qww5mQnyKGKh_5F7hw8BfuCbp8~(sQYTV))6l8XL_9vc;%vW{1i0qV?+j zXpO4U?UbXM4!eWos2ol%ppfIbE>APxGwL(%`ZQEb=eqKaESYCiB8xnsSbr1!^(eES 
zl4&drmt?QP*?f8H(puO5NRPlq1prrleSZYVo@f65@QaBTn$9?z&{5n*r{3b@UF`C6M|2+x@3>7%L5VI^2+X7$mSAF~1JwEHSqK z=ScujSCA&hGxS~&AA9@X#rp46`7VA+L(xa7i&tb$!rx@K5<`^b2QL!IZv%S`yg!8uXzlO?a?_%x zn*64gIHV}vS*}3XW^;T(yr$sp48~cr&yeIljDCiZ!`kPS&3oqXwsyCr@r31?6ty9>Ntdzyo%~U**0c2jFi`Xg@i&#D+ns zzd;Qp8Rc_`Gz8bkT!hx;!eUkjhIu$1asR{qI6Eq`hAX3#OT&W=DN(Tkc?&4+5EHH; z?Fo#3Hp;}rKO22nnT_svWri2EJ4S`1R3+_NgcH~9rqI5FO6Bi*Pe}3mw+|!w+kAn) zxE={t0$}HN#nq@WVmNTrqbmslmo@+MD5Uns zXpCTU+vE2xml-JB(r8(|B8)f@B+JcLxW#{AZ%JVvyAZnpVEu^ zuOEABHOVWdFXs&1_ipd=*VxK+4RBdP{CakWN_3&xCoRC3_Tg!Np>hE+Ie8Ut-*3z< zFQ8Fu2uR1+$$4iM@P>1|^zC_BwemY4NP3#7jz&hLS}595N2>v50cVIN7}D?ge1Nn3*I6w41%VhS*EHyC$r%#8nUs1`4&-rEu*I-DT>}&*jnm-3CQo zJx}B7H8B?M?eGu4w9P?Up+X5w`0e2C<-gFT7bhhTCOr}vRP|TWq6~arZpC^Zp8hRw ztRem+oX??Y63^q4$u}g`OkMK`^M9&Qm!Qj{k}2bS_lD{p!8>;mQJBW2VUX6_v>Q|d zf=~5X{Y!St(<*JUUA?mOAFI@IvStXlJ^$!B)Ej2UDHiO*L z^trs!YqVN226-irpZm-^Wp?x@k$rV!^p61( zAl53jBXN4e0wSR}&(RWt8IdQ9H8cV1Jd+a9Z7;z+FG4XtCRH)bxXT#2c6D3O7{84_ z2b0NU5KacJ!WoMgODFPCdf5ZY>jNFsix>XCQA`Ks= z<1mc7uq}mAsVo)Mp%p_dVz2!03TKlg#D2+aaN+cx&tq3`$flSWZSn`Lo~Mr5<&aLU zM^DHi*UV$cNoMH3yMAq(g#G=)mka{B+nrd%3&c|MIfjP$(Yq{D1%gkcv@vOURK>ZF~Jzql!#hcev>i6Y{C&ueQV8 zzC`E4_1*`UKrvK?%-at9DNo&#K5#2?tRp7qoncLW*r_jjHfg^2Dd%$vu%o{bLv zpvSA%@TaS@9z)~B@`NVX?&=Fsjy22pCb1jgud!U?40H*--l0=K8q|0GwuNhqMzNZ# zMFE>1QU7<>xdvVjZV#U9lAuA5X{UG|)Or7ADY0cB&UftXhcCq*Swt86Ggto=NF^4m zm>nez4Erdk(U3-sAVXxP4PX(r2t%F%L=C=3J3_&y!9mYKtIp2OMR%+@w{! zk~%aC=h5xbn7P+~S|d4$XeQo+qc( zM2d6C2>HCTZt;R8W~QqCkU8_Mfv?q^uKtiCly&7ymV*~wtY*)XG?+;+B=8BaG1LMX z=22qVIsGw+6UstUpiBK;>8lx#)=FpBKp{DO5Qj6AnIj)fSMZTyM&YM>^k?br^!74X zF|u!t02$XeyDQPO)`$sDhhLJFqPLlx^V)E|QCUrEA+j(o@|X`%vb4mX|3RH$eKW!G z(!=bM`wD5@$uY&gF@=(majiW62w$pnR{1uCVg9(+*5aD{WH}#~#Ei}ZZGxoWucV`W zn@As+5Es3bV-l)=C&_@VpfmW^#1KtAD9I+dTiMc8AUrqqV*C8zUv&agZL>SvJx1mg zk>T|Nj)fbGdH7!RgRp?R_aPgS%ATCC(yvg6R-4^F-QQnW0!!Z(W9Kuy&^O0+KiHQ3 zu|@J~#$0vQjpFUnrMd~*tf-_L!zuK;<^}6;RA<6amL}gSvak_rPG3dN2q8rEJbT!P zSxBaiW1#obu)F)s0I`&KcGG8+w<1(nIPOrg0Xz2r%67(((ykq)p9Z;YEP$4(-u;j* zOA>r%XXoPj&a-a~x$3&SVF?V~d^~(w9ogwJ$=rLs-*|r9S;BvtmtEA?ci;T50r>mg zFKq;4UEKvdNdOk)&xb`Dw9XAhr_LP+& zDopqCV%G1+cj_7N_gO7Nq+IyvO1=dpm5gagwl(ZpU7Z9$mfcc4b=UKE905D@)5~qY zL0BBMRLAqbf3}IwqUG(~@_o)7>i!P9a8k<&N09Yi?ac}CwKqBcn5cs?kqV<5{=#X<)trvUa)9WK@Qn5`VEu4BjzS2lCgQeZ-!9mZ zH_cW1YOjQ9=Dtiu^GN>2mLRgtDiPV|+3f94oSFoWyy-w#pZILI8x#%!ZCt~Sw>IPF zmd1Y<+tvY$q`!A)6JTALFi+0@44)l1q5&U?2 zbu)f{alZorF3IlifP)G&lZOO^E0urS!yVfHGN*SB+r~q)gMW624%|0-`LvKy9W?H% z(&GN7cOmGG5`|zAPwL)CDmple&Kxz0clcC-PXlik7WXZGENhB5piGm>j8_x0|81LMA{yF0)=^ZMz(Pxtp{_fL0ccRs7|?0*m5 z%snp)F~VF`PF@}cIt(b2IK>GiW~MSeXc3}X45KH-?AS;i7Rm@!G0DdfX;=%)0zxbBxY0lzNDLH9O{`I58 zJ4`IWDc7W|fY0UbW14*F2P2ZnQcS-_aZz`fdF;%N==keDAXeBnePgFu_cZ?{uL-1m zS{=V=iBw1&d4IU;XBjW`w`JX3AAG4Qhv z=5sUsi>(xOc9Bk@Vh^@_0dXk5)+COyNblq|RkW95x@C~0U?4_89!Y$U^@eyrqCkB* z06mopfswm?FKU{x%M~`;U$x1KNtP)E zYYF+}PcAYFwR>WW9hrMBqt#4>cT+K^pbn0Ag>0skU5Wcd`@ic>F`5pETc~2P z=Ua$6dz;KRAO-{RnB^{au6FL-)@VU6LFL#Q0Z^T*o4@GNcsGwZ;=&iUIPU3`gm}sy zXrfehF^c|Z9^Z6|YMsw;BiN1>Q+tiu^Sc}x>S19ETj(}zQvnxrc z_aaE5Sv6u}a92)}>?|glPGt;sk^yeeeM#?oyKday<=!;{9-%cQDsMq`b|%4w&}K{> z0~v>!Ufyx5-b?WHcw3v5Q9V%dbUyj`c0K(C0X&^7t1iPYDJ%>%#+ZBVZ`3TwXV97I z_I!n;6HX%hh1CL17GgMl8`|fU-6x?2{PRi_%zoOJ+RC!<57Yamt0(97TORQoe$0{i zkCv_hkY*0H%kUZASxvclmE&AC={WZRbd>jU6YPv=7rP0B6;30L&pJ1NvVI335Z&J1 z-}?po2mAX!0$)Ge`iRn4zwJT9E``Lq)wcee%8d4T)k8<2M10Be8WR33z3?qtld4_o zpG6LSHT~DZmmL`Kc{cf$U1Wvd$Z!V-B4QKR!?4KR6^&ufhrY)I8g?b?;0@X}Npmr5Zw64iiNGlI` z$urDkD3B=z23y?AGfEz4E=a{~xPVRZeE3v9@Ux@K8-49IGA)`JuA8t`Y}A0l@gZ3Izla}+) zLEgGbBn6f1z~M!J%N;Yj@x_7Cv5H`zJDz*d4FOW8{&i>P{hIer80Wor?WE}f*98Vq 
zzB*F?yA<;APll&nCf_l-P^yf&$T+Gf>T05<4*HFH{e<|Bk;ui$YH8Rzm-+Q51W$y* zudog}i_94n5aZ~#O%vF;eOkl z8$ci8w9Tu5bi{T&TW6=0Wt*uonhpu>XIsw%ZdCT;)XU{zb}NUb{z+dap)sURYG?oz zA;8mt(Z#&u0M+y9e#vAUs)LqKOIrk#0`O_t;Kzp_9Zy%Tz(FA%O)}vPe!hAiA`u)E z4DMY&HL<`8HRSEv2bbYuLzj=lkDn zZCw)WKg^oFMf?7IbjE9K$86`by%5yv}|vaUFPptP5)&j);U@ z0Kx8bgLu3KQvAVl03d7-4D@keaN%_2mt!V0Pm6D7XiZMPEz!3VxU^AxYP^^2^``Tb zlXfjG)D!FRxwT@5io5GyGVQ1{D&xrz(-@?AX}h?n$~(BpD@O=Ud|Am$BL2tI{E?Qe zgyHW>8eN&M7b2x>&x_CJ9ky`XqNmtc^jpSv7ias;(uEnX9kL%4Tk<+KZR59@OwhDL zKhF;jeEnv{ICqJO2W7C`Myj0%DT;z+Yz^=i>tpQU)xy(c9Qemu(cfT>a38x(jL5%` zQ}tD5UHYoJI#SE8%hbVpBA)fK44+`kQEjC0ywm3F$)!;HYqlS~a0%n*cjKAADkd*r zpJ(fB8|#~G4;6KU@H?WpR6gsRPQr9@Z+x^47#JK=N@&W*H2@NxmWTViuX!^$g z^NrWvuU~Wj6%q&%_)^tR=)L9(_ba2T*uJmf;7SvWmo4MK_(a*6Ab*1?^fSg;qB)mB zs(u6{RJnZNHCB3lu}3GtQgBx%GM0){_nA3q0lYIYL65JT9D@E+V~U%|;T-MmEkPg) z*u0`gM(NFCPxg_)Ny3G0d5z5b{Ljqa&JL3DpYr;+7d*&N_v=zfQ^Z|#jhMX{ygGI< z$W_#y-w}K(Gdn|x3fQ&W5xw0i8vVS1k!Ut36m^U$m=KcH8Y*2%pdujh578(Y;(p)h zwo}Tz#^_pO8yE;=9FzJ(KjbUGU-cK7VEu(otYi@M@NfnIVD_4ggX?Dp7x!1cstAHob{Im` z^Bku^u&)Vb2p;YMw1hYLgq%QH&_uTsOM!gosP5)>eJxMm2~BiWbhOJ<)A4-8 zsG}h5O~YBh1c>-=fS`1Ii3+Qu!$+=ktFItUw%IbfO4s0%RY?1>iq%HPBBi&zLvnd( z_2@Mfjb3TfyS5eR+z1$GXPZtRl?Gskbrlt{d%SEh_WAW0m{Y}fCg~Jx$PX(TA;q>ucfM^jgXF2n6nuZ?- zA?_a@?r#C^kV%mV@TwIRnRLLG9kA?i$3EY#t)|<}&|eWqCYf4yn-;_0sqP#~LA|K^hx+#OCw0AkFQf z7tw?w=|~Zi>LiebD>75j+UL``BLxJyr((N&PbrVsK94yaa$$Ur81>k#58CyQs=uiid!0F4?( zO|)jX`eaU6^IeDOd?a9L*Rmxi3PW+=2LT-U?QTdL3Kj}2I@XOO<@Y$QSoEVBl}5H7 ziq5lBAUj>^iFaNZJhs~w2j6J+x%EB}3s&AzScozIDiK*MTtf@WgOS+K^D8r(6uis) zVJ~ndMFp2rc%udOHs&&N&7b%2u2;J!lkeFlT;3i{MduPe9%an97gV=xj$G;Fo%OT- z$cazkOY^v*br>Y0=$joQB-eye%}o5BM#JAID&~*H7^-GnWtLK^n#1HgQR82l4?_!knHsV^+tN%HQ~g7GX{gbD z7VvfX=*fxXf>&~!^YOMEjh!DzIcCF8O37o)+&92ZetK}jYtUb4rd1cX2e9M=bE$Y9 zA>d1pW7jXgO(cSicn){&Zhiczr$fv8F3a1%Vui5d**u=ZB%Zmv#K7on_(rfNX);jN z2k6xwTKV4H%{9bH_RzVCR=T7f>0XwoI}^MI;l-}xBbN7fFAl}8S%)X~uYINh#S8P2 zTlpdGbAASn=bjt;?X?qD$JArsTrpaFjkoBF>v`Gqe%6;@oD;gXcP?+Eq1YOfvron7 zlF7ot6Q;!nt&`T_*Xw?ZJ1?{=bB--&e+Z`WUjtd1?M0BeWlNG zO-$CeR^~AkkRxJV1sE;9WU-20v~9UsESi1{`Vkn=BSt$7Or{>L8fYIPa@J^)0s&WG zL%aO6H|Q#@?8~hOdwb2`Ms5d_C4L2a6GSr)0dx7y_<8g7o?Dk;Cg5qfc;$B-sJB$! zA!l&f+hIj`yIg)k5?y1D)1!G6{EHA#X$h4tLEFW8`55C;GFNVHJ3qL1W?EeEU~Qe# z0w%vp5L)_OzVyl~cQ2Kh`*YSK{tiWrd3M^Ir15w%FJSfp`ShrVPzxxhv2wAY^gxx%6xB*V#nSP zO#RA?^BG5n4Cc5_c;F|lU5m9PHv|j^Q%7$9Qw_Jf*scxuz=TOZ;C@=-s)%AC)NtsP zY{%at33JFK+>N2^_OM?q8>s!YMVUNlWNzT*VJCPZT`Hv)oX- z$HsX?x!SJ^)iN|vwmqkel?4*0$P$gkkwc|-7nkm!G)}|SvZ>9O5mkCgin60)vzooA zo;%uany&O(MOSfdc$qNhZ~pm*i^|a{z&7V*C1rK?+nuJGFqP21r(dCUN^n+UxBO5_ z2OCDSgwXBO-0?h>QN8ZY);4~{?;4v+eSg_1QyfL9>Pd9@$7BYnD)}2y9g^`4L)6{` z5GzqxNTPg%us)Q%-pKxa>`KnMVVhnpuLiHb?zf- z^89cSa1^K){B(7(WFmp)z6|E-u9qm~e58_izCSy6JFX#kK$tw+&maP$F9YA3pBnW! 
z!N@M$^p>m+dfVh6RET!9I5aQX%oAu02M#tNKhj3bP@4HphJTCV|VX{^K_FM zvk4DFXRFoPHKct~ZkEpBlq9^r-43QsXinB%k8Q3BjGrBw-#3K2dn@H>YCD>{;h7Fp z#0pBUM5yaz)R!e^TPzF4%Pbr<&0@d_@RX?q(bLQGupz36zu+ZCh+)j%coY*nORwci ze+t2=+*3EZ>)ne4Y+oBG7h6!CIu|{|pC5`~k7tKVm*2@mljjcE&uzbD@1cYfsHC#^;i^#CQKZ2_jJZq&Chc}2zu6O81O1f3Y zYVbH!#aTYj>rGR>qNsfXW@Z3i+3co)qp33+j^q=A3{^ zPSHl+#`?yFiOI&gvBZW%;SFG+nQh^-7XN-yskthCD^z#5EpZ1G>%&om-e0v5AAbf8 zF~T!7OKS5Hm{Z=V(Kh!-b1M{q#|ZYnqInx|M#Xg{FCOUqKyBxCX0G zyK0tv=%wn4T=$IF49~XMdTw(ZmrA#(9Td$-HbX^*Uv?)<3AZi1kLgUN5`L!xl3Qx!Jn<$3fJy52D}jg#+=K&SVRkBqOUX>sPDN z(J^NZIm>)D%9;Xn!c>%*1N9SJ^v@Hxx)jAOjP~|0`O0E{t#%}zMLcKYZf%4}p* z`1ie<2Qc-jC0qDegyGpgmN?_P64okwn+A5y0-S&cczX|I<$J%!i)+A136Osb1SVvN z3plU@cl|LbtgG-5DbjjYY5XcF)L8$I3N2bb9d{*3MR%NaUAlDclrmb?WM#`Ap_C>R zme7=~meqs^zjRAM!&l~L$aV}I4Eyu@z5KZXadR?s^w#A;KZ5YWHkm{#<0|ynTVAES z+;F_2&XAuwH(uFShA@2<8ZUQZ$P?@vT)ZctWuCQt#Z=N{>VPf#-3%(W4MWo{dS^fm z`>ylt$JZDiYz9~kNdA$R6f;8~7waWD)82~=YvF266(WaXOcJY8xrltXpL=6}991#n z1Ut9r2o&Jpa`N<~-(KrWe=bN+K7bNKFjtnTRT&ibA8p{lG zlK9ypwp?;wxJu$;v3s4P`#zy_n|*f$yj!cqqQ2D#pB zsfgo4*S>JWtS4Y;y?76YKpqbl^@H#3+E{V=at|aVd*6XP1(6S!Q53*%i?POlD!PYTBl9=?>`TAge|h0Eb$un zb{Wgc$(3_=8|L!VP`}Pt%zZx&nk=4K=QYxQT@&4IrRQg2QeWqtv7S}VJ>LC&Da*m3 zu7d~aKTX400|j2uxDmymp;CgLUGaFs93z05bS=(g@CN+1vYE>qD*t+iv5o!;WL%}K z>8Dg*x?>4%QWgJo$_S>%O{F@6(J`uWFc+0F3`9v=`L2Ir;Vk70g(;=#f$WWA-|pul zz)4#Lh<5%FcUTc@K290n%EBQ_K*0^9>C=ti_5XVlU*B*tDS}(a#Jhq z#X1kPB(e#XelneGuOWySbV$MMqG{jUjZn&9U0^9*Z1Lh6Br$DgU>~jh1}gY4@aq;m zdDM@tBovS0jlu|Ts)n5Nhs}}MDz~`{dNkL!DYn@iwRT^7XR#h?9dCE0R@b(04|=u| zV&zebYglu{ae^aurw*oLpu{_c6IBtvWXfMf2H07B*wbN3i05OPJT~AXk+RLOQZ#4p z#jGaN4B9Xo3q%uRq}>Qjpbu%l?C}P~acAkbH?{wAsc)9Qa&^f4Jo)_hOU*QaPQGC7_)JU-*DJhNEbxrd;zSFImZ&jQznltW$7?q_RYu2MT?M_Y??x~-q;foJq_v& zzM-P0{Vd4cyLiq$-|tp(g4uG9w<}UDYZZ`suWlETBb%k9G2FT@%xkLkPVrRSk~(6F zeKM@)5}PbhYRoO=q{YGW1mUylENJa;z5_|h$~vS%xc}%_1X52DEibljK+xPRu#{DC z0<<7~`%JC|nl|7UrvL)u3V>xC_B}lU-;7_0Kmt}z5L0cyPV6S>s>YbN0-NGyt9M~P z+Z_i6e+cN?07~zmA8XDQ8?-CMr~jSXyI* zSa}v$@y=-nbjJ&}%H=7Vpg|1` z0AsbR@pA6ogJ#3e8H@fVJaMu0hbX6Oo9N}}J!sM~0W8RgtsYCrTao1qX`(d;;WOh} z;XZion%_S&5I=ttq0C5`dYj>tO{=f=KGm`HDS`b%vC-e2*1qbdq@{B{7H(a?{agC) zP5;yj*cK&>)laEj{k~aGU5cl2Ke9NoxW&1L@0@x50PaKB($cY)e=y?q;UOTFN6FuIu#2%D=Y7Ry8Dh{g50v0YdBnAxHpL<=*ayx8n;nFDR*@C~%g)Rbg@`y(Gf_EWKJL!f_E}9X-S1cciH! zCoLO*A+tIBHaV=6hh@N%HTPq2b*7_F#I$7C-yF^S?8dIQ5vaoa(&fV0hatCRSK(Ol zeh~Nd@)`<<0^$LTOGRha;&=y7HIi_sS>~gdIsdxin*P}bEW+-G6yNeR2uPB~+esjJK^87XxH=@%7E_cgORAu_3>ByrJD zC=Wwf7B5??15<}*Xx1Aa7c&jgMl8`L!WSg$Q)sdtbN?7!q(2=bMMm$vrB`+?Igs(R zWT8SU9AvNjR?aqQyLAxF>o0^8g{xLTPVn_t-F5(e=rCt=!aaz;-)33M=J=nmf9%Yb z6s_jp7d(%rv2&~yOm0D_g9gbK-c876&91PsRWOmhe8Ze5Vq28FMQ z8gQice~$(JQ#zmFXnF06c()4 z2T!m9Sw$yH%4Orr$|ByJ<3@W^l5ik&_m39VsCc|c`(qObE$H#lfdcR>?&3OjGs*%K z%Yn6WeO4A2%mrlIti>g;N3;-}t+x%_x-Oe1Pi$A$0sLmJSNz)*Ud>YS^`e0{E-g!F zHH-P>%QPn!8?Hw@c4`tEa%3mNt4azE6%1E>bn<78KK^N<*m8;1g{|=B`{a_UQ=z&m zT)Zw_HFdg_C=^kierH8y6JGqM8FM_fG`llW*K&Ako)r{E=1yw?>*z`XZ2%ee$!FDR zofsG-{)Z!deB9Z&x;Z?A!46NCI+m7_l5UQ>CcFFvlcFSTpm~?Ksi&8>McS;`;Z65- zBD>f0eqbh|^=hqmQuzG2!}d&-gjx@tG5b_rvSI;$yqMM$hWvgiD=NqgGol;DGJmKn zMU!@~-I{w9UqvGxaw8MduarRuKcBOYV$wPYyne6M_K^`w$XGcN4{0{`o8}$bind@3 zN@;%yKKqUOWQ6x?ypJN9s`I_4Py!`j*g|_{&2|*K7 zO;Vb4MrDjUxVF-yRf-oYdNdL`2NXIlRc1&zR0q10HGAn3Eg^qUYw5E@h-Sw&Gq3-B zSQH&jU5lZ@(&h2)Bxwro@8s_-35$|v;WbXt{ix#jQy%A|us@EGcjF_@p7#h}5OIbJ zdEOH!8eKJ5o=&<-@>a8{E!8qGS&du?3sX;s`6|E2SPkh{h%`f<)J+pttZm_^q>)W3(2R&4T*SaG}QrlzVE%0e;8*G+Un z$1bab{Z$IdTl6*z#R_3cnZs;Y#5s7o^cXQURpwOV4QJGi1LG&;(n1x&E%L-t4kDE? 
zk_yO_mUeI4W8yzpC265cbvo;vxq1+xs8jkJ5)i;Q3|(%>>VsNfaS!{-$+I) zzO_)MGDwnJT{DV#ngj)mzlLR1erq$^&}#Ah&gjiup`sxk-d*(yKX!MTHY09akz4K; zmh;Q9GM9HQgJz>S<~r{PSr+8=XHO&pz$Id8M<~ zvksy6ZoObb;~+zmAfP<31BMa9}N#?R!rL^0rRXIT8X(nkXI z5LN<_rE=|4tktseWm;q_UW{!unypIMr3?)=d26Vq(>i9rj-zo7tA8rY zHfYdT&+lZRx2N*3*-@2eNtavbA94C8k*@m7@}81icB>IN#Jbv^KH^h+X0K9pll%uI)FR&$Pi4P60M zU~b?5v!re}q5gF~kc^6~-Hzmsv@~43QYJYqvan(Z%P)nd0BL_=;$> zZI}jQ(rl4MgolgOLyRJ%%RQ60bZrSMv2!m!3aa{x=YKziew_`R=#sdBFUn<=*Sa_+ z9lJgUAbPk&D8WwNPK!WlI)1)71u!G$sM#hR__WWBmp}+KSam%-ARxfDQzX4UM^o#GepC&blopd08NzB7-fPe7D3G5m`niOaF;XotCJpa9YL_IccEA3#15Fa5e<$I`HpGM%dOzgWlXn*rzR0499OWRbr_~ z4LBZIz$zx;?(v{0e&oR#6^hCl7t}m1l}NnS%30en$GirzWS|O&;(1v%DR={q#xlY{ zfr+=i8^y~Ji2k#8kCL+ZL3?T357?#GFO35r?IOHQo?fHOtho1%7uK`{I(BodR~02t zoigDS7Io01K^R)pRvLSFrE1>2id037(34j_b-FpGEbCryS8kf^lgX*tTChAgPw;D^ z63qM0;=Imk`ql z@O9t@P_1152ltQw;2ywK0)~LV4q-sXJuCt&$RD2I!B6nxtE=m)uHzrx<}wFKe=Zb= zTHKt*-jr99L?+ECT5HLL^3(5a7Y?k1<^CKUs)?Uy;p4lSh>!iERxyc|4$+u0OY<8jW{Fsr6@UG{#e3V&GEtMX>O}xy{|Ca?0?3=B4NtvOX74ON z34Ns0j^XlqF;s#{hmtyWStk}i&aFY`Ho<-&$BK(#{zfF*Hk$$PBNVcLI~J46u|cIb zATlFCXxZDy(6a3B&1!eC+FwxUEbJlv@^!jVhy>AZ{@PYUUF%+c-!u+0y0$`>bQ+e#cEoT z+3Sw9pR2BSo4&;of$d&MC=M@wQFN@1w&*krVCv3^v|v>AiX*Y%z_QV!9V1nO?1btGv^ST;)Uvup@nR}bl4!&z>Mo+5my}N&y6O*b)xc%k4W&L2@!^O9gY!(S|9X= z*iSY6sM47&vudxV{uo-xg`IycrG-Xr$!a^-b3OlhLKEpNVO^rsN!7xIjf7Q{y`~}O zoH`v9A%^1z$!;WfRZA14_kPw|cn$nly*{vWp}bK3O6H%_y_feG66*}|17A=AIngz) zK*(2Q)PckP-a~zG{x56RKBfYwD<;<7*IrrCrfqf`Q`jQJ z3zHUeIT}xQd=d9PMbni$OE7psIN*VE*Y$*0YRI}W7o@p?wk9Pp{uv3WG4KQ+nxWfv#AZ3`D;LF7V z>omKsv$=)<&MfN+up(He+Eiua7e%S5DKRR!{xwoertLw?5^d&Qll{b=@o|==Ow*3V zTC2y>ESu2!Y@_S6mA@2!Xvz%wFF?`8q3(yxbbmiPgLK;(j(|B)33fF3#L?NZrEjjF zx=qA7rbaB8tX!p3p2)0gpx(DS?l_)tD?J{G;O~Gc9oR7VNtc&*>9Fm}Q}46UD_8Nl zcYr-YNmI4Yi)6AZt~u>8PcdP;EzAN_4Y^@F=&uATKVx5gd`=WU4%wVUk92L*9L79} zM)mBQsMvj#7eOCLIPj~!obV>~?Z$A=_*#$Vu-|z?%Eh!0W#Ks$Vy$Xqa_*D{eQv*L zHUm_{Dq|y?P7&hQaQ9&GqUmGbA8WK72p9wbLBNYP0GASA==^kXkGKK&R_i7kCmjuT zZ%N}V6_$}1UcRby8%KDlh%lFY4{hv;ZC#MV-JZZ&uo{!yyfdvoE10;Lax3-^X^PPR z*GPJ_s0DGAFkyWB^KMtxI@8Oyk&c!2o~FnxjxjyCZhV(=)e^e0YGyVyf7zBxI5nP0 zKDSyQqgGriOKUQCq|0eLA<1XAPQfw3J@}bHU!4}T72u%npbViy&EmaH>;yl0e*_?@V35%McBURBgmmKmsJikXlF5~QZ%QVbw)nm)6pL0&})oFl%Kr4LRl{55V$*^>p|)<@!R}i z7_@8uYDBe&nK>h@bY@?|iDQIAy6$_ahV6Q+mz$qBGS$mmPeKi40;&QLOy&iQn-|}! 
zc?w&2Yhr&r0DBt~)qAUdbxwEfBnJvKZJ0l*ue;1#gbenBdC*Xy~-j)&Kd{uP-5x2l~}@ z;%=wga8XBJOHOMq8;utb>bwMSPwj2Ktj)r#wnHU+#r+NoThV4mj^cC^6@)5$A@zm$ zc*zK~%2t*dW6~%od!zv87mmd3r|z&Dppfr4zFUO|3a?bC>r={79zreki^|Bg?dBma zFU#LCFDTDI`RwqRX$J{mtyRGxx6Aby2Xoy*Q}HpFKsNvY(t3f`b$svMEznnwcj>B` z>v1ltzBfidQg-+P%7)^MIcMHzsm||Y7@(|?D`H6vC1jMZWEd7`wdiuLDh+buO`&7= z7oFk49ucF^8_d@mZ|&+oMG*tB*ev2!0Krd;0~5Foz$_^0en2)75z6<27>nS8gYX_ zAi$^M=EavY@BjYPdy~%dLCh0_O_4h|>Klf9D;Fj+C|*IUAKQlkHj&-oTo4nZE?dcm zh#02!L3!`?gIgzi9&d}_NA1y1Nvaoj$8wY}V@5*oI0kVsgvdkXzC`8rB_B;jno7$2 zD$4de{Vo|QQQ%$bN?OY`CG6N@ZCaODQ)Or(I;G3FgXbdY)Z?6D#{ul$avc=ofcYDVdtNDFLXBt>$Y7tUEO&p0tZQk| zMP_tFPbVn{s!a*c9)yG6QvH<;-ZYI%mEoeP!l(;I6i9rGRS!^~(@9_?GHbys)ZhQm zALk+Ug2)(3t$-dRWMPHG#eW+J7Smq*$9$H!oxZPSfUdegxBtXKsg=Z* z_Y*Ho1~OJblxa5}mx5X_)YD_FvmAW+(&jvg*m0dDELZ zC1$B9$zBhrFZIp_B{czX;B43`5DUQOR)b)E|KaM~?a{TXKF($Rr0AMT1vZx_APswb z9LG6yyQ4aV>imfP+x*@9;_w*kOU{0~EJ(;UVR_;%?)2Yz0{{|tR{%Y0pGqaKb~O0$ z3a~Bv80%v6BUT~n@V=Qx$$9dH^a>f8oVC{E(<_xSN-T@RH>`k8i+i@9)xygIfiW~Y zsO+2Yew(2mJZ;5^W`eA@%Mfq^Lh$4Ul$-=_?H6X7&5NAdebHTAb3ERCuk3Auq=PjMex#=B$NgY*nc0Gf0lz;gmrSP*_)#ti^F zqU{EJT`r|#03aMHYKVScGpuyb7vRXYzJYmsmHfJy`N3m*G>v8SZJ4^c74Nx<8K0YJ zRay8Ki0tOCW*doVO(W)l=_&=~rMd^*iV0eKO1TcIE{9(m#nN9rKN@|YR&{}1GbR#< zemRKlJTTqx@xbOU&Ekp|hFsjOwjXynCm{eR4S<3M^6Je^67Vk32NcBrEp~$*FAz6H zOA<*iP&_h&RxShac!FuyQhnU+Pu*|16EEIVFAwL0j44jIvB~U~d^_rMz?kBLvy>Ey z?#0pli$@Ka#r)##pi?4|!<4qqDj>`+!eoA;?bh%_g5j6J=A$cXIaQC6E!ZQ^pQ_t4 z#8GK8HRDStiCwfqF!pdPH7y?l+RCD1k;V7?yP?}oO5jWqmL20{Cm0@_+T01y0vkJM z$QQ&P=bPi)mX$838Cj)fM-)!qvtNQ!^_cBv-FWxj^wLSZMu+DYU683^SEoNmSboIw zC0ND@iylI*J8&fK(MK0bR#z_}b?`3rWy2~zRN~#^`xF4K`&-zeF2#(t7&wtrEG4U} zB2&U6HAK9ry&|`;Q9MT?eM-h4z;K^6bZ2>OB8ua+T}a@Czx%O5k2(Z%%Ny; zc@{Imzi_(HrN=a|KAXz~YkjBQa-He^SZ`c0PTQhKOhWQHbF8(oA{y;`y3B5hpGJul zmWZq*DZfp~noLuGQ*Z-k{5f&&*J$6L;oC|gj&o7(n;rzMjp+|G7Fvn11_&mY5)JS{ z2FEo!3ui7kAv-JQ{NePO;U001E}LXT1aNxiA&J90J=$>t%&jKd0ETLtlnPapvZ!kt zieEjDQ=UNNkF4*R`^Hg1rNUNG3D3&^gdHdAN!n6SNG?+d4LjC|2%MKm2{0=wFQ=m@ zOH;DM3(B_o4(eN7f13YW`t`0|gG5-bd{+HUB-cT~iI3NJSDj)fH$VdHXzExGY8z%N z@wsyaIe0Jj#ze=i9My5xWNTrtdM}eSR$tby^@qiVlMzAFF_M)0kXGv&tljyhu^WG3 zDBtk4-dDt)V&cy43y!!KC4u@jAnp^O2{@oHcM7Iv3A<_*^_I`VNa2|qLT&LlchMV) zLvPj6_OIl*ENfjv+G;anZ`Cjr(v92+>7AXHCh3=g{gHkdGv8 zefLWN3z~~5dROKlgWOQkBC01nK+U~{^KNq9=HW_KFPcw3C-+usee@Ty7mQ+_U9RWr zKHY+*$-V|Q>$?Px!g*1#M7D8q#hZI|%sr6;ryo7}mm#XO35Zy=&&k`jT9rH}xyZ0T zTCeu0fDO+^ht1_)V8GgcbmN_iLx664b^JK}|2^vkVjAdPZU=w!~$t zBR6I9tZ=4LCa07DeBk^g{>_!jss2f4roh+*wv2d_qUu<$le^rwaHenTDy26@#QzJ> zaK+5vNemgRDzmI@O($thCq~YXu-GQW?2-9!{;MD-d7bLLj|r-C_`n1w60SfrcYh;&R-B{#dXsrDx^fg|wrNPk^+$%^(#ny7y`r*T z|M`IdmDHC%)bg?7g(Ss<`x2GGWr(YX{Ujp1ssthF7ZX~^t^Zo{U4`212o$$K0kK;JSjrBs5z3-fz%eI=q}BYp^>jiRM~W#+C4@z^^@LhO&TXO@q_#e z5!#`yVQ6#@eIKis5euzocxV(6x?eg^Q#l}R#YopB&0yTRsVW=&+JSuVnU2q*S@w;w z%|>^CVVRV?p74UEtSCMs8bg7`J#NC&tfdPUOs85q0*C`IXcJFm@6Qq;Vlp~DZC^2n6sSr=z%_$ zmuq%&cd0t=3!(a^d1FRGr1;hjOezy0GMMAEz+b2si`C8=x%zdgNnoJyvfY?cm~Y6D z2QXJ%yS4#zLyT_)@~BDx$E&BS9XGSr=JCKa-vC>>UKNsRf`e-!xmXucpnTpUAxsmkop-RVzGro8rY?zsrdtXH$hzpHk3 z1o8-StZldTp?O5)CI1@UVR3Hb&`Y^(>|4;@@Joo_z}+`SR^KNk6VR4GwUKm2vhO`5 zfAH8+LtA;fJ#1h927AcMI6!~+zzlU-E1>1?elO}f1ZseKZ`!|u^Z?QTu&t>DGJV!n zGtW5G-((R$KS8@u#X%fvTAv1JwuI1ARRuLOhNNB=)9qD6>?(yt&7rz7Bo}UD(HAfl zvYf58g!&{W*Y%ZbS7BgCtV>i?^Dd_*hL}7@haSN#SB_DqC{hg z17|AByC{Ai0nty+RWV$AnAQAh65AwI_W_vlT6HdY1~E7qw_>}zwYUYtt7>5rF}O?w z3WvsoC!K#ts!elW^3{Chw21%C7Q}TxUkR5uBTL7Qk&WQC%UxyhiP7DE{neMVSaSM* z04YJ%z9xsV0h%raIRR~j7@)K)wIF>`0!PhqfVUJuOg{1Wv$uMocffU=n#C5!nPe_< zzEn9NK%6ECqg|FG(6&a{&GUiskrC&yydp@BrHU4Tf$}KHy=lZlp)gf65rK7|%`6l^ zVNFD15(5-y%C34g693Q 
zaA5GmXvItiR%^{nag2t-BGboemB$w`av$4Qt4CD$J`u2MY|RSlt}-SRMx_H4PeGmy>*eLT7{`1LLt9=h*K1ed z*q__&>;8B?A5VIzm)K)$&m9g0$mnu+Pv)w1bykzR~Bku_hwnlLtrA(6f~&BcyuuG9xw$^+H^QZF_~tA z*!Q__^Y$FKZQdT^vCT0!C&anI?kT)Yx=LT$<%(sszO3sitxIpx7VV2HU1Vv!OK+|9 zEHBWubk)|EE-`AfYxBYJ%d#xqfTgv*Ue?|hZOgJO+Lzv!zI4qQkEOueeRe6e%6k{2 z|60mH7^w?!)(!-A3dYncaO3UKX*u!VX%j310j}))X2>!hb(-SrBh!SL{~$P>;Dxg3 zP$DV6V(O6y>g1~x`sGP4oncTBev&J5xPY}D_2P-#vY;rUp}JYjGmWt67$frDp4;w6A03afoQlH{w9a}2KaOy zFGqLDCj~**S-3cYKr`8PF64zpVtMI6YP6Fd4nXKtQclEsMhGf*E#2#*U-k(j6GHW} z9XZ;!FeWD*6q{xFdH{8#t4E+Zl3Oim{pv73M@>Slw%?lr&jO=ymSD7Wh(*g8*9<7+ zqDG^1QPKy)L&aQ^7@3pqVgjA_(wUq@l&FY6t(h6rJeb{jBLz07FwU;pm4l{~VN>Sh zK8)jfxwfX``FK7zgI-@Qy;Csv`+SV?+_y0{7{p|^{W-^&pkdz)P*J&Dn)YQ3m_ZugFtlh|ESqN_TC&5J;z(ut;C2hI%-n$JpAC=WryAbH2V$>$mFaa>Q zc8g&zLQz7DLZdQzJGRiXAjF1?r|pL4X3t??&-vKKbJ#v$UUvskpf_pSDXlHNU$43@ z?Ye4T(HH6>%j&6Yj=HojK1V^dMX0oQmDYOG)_l@@>x&O9Rqadf+IwqlS=ZKj>%H}^ z+O+qW0WCmqR3<8(oh1iDazg&eQzNZ3{6x6xdD4&};3r5I#wsV)r5Z{SI+HzIvqc|0 z|C~r=mJ4FgdN{|?qc+vBFf~iLn^KB4x}C>!h%2GEYo_V^4$AD5=~_B90|rPfe*Oeb zK2dt-@G?8Msx$h9c%zWWd)xMH+qUQZcE8=eKEJ*`KR#aH-d)Y$= z>*vS&zV8re5=|NC4jKqC5or{lg=S9;DPd#ECLfS3agUTASl(hjKn%XmWp!#tYAJ9+ z&Hj<%lYA5hfnYCUGb1h+WQ~Uo>b3xw9kDyOs3BU$ks``T6IW9Z@$RzIJ6jq@`XNRL zCPjsl9v5P0IE2*0zSgwkg5i#_jfaev10M8I7zH*^zscemDfTm4GrXD z2i7lyCeE!SGkcuwM9%@qq(a6L89QNiBB!g`BYS!x1?HY`Y7GcQ#k`M^H!f`u3D%9y z$*ItrRXVCuVN-fnJ-71WMbHrBG+uYGT)d-HCZ(`&T(xEt5Rs|(N(w|eNK6Va)|ElelSxY_>Pz*KeP2W*{G0q*^@cCW{^V(ok+jF2hBxdYc1W<_ zl_l1M7csbFR$&AGILU+YGDN?FgeQs>%l2Vrai?RaZ-`N&il!=H(Q7sGs z2+WDy>^b;++H=@$oCXu2D2&=wn`-aS-rKTVF0!t&th!uUUv*iuFVYrW*S@aNId@sL zEv>cInuzvw(cYW3-kSf=mfqST-ma~+Wm(i)w)Ngx>q}E<(GDJwf*fa_v^mxFdl1u7 zkfp3o`7;Fy!UCr1ljwPrDn1NS!iDg&CH>t11ci7+GS}j8?@RodxG`Upq}uW&rbbIu zm1d?cQEWl78)>u39w85Z5>crh68wON6X*||P>#v;S9~CWU?siToO29MeA~x(J|5fl zeBAH1+wJ!C_3`=f_UHA_>+3&%UjO{_=k4wFkM{~0)0xsVJz-9S@Fwafb)C_ec%TS} z22&T{&R>c(NPv=FYs^jZH5LZ335($b-orlBvSvSv032SG5QyK~s+B|OtbscMK_8@; zF&z3*mJWj|G8GEag~kMoW(t7EMFj?*J|f~h-qnHYq!0Y{#7U1EF^kyzvBLx=W8zd+ zU{FIa^>IuTSrV7F;gIwb2no?nt}3jOp5ae?LA9BS9Z6PS(gFmc)s(MH<`bw!A39Fz7s zBW55>(m*2}454=h5rs9NAd8vc8rjFaZW}fQpr{j2pXd$BnNe}9O+#Znx zb!dw4K?KJZ+fmq*^eS0PQu;H2*THyO=B||j&FE#WV_N|J1wQb21V9F zARL6V!KxR^XbA*Wxu3r}6ze%VD;oKhC=zqV!ez%iAJPo7$onST-S-pah3hA18p|XZ zLIn~p<~a+r9;vy|YUj=AUoS~I~U7xyuMm>WH`tII-=flxw?)5M3e7IrlL+w&%3H87Z=?T~(*e=i}M6OViz^>SA*m zdA(ehr2)t!XmuDvN})ZU@6$tGfCi>}L}B6E_2G7=3uwcjC#0+Z1W(I$f7 z_0H>*ZO2YL2cP@gCU?UWL64ey5hWD5XkVAhWw~Davh?*Tt?9D%^`gDEb#1+?6KmH? z>s>s*?Z9MVTV{f z!qU{FLO850<z^}R*D=Pv?{kiA+xG4G+_wAu ze!t(oqGaLi_0OMw{`vj-_V)YF>)Y$=<8imimMO*(WMaYRP#j?bQlS-cl{l@b?qdrg zJuH+L2|&>hdZT$4is&U)b9{UVm1{X=D@#O44Hv`>Gw0wiMUtsA;wS{>97YHTT$rn08D7TG zq*&?G6C0hQKqEcCB`47gTLlATk`aKfco*OiXCkJfMyC~#JkjYC1eovuPSm;tqsK5e zk|4?V)(N(w4~UYQ=q(yeddE0;@Kgf5fept{tR9(g@bUI3h>{pB#*7MOCJARSMYKNg zt@N8Y6p1T!G8=OwSLgTd4J%wCf#3odA1fV&ToWenXS^6LUSz_#JO+t{?7$&5Q=ki{ zf$*LNG;b+&!f<|NEq&?duzbwYP6u48l9W7a1Ud;u!gD|>LZ$_cOTwbhX4W9keN*pA zRzxNU*aX07HOe$%A`%ZG{&5`GGlV<7pxk4om!0KGl31sfTe4-Ngz}`JW}sKgBipbj z)8sHydt<~^Z=nIa|Xmrs@;nXloAGTE8rp67>Ml_TPW>R38yZ?L*E30LDGJ z4stD_$2Q?C$#tj9=I-^`IuDxq;P_}M=ObP2KxjDLoO}~1Rm9KYoYSU_Ic$!xk9};< z=kxh^JfF|o?dyKKeSCbpzP|l={qy_xpFe;8{QmRj{rzowK85V+CLI998>9Qyr2k7V zYRChOOb)|wI~4CF7IGX0B{j^EM=Wmn)Q}2FfCwj%E+kNkWRd_WvBrt3I^P2G|{YzdWX1d`NhFBdi@tqgIAqoT?V*v0ElquHCl`+&p@d!I?~wnAb9Ox*xgL!CIALB>;} z!wwV8MC-1!$6?V9zm0XC1G%`y32+`BS*aCw9h16_bk3CV@7k$Aqm!TkwoxqFcmGu^ zV0xM0(l4o|QoV>&oTLcFygD;TA4baaQ9=^4rw0`1j?E}xyH;|0ffk1gEyHr9>Vo?SnAq>88 z$<*N}jW2Bl)IkU-_&kUb? 
zaUN4q`a5NT5)@PMbzE zK8H=SX`Iw4PGR^ys0iB9Tkp%V^krR^bzLrPUHY>0W$o+Q*QG5>YfJ0B_oXjO>%Fz5 zwZ*|()7I7{VOnpI_ZH|5KDe|sY2s5Q!sd80EphVJ<=!SmH4uQy)f_y`Nl09F~G%JzTlz#C6 zokZ3rl3CMAP*cN(W{9W+;a%$_ZohCt%|b{rQ_~WBJ2;lGh+bxgq*&+yiD*0AknwJs z)XZyjx`gyDN7-$K*%;qXwwXm}lkRcTVf1NmwumS(AzAnrE=k?GH z4KZnu-Zb8$J}tWPq9KYBB-+Bo7*n5@t+5{R=SpT1zKs8cu7B0MR_{kzpVa#dW%`bvxVozMX#4z1+h7z5PMl;vSJM z1A(lm4wzUoA$$d@%t3_Y5z(#|q2XOKbhHBIq`D#1K`6K|Hx}+mIqH#;0f=YOiI{RS zBI4Pez*Vp*0NC7z_{N&~IKXN1pc|sMH5*}&VFSRA^-4#+ZPBK!OKXkO9Cey;UApJ$ z#5S54JuMC5cENR%gl`;eZv<1sfCh0>;c_Tp>4GJVHm$_;_%JaHGZqYi`b9)ldsY?n zMf%#-r7vq+7U^A!duvTwZ_DC!rF~h%v1eOaZ@qWb*0f2~pK9;XNkl}ocU93AnSHT| z626{=q0Mvb6?zf}Baa4?SWV;X4o`R)WrV&(fxifKX+$=^&&~CnPNlaZ(H?cPl?!e=D*uuXgA|MpP>NX`fT)eur5C zxP6TAd_JP=^mg;)xA(WVx7XKy{``4+ef{(M&-?q^97Da$X=-Fw5_#eSbTQ*^ZY*7) zXzY&zg3vDoQzL0nisCwQIlv0gF((JW-ECLV;Q*2VIyyx5 zM>ab0HB=H0;^tm09UG4xNrivjCSs+aHB)yualrAF6TBQ4QnxGSZju!-Ovw7Tr z2g1!8>-!>hG{?S-na+Ts;7CZRZ-+)lcgK~aI;3kPfgC zQh0#)oJR+B#uOUKRK!5RRbP|9E5tuOCQ+er=y6wL>5cg@@c*iw*sSQ>9pPnUN;;6- zW9*FghKxg;&>vycb74cYwAi?Cj7!TDwcsj56LCo%3TMdtmUhk%O&Fr=Ip?JwSIm|| z#I%DmWL1BsI2L$Uk(~yCM($In2&YE{%B=F3oYC2Y6o?&uN$6ZeT-^UA(dR|=n1aMW zM$?PJ3=nGJRzwxBs*)2%LGxL$%2FRNn?Anjz|`);L8cI%@1>BYsvV&-WR9VNCe6sc zb^vplO07Y zqxIg`wY9#ii?pt-x9*YCE-l}%tlF2h8^KtbLiH3b%1N?|C6TO8{dV$F9Fsv z&7Xug%NsJMA{ly+T!%2+Ws>6;Wqu0Zg@WX;QG%ySu^hzn-n7MTQE0iW+@Um|ZK^*rJuBH1fUqMwm}TPeK~Jg?VdW{WZ_*-4)v z1B=cfoO9Y7+rIDnw(r~aeBN*O$Nlzo`}%l)e}8}f=bztiZ||?KZy%rUbBq`|>-$xY zkg6IGgJy*No+tAT?zoUQeObz!&ECHk)~`_=@?WIb9#`-f`*(u z;D}hfqfkm)0^ZXOC3ZgTxvQqo=i*RHYU``X1sQ#dauye8uCCPt8u7iT5DXkdWE4Ok z=YGP$7%*Kq5_V#91TyKImVc1J#tD@&1tL)6Y~Hv&zXV8N9Jn-nS_NZ#mR}(9P?PW_ z(nm_WkYbr2AuP`Mhr^xmE=cliv3;XmF$>)=MrxOX2A6#mi|AAcvU&7LXv}DI45$E6 z_2$lPM}KIkrm3L!r4s=;JmN2D;DDJOKw{;yMnPJPz>1mlD$J=_Y|utFl_KFKO9#Ak)Y+CE1l`g^ndeQbxZV@ATpL-p-2xiFXvxA<2@$F`_rX$c-i-Tnqhd1 z-BDTwhNn$4PUAEqn$ZoXWP%(rau#f@gFnCfI`!tb?Hg$o{%!?d1I<|AHc2`FMa4xL zO+GP*NtBy=u#?en1U{H0n+~{-Q8>N{;H|Xt`5h}mV= zwOwu{d`N*M$8MHffp4q(a}dy^mMw^|puQq!1>o@VBI6{D(|It6WK-q2y#M1)rO&o+2pV|n51fJ=LBT{BFsJrkxNWyW%WQtoNw2DnbhgA26`o~jLAU+ zk?D3-2>7H_=RZ`XwbmVYdDK{XQ|}O>O{Ga|t#@tOdJ|PSCd&K2n|SMx9+X?rRH0iZ zMRd;SnFSQN6&rmx`mSL$c*i=4h_yQiwmdjM5YAQ*H<6Z8_ z)Rp9TPk{UcVW}VwMVyip&OqYnbYW80;UEa>9%|E1_kQBp43U$NSBdNoBJ~q(8a)){ zVFyH|z5nQQ7ov5@zIpA!iBGC#B$ZWP; z7f;iY*vZrhUd}I>Ulouqdi+VMomhcWwiqpS9Rf+HtGh-d#`7bLob^sb4Z3eXJlmpy zh?qAZO%A=rf^qJDmRT)Ge?049QAvsTQQp~i(yOGPr3Cr*M-{(H4&QO$@0c+3b$K#* zq?wprZ&iszRr?A5)r0D+Qj>=Ln0@Inl(Reo+-e4m?NVHlziKHF5$x zN}f^(L)XXRITVQ1&JHGt!SkpKATjmc62>5`B8_MPTbLoGmb~dc%lfFc8U`g58x_C=o7+8z@)ghf-G)Tjn!D`j5L@6Wh~PO9O;H}Cx)^N zsRX_ESKX`BAxgf^g(Odk9JuF%U-A-Bxt#_eO>rrdQnu*NeVWOn!MnbZgpO$*3^j6? 
zYr{F5!kP3U5p%}}$}IRacWvQeok+lsr(tGPBJ$p{=sg!6MWA`3&y#^kk}-9`VOU&B zL%WE$6DdiO;W&FP#Stl<47C!vA=Qm5r(@29K$FPx>X8^`TOTX%h4A>B_s`Jau0wPA zUPR2>YmVZ0Bbe%p>;^WXh#s}QU=G$`wX{iFG(E4yy?dMV6|+bR4A;SI`tw=zwvrPm zF_FGKL^$0C@+CXksWX|(dCVzcZAu8UML$d5QyU%pJz%`L2fAIFxKwkW?CgvNlX%+kZPW<>G4yCoG|x*t5sTf z4-lj6XuH zfhYNv(hP^hBZKF4;FAJ5AYuI=8Mmr;@qmsQ6OFw;-gEpN?(hVi;GYe~p$>QaaHwH7 zGm;-+dUrTp$=l6?L>}Z^p51t-B$klF4upWZJ2x|kFx}kI=J347n2;4e(Ttq?m~#x9 zbKke;^YM5*pO4$^_VxMs`Stn!_Wt(z_Wu6<_4#3Abce!q>i`e`0otXID#{qZXLvYV zGmUgDIeHao;+R8s`zu+FTCxF9z4e+Q1YJTe2}K&1jXNm&!yzCSll~i#M zZ(UIO-C>r0;Y7GB$|!Ddpr+uII4aY>py(gZU5mr#us~0=z(HoG1H_|+i?3l+VHC_n zZeU7q$_pfF2SWLg5f#zIi+Mm2N#waA-L=*vQ+($)7R0PN7O+%c7i&nkjP2=G61re-3I#=Q2Rj?}@GrFw>eOnz# zRTsLzelD2h^ZtQ{OY$3*>azJX#H8S(mc%(WmbONGkmr{hKmZUUPYWPC51%N^!r*mIET`WaVyf}Jr zoB?*Omq;`+VVN-I)_MTL*J~aW8X&-k6^{Yose}rKPj_+|FUk=VNL=nwTsSn8^b9Jb zm&ua!akQby;>6|+r)B}*jlA<$zItAGEI{=UBH;m0Ay_TX1d&Fg27u-T`~nA=^b!yz zbLpq9w$m|}U&4um2-;eZ3Zx@_$tF~O2s49FG@3S2OpH~u81R*(!_KtL=H2h$XEscW zTRM?*%P+?Y*?UTkDMYm#z8e;Zl40H9q@;PCfK;iCK%4hvNCwTLpXy!NqUw_XkLt)U z)dW4`HjFM{v^hcUrZT3A#M8t-UgO)+X#2)W5;P_A?!JcLx+JDvKpZPbVTqLL1Y3s- zmyAYzn)SNtLn(uU$f$OoBZCo^CcLhCs?kfi&EGjEB@xOA7ESa1>N5Vz_+-j-95c6C znR|2`ze1F#db89uk}O{^Z~@W04GWF5Ic&_C&ECw6bIxg#7E`mv@YW%ZZQCA?hfiF1 z|M>WP|M+-&yWPHQ4iyNsG)v$rO^xj&ZgPfMg;LuU&VdA&lG^sf?FH&%=`?kUL>{3@ zc1Q96gV4MjkYY5q>>{d>AmM%L)2PwmKt_I3hgi!d9#M5IqrZ~1q%a|S{_yQ`(6EcCEJ{;JzQgnh~8e8!o5 z%EDV^inwo=smr%-ssdh{ESAP2K9E#MckoAZsNwI7LJ?SW+ztM$X?F4Te!Lk%r9(F# zPVdF1kjR#6Ufd8(3TijYe#SvK9pzFVr4L-cT6hS#_%3>ujl)voq|!?2hp|*g@>+c- zGWEbgB@gjFtD}S?*_rR~(V}~{=lVO)kMlAU()%8%ph)uL=%mXT>aiNghSE5$Ev4-k zVAshJ7506a_q9mPhHXw#%{Yl^*v%~3T6&ntVV=bRKBxt?Q+_zWh~+pMFtfC>kO)ze z$w*0hNJxk#KsoGTx(bsq{dEv9IUO>RQAxmLr=@?WZVFCmaZ6`~E zNv)(KIY=6^N@$S7^v$CNTf>uL41P`o1_IvJD2SGAPBOoRzu8RF0BzN8g!Gn^d%tUdg3{xv$ z=V>G0th)R-@9`ywj+rW=+FGV?NO@3@Ds_O0Xp_ixTeLOr!N?}2az-Zyi6xurJi1>9J*ATt9kR_5lG^xKo z)Jh29ffv(ViY!uJIuG4M&kPpjaA=GoCK5t~MJlEyj;YJ!QGVfyxAZORv#GX8c;iE6 zLtckz&aycJ=M~43#%XiTeA660vVPz9{kiSi_ITbuKfmtx+sDV}*VotQ$H)DC=bQ~% zCpzv~R8CvKgkOxXOG|j_Xw3IcntC`qpoa(}8H|NB!oQ0BiB(ki{5(BykXUB!i!&xh zFVgfYBU6i*>mf5!^yyyI@lU)9Ji**u7*#o9X5)pA%Ma{+XCCEQ-DIhXJ_>{;dj5H$D zaeUjM49RR}2#Q%a9;~s9=^n)boumFhIv&D9dSMxEE~{JpY2sj}JY?myBLq3#gZR5- zx0-?)rKlx{cNZ%!L7ruk{;i(9=b)^q`e1}8w3)WeDHA|zDxxxEs*3nuU0z(5Yq2aSlb<#LdOq%)TJ52LS!XU~>W>w4 z1*0XU^l3y%TlzurQ{<7PXyagRJ{6}9M_K!slHmKPaq8>bc)NKujXwXubmm4gK-&3B z!k^5H8iWiY3C%Q~_^fEZ#^3p?CC*-oum>S-`v_rJ&4a3@nZHi8b*A zuHUcn|#*5o&lEOf?^m$}>gtjlqbpVd#Y*3$wl;4tK6sv~0{$7aR#% z5wtvn@uV}(IKbzWFo=&r8cCexUwyHUa*tAh3=Tt*mdYeir_UvxRZa+@)*#XwstZ$j zoql$XLm5A&YK~Lw&x^hf(y&jG7~~u<(jaqU*qqR5-kCzoGlU?S9WTtLVTi1hc3!mb zupBzjQqLlDA&&RDpOwTIaH8ZKw)#0?Mxdpb_E=j#Vg76qwu(9R6+jNqK2}ePu(nW= z=R|DJN`s`M$=U^1KB;w28KTHEtLV~EZkqSYk1ZESbZk2tXUBz-*3$2$`1)^Rcwj6K z(zpZStuc_CM?N;eax~RAAqt;@3PE;nE)^y&&v5)%?dkpUj=;P@V92OXtfEm=l22vG zh6ugl)jtxzs_+{kSVQsTZr>D%GlfKOTJM8J^HN&&2~6DUQ?tZ*9x3SxV^-f8>rR&G zaX*aF;@S0GoLvdJ_0-r>0?QF>O|_{+PFwH2FKhEb^S$@hT2lqh_I(atPM@~wA}Qhb z2i$S*pw4I@xDzz0ze@hK5)*UPQcemXNfK&@#K>9XGF_=Ie1c!W#-*h^zV@Wki}IrSW6V*QZu%di**=ENImZ5YJfF|a=holvx5xdyKOZpb;{D+*LNT#{9_~u) zfdp=@)0wJ>a56z1x@!|q6skE(#GA=wLX3n2A_0WAo66(wBM%N{O(Irp795(0QUO|K zj0RG#A0!p9WWAyu`xo!chS}e?Cr)x5lvH)JXmr^?uQx6JNeD{n>JmKP=$H)Vs5PB? 
zagd9QR0hC{#uE@xPhRy=TE^%gk_hxjSYZmFl!&WyQ4#rxgJY8_mH#1#ZX&0S?r5r@ zm`zo0t7a|I5Hqs^qLomSs=SGmwr2Vy*@-yaYDvB&(#6tFL^y@?ftF~RtPo+ZISc*nl* zB!UcchqvyKRlQ#K7TQXZ%ZgJXWh>3WHW3ZQn2HO<3-CB!rT2>fVIDC_(oB4pld3=; z(>o?$Ob+8dX{32q1<~Cw4byLdLdc{{x77$1&D`!qK1;0INw%a?9s)fg`D6yc>qoha zZW#vfj9ndBql#eu`~1{q0K7JZ=O zp@oJ)YbqIvkGG1XGZD)o@aArDq%!F(dizSOK#(FKlSBqAM2;r$ZkRRMEf+6U4`*nV z9*|~p-#B!4lcbuzlt5=JOjbY+{%r@kJ&t!tmjr^W-^+pe6838XHyk%z@6HYS#t9)V z=`>~%b@got*bguk2oR@# z0b!_+74;c8r5DuCxsQE}F>TH<=a~DLV~lAw#~ks~bQDLkvF`?qG4|*Fcsw5WySIXy zbI>eV-bio>{88Dl1QP6=@sEmT2- z0>DyXk_r&Dnmm&*el`-dB&f{U;NH79lvf20{U0|wF{aJCe;euW(J$DTRa~xF&5kn+ zQK8SSQ}OUEDZn)G=C@W)<7YA>KovnsyzGWwPYdO+KGh&9Ab@~T5_(t82QzJJ9; zH~}O9X5RpyMcH+HvPIKOH)LTt-B(81J!Y*I#OGouRHTWD_{4gs(o3||`vC+(C7^`& zK-KT}8+IjXALS4aJH>o5x?YebRY!=8|F?Qt)PFrp8i{E}W}rV#XDSc@juumKKW@jQ z1+dNE~Qfu^L02%7ly2+kvrC=MDZLAOJow)?fc}M7<2%K zVVu-4X`m6q^UzH#Dixfh73aeo?b2&e&lN!j-nGE6?g19P5!AaT=n64Q(y{Q?k%cBA z?&Ssz;=0L^Z>)-V^GYcLgH)uUi}L(?oKb2~(&T3qCn`-enj}*q!cCDA%@qjpu&S}{ zJQQ1OwbvzPiYroZrZ-Luw>*xXB-If{7bQIqXS4idf@gyqsK5OFu>V;XTDmikUhqx1 zTalxd#|s6VLu4$jk+rd!O4QwvsBUwqWU%pp=Yk4wPb7a~LTE#3eZWiH8^mWnpuCv` zeY7bQ*6g@x5L#)%$T^Thyn?$B=hdP*HgXw4e-m-A9mvM9J4r-_eZz&d*18}v&x}Sh zI4;SJj=~*(WNFpn}pp-pP-JTCPw@Z1Xdpn8ORqA!&y)Ny)_N(Bb$_O=4a0)u1B^sz%uhO8H63iNDz;R=YxzFeJ-1pta zm~%(aLm-0~mRPpbC*q`qWT%e=cLKjD4{1uSm<9zWBHs6(5;8+Ywj&YA9{H41Q0aw0 zo6;;aiTf;^aAH9LN|P2vOO!nH2o3hV;R&KYkR*_1B0DfMG?-dG_!2qc`@<*p*-es4 zF<%cqEjL;yMVEyMi+f_ZXGqdlkO_8@Ixs{@B%AL`HcX`kUlO31Cvo-76A42`(3-kk zvwDZ$gd_)2ksMX^0(oe}rbXdN;VME72(p`js0d}E%#=ruR9V{lID?ra<2M{7^a+KP z;scW;@1!7&AXXzTib#;nIyEpdjmcm%GlN;orkl=0DsC1`b7dq9fOk0b5m;1}6jwAa zycafYUZ>uSyPrA2y4ZI>{!U+h2E(xhd;u4CuJ^`|F zIoHRjdQ+eL;lMR=>?oAJd(-)e-7qHiiQPC22ATBO0H!xqs6-P8;9UfdNDAQ)4M$*; z1~ee!ae2#u0;FiOQXWaroZBNyMILaxm^%~-e#RDw+i^rwSnp>Wy+sS4YG#@%d=v2D-j&?_2!;zS}P&L3VAx0R3~I5{92_LezMi7k9` zV4nmB{;@EdlG2ohsxET4hc2xL-xqLA66BT5s5iUj^pDk7o=l*mH*!Eirr)L+DlNG> zN7(ATmAs#3t)9ZHEO3zULSnec`9ZR20+EbSGm_CQ^Mh)8T6InGi^X>debmm;qty z3mS&qaKNRBnNVLJmEj4c0D{(}Zq;Ynu!+-u!OkX>Ie=_Er?>PFk<-dKl$X3;9#3|t zy{L}iDJciRANdjSOl$95YVf?bgrA6FL?O|FW_9gNCZ`e*mH`P}z=9(5aBR?Tk}X2c zV^1N;lZNP*H9(#PLE>y7Vw@%_w1#3>(kex2^@)RG1c(Drgf9@8N&!)rhAh`N{RO`% znT)k9Kf>=aTd%~$>dJ)2vC>$^V}(*op5{P5<^4F7m<%h-D4tPOJ%&OVmbm4Mhhv?} zj%1{79*f%d$$fB`?33Hzq-@g+I*CD;h~JPC%!e{h)$dGO~yZEBbeq|Bw^i4}G2lSfbj3?0WHy6XhG2}d?pn?P2N%mQi^ zQj1fq{-j#P0t7t+mE|8JvKr%DN=eyw-6fHn%QS<2x0z#WhZmeiog#XGoa&r(s9=Ys zGX3fvn9P&F6lBDSkK+-{Kx9t``yPrSmnB`*)UUa2DOlln&wxp8Is8}(JuEK@cY>yz zM9}JN92gNgnekBtqQkf^h`GE8YDO6nwh88uS{#28I0wLtA)5q*slmg?Pou%Yaz2+3 z1)FqEqjGMu>1eHOo3|5glI=iztCC2|9~ADI25QoHTUhCUbAhKst1EZHD;%f^iPl4r zA0c=-Itvzqb;ui1f(uvxfwc0SsbeLSA|i4T77khdh6026K_~XgVL<8~seAA{XMrss ziC5HWvo`<03jx~VYxBB^-+iaktxxCl#J@S%usPj*;~2xt_A&N-8)J?!w&%8O+n94~ z`@U^+&V3*IzMI(?!=^=V@{B)b3Yf)t@_#i+(;vq?)+U!Q*PumMCpP39krxP>I{y}B z0wUgd*7cdHh{CkIf>H?rrDm|Xth7ke0KBeBqpP8q&hqq1>AB%6NlRIYqgHEK85ybK zEIQ*R`1G@%;2u);E-r-DT$i7CR*{@3=p0g@mn^ez;2TX0{39)glQPDT$5nV@cN|E z0OLk6gWf}e%uZ>&s6#vZ+O48JS0d zyMIE0OA5Km`WNrA2D&K6IwrU%*T6hG$7(c5`3Ow;iCkSdY-HL*4}(W_u%9G>h|y}K zu5XJ+iB;m1R2@4G4H|^06fTtGLJlMQW3zp5H*ABu$s`A55QCV2QDy>U0{{)rL7Pi| zs$I>#Eb%NguQCr-$Oh?+O|#J<0hT^UIkmzKaja9(q;B_5rPN7Dd0f#VJ0!Fbk6$9J z^6`FHq2s!u9>$LeW5U7FW5+DLv7_wMoeJ^8eWRSJp4KboPW;OA`S;4YvdiJ|j*1QyI&+-i9SXO(`=Sn;1EN!hZxA?E@IWELYV zTr^4;yq6?SR@R!7eW3Lky!cL%Q->lF3=ROKqLI8BEV)}U$hYj?(g|mpngR!@f}9=^r#_sH zv@N;K=?c|?ltQtRv_p>|9Dp>rXC0^&R?|rFJa}b?&kf-NyYf{hjPgznw?E{tV1aJk*o0wTlBy%%vC1w2@q9zH5Li>!u#)jT6LDb*NJp_KdI`K{ z(Y5vL%bggSW2I|yT7YVPx0DP_nEZC|D+O1At430w6ermVA!sU935B477^3K|XQae= 
[GIT binary patch data for examples/visual_gen/cat_piano.png (455692 bytes) omitted]
zD@>^GQ=Fmr+ojiwO~0X3ZUW$PNY_&0u&mrtU%3YHLB_|gTP^F8Z0RfUd^BSILoUbK zCQ#CrHQ;Vk%bGK1E}mf{K^QChx_gvafuluFSv>SO>kd6;b;v`i5$CZgz2J-{Ix2^Q zi1s+F_oV_^?h-RQJ-#B;w%oobqO;S3TVI}Ap9DPhB)Z4>H+}fiTSS>7LQl;hFP7=* zQF0o=bL7qwvp3h4ceT}+neszfo`V7&Fl7p7s&WJbvl;=5u5LL=>@P+SP0v1x5rj5O z^fl$tcVD`gw&a=gpy${$;bj9Cv7Z(E(Vv>sasPTJwCEd0&Yi9KSraj&+ZTN2wys}Z z6o{H_geLDm9D4hIN9DX=PT>tK_eg?VZX(KYr zYI~G@gd9e{p>(uu2`X1xJ=oq{*Fs$m9pL=JpkdmezW0f;?{ggSYT0 zW%0W$>+QTb4a4V{?T#l33Q?Cj&{|TDc~>EgOlyy0^Xox@*rzoqu0O&i34tXOYRQEA z7TRm*OSxz}2_(bN<7HIh!cn+^mmg#4zq%4fpet6!&C1n$aCPL(V-BC^CClCzuUD2T zG0ON+%4H1|P^QF6Ed_)|ynItTeJX&qvZ{G{S7-gJT@%8QIB?gJBCV%ijE>KNhoIC7 zi((@k!jBYkap>UzLWVc*o`kvOf z8|8SlWWoKw^ifD^z#>v!6b}hUmCtgk#O_YXv}p8R=1Y(~rLCsc1`VSQVJA**l7>u~ zR=XnhIgR&`Y*WU5`A&e_rc;I=(0KZJGzuwae0kFmdh)?L=yuc2L zq}*&}CA(H?VCkOa=;;}O%acpay5vB7jH&8OX}-rlA(AyZB57Wi(Re+Ihvex!gVjW1 z$E){hM)tO95g@Lixf~AfPyEqT1RQ?Qwm(p25$zyE5!ywj-d9rUK|Et}SIW$6O^H?+@A%9!*T7)Uo^Dz4)@c-JU zVCONXf9#-e-n6MqR7uFwuV}@5nY&ELM7$y~dX?@<7S<-jxH8}7z>nEuNdN8ol;J)J zrr`|$Kg*4O_Π9MIf`rZwdb%R@9xunf!=Pw)9SqLsJw^K;3Z^07gR# zb~rv%W}C7YgKui}T|vG-eH(jUJrZ|w1es=I{9Qd984!KptXn(oeU zQKB#zhSMbs{A5F8eI;CbplV>IMh$(K{jy|#RrNHQ6_X{*vw95Zryklgp!iLoa2yOx zpd(}G(Zu78L2f>&G{KQ|XNd=GhD`JG_gO5utf>uIKkbdLnjPNzwyL4wC!ks**H<%u z%=HoIM$YUW>>7vC1}##OftId(otqvrUD&)3@-fc; zIqNYNTRVayXTBIg1U`s~k6+vA&EQO>kCg6&5DT9;bQX!Ol6^~{7XW_7yu+nM+x^+8 z8|sUFRa?FZj6R=3Bf#*X|L5>#6r}bal_Q9Vjq!OP#md<2fvXvw?NRL>{X-qSpgP}U zNei|10KBh&#tJzzc@L7O8t|V2eaqpEkFbV33ZLA!Z*wFdY4M?tWh^z#%26Otl|1Eq z>hr(b=>7;D%gU|4Kd;slqAyQ%Ed03M=NjI_avLed7YHp4QA{FChta5JB6Q#-l8RA2~!VrBcL2*^B42g^9Xn*~Po8#*5 zgdZ`po9F-ds{_TUaYx9q@yX#a3YGftFi82bNXhaN02>?YKP{>_)$G6_0)RpnuJ9EJd3I5B05iPAEThIVVvjKHFxv-Up4oK;0-;71XW2qL>fYcabbsi!@?g1^gzVF_R0NKn+hLu z`|)AV5ud)r0=h@FJ-){skM?OG&vl!`eE$%+-wR324^y$@pyOl~ zbKS$BJ)k54Y|(v0U?W6_yc@w+u6Iu#n>HXe)-+#Wp<8Y$?s3_k722w=^^4NEDs@7LX%fKTvyjyj zdYHBK-XGYpbrs*_;2|r8eJ~})M*oN=-_S7cKqtwJh-J@%sby>WJC73Be2i%e=_J66 z>elNGHXjt(^X&k^m5-6M2P@`2zc{vf2KYq0+-1ub8^5OWApACiT{oK1Z)E77eI&Cr zAZx-tadN3cZDh`*=9`!>z*lz4&0p+E>-tATnpY2%^`ejEbd!4=A^gc~eQ-PPcb!J@ zc89o(lR94h5}~I>&hs~o_3O8)l;>JVazT|eM=lW_qK+$!EgC4pd|xJ$=i?-v#cS?KH2-|U1K$4UKYaW%BlCjAajt7V__@Bcr`28ew#$_o3;!iGVm4r%bO zx?S3QWK(8t+kLP6(Hj%0k`nPPCGGRuMS+ovdJb=sZl#XkB9v32mFaNcflH44L*-{n z_Jcbgw2d@9WJQTovJ(j{^&6|61Gk24Nsh5_MCXg@hT#o=73t&Sao>iSu@)493t|mJ zk>^M;FctPlY)w-2R$2R{%!pjVM4VYJY(!*giBp+B*j^fAv+KbCTAnO0ZJowG2zJ`* zG{S6P8W!{{n6Yme;|yee#`9Md%6G1LSoxhHTof6xLx|@*R$CS_FgZ9`WrsLwpi4H_1!Rvf52F#@g`-sg{~sevR9QMWpIf3$oha#cK_NhU?i4^9(`shLaa&q z#b$hn`c5$P2%_1WAatmLZ9zm9P9FUcPX0(m|6|#UiN$ywpO>#snKIkzOfL9gK)1Iz zaovCh-l&Pks}j7BBWvlo(>HHC`Xg8D46fwQ>Ftllt+kq_nD=xxS}n z+KZP>TRq?2>V9=pX54~t6Ze2eU>&B-gfAUYfap{3#`-cei z|Bp0!j=DBPE`Phs_n*e{AV);tIrl}sPhs7Y4mJ^!)Sp%G^*f&awR!#cnKFT|`qVmh zp)gSV!y}&jeLRD!lvu!8`UAlAmPPsX^3DG(2m+}%zCUSIxl;V?FN?iq!e67&`s>U! z7I=&LpaY*dna@WrxY&zG_%Nz@;WtkIEv%2pd~&pl2`8n}wdQ)~PDUjuaImBNRMX#( zt*~A`OH8Id_W+#bRy~C94|INr7XLke`7RJoR{WH{toifvy+29ME-e0DY<@y9tnV-e zXqFP{f*!>m%jD-H@tCLS`+(5rO7AtoCn(K0(DWpjb{PFcgZNBv>wZ1#X{)_cGjZxs zhPNK)2Mp*9_LeGC!79)^ChcGM_${$JBa~UsZ>#E8Yb3*iK+SOFF|7Rc@d^I_U>^Qc zO6@1eG(2x#%w2E$jSZa@{6GA01}}}xWaqDs()k|D1I2?s3D0`ZBLDdx{})-s-^R{qXbd{`36_A2Rn1lst)V z-sdunk8!Ll>*Hb1yRFLg!0rrrjc&gA%!K{*&mZ9b/dev/null; then + echo "āŒ ERROR: diffusers not found" + MISSING_DEPS="$MISSING_DEPS diffusers" +fi + +if ! 
python -c "import torch" 2>/dev/null; then + echo "āŒ ERROR: torch not found" + MISSING_DEPS="$MISSING_DEPS torch" +fi + +if [ -n "$MISSING_DEPS" ]; then + echo "" + echo "āŒ Missing required dependencies:$MISSING_DEPS" + echo "Install with: pip install$MISSING_DEPS" + exit 1 +fi + +echo "āœ… All required dependencies found" +echo "" + +# Detect GPU +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "Detected $GPU_COUNT GPU(s)" + GPU_NAME=$(nvidia-smi --query-gpu=name --format=csv,noheader | head -1) + echo "GPU: $GPU_NAME" +else + echo "āš ļø WARNING: nvidia-smi not found" + echo " Continuing with CPU (very slow!)" + GPU_COUNT=0 +fi +echo "" + +# Create output directory (in current directory) +OUTPUT_DIR="./baseline_outputs" +mkdir -p "$OUTPUT_DIR" +echo "Output directory: $OUTPUT_DIR ($(pwd)/baseline_outputs)" +echo "" + +############################################# +# WAN (Wan2.1) Baseline Test +############################################# + +echo "============================================" +echo "1/1: WAN Baseline Test" +echo "============================================" +echo "" + +WAN_MODEL="${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers/" +WAN_OUTPUT="${OUTPUT_DIR}/wan_baseline.gif" + +if [ -d "$WAN_MODEL" ]; then + echo "Testing WAN with official diffusers..." + python ${PROJECT_ROOT}/examples/visual_gen/hf_wan.py \ + --model_path "$WAN_MODEL" \ + --output_path "$WAN_OUTPUT" \ + --prompt "A cute cat playing piano" \ + --height 480 \ + --width 832 \ + --num_frames 33 \ + --steps 50 \ + --guidance_scale 7.0 \ + --seed 42 + echo "" + echo "āœ… WAN baseline test completed" + echo " Output: $WAN_OUTPUT" +else + echo "āš ļø SKIPPED: WAN model not found at $WAN_MODEL" +fi + +echo "" + +############################################# +# Summary +############################################# + +echo "============================================" +echo "Baseline Tests Complete!" +echo "============================================" +echo "" +echo "Output files saved to: $OUTPUT_DIR" +echo "" +ls -lh "$OUTPUT_DIR" 2>/dev/null || echo "No outputs generated" +echo "" +echo "Next Steps:" +echo " 1. Verify outputs are correct (images/videos generated)" +echo " 2. Compare with custom implementation outputs" +echo " 3. 
Use these as reference/baseline for debugging" +echo "" +echo "Comparison command:" +echo " diff -r $OUTPUT_DIR " +echo "============================================" diff --git a/examples/visual_gen/hf_wan.py b/examples/visual_gen/hf_wan.py new file mode 100755 index 0000000000..3919794052 --- /dev/null +++ b/examples/visual_gen/hf_wan.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python3 +"""Baseline test for WAN using official diffusers library.""" + +import sys + +import torch +from output_handler import OutputHandler, postprocess_hf_video_tensor + +from tensorrt_llm._torch.visual_gen import MediaOutput + + +def test_wan_baseline( + model_path: str, + output_path: str, + prompt: str = "A cute cat playing piano", + height: int = 480, + width: int = 832, + num_frames: int = 33, + num_inference_steps: int = 50, + guidance_scale: float = 7.0, + seed: int = 42, +): + """Test WAN video generation with official diffusers.""" + from diffusers import WanPipeline + + print("=" * 80) + print("WAN Baseline Test (Official Diffusers)") + print("=" * 80) + print() + + # Load pipeline + print(f"Loading WAN pipeline from {model_path}...") + pipe = WanPipeline.from_pretrained(model_path, torch_dtype=torch.bfloat16) + pipe.to("cuda") + print("āœ… Pipeline loaded") + print() + + # Check model states + print("Model Training States:") + print(f" text_encoder.training: {pipe.text_encoder.training}") + print(f" transformer.training: {pipe.transformer.training}") + print(f" vae.training: {pipe.vae.training}") + print() + + # Generate video + print(f"Generating video: '{prompt}'") + print( + f"Parameters: {height}x{width}, {num_frames} frames, {num_inference_steps} steps, guidance={guidance_scale}" + ) + print() + + # Set random seed + generator = torch.Generator(device="cuda").manual_seed(seed) + + result = pipe( + prompt=prompt, + height=height, + width=width, + num_frames=num_frames, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + generator=generator, + output_type="pt", + return_dict=False, + ) + + video = result[0] + + # Post-process video tensor: (B, T, C, H, W) -> (T, H, W, C) uint8 + video = postprocess_hf_video_tensor(video, remove_batch_dim=True) + + print("=" * 80) + print("Generation Complete!") + print("=" * 80) + print(f"Video shape: {video.shape}") + print(f"Video dtype: {video.dtype}") + print() + + # Save output + print(f"Saving output to {output_path}...") + OutputHandler.save(output=MediaOutput(video=video), output_path=output_path, frame_rate=24.0) + print(f"āœ… Saved to {output_path}") + print() + + print("=" * 80) + print("WAN BASELINE TEST PASSED āœ…") + print("=" * 80) + return video + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser( + description="HuggingFace Baseline - WAN Text-to-Video Generation" + ) + + # Model & Input + parser.add_argument( + "--model_path", + type=str, + default="/llm-models/Wan2.1-T2V-1.3B-Diffusers/", + help="Path to WAN model", + ) + parser.add_argument( + "--prompt", type=str, default="A cute cat playing piano", help="Text prompt for generation" + ) + parser.add_argument( + "--output_path", type=str, default="wan_baseline.gif", help="Output file path" + ) + + # Generation parameters + parser.add_argument("--height", type=int, default=480, help="Video height") + parser.add_argument("--width", type=int, default=832, help="Video width") + parser.add_argument("--num_frames", type=int, default=33, help="Number of frames to generate") + parser.add_argument("--steps", type=int, default=50, help="Number of 
denoising steps") + parser.add_argument( + "--guidance_scale", type=float, default=7.0, help="Classifier-free guidance scale" + ) + parser.add_argument("--seed", type=int, default=42, help="Random seed") + + args = parser.parse_args() + + try: + test_wan_baseline( + args.model_path, + args.output_path, + prompt=args.prompt, + height=args.height, + width=args.width, + num_frames=args.num_frames, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + seed=args.seed, + ) + except Exception as e: + print(f"\nāŒ ERROR: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) diff --git a/examples/visual_gen/output_handler.py b/examples/visual_gen/output_handler.py new file mode 100644 index 0000000000..a360d681f9 --- /dev/null +++ b/examples/visual_gen/output_handler.py @@ -0,0 +1,237 @@ +"""Unified output handler for diffusion model outputs.""" + +import os +from typing import Optional + +import torch +from PIL import Image + +from tensorrt_llm import logger +from tensorrt_llm.llmapi.visual_gen import MediaOutput + + +def postprocess_hf_video_tensor(video: torch.Tensor, remove_batch_dim: bool = True) -> torch.Tensor: + """Post-process video tensor from HuggingFace pipeline output to final format. + + HuggingFace pipelines with output_type="pt" return videos in (B, T, C, H, W) format, + which is different from VAE decoder output format. + + Args: + video: Video tensor in (B, T, C, H, W) format from HuggingFace pipeline + remove_batch_dim: Whether to remove batch dimension. Default True for typical + single-batch video generation. + + Returns: + Post-processed video tensor: + - If remove_batch_dim=True: (T, H, W, C) uint8 tensor + - If remove_batch_dim=False: (B, T, H, W, C) uint8 tensor + + Note: + Assumes video values are in [-1, 1] range (standard pipeline output). + """ + # Remove batch dimension first if requested + if remove_batch_dim: + video = video[0] # (B, T, C, H, W) -> (T, C, H, W) + video = video.permute(0, 2, 3, 1) # (T, C, H, W) -> (T, H, W, C) + else: + video = video.permute(0, 1, 3, 4, 2) # (B, T, C, H, W) -> (B, T, H, W, C) + + # Normalize to [0, 1] range + video = (video / 2 + 0.5).clamp(0, 1) + + # Convert to uint8 + video = (video * 255).round().to(torch.uint8) + + return video + + +def postprocess_hf_image_tensor(image: torch.Tensor) -> torch.Tensor: + """Post-process image tensor from HuggingFace pipeline output to final format. + + HuggingFace pipelines with output_type="pt" return images in (B, C, H, W) format. + + Args: + image: Image tensor in (B, C, H, W) or (C, H, W) format from HuggingFace pipeline + + Returns: + Post-processed image tensor in (H, W, C) uint8 format + + Note: + Assumes image values are in [-1, 1] range (standard pipeline output). + """ + # Remove batch dimension if present + if image.ndim == 4: + image = image[0] # (B, C, H, W) -> (C, H, W) + + # Convert to (H, W, C) format + image = image.permute(1, 2, 0) # (C, H, W) -> (H, W, C) + + # Normalize to [0, 1] range + image = (image / 2 + 0.5).clamp(0, 1) + + # Convert to uint8 + image = (image * 255).round().to(torch.uint8) + + return image + + +class OutputHandler: + """Handle saving of generated outputs in various formats. 
+ + Supports MediaOutput from all models: + - Video models (WAN): MediaOutput(video=torch.Tensor) + - Image models: MediaOutput(image=torch.Tensor) + - Video+Audio models: MediaOutput(video=torch.Tensor, audio=torch.Tensor) + + Supported output formats: + - .png: Save single image or middle frame + - .gif: Save video as animated GIF (no audio) + - .mp4: Save video with audio (requires diffusers export_utils) + """ + + @staticmethod + def save(output: MediaOutput, output_path: str, frame_rate: float = 24.0): + """Save output based on content type and file extension. + + Args: + output: MediaOutput containing model outputs (image/video/audio) + output_path: Path to save the output file + frame_rate: Frames per second for video output (default: 24.0) + """ + if not isinstance(output, MediaOutput): + raise ValueError(f"Expected output to be MediaOutput, got {type(output)}") + + file_ext = os.path.splitext(output_path)[1].lower() + + # Determine content type + if output.image is not None: + OutputHandler._save_image(output.image, output_path, file_ext) + elif output.video is not None: + OutputHandler._save_video(output.video, output.audio, output_path, file_ext, frame_rate) + else: + raise ValueError("Unknown output format. MediaOutput has no image or video data.") + + @staticmethod + def _save_image(image: torch.Tensor, output_path: str, file_ext: str): + """Save single image output. + + Args: + image: Image as torch tensor (H, W, C) uint8 + output_path: Path to save the image + file_ext: File extension (.png, .jpg, etc.) + """ + if file_ext not in [".png", ".jpg", ".jpeg"]: + logger.warning(f"Image output requested with {file_ext}, defaulting to .png") + output_path = output_path.replace(file_ext, ".png") + + # Convert torch.Tensor to PIL Image and save + image_np = image.cpu().numpy() + Image.fromarray(image_np).save(output_path) + logger.info(f"Saved image to {output_path}") + + @staticmethod + def _save_video( + video: torch.Tensor, + audio: Optional[torch.Tensor], + output_path: str, + file_ext: str, + frame_rate: float, + ): + """Save video output with optional audio. + + Args: + video: Video frames as torch tensor (T, H, W, C) with dtype uint8 + audio: Optional audio as torch tensor + output_path: Path to save the video + file_ext: File extension (.mp4, .gif, .png) + frame_rate: Frames per second + """ + if file_ext == ".mp4": + OutputHandler._save_mp4(video, audio, output_path, frame_rate) + elif file_ext == ".gif": + OutputHandler._save_gif(video, output_path, frame_rate) + elif file_ext == ".png": + OutputHandler._save_middle_frame(video, output_path) + else: + logger.warning(f"Unsupported video output format: {file_ext}, defaulting to .png") + output_path = output_path.replace(file_ext, ".png") + OutputHandler._save_middle_frame(video, output_path) + + @staticmethod + def _save_mp4( + video: torch.Tensor, audio: Optional[torch.Tensor], output_path: str, frame_rate: float + ): + """Save video with optional audio as MP4. 
+ + Args: + video: Video frames as torch tensor (T, H, W, C) uint8 + audio: Optional audio as torch tensor (float32) + output_path: Output path for MP4 + frame_rate: Frames per second + """ + try: + from diffusers.pipelines.ltx2.export_utils import encode_video + + # Prepare audio if present + audio_prepared = audio.float() if audio is not None else None + + # encode_video expects (T, H, W, C) uint8 video and float32 audio + encode_video( + video, + fps=frame_rate, + audio=audio_prepared, + audio_sample_rate=24000 if audio_prepared is not None else None, + output_path=output_path, + ) + logger.info(f"Saved video{' with audio' if audio is not None else ''} to {output_path}") + + except ImportError: + logger.warning( + "diffusers export_utils (encode_video) not available. " + "Falling back to saving middle frame as PNG." + ) + png_path = output_path.replace(".mp4", ".png") + OutputHandler._save_middle_frame(video, png_path) + + @staticmethod + def _save_gif(video: torch.Tensor, output_path: str, frame_rate: float): + """Save video as animated GIF. + + Args: + video: Video frames as torch tensor (T, H, W, C) uint8 + output_path: Output path for GIF + frame_rate: Frames per second + """ + # Convert torch.Tensor to numpy for PIL + video_np = video.cpu().numpy() + + # Convert to list of PIL Images + frames = [Image.fromarray(video_np[i]) for i in range(video_np.shape[0])] + + # Save as animated GIF + duration_ms = int(1000 / frame_rate) + frames[0].save( + output_path, + save_all=True, + append_images=frames[1:], + optimize=False, + duration=duration_ms, + loop=0, + ) + logger.info(f"Saved video as GIF to {output_path} ({len(frames)} frames)") + + @staticmethod + def _save_middle_frame(video: torch.Tensor, output_path: str): + """Save middle frame of video as PNG. + + Args: + video: Video frames as torch tensor (T, H, W, C) uint8 + output_path: Output path for PNG + """ + # Convert torch.Tensor to numpy for PIL + video_np = video.cpu().numpy() + + # Extract middle frame + frame_idx = video_np.shape[0] // 2 + Image.fromarray(video_np[frame_idx]).save(output_path) + logger.info(f"Saved frame {frame_idx} to {output_path}") diff --git a/examples/visual_gen/serve/README.md b/examples/visual_gen/serve/README.md new file mode 100644 index 0000000000..b68dc7f2a2 --- /dev/null +++ b/examples/visual_gen/serve/README.md @@ -0,0 +1,322 @@ +# Visual Generation API Examples + +This directory contains example scripts that demonstrate how to use the TensorRT-LLM Visual Generation API endpoints for image and video generation. + +## Overview + +These examples show how to interact with the visual generation server using both the OpenAI Python SDK and standard HTTP requests. The API provides endpoints for: + +- **Image Generation**: Text-to-image generation (T2I) +- **Video Generation**: + - Text-to-video generation (T2V) - generate videos from text prompts only + - Text+Image-to-video generation (TI2V) - generate videos from text + reference image + - Both synchronous and asynchronous modes supported + - Multipart/form-data support for file uploads +- **Video Management**: Retrieving and deleting generated videos + +## Prerequisites + +Before running these examples, ensure you have: + +1. **Install modules**: Install required dependencies before running examples: + + ```bash + pip install git+https://github.com/huggingface/diffusers.git + pip install av + ``` + +2. **Server Running**: The TensorRT-LLM visual generation server must be running + ```bash + trtllm-serve --extra_visual_gen_options + ``` + + e.g. 
+
+   ```bash
+   trtllm-serve $LLM_MODEL_DIR/Wan2.1-T2V-1.3B-Diffusers --extra_visual_gen_options ./configs/wan.yml
+
+   # Run the server in the background:
+   trtllm-serve $LLM_MODEL_DIR/Wan2.1-T2V-1.3B-Diffusers --extra_visual_gen_options ./configs/wan.yml > /tmp/serve.log 2>&1 &
+
+   ## Check that the server is up
+   tail -f /tmp/serve.log
+   ```
+
+## Examples
+
+Currently supported & tested models:
+
+1. WAN T2V/I2V for video generation (t2v, ti2v, delete_video)
+
+### 1. Synchronous Image Generation (`sync_image_gen.py`)
+
+Demonstrates synchronous text-to-image generation using the OpenAI SDK.
+
+**Features:**
+- Generates images from text prompts
+- Supports configurable image size and quality
+- Returns base64-encoded images or URLs
+- Saves generated images to disk
+
+**Usage:**
+```bash
+# Use default localhost server
+python sync_image_gen.py
+
+# Specify custom server URL
+python sync_image_gen.py http://your-server:8000/v1
+```
+
+**API Endpoint:** `POST /v1/images/generations`
+
+**Output:** Saves generated image to `output_generation.png` (or numbered files for multiple images)
+
+---
+
+### 2. Synchronous Video Generation with T2V and TI2V Modes (`sync_video_gen.py`)
+
+Demonstrates synchronous video generation using direct HTTP requests. Waits for completion and returns the video file directly.
+
+**Features:**
+- **T2V Mode**: Generate videos from text prompts only
+- **TI2V Mode**: Generate videos from text + reference image (multipart/form-data)
+- Waits for video generation to complete before returning
+- Returns video file directly in response
+- Command-line interface for easy testing
+
+**Usage:**
+
+```bash
+# Text-to-Video (T2V) - No reference image
+python sync_video_gen.py --mode t2v \
+    --prompt "A cute cat playing with a ball in the park" \
+    --duration 4.0 --fps 24 --size 256x256
+
+# Text+Image-to-Video (TI2V) - With reference image
+## Note: a longer duration and a larger size lead to a much longer wait
+python sync_video_gen.py --mode ti2v \
+    --prompt "She turns around and smiles, then slowly walks out of the frame" \
+    --image ./media/woman_skyline_original_720p.jpeg \
+    --duration 4.0 --fps 24 --size 512x512
+
+# Custom parameters
+python sync_video_gen.py --mode t2v \
+    --prompt "A serene sunset over the ocean" \
+    --duration 5.0 --fps 30 --size 512x512 \
+    --output my_video.mp4
+```
+
+**Command-Line Arguments:**
+- `--mode` - Generation mode: `t2v` or `ti2v` (default: t2v)
+- `--prompt` - Text prompt for video generation (required)
+- `--image` - Path to reference image (required for ti2v mode)
+- `--base-url` - API server URL (default: http://localhost:8000/v1)
+- `--model` - Model name (default: wan)
+- `--duration` - Video duration in seconds (default: 4.0)
+- `--fps` - Frames per second (default: 24)
+- `--size` - Video resolution in WxH format (default: 256x256)
+- `--output` - Output video file path (default: output_sync.mp4)
+
+**API Endpoint:** `POST /v1/videos/generations`
+
+**API Details:**
+- T2V uses JSON (`Content-Type: application/json`)
+- TI2V uses multipart/form-data (`Content-Type: multipart/form-data`) with file upload
+
+**Output:** Saves generated video to the specified output file
+
+---
+
+### 3. Async Video Generation with T2V and TI2V Modes (`async_video_gen.py`)
+
+**NEW**: Enhanced async video generation supporting both Text-to-Video (T2V) and Text+Image-to-Video (TI2V) modes.
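+
+The submit/poll/download cycle this script wraps can also be driven with plain
+HTTP. A minimal sketch using `requests` (endpoints as in the summary table below;
+the `Authorization` header is an assumption mirroring the SDK's `api_key`, and the
+raw JSON field names are assumed to match what the SDK exposes as `job.id` and
+`job.status`):
+
+```python
+import time
+
+import requests
+
+BASE = "http://localhost:8000/v1"
+HEADERS = {"Authorization": "Bearer tensorrt_llm"}  # assumed; mirrors the SDK api_key
+
+# 1. Submit an async T2V job (JSON body, as in the curl quick-reference)
+job = requests.post(
+    f"{BASE}/videos",
+    headers=HEADERS,
+    json={"prompt": "A cool cat on a motorcycle", "seconds": 4.0, "fps": 24, "size": "256x256"},
+).json()
+video_id = job["id"]
+
+# 2. Poll GET /v1/videos/{id} until the job finishes
+while job["status"] not in ("completed", "failed"):
+    time.sleep(1)
+    job = requests.get(f"{BASE}/videos/{video_id}", headers=HEADERS).json()
+
+# 3. Download the finished video from GET /v1/videos/{id}/content
+if job["status"] == "completed":
+    resp = requests.get(f"{BASE}/videos/{video_id}/content", headers=HEADERS)
+    with open("output_async.mp4", "wb") as f:
+        f.write(resp.content)
+```
+
+The full script below wraps this same flow with the OpenAI SDK and a CLI.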
+ +**Features:** +- **T2V Mode**: Generate videos from text prompts only (JSON request) +- **TI2V Mode**: Generate videos from text + reference image (multipart/form-data with file upload) +- Command-line interface for easy testing +- Automatic mode detection +- Comprehensive parameter control + +**Usage:** + +```bash +# Text-to-Video (T2V) - No reference image +python async_video_gen.py --mode t2v \ + --prompt "A cool cat on a motorcycle in the night" \ + --duration 4.0 --fps 24 --size 256x256 + +# Text+Image-to-Video (TI2V) - With reference image +python async_video_gen.py --mode ti2v \ + --prompt "She turns around and smiles, then slowly walks out of the frame" \ + --image ./media/woman_skyline_original_720p.jpeg \ + --duration 4.0 --fps 24 --size 512x512 + +# Custom parameters +python async_video_gen.py --mode t2v \ + --prompt "A serene sunset over the ocean" \ + --duration 5.0 --fps 30 --size 512x512 \ + --output my_video.mp4 +``` + +**Command-Line Arguments:** +- `--mode` - Generation mode: `t2v` or `ti2v` (default: t2v) +- `--prompt` - Text prompt for video generation (required) +- `--image` - Path to reference image (required for ti2v mode) +- `--base-url` - API server URL (default: http://localhost:8000/v1) +- `--model` - Model name (default: wan) +- `--duration` - Video duration in seconds (default: 4.0) +- `--fps` - Frames per second (default: 24) +- `--size` - Video resolution in WxH format (default: 256x256) +- `--output` - Output video file path (default: output_async.mp4) + +**API Details:** +- T2V uses JSON `Content-Type: application/json` +- TI2V uses multipart/form-data `Content-Type: multipart/form-data` with file upload + +**Output:** Saves generated video to specified output file + +--- + +### 4. Video Deletion (`delete_video.py`) + +Demonstrates the complete lifecycle of video generation and deletion. + +**Features:** +- Creates a test video generation job +- Waits for completion +- Deletes the generated video +- Verifies deletion by attempting to retrieve the deleted video +- Tests error handling for non-existent videos + +**Usage:** +```bash +# Use default localhost server +python delete_video.py + +# Specify custom server URL +python delete_video.py http://your-server:8000/v1 +``` + +**API Endpoints:** +- `POST /v1/videos` - Create video job +- `GET /v1/videos/{video_id}` - Check video status +- `DELETE /v1/videos/{video_id}` - Delete video + +**Test Flow:** +1. Create video generation job +2. Wait for completion +3. Delete the video +4. Verify video returns `NotFoundError` +5. Test deletion of non-existent video + +--- + +## API Configuration + +All examples use the following default configuration: + +- **Base URL**: `http://localhost:8000/v1` +- **API Key**: `"tensorrt_llm"` (authentication token) +- **Timeout**: 300 seconds for async operations + +You can customize these by: +1. Passing the base URL as a command-line argument +2. 
Modifying the default parameters in each script's function + +## Common Parameters + +### Image Generation +- `model`: Model identifier (e.g., "wan") +- `prompt`: Text description +- `n`: Number of images to generate +- `size`: Image dimensions (e.g., "512x512", "1024x1024") +- `quality`: "standard" or "hd" +- `response_format`: "b64_json" or "url" + +### Video Generation +- `model`: Model identifier (e.g., "wan") +- `prompt`: Text description +- `size`: Video resolution (e.g., "256x256", "512x512") +- `seconds`: Duration in seconds +- `fps`: Frames per second +- `input_reference`: Reference image file (for TI2V mode) + +## Quick Reference - curl Examples + +### Text-to-Video (JSON) +```bash +curl -X POST "http://localhost:8000/v1/videos" \ + -H "Content-Type: application/json" \ + -d '{ + "prompt": "A cool cat on a motorcycle", + "seconds": 4.0, + "fps": 24, + "size": "256x256" + }' +``` + +### Text+Image-to-Video (Multipart with File Upload) +```bash +curl -X POST "http://localhost:8000/v1/videos" \ + -F "prompt=She turns around and smiles" \ + -F "input_reference=@./media/woman_skyline_original_720p.jpeg" \ + -F "seconds=4.0" \ + -F "fps=24" \ + -F "size=256x256" \ + -F "guidance_scale=5.0" +``` + +### Check Video Status +```bash +curl -X GET "http://localhost:8000/v1/videos/{video_id}" +``` + +### Download Video +```bash +curl -X GET "http://localhost:8000/v1/videos/{video_id}/content" -o output.mp4 +``` + +### Delete Video +```bash +curl -X DELETE "http://localhost:8000/v1/videos/{video_id}" +``` + +## API Endpoints Summary + +| Endpoint | Method | Mode | Content-Type | Purpose | +|----------|--------|------|--------------|---------| +| `/v1/videos` | POST | Async | JSON or Multipart | Create video job (T2V/TI2V) | +| `/v1/videos/generations` | POST | Sync | JSON or Multipart | Generate video sync (T2V/TI2V) | +| `/v1/videos/{id}` | GET | - | - | Get video status/metadata | +| `/v1/videos/{id}/content` | GET | - | - | Download video file | +| `/v1/videos/{id}` | DELETE | - | - | Delete video | +| `/v1/videos` | GET | - | - | List all videos | +| `/v1/images/generations` | POST | - | JSON | Generate images (T2I) | + +**Note:** Both `/v1/videos` (async) and `/v1/videos/generations` (sync) support: +- **JSON**: Standard text-to-video (T2V) +- **Multipart/Form-Data**: Text+image-to-video (TI2V) with file upload + +## Error Handling + +All examples include comprehensive error handling: + +- Connection errors (server not running) +- API errors (invalid parameters, model not found) +- Timeout errors (generation taking too long) +- Resource errors (video not found for deletion) + +Errors are displayed with full stack traces for debugging. + +## Output Files + +Generated files are saved to the current working directory: + +- `output_generation.png` - Synchronous image generation (`sync_image_gen.py`) +- `output_sync.mp4` - Synchronous video generation (`sync_video_gen.py`) +- `output_async.mp4` - Asynchronous video generation (`async_video_gen.py`) +- `output_multipart.mp4` - Multipart example output (`multipart_example.py`) + +**Note:** You can customize output filenames using the `--output` parameter in all scripts. diff --git a/examples/visual_gen/serve/async_video_gen.py b/examples/visual_gen/serve/async_video_gen.py new file mode 100755 index 0000000000..dec93bf3fa --- /dev/null +++ b/examples/visual_gen/serve/async_video_gen.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python +"""Test script for asynchronous video generation endpoint. 
+ +Tests POST /v1/videos endpoint which returns immediately with a job ID. +The video is generated in the background and can be retrieved later. + +Supports two modes: + - Text-to-Video (T2V): Generate video from text prompt only + - Text+Image-to-Video (TI2V): Generate video from text prompt + reference image + +Examples: + # Text-to-Video (T2V) + python async_video_gen.py --mode t2v --prompt "A cool cat on a motorcycle" + + # Text+Image-to-Video (TI2V) + python async_video_gen.py --mode ti2v --prompt "She turns and smiles" --image ./media/woman.jpg +""" + +import argparse +import sys +import time +from pathlib import Path + +import openai + + +def test_async_video_generation( + base_url: str = "http://localhost:8000/v1", + model: str = "wan", + prompt: str = "A video of a cool cat on a motorcycle in the night", + input_reference: str = None, + duration: float = 4.0, + fps: int = 24, + size: str = "256x256", + output_file: str = "output_async.mp4", +): + """Test asynchronous video generation with OpenAI SDK. + + Args: + base_url: Base URL of the API server + model: Model name to use + prompt: Text prompt for generation + input_reference: Path to reference image (optional, for TI2V mode) + duration: Video duration in seconds + fps: Frames per second + size: Video resolution (WxH format) + output_file: Output video file path + """ + mode = "TI2V" if input_reference else "T2V" + print("=" * 80) + print(f"Testing Async Video Generation API - {mode} Mode") + print("=" * 80) + + # Initialize client + client = openai.OpenAI(base_url=base_url, api_key="tensorrt_llm") + + print("\n1. Creating video generation job...") + print(f" Mode: {mode}") + print(f" Prompt: {prompt}") + if input_reference: + print(f" Input Reference: {input_reference}") + print(f" Duration: {duration}s") + print(f" FPS: {fps}") + print(f" Size: {size}") + + try: + # Prepare request parameters + create_params = { + "model": model, + "prompt": prompt, + "size": size, + "seconds": duration, + "extra_body": { + "fps": fps, + }, + } + + # Add input reference if provided (TI2V mode) + if input_reference: + if not Path(input_reference).exists(): + print(f"\nāŒ Error: Input reference image not found: {input_reference}") + return False + create_params["input_reference"] = open(input_reference, "rb") + + # Create video generation job + job = client.videos.create(**create_params) + + print("Video generation started: \n", job.model_dump_json(indent=2)) + + video_id = job.id + print("\nāœ“ Job created successfully!") + print(f" Video ID: {video_id}") + print(f" Status: {job.status}") + + # Poll for completion + print("\n2. Polling for completion...") + max_attempts = 300 # 5 minutes with 1s intervals + attempt = 0 + + while attempt < max_attempts: + attempt += 1 + + # Get job status using SDK's get method + job = client.videos.retrieve(video_id) + status = job.status + + print(f" [{attempt:3d}] Status: {status}", end="\r") + + if status == "completed": + print("\n\nāœ“ Video generation completed!") + print(f" Completion time: {job.completed_at}") + break + elif status == "failed": + print("\n\nāŒ Video generation failed!") + print(f" Error: {job.error}") + return False + + time.sleep(1) + else: + print(f"\n\nāŒ Timeout waiting for completion (>{max_attempts}s)") + return False + + # Download video + print("\n3. 
Downloading video...") + # For binary content, use the underlying HTTP client + content = client.videos.download_content(video_id, variant="video") + content.write_to_file(output_file) + print(f" āœ“ Saved to: {output_file}") + + print("\n" + "=" * 80) + print("āœ“ Async video generation test completed successfully!") + print("=" * 80) + return True + + except Exception as e: + print(f"\nāŒ Error: {e}") + import traceback + + traceback.print_exc() + return False + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Test async video generation API with T2V and TI2V modes", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Text-to-Video (T2V) + python async_video_gen.py --mode t2v --prompt "A cool cat on a motorcycle" + + # Text+Image-to-Video (TI2V) + python async_video_gen.py --mode ti2v \\ + --prompt "She turns around and smiles, then slowly walks out of the frame" \\ + --image ./media/woman_skyline_original_720p.jpeg + + # Custom parameters + python async_video_gen.py --mode t2v \\ + --prompt "A serene sunset over the ocean" \\ + --duration 5.0 --fps 30 --size 512x512 \\ + --output my_video.mp4 + """, + ) + + # Mode selection + parser.add_argument( + "--mode", + choices=["t2v", "ti2v"], + default="t2v", + help="Generation mode: t2v (Text-to-Video) or ti2v (Text+Image-to-Video)", + ) + + # Required parameters + parser.add_argument( + "--prompt", + type=str, + default="A video of a cool cat on a motorcycle in the night", + help="Text prompt for video generation", + ) + + # TI2V mode parameters + parser.add_argument( + "--image", + "--input-reference", + type=str, + default=None, + help="Path to reference image (required for ti2v mode)", + ) + + # Optional parameters + parser.add_argument( + "--base-url", + type=str, + default="http://localhost:8000/v1", + help="Base URL of the API server", + ) + parser.add_argument("--model", type=str, default="wan", help="Model name to use") + parser.add_argument( + "--duration", "--seconds", type=float, default=4.0, help="Video duration in seconds" + ) + parser.add_argument("--fps", type=int, default=24, help="Frames per second") + parser.add_argument( + "--size", + type=str, + default="256x256", + help="Video resolution in WxH format (e.g., 1280x720)", + ) + parser.add_argument( + "--output", type=str, default="output_async.mp4", help="Output video file path" + ) + + args = parser.parse_args() + + # Validate ti2v mode requirements + if args.mode == "ti2v" and not args.image: + parser.error("--image is required when using --mode ti2v") + + # Display configuration + print("\n" + "=" * 80) + print("OpenAI SDK - Async Video Generation Test") + print("=" * 80) + print(f"Base URL: {args.base_url}") + print(f"Mode: {args.mode.upper()}") + print() + + # Test async video generation + success = test_async_video_generation( + base_url=args.base_url, + model=args.model, + prompt=args.prompt, + input_reference=args.image, + duration=args.duration, + fps=args.fps, + size=args.size, + output_file=args.output, + ) + + sys.exit(0 if success else 1) diff --git a/examples/visual_gen/serve/configs/wan.yml b/examples/visual_gen/serve/configs/wan.yml new file mode 100644 index 0000000000..7dc65e6214 --- /dev/null +++ b/examples/visual_gen/serve/configs/wan.yml @@ -0,0 +1,8 @@ +linear: + type: default +teacache: + enable_teacache: true + teacache_thresh: 0.2 +parallel: + dit_cfg_size: 1 + dit_ulysses_size: 1 diff --git a/examples/visual_gen/serve/delete_video.py b/examples/visual_gen/serve/delete_video.py new 
file mode 100755 index 0000000000..d44b8f046e --- /dev/null +++ b/examples/visual_gen/serve/delete_video.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python +"""Test script for DELETE /v1/videos/{video_id} endpoint. + +Tests the video deletion functionality by: +1. Creating a video generation job +2. Waiting for completion +3. Deleting the video +4. Verifying the deletion +""" + +import sys +import time + +import openai + + +def test_delete_video( + base_url: str = "http://localhost:8000/v1", + model: str = "wan", + prompt: str = "A simple test video for deletion", + duration: float = 2.0, + fps: int = 8, + size: str = "256x256", +): + """Test video deletion endpoint using OpenAI SDK.""" + print("=" * 80) + print("Testing DELETE /v1/videos/{video_id} Endpoint") + print("=" * 80) + + # Initialize OpenAI client + client = openai.OpenAI(base_url=base_url, api_key="tensorrt_llm") + + video_id = None + + try: + # Step 1: Create a video generation job + print("\n1. Creating video generation job...") + print(f" Prompt: {prompt}") + print(f" Duration: {duration}s") + print(f" FPS: {fps}") + print(f" Size: {size}") + + job = client.videos.create( + model=model, + prompt=prompt, + size=size, + seconds=duration, + extra_body={ + "fps": fps, + }, + ) + + video_id = job.id + print(f" āœ“ Video job created with ID: {video_id}") + print(f" Status: {job.status}") + + # Step 2: Wait for video completion + print("\n2. Waiting for video generation to complete...") + max_attempts = 60 # attempts with 1s intervals + attempt = 0 + + while attempt < max_attempts: + attempt += 1 + + # Get job status using SDK's retrieve method + job = client.videos.retrieve(video_id) + status = job.status + + print(f" [{attempt:3d}] Status: {status}", end="\r") + + if status == "completed": + print(" āœ“ Video generation completed!") + break + elif status == "failed": + print(" āŒ Video generation failed!") + return False + + time.sleep(1) + else: + print(" ⚠ Timeout waiting for video completion") + # Continue with deletion anyway + + # Step 3: Delete the video + print(f"\n3. Deleting video {video_id}...") + + delete_result = client.videos.delete(video_id) + + print(f" Response: {delete_result.model_dump_json(indent=2)}") + + if delete_result.deleted: + print(" āœ“ Video deleted successfully!") + else: + print(" āŒ Video deletion returned False") + return False + + # Step 4: Verify the video is gone + print("\n4. Verifying video deletion...") + + try: + verify_job = client.videos.retrieve(video_id) + print(f" ⚠ Video still exists after deletion: {verify_job.status}") + return False + except openai.NotFoundError as e: + print(" āœ“ Video correctly returns NotFoundError") + print(f" Error message: {e.message}") + except Exception as e: + print(f" ⚠ Unexpected error: {type(e).__name__}: {e}") + + # Step 5: Test deleting non-existent video + print("\n5. 
Testing deletion of non-existent video...") + + fake_id = "nonexistent_video_id" + + try: + fake_delete_result = client.videos.delete(fake_id) + print(" ⚠ Deletion of non-existent video did not raise error") + print(f" Response: {fake_delete_result.model_dump_json(indent=2)}") + except openai.NotFoundError as e: + print(" ✓ Correctly raises NotFoundError for non-existent video") + print(f" Error message: {e.message}") + except Exception as e: + print(f" ⚠ Unexpected error: {type(e).__name__}: {e}") + + print("\n" + "=" * 80) + print("✓ Video deletion test completed successfully!") + print("=" * 80) + return True + + except Exception as e: + print(f"\n❌ Error: {e}") + import traceback + + traceback.print_exc() + return False + + +if __name__ == "__main__": + # Parse command line arguments + base_url = sys.argv[1] if len(sys.argv) > 1 else "http://localhost:8000/v1" + + print("\n" + "=" * 80) + print("OpenAI SDK - Video Deletion Test") + print("=" * 80) + print(f"Base URL: {base_url}") + print() + + # Test video deletion + success = test_delete_video(base_url=base_url) + + # Exit with appropriate code + sys.exit(0 if success else 1)
diff --git a/examples/visual_gen/serve/media/woman_skyline_original_720p.jpeg b/examples/visual_gen/serve/media/woman_skyline_original_720p.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..44a7ed5c3a413cb2e43d4df1ecf7cff6724fa8c9 GIT binary patch literal 178461
zTea=ORD+XlZ3*J5B07xk z%vXL2mpZ&PF~l!q?KDd@);Ow5a!j}k7vJbBnASCoKShR1-Dgp?vxYvb@~Ob~P=BR- ztMMQ9)c9NA0M>pOicb@2ETl)}e31tC=rBK}d--ldjKs^96zq==HO}yOcwSW%`yXY= zc?1?Mt7=Uhy}EUIn`+m>9y|TMJ`-pcw;E4{bouqUEsi|F@~5X+r#3IjC7yuF9PZ7;b`O^yqesr&PV(U<*$l$4;1*jS)H|AQ^Xpb)Y;-jx{@}} zeafUS(-oB}M*B%o`@gyV&(qSYf*2n&=K9y^*?$jVs3jE@WAePuiZRvw=}eyf{`&aG zE~RrV%dbN$ts(tuCRs7fS$7!NI3AUsH(L5UGgK9*B#)Y_Q`-Lk9kwr>RpmRfY8go# zD!%6Rt*Z2)J$d6oAwbbA%o!GAp@b`;6KjJ0bre4Iy zvgZc6cb-@d*}hM&Ptw1OCzDD)5sR&q77F|QGnm2SxrkvZVwgXv1+qpCGq%GxV4AR-Acx$ zQGJg0nOfI9iaU)rhgk;SN}Y9GJ|+PZIqQL5KANI=o@0)E2TIk8#9;{X&3wP^tHr3>WA2V>z?#xdgPQWF zd_?zODlgsit5bNA3mD9|(?@QjyRwXaMzH?PlX?sLN{m;$(dq(NSNK+oTezVA09a7| z72|F33&X3|Qn&vAzQsN-ANY3}x700LAMMt%;rj1roXFJ}mvJbsKAzv-p1u62QC{xL!TB z<26(`TNP4j5sis_Nra>O$;nwCpoyJHR0mu2Fm=a{m=lZR`Hm8_4avp1z>Qxs9gN#G2?7@Cr*1CpKrUB3O(Jy z2dML32lb^%z-5v~GCFrPlmbd7>I+RJI1kQllUqy3&L;vL8lPS8Coor+%7p%uU!8uO(>+oRE=h;q@J zWy&Z3=j&EtZbMdBls6T5TPuNFlZLt-khRU2rKw^H4Q9oY-mSzNI5p2&4#%fWn-OER zX-8Qww$X!Hamcu@CUMmDsmWN@j~E%Pt6-Ib8QwOpTGoRgSD#q*=rv=bv|*a)ttKj_ zFnDU@EtsL^x2<0|CcHYeeVXM+`V2K*U5@)tzP^Tay3$|l_YoB+W8m@EA6oqZ_*3zR zP4U*0x<;oe+G)|s#PUtN#wnkB?nkmLPgA5T%&kEL}mC7Ym;LKHm;^!&Qlz+-V$ zDZW`N9+YWaT^wR*TZS2`>{>G8?~3kYT{L~|rF-JEVj3Ty{Huv$X7)mK(m42ZYca)M zk4y;pgz!2JE2xTXGe2^*?f(F?*+BtUADwf1c4x@U8%U%<`^6*Dx=Tq=Fjhg2rC0*T zjJ|60KiT;iVOY+qxxt8SXp8pOR!T7HD_+|BM#e;6rBt_F7azPiKGnSg=?XUBRQpcn zkyoFwJ-yb4Bw?9ICbZ+1MtlgDrxi*gqTpwW(uiosgZef(i zS0K5(i-_*ON~ImPir7AW(^{7BXuxHzYi@ZSRY@oDBIUgI;qm7m%C_;Zw0ms9eDk<< zS?aoVgZ#Hg>5BC*l`Q$2vGdsbsPe)WJM9z1K0mv+TRUy&@r-15 z`qyQq>R%EA{{XC&N_{c>tA)|NA=&9S0^$Ex|CJNU$#QyCqx zTkz`^g)rN{3jJ3p%WG3=PQHicS%xAtUo0uu+)K?h&(8>r`>PI>4bRz^41yt_YRCT7 zwcX`~$@lA2u5}1o18zM5@AR*+uZ_X-^4U3l2Zi+~A9`9Md&n%N1hPVXit$`rx@FWX z(?pirdjOTV_ZzaiR3A*&3*x^SXr2?(+Unu_#{S}U{+00=)-MMI%~CN(&}CR!l^TT? zCUE}%616-1BE$PKHMd!AdSQBfd9RqS;k{$C3G)8{ba7YpKN(wU_m8OSZ7Gs(z5p0s z_j#;I{7Ku8<9ZZ?@y+(7+zz3+TxqLY*{{XF? 
zCC_Ys_3L3ZzX<%^T(cbdipaXU@YMMT&4c;Z%gshE$o&e9Ohei>`4lAZ)u=zco*4cPajEp$das_2qkF#h01qdoIb z$u5;21TWxf)mHX3_MS&KBvY}MIg#T^r`%b%I%+(^%$&{@t1|+$D6F%z=U!)XE?7K*S~2$1nBp& zSn0OXUKsc;0KximUac&<6xNV8aF*!zcrBqXwlpk$qx>rBL1cLVW@zv~OxCX~#jSZ6lceN*6LPk(ACQ`?C4>$Qae9A_ z^j{F#yxtwVg7in!A^C^kK&YniQpS9_qX3UVTvnWAX)6or$8uy=8&ry+C$JUCj}*gz zS#Um;UA$9j?SY!vM74vy$mtlkAHC!0QSXoV4;9W$;u|Tt=kluG+cv5`T;u6m82b~Z zmgCDGFEyJb;~ol(Rt&o2WPa;El~VrJc{+Zgx?xT7+!Yg}X(Mh*3cdd{BHj`waLa!M1XU zr17VZWYi^$a9+z#vzZY76Wc~>`E1kyH!^wL5Pn>LceQ@`f5CV>Yo)iw4~9PpEaYSV z00>`&W}fN+{1fVO$1)sx>~`yn4AP7)Ra}Owf?^2 z_?N_t$J*muTk6a^*-80p4sN)smok9cy9&;dWn+1zL6n| zOk58$m&fT|kv-bQ43js?RCVoNJ8K%Ony#d7cVYE=YhG&kL^qDsog!?@UH^HSA%mw2XUF^%`Bw4~(HP-Hn5a|NY!3UDZl;lZc4 z+<2e}6m98JZ#2p%0#%fXv%S>|A=+_{KPsjdJu1dV&9s(}pRufMZeHhe@Ylz_Aoz3R z9T(ychpt}M`$W8k;t3<+kRx&j)k_b?ztD|-D0pAuhx`*i;|J{t;D<-k{8{1MGVkQ+ z3(sx3NO9ZCI1HzdYy9Y9`5&LV>0i>v{1Q+0sE1Pj0D^sf(4Vl@sK#AJ<4SFN!|}>S z$sY+D+NXcM%_cF@zX$Qpfn%ECsN9FTcAxejnH5en5D+ zQ|*q}D~BU#><2+y1jl(l?DVdi#5Zxxa0D&ANKzZr5DtE|#@tT6XO^PAuP3KEl%Y~e z)=3$0WTiaLwe~=uxFFyRSM=@w00fNqdvW03*uq~JYC&VU@s_D2&~M^sAqru~w=G}G z-la6LX*Vlurb(F!#;P()u-dpETvzs&@T&#h8nx8_Cw*Ht{ZTXPxi zdK+KbNB7FqM_aUupcUsXo@XxNrYT2zupthTKKcc{*%X}6Fo&spPM zKHG1{{WVYVsz2Hmk{>NEU@!gABIo(nZ}wFC82xIWbb1@#S;@e_s?9y0UMqxaZAJcN zjhTBDE05<;$rZ#&x=)mQkUyGORj)d^`%G*%E93jX zZXfvWYbego@+zGAVE+Jxb;}C6naZbQ)jzcDhszs@7=H@!im&~s8N_a&DeA!IZ`2C$ zPZM~r!(Iu}+gI_${CaEve1V(he{2f)&&S`kgTZLlp9-|`49pot^!}cxqdEL>Ufx&2 zHStnPn$x-AW*k|Dz^6KmY0&*JYxqjEeq$ z^^X(l+V%Xprn9JA&vSBfF)nr=qUROOZ4pW5T7K>S0K5s!eeNH^n?~-T`OLgskF{x2 z(EURFp#K1Cja$Pyk#kHFLp>_}&(M0%aw?d+aq z{msS6J*ApQACYWTC}i@6!~s{@iB54<+wRxY(ZE97)R!vkd{!$TRm#;J$IUx1^4R7x z{{Sq{-Twd|*0Uv$fyFvjHCksFuE@e`Ngj1s!=kWnK3!AZY35}Kj8#85eJe;uQ=X*N zhBS-~M$TKUHQ05i_ln<TbCu>haf+tp(JtfJn%WGwLS)aP#v&>C1>bp4 zURrP$i_dqxUA#CHLqrG{=8w)_bhSU^oSB^MxwAEE?J^R9g6{9p7Z$g#@D=DYEOgI zFBOe0t9W0`mgZ+;{?%>%r%c9ta-ZKE+qY-DxSH#CUp0we`{uKJ>T*lDw%4^sC!tH>hKGY+IIUAzPQ{g5cfn}W z^{p6E-)1C=H2P+;>d&2B-oh_P(>Uo=sej@cYSAKvi)(@WEIv{6%}X4tMhR8NW#Cpn zoGIxwCxy4zvA4FLS=`%MFmt;#FP&JaH_cOd)gMT+gXYCYUDq5u{-el8j$_!wxn*Vkp1eL`0G4D*@iXaWe2TjtAM;3#2sqrsK-#T_vslO?>V% z3w2}P!&g0f!yYEqd?kDRsh~~vNq@Mlx;Of-L0?#STlSpOWQsVvSzy+)E(6a8%)eX? zKT7y24MQ#ITajPOw+Kew;Pky8ysH>6ckulr?fNSNg7vpDx?tgUmWgFaAX$Ot;i?SNl<~cWX z=O3kfb}JyMOz!pvzb>f>Bkq2cd36}}@D{u|d_bTna--8}t%!B_-8QS&m-SY+GnN5M zR(m8`n&a+@(Ti4Z?h5gUwX=`CS0eEbJUwGq4j;fPs26pdABEf2ADKaDWQ z5Q|xakUxnAPw8HdZSh0GcR%v;<$t~M{{TAt{{T-C+76NdQ@4m9$M0NM+-3ZGji9X6 z&w|JJONFZM9S_id_)k0}pT0VrSoGextPMxv?}BX9pSCoExL!};fnNqm;td}kGyYXV zO?yToD<&~r)x1T;PCUG}IB0k^K|gv&)>_}k>rIaygKTmC06iauA3$ruHQ$Jqx25hi z2;N3Lx&Bq;mtG~)KI%*>3V#u3+5POozAG}~JXS68Hhz zqO)#1TWrgOgaP!gC%^HJhVlEkKPu#|d~2hD#D=_&s#QMk55w_2rMyo9=PIK zTW+K0PnXzNh0EiO6qb{IhDT-YL>#UC9#> z>?^{i@rIxO04!WqO#VBvhCedqP?-J|t)Z6G*niouhUoVjj}z%k#}T*FIIRBw_(`mf z7WrIt1lP>kuf`oF*+{aCv-+CztIvzxCy|M=*$VrPwe3*hsm%w~sjX~%v;DI)rx7Xq zD=PEE7Iwe!`*M+gzQBJv_$L1V;#Z4r0gB@Isvf&|t}|5DE-zO%R}wk@0D8T8933(8 z(>|Q|f$@jTfay96?4Rw%az8O&J6`INY6H#JE*j;}-6wI!*A>Z2s*GZ??loBU{LO+p zSJhy6W{p0F7uIQ;mp3T6Kot6rarCTtbtsVL+<92)eQTVbR73Y$HIZ?ue1^V~j!_L-k5N_MSY{he=0ETn;H1?m5AA`|73@*UR>1cXpcgtE1m7 z=8ttHt;bc^epbh4#})Ng!_V42*8D-E`1Ua!+4)4MK2AMPO87cmW9K5G{?vv-JjKZ! 
zF~%#CAHz_sO~aDRsAlOsPuIJBZY$iql+j#3f4PJFsUyFHOS59<(!VAwd~vU6R|_VS zeJ#5$-O0-SRqVQN?MdTpITF`avx@Z;ef8n1;;~*F;vTfmUk{wqKSWmY-TvY!`eLLy zkclv%KpxfcE}#2UX!FOvRMKu@Bd{W&x&5g0r;zy?HOj-;_l*8k=B^RUKalJG*TbJ^ zPpX`DA4RI4+aUoN+`B;j+@67=vqI;pAKtqX4?4BO74;Zn;V~GC9B+;Fc3W|$TOXUg zE6D~iT6&G__g7}>PW6`f34vc%Lk;ZD&$Bw--dw}FQy7i3NzhbP-xs0gN>>UfqNvJy z1r$+1kmc)ApN2CXU5 zmlmaohz^23;U9p-ajbyLpY}~)_+!K}>UvUIUAFmQTod%^`c|!}d1M*vaz2Bte3lN6 zDwQbh6Z9|bM_tOa{?YUo?495#ekgv@{s8cdY7BB}w)b}#8DF#6%Ck$jdTc|VO8&St zoo2@SR=ScdaONoX^zzgMQaykb{P2IlCcGbSees{fpB8Up-EHBGFUqz(yxYu%ANTFn zzi+>{kioe|$7s$g`GdxMTg2m`hU~wu(Eh3LCo5F4dDlmweX)INHoSoRu2_9KSDya> zZ_{61;=dRAhmredw>!3)(EY|gjYMUEzEwFsg1q$W$wYbSYhidsdTh}!`R^{&eLw7CYjmx_nss8|UcCXNKUI6B_sx1%B^WHs;7`W5; zkEi}9e%0E)i=vxA_d z@QT2nP%%l?ze&pYdkaA%k@-$%#W<`+w=c@D_{xx?E z`R1aM>a*JG=IdWa{hx*Z0G~&J{j-Vx0H24TTl`A+i(?3a*Id*hTw|M@kHelTI`iTe z#0$90cN*RNs&SPw?FSzD?OZhXeL1SFb(O_*Lny+ve`Zhi?j>pMPh8TzG<;FeBwy@b z5M8bZ-Ie^sAD(Nrf7*lNT-GMdwJ4A`8}<#u^skxx>U5WVs%Md5WYYFxd`F6czqC57 zbHsY@i7t+(;)}=BXX+kOO7XzLbQtNW2LOEqQa3x3ZB%E)dQ`A#kM5%j zBgMq#F1q%Toyn$P0ozr4#;VUZG|xJ^Rvh#FxhI&^#*pHxdDRQe^H)jD^?@akxTu4i z;Lz{C6$>4!n(U?UoK>s)!c0bLq@-e*B$XV!D?^rZxeF1tsgcc3QNL=aZb1uu+J>fY zyHM7^6$^dpnxSvKT?P!TIO&S2TW?Cb{i`*;^#&5$V*veX3b9q|qJfOaTklnA$4ph0 z`_&8fu7fhB4JAz9cA=mT|Iz+g>EY3O)h{`{sle{!*XPzhQs-uZN>AN2AZ^DIay=_K z9H&Z=W@2&0V>PLyUWTujnA_XZuSFpID+76}(8zO&%4XEfLm~sm6xMy%$7-rhY4Ow@ z6t!lEViSs%9+gdGC*`S@LW2T>v1ze&iEbO3l{i2L6;pYq378CVT*m1ov7-|Koz*ar z2TIP4P2YN?$fkj!_nbXyZ!}hAO}T41J8WwNX)KGkwLi>J(y1pZF;1xFW`)$0ZmGP{ zS)k&bywriU=MP$yWK3qVH=3Co?WJUN+}Ms#YEa`mRzz=C5rFxHV@%!}C|4MRt9wRx8H#uM)lz`yTEe9FIm@^kedxx|ic4 zCnQ$`1<=|Kaat>O1_`ekt_M@vrHze`Ot;js04#o$&|2zAit>wV(99F{t?PTuyaQfV zBLwbxxNLGuL(-t-CE_%6<9T-lDvkJLBD}P7A_Y1lNy<%H!DfC}v(~z1aBc zTGPh+TZV3*D>g-XuZI3Fq{U&?N0j}WwS0t^yW6fd*6qFS*#2XY?_W2J;iymEvFg#x ze9yXV{C}jwD3Py01a!w0V$0U-7 zhj}$U8g&y^KEsbtkw-3S^je&Q_^ak9e0iwLjLDy4(zRmoo}rGb`qznHDrH#uZr@Uc zdcAAf>S$l(Y>N4!-^9_&!@fS1(OPSkYBT4N>}!YnDVqIO2hpQcDsZG4x|)!0zCac8 zRQ@fF-4B&EiHSG zTXNs>(AGu&0EzCV=1Ex9Q^{%>{{UrEKBd3$rkiM4WN=9I>0CY6j_u}ivG^YK^L5_1 zU3U{%vg@j-266PST9!#W8!9xQwmyft@q8sojQrJsH;Q*3n1fykHoP!)H#J4I$y{>P z=}^dDVD_;+#(xo`4gjnxjc=0K7358;?dtW1sB3bzaP{cW%6x@e85ECIzVQTHbmF<| zj~57mz*CQE@vEIycW?ETpZ%iDb~9JEhRO8U^D5)?J#t?aNiQgN{SGTC<6B2RInUO- zzDwM4uTV*-d)KJ4ZL~O}k6ljwZB{4i$gGQ$qn<$-tVyqnp0(@Y^0^*me9|XRbE?Xtt#kJpfnrOZ zYo4C<*nR5HdyY9Z>(I;37ZHv}LH6|@bmFK_sBT;WYXPo3>YSGx^{-xr5iKlm&mOF5 z$$Cc{s*>MrJetXt6KRU}_e;QG(UYHP@HI)j2Q8;qk|% zcS8W|dDXK7b^icwjPgw&y@)p8d9FEb)SP0P*MUv|tNN6Tf3#O~zH8{*?s1b)O@4mz zsQT9z_SIAS!&N5Kqd#%D6(8AhANIoTdIXxO^1{q;J5(|1@LM;T8w4Y`#d*YQD9sb2X0X_vqa|E;a zoBLzpCaxDgV_Rxh5y_+;zjQ};BCbdLv*V?97`|_D4kVF-Q8*iMicRK_u{>9_eA!x(p2`=cGPaJ#EnUKjD5^3FD58M{7{^K| zpaP01x%sxx0zVkVPM~e5-3HKVG?k0_XBeObPrr(}3v7@_)Kx?Bwocj09%0EisHE$JbIPz1mo7 zM#%DO;%=_XF8Ofhs`U1%Q!rze=An{eDyvDmtzXp+4{k*(HagW{{Bub(++wO02D`aY z&Fx}KJd4Fv^N|K>Dahj$264@48hRY_s;*oNBpq>8%&K@b5g_-+(yQcF^r2&xypzzd z(x^BU3(Pp?tFarZGql$2+~jE`p)qA6ie*QIUPUJJQ_75lmx0AV9S2%gkg&n12wd}8 z`vzfglfO-zRaa)))~1vJj8%5~wbM;mSi)P7o_f>-lh&uI^>#6t6BC++$>NqqA@Na@ znpPs>WYSSZFt}7L_o`}zzV&n*_aT1OpZs;8{i>;rN6kQnV0u+4cwQ>?R+$Fc=%8mZ zg%}ks!B1F}*Fl+4fZNic!1Sw3qwdsu1pt5l(*9L9ij^f_yHxitVk$(&YxB#q^$vKK zs%4Cio6@Pginzh4#>DkSr$)IC*7!ORU>T$ z^O6N)_j^^C^U%~0kya3MSj%&0PeNL|7h0wjk3m;rQ=XN}U!l`QmI4y>Z*I^k6$dq7 z@&+rGTN@W*AY!y4af;4RLeKSc9&4G?QP}U4`E6rO1ChF|DIoDw7D52U zIb>|&xvElDx!Fdo^k&lTVTG#^?n=vv$i^zqnAOE9F=uQi6|u7v+|`iezH1UGD&k5r zis9vbk4hC1*yyd~+w`p^m*v9su5u}H-me+9$B|sMD7Vn-r(IuDeiuHqqZP(Ya&cUo zb*ph*j)2!4TrHQnk5+~uEsoMTT=QCRPVw`>&2bT575Q`0uO<282a571hn3G>o-HVy 
z>{n+W;aV|XWLE_R>D)Q3Xzs_YdG+wD^l{JZx4z`YaHyYob|2PE(@kBh5JuR*|xgiSBr?l%GNzvSe1Jn<*b7kHPTw%FgUM2 zwYuX5x*Kgn1Y}o%h{C~`Y3;@0@KHuSCJy>16;@hf2)(Dta~ zEVMi5F4=G^PAk%D&!WDNW74#uzHDcuc^Hfrk>5)gndos}Ogdmyh14hBcVO2G7Nn!A z3P#m&A;Svc{?C_Vqw5emvwLu-JXD8Kd~ii@FRUg9&Ob3#pIn0*OpEl$tz(xvY*NHP z^b@Mcy-L+9t#9Qmk%3-!F0}`(b9#oq5Ia=yT{wKPx2id@IMo&IowlLkqs(#ku6zCx zsSZP$^V_{uT%CuC#FF|}ub`!sK|^E0shBkGdZ6(@^8n3TUlJA>5-oUT#ngpQH4JyH zj!kz^%U5GB?MWWk`m?`7S<`9^Ny}H2{{U`IJDRr+xUwKe>rG`=*%;3h5^tY5UbTB9Yw;=MRwY>zh+jOc91a`?|`p)J#T)^l8qxT+G|Ft0|1 z4K_S1Mk(CWmf+&4J;xQ5Ez=J5AD@`=NUrGNt+9l$4enhx9CC44?{Yfkp)(_O=CkIO zJbbm&OAo2ZUl5WkNp=THtt|Xvv!s{Ovt_(cKJc$rICs$Ur;D+pEaj?%mp)L=;aC?s zugqgk)sZdM;q#xRdXzBk+MY%u8gz1NYNui!nyS97a@EPPQ2T``xW#pSQ5;`fXF>g= zBY&*@Dny(go0{kMzok4*RAAPg#N+zxj)F_Xj18izO{fnkhC5=hnpSR<-M(-uA;N*U z{S;>(k4kZaQ;O&*;@yo)4L?`$R->o*KSqV-yw@(^vA0DT%Shhb{m^<2)#ShMR{sDQ z=^h~cxja+g?+!;0(f%E2I$wfxvk4nM$E2f5Hsg$cr7Hgbz&)$&Z`ithGsEAsBYbu7 z6mKqt@PEU)$B(Xu@RzZHQrZR{uAgbz4|U_Eehv8J#CoQ?t!jQM*5qmLb(@)W2}|<% zxsz~1o^p$Y$6zbycuxrDj+JBk&?rp9jo@dJ{d-CZ+3nci^M8WQ2R{a zd_#I~u67|lT5x^-mB~u7EJZlSYT3Tq42BOw*VeM<~0WoN+)F z&(6Gd6;o+f&037E-aRT72Q&dRT#jl$$~sg~vh>Xq0l8x_F_`}VgpZoOw*82#?KOXl zaZ9O?SHt$xYB0unNh)oLR1f!LZ_2(nf>nreMS4fVKO9_qHu!~M;`?L!xLahyI0YXH zbL=|)6~UUp&krag+Q@MV7^up7AGnt~QYrA_BS5mcS@xzIp;N%_L^&4 z5r zb5%wxrv{HPsxsL;*1pCkKea+L`qWZ5*~Kf7#X`hwts<8*G}j$5SICT8vLd3F z2hxlmN;wgqI55~bs^sL=nR#9+tx*G6bJC(@J8F7secAx2u+$PEVS!gF3?EA9pl33J zF4MHqsS8&OIpm6GmV&wr%<_bqO@^+zAWkZ=Dmu^y|I_|YVe3|W&Ae3*Na;pRd|$Fh z=oe+fE=Uz+CE~MFp0zr4#bYXZo5i~u5y^^|&a8k)=~n#JtY=KC6Baahh8omA^4iQX&T7OrqoL_o@-qFpG-_3ka^|CoNW0>#gBkf$ z;CLiM-yOs{C>oz;8tJz7}X9H)>r%xTQBQ(fZT=&O-jsI~CVivK-V0QaiVST)Iv((w^5i91K;g3Pk;V zo096X)^xX?V65YFp0!R{!Cj(8{hn((|#!MJ%}jv))pp8LDkNe|x2Du=Y4* zU9#M=@l|~Kp!4CfdR1vIM`Mcgu=JI-IcHg-+_z%0p5{&4mAIsqc=1`X&lv4qj4-j6 zL(85e#JMJ~a~T^BYUCQ*alfr{Q%*mFiqC6@2ZM_2h6>2ymNfKlw|TWg&aPVC`ILOj zYWLe(rg$#Ev6>qXbAgJa*BiL3=Db||)m}@a;3w%@Ll4m8o+8?p8f~XFN=v-dCUAO+ zsU*>pU9_n3oHZ+4l38jGH{_&!i}#IZ&2t)^xvxSLZgNW$kuzLv;;6}ZqpwQOxxJ1x z&U#lfaj3~Ab_|N{r-Zhrk%`9L=xknJANglo^w)wxOlg4`Ir>!@cpO)&hN79})vUp$ z!%}YzRjGP(uSs&HvBNv}C#utRr)1I|deEf18DGq?=Tt2>%}uCV?^jQ;M=iyY=0(Ss zP>Z?p4^BlkLN_XU*FCO3h-A^`)4V$hNvdAFc2)?(@7-NG!2xaHFf}-Tf_P_gcs2yj_{bQjBA)mY>(5$m-gJajDKhcb7+kvrKE@Zv$Sji^d`JB_POzA{3L!b&?AyF44Pku?Ce@He&)xS z@I8QVG3{STOC5Y%DBo7E=zNwJE9J@Sx4lSorT%DA!EGrtT_(eag9iw7L)~ zm$iM)A7k=b@80Gs(M3eW=C?f7Ir>y7_i5QQojO+QU<$8RDKI@LnaqNuj+)(R(a>f| zMHEm0On#L5g}UN^8wYxv^r#0NIH##O#Q;K|>nhzQ(dkP-5kFejv5jSf#@>~eAmCP} zo#m0S*s1M`Id7oSe`NhS{g-@qCyTxs4+`oN?$rD}Azry@V3(CA{mt74`?d5NPaIa{ ze-(aD{2uY#{{RfWAz139?X;c6z1KJfDNfQ`{mkwjgjed-#D6nKfLn61=A!eAna3Ea+p#A# zq+?}clB}J@l3I>$yG@aoJk=8JdYaY<=BrY}-S9E#RLgES^rVtN8wRULeyZno_45YP)^fe$2Ho)ePTu zlP0T@PgN! zE=?tAOqoc9zV$Yo-+G%`8MG8pMFe{?y+Yr5rl?!*P-Zro-*%)0SMO5qjLaa+@;%4a4{@?c}qv*nuy6s*LqYL#e1b|q=PTF;hTRfhYu0aZOk1oNZ+ z)c#DjnvqULK~u&Ui~(OBef`kjSbfq5)~rX=WQt@?iIFO1oE&ZhCIw?H&7#$d6sQYz zsM17pv>a8WkSlE}MP(#y28kDPji6MqhHkYWXwMa9H2`E{vYd1`r>n9WkcIiZDq7UG zvinqjBdYyTA4T2%?I3Alh%|^)d(?YKi`l-! zg5WPfOZJj@0-{;BjHW4#IUEsNMJBE_HysN;bJS9P@wb{(8;4LSb~RTdN9~xq+;9y! 
zNp7d5H_J6VP_fTyiBpZV4{XKW*u_|cowZ+P4mhc!I|dD9?9h8wT8e22#c4%5jB{C$ zL|5jiRGr*cH?l`{3g=G+_h;=|abCti2a4pBH({TpTmeda-z{?0!)tVRLchFru%gS# zkynPp(zv^4kp?0p*5$R}2ls%e^*F_O_=sA^uSTk9=%bE3qb8*QC)}!jY6K0tgH|HL zf`4 z3Tqfl`5@!kw&j7>&mz=!b4k?nt5M!er?pmN2?NR)sLpZ4V=8Lqk*v};BEFZ}t*)gi z^xItIEL)0~b9>ekq>ZU!t)bC>Z0+HcXWE{c(svlHZFiM-#|i6Etbv>^RDF1;`nB{o zc(%7X!>eR`oobuhyq#L9#w2){qUUdMNU==g(xO;ct;$%&%TdT!;MRmvxKN-BRaCVx zpp}?%yRpwt%CxQEbAh*vXFaQv-8YP+_C%=Wuf~Als~E>cBZ|2Yz|W;&J6}W4!@^e8 z78LF2SE1{Q&rh7-Rrvl^#bqSAo`h#+XeR?AuE9Gda0OBufX35PLzZp0BD}hgif5$? zx*G{2HY((37-ZILOahkdwWV&#SBl}S2=qI!^;xSK!|fHR8O8-p+YfmTvaoGSFJSY*%2 zD?%b8U9LubtAbc%)byc?w#LA^k~Z?e;;j9s#e=&!tVr_58Ro8inTX2~US#mmO!a8v z9gTB*&mPfMZRI$@HJ1gp;qsAFGCVF9uhN&WiPEuBdmFJ_OBY7XSQjlTWRX;?E#V69 zagr0RKc!lDteSJ=0UdccKdW#Go(Z2J!u5bA=)YemZvz$#YVF8h?I}5KVUDc z#I*7V_;XASfaf(?*MBlp46?664QWdbCtmv(C7&#MRbs=fRz7BVrZ(U!SBB7^YhjYG z^{Q;D{Y?^=KGjgZ-U+VC6c0NQia{%kR#fhCYDbt6#Z)J8D_))+l04e=8BOJ3=};CY ztvYNW0M%z~t_6BgrxVMSR!hI$lkFb$MVO~rr{=wF8Zqi~)Wl2dni5_rz6~^9f<9bU zJgQik2&(zV0AjjnVPkWOwla3Hvig~fWwDBiO->TIu6dn{Vw_1Uj0)=Z6wW`aOwp56 zGX3s8m6LIBr0`8w{r9ysR^&Ezf<3C}P&jMXnhT*2x7?eU_xAlMG`&U}c*|iK$MCSK zlErts=lMbC1y>Qoofi%3UW$ZPhX*{~rMWM^dSwW__Qw^T%p`~9AUOX3zJCfvhUP^AD(zAT|_T=XRsY`Ifv96C`bNyi7ONbk(r;!VC39f!AP&U>CVx8tB zjC{8kt$my9bA4eOQ!F9}en~~#KMM8#0NIM9uXOlB;`@(--v|CMe$-acFWUSsqYn;g0r=gh>bp$wf8Z40 z@!TIu=fqN$kZXOvBdY;fTBPCjPso3X-a51K{{Y1w7yL@HndXmO@dmT0!#sy1$8&cl z*>RuY7UD1mU!U{K)((_58Mi_Hm&vcJBU*q4;)ZkL1Mrc=of?J{f+< zo;~q)v^=!W zr}$!33ZQ#%Pvc*)U$h^`+btXR1@Mo;}zs%l%`VU|+iuv9H%j;Lg;`2O0w4AwOpYpA&eNTya`dBQN0hCdCTbcg= ziS#h;326~cR?o^$p~XH~l*+j6P=YoU^)>p{7}+0zRhoMYrj!PqO)Li$(a6pry9RMX zpiYzk7(wU0Z(NF5Toa1b3glE#MKUJFlm?V2UX-*@W)xBrflb~h0rb*<(r*;tawq}2 z1t>r%Nef6A9w-6SQAkb>+5mGXC@O0w8}LU;XkEi-rox3u2iki+-?6PocetP`mVSo$6s$GSCdBksut!No(ok&FtGSrjHn z$6`Gyyo5%(kLh0mntCJkSypRCG2}6Q&^fBjD|M-4`M||M$adB8F*$c2XD!yDk%|r~ zeTJz?BCx=#g0at9NR7}x?+sO}fH6z~Rm@I0j8?GR<&)5bhs#j(s|r_k4O6*q?uyvR zWP@PhtCO0x%jPc_6;+XxXBD!Ndl>Suw1i}0jErTzTzbPmJyS72$Yrpc97^hP=iqMgtbZi|e22i~!VsdGWx#>YNS`ar%uDrG?+abkNKf7*d zGv#Oh)&5KBSZ*Xk<)uYWMB04H`19@w1g`8$_N@qS0`L$TsU&+N&#gxZ+s$Jw&7rY; zNbdRAqZMjfq?NcBtlv7U`4uMA%S#^PxE?Cn?mz%gq1 zP3!;d)GIxJ3g>&+}y)+8bpz#eqZ-{=BRz0i;Stq(x5iCPZP?LDul%} z%n~(eHyn2zDLC6fb6-+eZG6WjM%bgd;8me(At6<{Jq0`^jlcwM^1q6@YrAg&{k90a zJ997Dd$W#f8k4cLDqP1nwo*iYdN~zkjC?6xm45!$L55~cF5#w#bYs`w^{TcpMKdz) z*^gSw+nfDa5j;?Y$_{%}0y#S={HtPZFsbtzL9}{Qky=6j06i%7;q_vW1yi0WVv;pG z3gp%2vxW!HnY-Gwbn6%|t{cs`SODlNm+J<&dIdOSgp2@J6>i?i z%=1Vs?h(>1x$jZiTHLcZjlzM|dIOL1#VOa&^dUo0L_r&6$L`4ND@MjG1I%HlJNYB- zirCRSA*t%tml~zK(LrpVc_lx*KT552q}W^Ev0I?Y+lumPROUO?K3fo=wRHjlcJ|Mu zXv-SJBl6GQtmcWfj2*_bAzO&RW=+@}dsatc_)9w*$u5T^Qo;6_2k!a$RgFAqH;E!A zmItBYsKse>5pgPy_z9rR8xg!o2rm{F`^CeE>QylK9zP0+hiVQ4sd^7C^S?P)P~N=X;1D^ zi1rnsh{49%nHD6D6t;FoYG$Q(Y4)=g#!f3aJyE?XaE9j>;|~)-;cYwZGYqV!fIlkm zEl0+8dWNC77{;W0pjTt@sKNb@Zx}6&-`2ca$dAnqhX-h{rNL$V%ICx9n97l>Xr9aA zZyw+2lKFQmT?xSkzLJ3+;y7-EWGsujrF>DRTW+zIX?}9J;8)x}9KErhLa})vKP&^i zcv-zKX3wkP+Nx3g;iV3n9ly(M?#ExTw=2P5srwu-M$QwxfD%?$PB7K_$S-N9^TH{Qf z-bIo`+`n3rx`ho4*rJvZy5o^rJA@I55l$PmI@;O@*rnd>zr$BzvA8O5zbf=5xNFxs z@cWO8Zq+#(oA`c}r*6J^5k<*80QRdk_V=1habMjOB3y*P&q|p!YsTLUQGv6ZfsVE3 zIbT!Kj>J&KvQ4aTeGOQ)j#)UWWsZEq_lh&>q%z`B9=T< z_Zmz#{{VJHkXD7faM{SNHp}ffaovY)w5}|of>I@2x#$AM-mMk}6mZ0v0sH&-xxv&TJpQ@o~x;PZ;~8!acmun#*@cDgb1fN_ez zw9;S_hZ}I;-ZgaVmC4F-#QCJU4n3;7mOMsl(38V9a$Zc6+$1900u&w_BQ=o*mmZ-z zNfhzIiP-Ur?XSkjGwHeP~DwLKk+1LrkrFsqAdKHr4n6#sSQ>UeJ*9mO{ zZLnjO^{&bl^gP=1H92z-9eAqGA30y*>F-^OwWKkKZSB0-6R$N|Tk9M>$gn8icO&?C zuG~FyhY_*ovLv#<$hpU*K=HhPd}6wrtHzEswYCg)9MI!>edNRJJ 
zlUV0wWzSluQHN2A-jaEN;w7rjs$5Ps$h$w_HQ7Fh;(?CdX51;NMPwN-&{o`s19mqL zTBr7yiSp%-BOR-<8yppPFw_=1&}Dr`N|OhsD^^LZ(%u;DW80E?^r}fLyZ-=&nUP9m z3DUF7kNy3=)uDUTRVVW{@r+k~A9I@#v0bhTE=k{6@?I(A4wZ4d-G}bhdv7X4m;$@q z8=h3^S=hZEzd9g|6K*@6D-vtg11u@`5yP_ut2Rku`%n5m-2NTa)rY6ba@U>7U6>y+ z*BPZZxJ+z6l`>gn-Ts-3`*fddy3Dyo>InAyIa2Am381`AH8bFxZ5JS#N7z4gMpUk zyOs-6bvfR#srgr^WA}La@m)XduWzb+9rz*e{{Z%2xNVpB7VYD|6!EmleW;C^S~t9Q za!DCe&+b>!y<_%N_-&zlQT?Gb3ojL~m*I~LZ}CTnuL&z0I)0J^fXAoJbk^jlAQ;vC zEAvbCllbqie$rnTd`t1$#F3j@pBL&csf#`PqqDb{Ep2NTq;SS?2S7=um*M`kic$VS zW) z57NJ4!%>W$@Sv0p0P zPVFl+n|BkRm#5)fuZ)r?64u5ax`+?w&3VRuhtSullVPDwGqK}wPGdY%gkRYI0EQmH zden4LUXnggDi-_IHA3Hdw*oWNQ=jkVifet^3}?9Rlu-Ww>ctci6mTf0-?acM5w@bD ziU2BP`?U>BpLT#6{i&(BS zU!+=`Ogx=IL#h4{G1oqnsyD9F&~OjZymR1Rij%}&3al1$zD+Ov8cg=b1Nm2|{iauK zn7|zew`%;>rVrV(^^6^VZxu3G3r5>VN`<6-wP3`|2I_B-S*CANS7kFR4zz(IJt^`< z+M?U#(=eFru?`E7?NLe)0kyMAbV`Np}CPyw<_w2<${3|->k|kaYY<8#=(xo$cJo@)+{RmkjE(nDkHWj}gdY?2 zpM-upwDHb{fM3A`kfCmXOuJh@g>XJ~P)*-xVI^lO9P5-(uk78$zq*r9)b8~Ouw=Qo zmS)HIq;kjk)h1ZW4dbO>{{TnwKXv!TP;71(>s#TY?4j(@mMUJ>YNue8A|EmgR5=2! z?B5>BrjhS|FVd*YuiHJUKjPimuS*us-H!EB$aOnUO6URd6lKUGiq3L(;B>35BSH{& zs&fx6K%TT1%Ws;*oBXi)hrfQ4gg)Ms9MW)~NeMWM6bj@vP`hUZf@V)+_bs>`dKh;Lf{{Wzh z^EoZr78cGIYV*ZWnikLQs}+Q(chJpr_z5N`2kz>b$zEyHSD<+;pg|o*DXcOuD$M<= zzIO9nl)e)QZa_gDYOY0S?ei)X2XEc22`d0kN>Z%9cw(NZmdI;^dCdS;Z@XFZ zed^NNyNatUR003i{!HLa%S-#P`xx}?Rux(%^4FiBsfDKLGQz$l(LVRtR3LUz2GfyN z7Bnl2epEaI{a_noPrdysLdx117kkVU9uxt|tldQOa}F0j zC~DZUwzLxL)eNes_sWmu#c4wGqudIs0-T_T=|p8`#@0NciZVh-bp!rvG8&+ocjS@qv4+id^o@G71xRU zN#bFo>Xw&LMFyDBZjxBr8=E1A;qumU#qwzrN_c*K&pL((t|k!OHs5;Pn?i!l5hc9p z=z0Uzm~1pre-swCZFO$XwWnChfIgY2wGAf5+FO>>ZiUg!_)wcTcl>vE@lJqGOOfs;!)s=<6NC+XHpa^s8Ys@3rr@m*rl&e)r+q^rSNV zlf!ER$!NktryC0K`C_MQdjWS1xn>~$0CfKVg;1XA&v2VaE+QN-Y-jMm6?Q2tq;Sf1 zx(+c})JBxpUf5gPPGYuJ*g6x|tXM-~=PcT|Z?h-f$6st!H!n0ni?^HHagT1*a?apF zr5G6j``fX_VI-_|QJtAA>!r^lohf8nW7B_M!lr1f;e-%|k**<t zFuRumv9Z_fh?WBY!75E#w$s{hE;hU19I7bC;AvrkIV9g9RUY-Gu`GXjPCXFdRu(G^ zYd3ry zoD9~z?wfmP&24N<%Ja zULC*JSXt@utIOMSV--VD(yuhji7al}R!F|`iY>fAns&>GkIcQsK?B_5=nZeY5Uo# zt9z>#$rib#(D2WVMIg~F4^{Jk$Q9y}G)*F&N$rvJuXOmstlD1qde={b1PDOpa;ESC~Zwp(aM$Cd0`BePBFG})wk$b1P!nM+4r-C!OFXHS5%l1x<5xJ@?romeC;=ybXgJzuv3Gq*h=!4bD3crF>ELv=*ht zyS;#2W5^A!MN@~5iFZ-%yf#yYJ&+BTX^p+Md{`*f~`4JP*5)ESaL-hJJ^-;HtA zno>O~RPJJle9!_7t?D?fVQFNu0ME6(2LiBsy+v7@$>Z9##f*j}nUMLO=DBLPz0Xpd zB_D*{L~9Z*ZQpB=kC%=qhQV2uR8`BKdsb>Rh~#1SCN~Bd6}@j|scK4(6|*`HK>+ry zT8a@@N2vuVD;8f%F@ps7!hladm2Q2RV$y!u6Nf9&OI9uPvOqr7du{}eh1-G#2jf|> ziC$1G;|F&^pO&~Joc21Y%h{u?S;fZKxa)&&O9DhpXuR}wog=A~CrEx4P8*D1PfwO2IrCheVl0-32AdMJ& z#1Bf%zmoJ_CX=WUx*>tJayj~9n+z9;CeJ0KRr}1t9D7$hB??R4J7Fm|qB`3@4!)^q z*LJhV^7d>kPxeUC-*Yk?bQrE{bS7aLjFk3le=2;JQJDrf_QiR1V;MW9dNHXet&W>a z`(5r{+Uv@aA4;-yd+E|^NgPb=fLVv#$E9-PQSyrYpX4L1(blvU;bl`Jj2~4Mm$Hh~ zrCv`<8p&$*s4bb)v;LK^wzj@rZDk&9a!qtfhS`w4^Uvj4Xz>XA*dlIv8sV?G--f-7 zm?vl^*pe_F#u{3LgS##TpC81Wp^ABUB=t_$UF+uy3@4S&`oV{7_8Ug*y^a||>!lRUX)l{%h7BiwnuSs(#RBrh6<^8n3# z1#9pZU%x`|vb>(&)#v)x!apBdU44ojE?K*7UI4Czn&GAH#L9VO(z-n64L0LbVQn0M zgOUbtII42!xA4H%8&fOWBE1>CI+pmeVAv17%3*UaS&M$nq*z zyiI9DC-{=0n$8G=EVC?t{m?soE0Eb2nZ9@182%sPtJgr=By*VXsxUEK(>N6F&c{QG zK-4wsD_urLFuQ>wsO3d7#+q-2^nG*6(5GvNOOO{h+6F6yj^^Ul;7x8!Zl`w`=j&NF zm$zPHNiuFO+q2%WrG|XhD-nub&c}Baj~=OZ@#$GSeA}M^ex8+qs>3yl7%qk}{nNU+ zpS6;%<9vJY7$L@LIj`Ob^SOpP_x^R?><=s3mnOTNhwthOcqE*I4iGbZ%z6FTAA26h zudu2z>P)}9k#ia8NhA%e=`-lfF|=#MF?q4@(Zs_Ku$+L{&tqNHqwew?(UTK+FY$;sr+fRnE6wDg*-72%l59tp}trB-ZC=2#=js$%PeVnsA52(AscmF zym9EfabETNN_>3rU&arJT7~zC#3^f|*xGn|O|gkQxNP*jJUc^c5{`VW^T_`IF()S( z+5jId$E!8vf_(dmSKNkC)>-?p1F>Jq-(q!bD^}tHeA}bN2ma3n)3= 
zFzM-D$*1Z1hl_kwr+5>?>Kf}+m_-S0i-pT>$LU^gtoT@XN8)AHgL7~$f3s&~W&;4> zc=h^M*He^fT=*I^Cn)nihauyf4M7Zj6kvWFR|vaW4C9*YJW(u+i!-j)Eax7@jz3D` zSL98DrF%Jku(|PhtGx6f1kz;F%78ei>#-g{xipkfTYynT6d6oCsG^Dp3NR?5fEd7S zsH$fBwG99i^y`XcC^(=Cn^$j6)ls-^DrJz7ieo72Ko5QpVv^sK=A$l1+fRV)`9%OA zBuU2=YpCX+C<9Z&k;MQ`oE#37qZ2V)9+i@gcUAaY(P5`!>o3`7#0w6;sQ5|G?ypsD zkRF?t830$>a6t@`OP$?G2h%*`@abQco(Az7ct^&XM~KD|ZSIqQ_k4-ISI^KMEA@K% z&10HTZ!XkuvM2kl125=Bd_NH3oE^{8ct;YaO2$XrBM0VQfxFQQl#rCellOIPX%43Px08C)TDh#8+}FWBmJ64vKgRbB;4wDBnYpTL>b|1W4*BAhw3) zlH&<)E1@iq>{i>FN_AJJJF^VF~I9k%4+R_Cu+!k4f-Nzu-IF{L)WKwd! zcw_lc9>s&m@kDT-)o8b8GILU4N|>X6&ZVB;%Ze6`RV^n`iqe^!>DY)Wd8D8MtH_z@ zQ#bCWF@g>|kEJXC%>GbZvFVPLp`D7+ESTWphRkO)0NZ`IsJu&c;rq>MNAMk_(AX*48q3SE9dbYef$c@i z=Sn{jfh#{~$K_P#4o>GAN408OAGSUuK3cC_)}EY=wWDzYh#9R3DtY@Q-!cd7)}#^?Z#F1__9m|zy%q?DApPjz z=cRL!OAF3j6sY#9Q)9WaaHmn3wK;@#`5Z)m1^k6jj@l?K&z|{n(whaP?fFNB79jrs zo+@ALO=ejVD6J-x=l)ry?0s=s`*daON+qG%6M{#rRvQcL#cN*Zny#C46I8cre=vNz zhTP-Z+OmAO;kYJ4@~@>*mtq`kpxH*#h2p92ewnLBbnpAT0q<1QF+B95;vT3}`OK=o z`kF`bFmu5)&oF58f7$G^nhK_Gv)QIOCf9Dwc6SwFCM>w+^qfBzO*! z;C)wLNu-PAjvxucFMnK-`c->x2{t_4N91E9{>P=wAvF*X=iYCB@bU z%x&D282>zm0RrVKlO^R~7Yi zXwZy0XMg}wriu)FZKHkDt#Tqky+gHBPIt<)rY)qPnDT- zL18qD<<5)(^iX*Gz^e8TNY{Q3Sb|e=t89b=s+Naj9?1iHk3tHNTfjn#Bw?^?slXqgS zyO+;}qspFyWOS}_IRTlY3P=>W5xS<$sid|xN#q}xe}9kVnz3;W%oefVZNzePE7OiE z6x+hehA_y-qRGWyn#WSR0vGw?9T87Y!nyeo+Xu_B+1lxY>``h)cpPq3D!(%*ZsZ(v z_OEX77lXb8cr(M%-)TNF{@vBC6&_m=AIlLq`F8>G;=XRN@b0M4ypY&Ru|zWN1AM-p zN?8TA7txesdaTwe`5W#VPhd{3yShV8Nc z03QvIr}4#m?vL<$;n#`$F7tR(;zoc{8NcJ!=ge8p&bs= zxyi2P(i<27fwahO1dweQUo4}4z;T*Vz+)6`&W)CBgOt7odLFglJy*jz;qc|f)uqgl z5sQm?Ga1jJAReC7{ZB^Jw5y2UHrD`-f%*Pb=hJBS&?5QR3X%e$LmoEcueEd$+gL|+ z9%A|N-CC78a*y$3g*vK#!hvznL}p94)H(a4as^_g!jZ7V4mil^U9&@=!D26GZzDa) z$fm<%pl$OdGDxyx3>Sm+;;{PKTb&Wh>OGEWV}{WM#J*7v_g6ljg<80gt}UZxfCe4X zy;nl9_;F>X+*@i|OX_zs?ed9}%knnoJY;pq{HvM2(qpub&9sDD#5#WXI3I><6^y3j zbw@6oWA7Y#*&KzCd3o;LE1mJIT85k9%b9092*asNV+W_Dck=0Ne!--S1s{7 zPw>r;!+l>*)Td{^cvI}ACd#He9CWVw6~B2E(DU(he{&<^i`$E<%T|I$cT#)wA4;Cx zQhRld)qds|oM)QCl4)KATX19GFF$u6V}tKrlVR|);zRf<>*5Wqh?h>&3VG5tRabs- z&rjj6xT%SaIH<)RnZktSbl!$7m95M_XtnbE)>X;d&{x;r4{qa|!?4@J_>@C|7(A2c zJ6Fv91@Se#m9C|B@t?F$7zifEVO+Mh!cM;4K5ZDdwE&p*idu$f{9;47iM(e#})lHDba>=H2<17m-{y{cak__Eu=ad|!f zhA1VtO}C}gJZFxG+>XAL;>!~{j`2N;m|ApWc5_xYnn{OjHwxMB&=0M4J|5FNKdM}t z+qO`v&~yG3^7Zbwdo938RzEG@m1iUagAj#pU%KH6&$%%qb~a zXVZFx_lJ#uv9?4_ykrh*77r4(mv7|7bLGbvIN(>9Tx-#|@~$zv`@@RMH#XR4;mnNV zq5hTU(7{QseDyIIloz{9=)7Cvb-U0db10FBd&Ksr~Lprsv8VbGF0|`b1nFOlczLn?@+TGw6QW*aL zb#lL*T4=7+e(~}B-Z5Hzen^LgTUhhGGs9xvEB%_Db_iKX`9~j0wEh{^B-7%yDv0WG zc67(L73en6-nw~aQG?pLD?51*eDW=DSInk%Vem-t$MBZ5=6r-V&>!%wqf78Nh%T-1 z6Dl9}vOkcoVYHE>ljWHQ;48nwWPU^n`h&qxso2)f1;g?3w%#l0y#rJ6jh(UbG%G2S zj;^cyYxAtzig7WO;?eJ{5~m*{^P^Dxp8Q#DYAx+;qnCGa7|5zoe$W0eMG+k`Vn5i4 z&-4}g{h)Y{S&CJCOT%`=f3qTyUeO1P^{=zQ4R_b0* zMXsX^e>|7*F!lpKm3m)WtsTYgt6Jad3iI7h!SH#35u;(>0FUWj zy{q_(O>Ck}Wyf%t$LU^eed28nB`Y<=o135sjQu#TOE1OFQb`=Rnow(+Cy48Q4Ab|s zyG*$M0G@yTv0fdh_*(5R2=hvNWry^yYw^d#{{V*?1-m}2b38=k z5Y~TemSfY+J3pQ)^-PBeijsCdJ2b6Mowja$QbOM8=gq;vbu7ap9~+tJA?$y!MH z%Gu2*YJDX)i+pmo0xJtpI6TcJ;nVxLtUG@e{8fj1ch*{L!1U|?0L0hM_TLvI)f>yb zMvFbe^Y{$a*YRGN81w$oG4|YRvOGUg923vzGtn=6YvLFsWuL;fOh3M|e82hyd54NT zSvIL=mbd;JwYDjpuCjmjBC#+2B*8SSWAjb2^j_k;62Q)c(<26Q@G#;&OeoB-p4HSM`Y8fR{OX$%>Ac( z{`t@S^HtosBv&x2d1Lth0EB)u>PC`l-Z>pd4}|RogFVxjU@aue!P+uCvG`P%UJleP z?Y!I5Z}0#D}VkHfv9OuXQ_Gj_Xqu0X2@S&pM@nli$$=T zq}AqFu<(AZsOj_SRs+n6+Xb*kBR#7eSZN04Samm%jH#4F#z)sUtcbO3Ue*SJ0`^B~ z`4NvidQ~|ubp{bMiWG?OU%qR)mowDjsZ$QNHZ5#g<_Y3?BUQHSbDzVC<#mCn$)^a? 
zV5=7Mg!|ujin|V@BSRrzyX5LeL0NY{XoJh-Wyn@Nx#{iFs#3eT%~ETc%VXzn+ULV| zeiZ$ud{f~VN}sa*mN)YL;dg-~+`Tx%0nl86i@;sqLY z+~eeW{{ZaOJ&a!)Z6}+OES^LlXKdw$(!SWNKiUW2{;T0H8cX6??_#^K*C&!_-YkYd ztI_ub{WD&9;LjS#;GZ4ozAv_n80=;8q>O#$Ph5Ie)qk}k+jx)R&xJlDXf6GtrfP3I zLQlG>B8(KrA21+-I*eku@xNGwc&P06FD$J_rW&U{{SVEb5U`d6iZ+_Mn8w)#+CbfX^kI-lU=Zz`=;yV z{61q|e7-K#jEz3s@HqKYze@J7xXSX~Sn;uVY$~#Yxs09})#q0K07sBA^$IxrJ631* zBP)l$S>lZd4TTvWnHAN;tzO$f_N`Y?k_d-Hw{S<&xm)dLRlm4qiR6;q$NaVGG19wL zYP&Pfo*Ip`Dry>TgZ9Z6OS_PO2M)OYRgC`tG{0nNT>S-1`m`3X?N&S7;Gs-(_s11N z)>*DEw{DIx?m({URNFk-u!MFW9o_DgDK^XjZ%S>-tnvM#XT*)k=M{x5%SR}A56Vwo z)uVP5lP(B2>0LbLKGl{q`6P;L#u2#h%~6Wh^6KU1v_%EaM;ITaRfrCGSM z)@^j;)FfjX@<7H3^u=hNf$f=JQ;JAOl<@77+o7s)PkPfJC3c{#S6k>b`+ah0j5d$o66dDTSANMXJ0+4*lDN;zanq+t%pe$7 za&4Ek+L!EFtkOqwcMP&1U%V9Jf%BP7bvN3YLU(|}B%Z^rDrkH|s%fyxWv3F*Nc+BV z$j_xvk>iZERN$UC=~gUZxzr^4Et(g0=!!CMIqB_MMk>cVp0_PQe++HD;xfwJan`dh zE+CJCkbNqo>vbH)7vQf1dRB~jHlZ)sBuU_TCizG#K2zvw^g$`PHQ1u_+er#Uav3{{ zAh?vXGBF!Q1}es{p5w$p|5?J+x44m~SAVq+m<8;Lmf^r_@UduB;_u4=mF;@oSOvR%v1 z%_m>Ta4RLam6>_L#y-4PQ7)TxZ)&%)JScyZ^;1CPSGY1tze z%;n@!o*7FR8~0KV;XG&QR)LH!l)p23bgc#RwJF~E_TfazK289twtB2<8Cc^`)T*Y} zxNFLjU5>KDz;L&P{{Z1B)h~dBrV#lE4)A#P#y=Y3FYR9_P{Hqq~3(avRyDl7nrwTETU*%cRjZ1sx_=!-rEDL%jn00HvvT5+d zlHlQ;vCwDJ9VxdOY}U&1-ZW{j;{k{1io&~j8VKVbF5V7!2CMy!NWi$vQLm#ZHL=CW zkN?pAXk_swrFSg<0P!-pRBj6KOpe^d;!ic$COLn4@P?hY=C)T>)l42xmHfX<*XQ+c zZS7J&@hzu?qiFRTxo5GQW_YcL3^Dg}{{YshFMNOC+l?n~m7!ZJxB7{s zZ{H8E9+mQf+R1YwewE3+_OWU{%`OlRH~0U_x9$yBr%gOo3Qj_n(l@f ztq(S?Va>Wb)$wJ_R>I5dakELsm`6(I;no()_ z4U3K|PiAWh$%_@n7k}{zy%pB2cWh%E?jwSKrBFL9khfaSLf3PX8Q4ds6ni99Ir5#J zqi=fz0vo)<02l+D)l1lChTbXGG3AV9w{-2&vVPw?{^IGRlyu>UttjoZDQ(J19bX(4 zZUqk3TAY*v2E>Z3HB&r60M9c#K*{3M$GqW6)+n@^i2N+{p}KPuzl zxSCZ(!l;zvb!F-Q0B;byd2^mIo(TF>{5ptWLRmTv!|6w}J3hA_$559VB!ccsobDg$ zKl;^>`i;1WJekaI++wlZxrt2tW2I<_bkS?S&)o8V2035GxQ#ucI_nK$)*%buU&!|A zyl?Z3NQc`km>@0#eYw2Rxxf3htZFE;-G9&wuI-rK{% z_{7d5gfGn4+*JK)lvm+@f_UI^qFLi_Ae)C17JA= zfn5c?%s*_*()m9x%4?j{{96^Cn{#Wd+8c>vzy+TreKF4!(0Fsi{t<%O=Fh}h9mUHu zScTE?w4eA8{40)lXBBpiygc3SbaXm@jMqw*6}e@+gCrFLo}7MFy%&r8Eer0JJnrx5 z?Oq9~YuaPqul8NLS;rJ1aC+|JC+k_!YH%?qSdmdnCZ|_cj)~{9qoT3w_a7T4vD6#v zI&?ExM9T76u;@v_MLozJv0m}PgC#7{8QkK z0!vrEjNIBy4&aDBWN+fn*N@7$y=%q44|RP$N6=i`Cv|?mm3-R6yRd$ncX z+NGPLMdajkfg2=xfd8NTrS@ljO(n`eLSJg-K$>;BoC<&0IWc zG|}=jaZ`e6o7UF|47T5KHjSvgv5xiVJ_hj|T7CN3T0~ajVmZ%R@%<&yM8$c}TIxJF zyY*{%dN4d5;<3hIDastSJM#R-q$hLrPsTqTZ2Uo~X!m{`(BbgYo;MopRWTw)wFVjLe`h+HsSQTvv-r8bfCTBdBUsFxDykTwn;wd9SweK z{gF!gqxHp~RJMoNKMG#+#hQKVzM&jHXNWltzskHr;+5>$J;K@BKcD6xmwNOWAawj| zhSRm}Ys2x+8q8oU0A*8vSJRHwi>Yg^s+1Gwrrh=bbRSCPp^~Upbr#)^LcV&c;iF0J zjEB^2+xHUx02O!s6_QykrE$|W#rbwI0W&`1?_EBHt1A@KZKDc6$spr3>|$V?Z4`N! zdb+E#(__?NW{TeI?E~7oC=Z6M$hnGVX9N4S;t=c8%Y2YXp+si~73vmRrN5cvz~3yR z40FQcrF@nn3o2>0&#A&>4pGqO%wH?i_p6KTS;&#RoxV@-)h*E6D3&Qdcu-|WP6jK5 zD+<`=(s7()9^RE}O_o%SG6vZJ*_`~lPkehpPG?CJw$fo$vHt)8t?Nax0C|#rl|h$A zvbv7i^HGIz4+=*dde+!JABr^cCHqNUyG?xEe`)-uwTD!{m~0lhd&&0=dKZT5biX@e zK<+(-ayFAkB%7_jm3n7}bs19SW01=b^+>@VoqXnNU0TPrhl7NzW9iR_S|)~CH~!8i zZZn=M?mq@0B7IZSK#mM6LxJpC$Akaq#tRP4M!I5Buok~aK8Ky|N*R`$ro}MrCOVQ=p z=xB@B7|H!|E11?%E#Pr&rj*b8@=d>NJtE;sDZTT^emyf;pRtBqQ;=5>! 
z=TMn(+ExcYPHP(Yh`4NCUzI{LP8BZxXOno&#?tyWf_*;V$lU{AEqq_&-v(Vi{{XVs zgdgoSeQDyD{7W$>`&gE5;V-m)oh#yhi1uF)1=FRzrwCt0lXY`taORo&CRavkiofJ} zV|XUrZsu9%wu@o@DJs){!Yyi9=GEW9Tbpw^~W{)j!(gnsIMub=d(^Bj9)5&_P36|X%B=NG?Us` z&o$M|YyfD-$?wv>b=QAtjb-(JGfKBl?9sObkDKtXGoRr90Eijmg2YK8wsJP$FX>LY z_zU9*BU2PXM`BM;!oNYwxN`wQDK)9_IfieEl%KQM(D8T1%`?Ok$k%YJ$XAuyUL~h^ zhfB4ZMw3qxGY`A{+V#f$pFDFM9&`%>7WH7Yofqu)@i8T6&8@U&`^I(W^{=tO*TT|P ziSl^gtJ>GDnRk+3sbI+REE99OEs4z^rTAtE*XjsY#2s z%5z<|x8R=<=&82u&Eo^EIsI#llFgxr#UiwBd!DuJVevF-Y|-Q5FqNtFG3U9sTUjQJ z{!(d@-2I+4^0MaxAXatUu_dS38HlFCEzPEo+N?QbSyeOnabCouZI22y=JaOL+!GGu zQpdPGt0rcXe)ICbIj&wg8_HoK^zT_0lX*| z^#Ev~fJX`sFpfJ_6f1)qv3&ly=Cr2NPHKrgjmtYrIJHQBv|Iff2h7K%{ucNuBi%8W!T+4DffwHwreVt^AL>j`F@oi#JY2|Y|I}Y{35aV z!{M9lTK-XOHN1(2UKyXAyGN%S{uTM}@dLqLIQV(u79KOUcgg`3#6Rk`cFFax(#Wr_ zqAYFQGT?0;t1ABX<5ES_{6dgiLI}iCD0RpCr;7J7tbC|@y$>fHf{j>mz0b-iZ$-mq z)qmTxrKg`%Kb7Xw5ynsc+mQvS-ZNI%kt#d~3f2eh&D8>gMAA085)qcONXU zxe53poDa&my)X7s@Fj+WE%e&G^thOawy4>aMkkH9?ah0&bE*-KHABk5aJ72%mL7xM zKfwI?)$O56aHQwnxXD!j1#8p1W#OrG5@2^Mu1Q0Uzb|eFwRv)}bjck!udd7L^V3s~ z$K_eI7Ydw@&}9_QYzk`t2NV$CI*NlPjG72> zu>Sz#soS8am!@gd5;eZj<9A8wUZxn&+xbyt#H)bu|~eir;x@gKw=hxYRc++S(Q_FJH*8QajGuWIU^ zE9mUS$v?Sox=HU}3w$K;_2+@Sc^gS4H2p;RH!4rc03ku%zDfJ6N2taHeXXcSBDL~u zNtW505ghT3xvzlCuuhFf%9W4Na$L?;sHoI>m*MlI96HorY@f~%M>oq@eoemIyoNLW z$;CAt0^|%=Em-p<%zCPEgZEEDwt`E9`!1g{`Mp_Nr{Prm`)6leLP8`@m*wbjT!dF* z97sO-??uF{6(1xiIXLZDdkB~HvvOUfkC)2&nxrSvUd^Jp-RF*&s<-zs0w=U$$VM^R zqta|=)MqW`q(0}ZbW^lUDpbD5THYAc{>^QwUfe>_Tn-R+toyMotcKsrSvLOw6=%|ALUL?B~tYSPrTJxj^A zw4AC(Y(8D9T*R^)R}qi9>Ds4j>&v*+nZ_uU6E})WOA=k)O?&(K&fwkq@l~Oc(%Dat zyNh+*+v!$s;&T@`6tC;fA2i~pI zjAFB()a102?G_ADMd(MN;Qp1Bag_mgA92`J5kHpOy#fAJyea5oDe83TbUfj8Zo%{S zvsy<+pZf@F$uheadM>AyNp==E&T-nNbXzp1h>T*@jhh*5 zZVaDjc94&dZ|Xrke+uZlWuaQ=I;>hwm2>v1cYz!@=aZa|!;16bWR6yoaLzH$rC+#d z?d_qGDKnh({vMSv!?gDjuDKj+M|ROg@;?Q5sG_)r5%ztX^I5L;^GW`a#Acdh+S#@w z*51tJHJ0%sUoJh{X^}L3a9AZMU#3SNg=5Vmi2&Nzts80Qxbm%{8{Ikv>G)ODp~^)G z^P2BxpgqD{B$qQ>VEMjOZ6ROo;aAqE$8Q_Y9gCtV8QYL|oDbq3!(T@HBmJg6CjQUb zoZ6>>^i2v|>xjIjmsGQk^%Xmvhn$YayrTQVKN0+M{jKpX3q_tAhBIDV!H6O){nU&f zm2=QnmF=eLIP&Oy9ZV%SVdE&u^6l8;q0|1-jVGF`wzDp0(ul>ivHa`MbRUP(&VXu{ zP}y5a2cB8TW1sHV3wL84n|9EyE*JaB&{u9AqUQHr$BT-?&Jj*sOv5R7mN;$OCyW!o zrfPaFt)jsd?1ySxXMlQR6&1{^k^^rc&$U#$y1%edYuDyx03JhnbfzvtSrlyx9$qC^%bvbk}=(8GCk-w zp&c|Ne0d<289i#vjiSpTj?nzTbtj6?nWXzmLeTB}t2#XIAxF+Sg*9BP)Yp<5&$N^Z z0Z&ebq>|dj-Yw(Y2;(%%f3@xpas)Z;>s?Xs?W5a8 zeHN>49`H|>$Jmao~CxRZPiSL<9% zGWl|HwXGf6+x+q!ze-%Jad+71Cv7C@>ou#qt?9-pnAEM~7+Cs`;vFj{DU)t(m-k=n z(_UneMml?rYd%Czwq~jXf8K%xOfRE^fivYhfyH8ZiwGW9IqRCY8H!=I$_`IqRqS-Q zqlnhU`)h=^-e6ynPRE+J75sAW7xUx!Vyi;$7~3VPg&y@ZZ4<6!D!#(9dTe(}^RSna zMKn!rBN-U^hI39yE|%Rw%IlnW^sBa#S?ww@mg!JBPb>K-yS8^;*{FBwjnsKo=3T_E zBH{VxIjXT`PEb{wr1DPEMtucOWf+%^l;Ks!WMe8yTeBm~V8w-6wqqV91m$~ns-+HU zWQ_TEQaSBgR*YW9JsOQ23|BkHob9x?PeF>OaXjh|@%xUSl|M|pg2~Xy8e9?lEzo|I zV^@)NXZu_h!rRLqbw26-Rp$F9rJ?EjRN`~?Ok`bKmc>s z)DD=a-&FJOo-6~j?QOqzj+n1vg*JSu5j)SoJR!SEo zgMS4Ri4lOu9@X-$iazmGb~bMBX1gX?PUnB=Q7c?5S&;PT2TG!nsR|EX-qq1Wu^Tpd zxxnpF$sLVj8>_Z*iE(hwUVpkeVz(^WgNf7RWfiR zDNpy)CxOwzmuW*v<(ZG28U6g5OWp*Tay{c>^W1Z@e;< zC-K3qcHaL0%ej)_-Ezl*dSkVD&HF}rT)xd3KWe&%4H*%veN=tl%BWpki6mLn;QFs` zpy~QmYn!8Rh-ek-1yD`+DHtS+N0c*H~%1J^jkby@_z z7yD|N1vy&a=Ax``sIYK{+bQ-{W0gi?3Xy@Ahl= zhkqO?!1`BBsmH|;lJ;_rpJ=8y2VM%U()**W`J-5SJ$t7TP4_WD=l>VDB4 zIFrFg!Iu6h@ded|%qtAm(_YCW4xAeiUeb0(@uT`+p9iYC5Ko zCA0^~MFH7JQrJ8XrGGX0SBQzzqhS)Hk}vNR50`hhwRCuIkUFk!6HApXu6TS`#1p4d zQBOs)@1f{gcZwMq)mhNAeoo*@u#fwhTybB~_w3{U00l+xUxfYw_*205{s`3dTip*! 
zw$rA$hDBtZ;fyJCVmj_zoY(UoZ+i>LzT~Rc^>zOM1d;Kd!L1+mF4FFPAb7V<(r%km zy>BYv7DkRAHTEyfkKn~^$N4=UWeL-bo~MbI=QOHPe^{pH+SmOLuzno;fB1v(cR;n( z{2kzU?&7>%=@Ha~ZoJ@z0XQJnsUHvgXM1P4zW8&e!0Xj+HLkhz9#wq@@Pk?S6Y)1x zz0>{=stf7v1W3hnKHYB$fvobuz$ue*ogXG4Y4P z@7e3vPA$IChWjfr|TM$Flf)OpZ|uu^5jYY;lEO)Pwq0&luusqcG}KqKq&-(7b6`;G7WyA!+C}#arYI?bDSMG%JMwm{3`gIyL{gn+p9_a-JD{y zt^NV{&g_XU{A`>16d}Ly+}7Rqk3JAfq+RP4TGG#NV;PcXf(CuKz`^4H`h6?ZG+zPV z>N;#Xw}v(Pnb9l+dZWj*-+{joESK$*X~^n&1h{V8R)O$8k1Rv1+G^Vd z#?v(C2OghF^grxR9n8V5tYB8o4%?h6{XiHLo+F!6@Xfdyx@ohgWA>ooeY$a7)Unm1 z?Gu)+8j4z;NvV82@ipzSy48)8aHcnHeaFyoT&%wcbSulC(xh^*9RT9J!$Y_6KD86x z=~_;nV^PGOdSmbv(r8*|iDSBJjX^ZjP!GzpsXvgexMo48zeVYq-?f5EZ-5uN%z+iH?;`n#%KXfr^vA7nUNW|}{{V!yTDXJ{EZn&q z`sTeKNKyX)48?zHmr$d<>_f?8^L(-|YZ ze)_aZ(?pwB$t}=x(S|F;ej)e=N6>7|&YyB;Dl>(QsISm{Bf?%SkHdCzM-9BOHMDOQ z)?RlHuWVP3_~XO=Hq#?(y=P2>!bU#$J$M{f=Xf3A$g{ zxQa3b#|d1XqRkQpXo8=5_g^1NtVtAFdP5W|0m^MS$MUbAZuO8^VYwN{uS)%60bNc9 z<++6z##V^$FNL~emGm7dX)o*!0!9}-YP8p)RL#0&K<$iGL2>3ow_q)fgSI}E+1zi7DHgSW`yyqagcd6ELf;Y0UH z0MyvqEF7ZK_`YD^m8^r)cMHEm0N)8TbD4+`ye8@9Z zlx^u#GlIKCM#FDfX#n(Nx|T%)1b3)W#%jc$Fc|GcfUZsjOADM-1n1?cBl$Yg0{uE8 zwq4`+X|)-VK`B5TkCc~|p_{s%osSqIzQFh|;+gzW;ZdbrNxI`r8(l~DM}Mv><4rE; z`5OQyW-Ge*ed3$11NgQZ$d1@=8_SzLFyAQrvtDLtgXgDsp7u$LtxWK* z2ZYT{X|hj|+z+J_?7NIxM}<*>p84@lrFb4`n=~=C!#9cYV%zM^BrYk zcrg9kb_9BgC%lpk)~cm*iuK=)zB2GHjWxYK^Wo2hpzy_|wW>zq41WGWoyMUGgx?0g5n{r&5}!HDTjuLKR#oyQ15vlMdZ#xPc4(o-evOdeBVy5Frl7`?Z?0 z%^NDH`Gs~WQnl=P)7;XE8;g1T$2rFU9+ke@e432bdVZH1&vh_%5e|f|J-F*!tGC5 zJ7T=-ZXrS%9<~OLG`U&gl0&O#mbV%W%-43wEZ%*^ui7OMOL6yn$stBqS;CE=vJwCo z=qY8qjcveic<)=k5%mdI#=5%2<>9WGU&}dARQ~|AwD#iyxXEsn;v4Z@w4-WjZ1bzu z{pTTjPQBQU!Pp1tqZs~G4EJi^BrA`4h6zwZvX3Hu<3|uEx~!2Zxp1H6Xvfy0}=Ob0J8&F762QKGjYL^&dMH(iqVH01s+-wwg!s zw;kS;$yV+Wvf%AL)nB@`sm9N##ZonSy`@QAJ(pvw(tJ8Eq-A-6l=Q0F?w$RcBzxr& zZqH8G#d2|8URiEf-;-6=S*{x{{D4W_)9|gZ{{Rt$IzV2wS+loz_+U9C7$@RD)?qtiva_BNbas z2{9@cfmihCTH{d`kwVi+GYDdd23h2PyMg#u1zM_`=4VwpJn`j5b4BL<_97P@s%Tin z_vCc{0Cu!Bts`CV{p)G|Bh%;8rnha49LuzptedEs-akH6ZKtJn;qesahVa!Xn!6yl zVI=dnq4utWQnK*Xt%bPL?qqAD{;&bS^yeOxB%kn#TKG=GRr^E+?Z4N>jzx3#`n8+M z7_~)YJxUJM=eoyG5RkSMV8270?#K93;qx6%0sJdU8#ophy_P0eM$x^x4Ek2qm!;^} zUU%BBe#RqQt9trz`cZq0Qn{C<*yH&KDA0Y`M{ILB(6Vr<^S2BU?o)Ipq0WRY>e=jBg}%s~mCuwJ+HxnC+DLbl&s0lWi$Y*dn!{)JrCyt4ke%G(-Km#MH8&zuf2PSM5W__}V@5Rb+u};>=A|p}HZE zG3JAu>gH9EL)yX+r~1b2?Oeax37M6jk8YK?HCwBF(ZCd|fxAA{m2-6_!-P$zt~$_f z9fw##BtkxK%Do~7@SUfDH5*-W2|u(vtXe(~73I+00o3fTP1bCDFK_Vn4MxiH>U-87 zG7vbdrSU}W!+#D(l6)KZYhm$6$GT^LZ@fdL_+~rFBaQWKOLvh6n0AvQq_FP`*OnL+ zYvMn_%{SrS#Z7zRU5ASF&k_FsVBbe;2DMMkYA`$@K(uu6`fgTRah3!EGFpZ762Zxt7i5Fo+c}aPO9>^=(r^)VwcmZ3mifVw3_K?)zi#uL++} zjIHQ0Rwn1K9V<%S=^?jkX;6*8^%$%)I8CKqIobIq?(x#D>X!3~z?}K7w~d`TS3lb| z`zJdXgu3B+6I~vT&WjqjQk>&O22C%c)#}rZ|9(?ZB=sEA}&a zXrFWMR*k%#V}EkF{ejf62ccQ>U1&mjtpcw{Xpx#@7D8moE$tx)lRy{5kz=HrnvI zVxnUtlKFgNwR5~LCa0#cT;Xduke#RHUX|8c>i%7!fTntN;8si?9K4Hd)xz$K^v4x> z;kH-AN)sLW^V+!Ty$+f+IdnJU)vPXb^Ar}lWF1>PR=$bity4n3OQ?j=IbZo^JiLB& zli^<@N7f_O^$S(KhTr{cJF)s#OXGit*M2?KH2qLn#bPwuat$`tbaDW~N|e8Mrm)1H;`u7_jx zyLY{^yI8hF%wv#uB$5ZVdV5u=E-Wr16VAp)2mxMzeJjtzWe}Wg)cQOYbID`uDL-mY z7~JX0s91P=M@_#jH!koG9V_Qn)ph+!=6zy#Vo4>Re$@kD{*~wO>o$lyi%Vv~?m4R_ z_TDBEBT41=TE>o7$Z1N;^; z#QKfhyX@bJopn->AGOZtJ%gMsN^AXv+DDy(wI zK2+laiuw;!)S$W0Zh{s8Vs1#rYvx@lYapi%cWtks;oQ2^=8q4Fvn$0L?<^>Of1BxF zX?z;i^gS=(^fuQ7rP!FTjlSp4*N&L4gT(P{VYwmHr#%J-jhOI2FA-{z!bwmYmEwp6$VSn0E$Mdhz8-MsN zUX69(yO=z2@ajw5WU(<#YjYaL$VtFpWK+P$7_a9F8~EDggm18YDq}D2H~HJ}N46{T z%vT4~aCGWRPRRO1k&o)XaX{XWurUHXmDgr6n9yAtE(KT7;o@vfwQw|DOT9DbFT4gJNd z+s70A>gJ{3=2Io@YEzI=<|o8Yij(;FS+vy+ovcR4A1JIuFyG6JkNvG5Ebv6gs{a7* 
z0y+-W>fZ^!Xzvzy4_I9$_VVWH$${7w8#T)*m$ z4pFqBYaJDM&lx9Zv+O??{?d?N>B(W>n@im)=4Jl?NG53Hi$CmwS%rMJsB3R~re6!O z<{PG8Ik(}PZgGI5de+Z}d^MqJ8hc!7`@H*ay&yj;=aG+~;;8C28XlW8?G?C?Fc`~Y zZEJ(b@KkCkD415w@s(j`D;b(ki}f8Y-|Sjt{nQcdz(FLK>GiB_GsN15hc6#h({JXt zozE=NyXXD`ll-cSym>RDDx&`Y0IpK;;gVYVSmk(W{{YLg=W#i%CZ3+qwR>|V)x&)G z4=zh~sFeBfNnnm%DuK{rHBSEk#$GqPk$%ywY6oXc^A}&md)76y7E#*+ z_P;T)$mxp7n(znml>Rm1QNZD;yFv%i)y%Omk1Vz{FD~zIrw^!4E!Vlost}tyM%_BB zhtSj?X&>IKxnFclJilk1M(xxZeQuf3YC2Vv8P-(g;j*A?g(Lba8PF(vZN@*!mO0=u6R>gzv)bPYJf#pD{Oj|1;) zW!;abe@gQUJDbaydilTj?Q|X>*CNwyz8HO=;wh%rV}GohK>cgY%rJh|-bdH)jyIO9 z{{X_z$*alF+8-W$1cVDFk({!tTu*M_Y{K#DDtL%1!sH)uXZCRu>|S ziiLjXZ3(y*aZI7f{mT!# ziui2b45`$)_gPjWo)+v-wYrg2-bXSse-3wJ9CfO;mv?J&BZK#Bat=5r@$FZ8n+B#~tc50UMH}0fS(U1E*s|riaPVUw{CUCMTnEbsDLr@Xq*vF=-O2#R( zwV&k_%u#C91lnEn_qNW$Rs{>>0bY&bpBa1`zVL5{^nVWcN_{`XdSqm3y1$&O9p+e+ z*q&iODivN)dU0M%myad9p-U`z;aaF_f>l&AjjZ5t{Hqm9+GuXG8y!8@iL^=V<49wf zRx)yAIqQ$hrC@5>?u`>h>n_G4E1y6zc=}g48?;-s*6%2FdAxTOK0uSBktsfNPhFa8)BEV2w~#4UOD(b0o4Z zHVju)I8^p$B}&niPjvO)3VbB^m*Ss>+TX`514g;O@PCGHVAFLSO+gV^-Am;%MjII1 zeCYW90BBdxx;MhjN8(O_seDV*JUI99X*!0VYc2hw$C&)qKf%*D2RwGK7x3ThC-LXQ zp962dW-kbMn&#KQy8fy)jaNfUcs5;2634NG>^#AShCTrtucdo$g)i($QNZC$)R0$1BOTe;W9@=H1G?`rCsym9#bxp6qebye9G! zZ({)aLbLrV?Xp^`SXIt^9}z|o#J^@wr=*XC+H&0ouhN$Bfc@I_9jPqjo*9w<0Oyd! z34Fl{u*W3l9;UkwAk9|ws&fXx+fqwDT>SgL3}T`(ZX0W8Y-26S6w4oplV+F&mLq9-)_~KT79M{CXPuYxWJZ{{Vt_U;Ie%1-JYZn)dGB!qND8 z>-z{nxC$37~*@rJl#H#&sRGkHiOOfldE z00Du4(z#YsAnOVw|1Y{0_ij%}T#;xMLLOn-bzxzX~1W*$jRC?9V2>3ed#X6bM zbeUr_I=iToO|i{y_nyuWztR?*?oV6eQ7=#zH5qxP;lR@d}P1G4)vH)1+h z3$1vj;?nx-O5)yha)rUl{Je2qZFzGhq60)(>!Ot#`^TM)t;*C-Z;w>d=0S3kDkp@v z*!tr4;qcV#;u=hGq?S+zx|N>+f#{>2x$Ry_cdD(M4Kg+Iq+^Z1@NryRo*>q|$z`^h zLFX>kP~29Qnc-XQMPj%gdOm(*S@KLOVxa#3hJXLl{zkX2*@F-(qS5s268a)9^)YWR zM1(K%IIdOp2!u!v6{V+Wdj6+lbF67Ltu3vH4J4-+LyxCQ`5avs$$PB(j4fF~`^fg| z#nP@m3R+oOSfk(A-tCg+JVxaopmH!sWL87Yj6m> z>wLthFzg26-$B~FPV2${01-S9;k_5)t;MC*m1TJ=Nn?H)cNroBkO9i$ck5gvo*W6I zuBP`fpfUtl2*{8&I%5XCGeWuW9ruTIPYnD(*L6p}w68ZpXOCw@T`C{{RPTnx>O^uGqu6*1*T-usGYh zIOD%cz402?N=;HNM$SQS^Qe+hA0IJXoK*TPw!L}bYlqS=B#t?X`OrqYju$xGKOQ)) zIAdn1w`05NkoJnj&xig3@gI#FP`mJ@oOc0K10<6yJ9>Y3fnA=B`#bpF{^QHl?`^CM zrvoy=`2PUMjd9v_h2DW_CH=&%klij6F67P!@c?n#(yK$L=-1a)QcbRhsBU+@#}hg5&2B_4*pfw7!8fxzO}~q z`{I7H;;ZY~bqiQD)xTwLCe4c~002-4Byyt&OMzI2a(U7;80RI4M@UJk`F0QA%iJDh-Ol>XK zik9l;VKd7jjLLGulb_E&rF~>*$*a$E;w6cvZc2L=2sd*Ra*O`}02LJO_em|oIQe>0 z;+h+1{KQkAPSsK3Pa(07o3(2ga-200ADoYeILuq%cgh( z+L#esH<|oBFYkMhhLjxUW0$2BmF#BzIz8 z_3ez~kOgt_fEUhlv@fuy%Y4@|2_hiv94{A0Cr39rp~!LKEG0O`Qb@~q5g8LEt}y5+-X zwlRv%yx6x&@SUsZIH@%$5_x>YIQFa)D+zhzRS>i;<-d* zZlKoW7Vs^Ug^AsYe5h39?PJe$i7st5H@vf#$aT-mI6T&<@QnJNjd9``wHsg+zG#@@ zN6+_9w|dR9)2=VCQV65m?#vYz_>>O6g;CoTp7{RG3!nr(KI!BTqqjj@QrNCzC@OETk1;OECM# zCpF;Sr(^9^NLcru1nE}#rkh|qNT=*K6DVnzZXq%^9Qz7~<3^d_e+~RXz3}hDJL5i_ zHkFX}ZMiF-O7eI-dw6sO@axLa+heL@aL1?r0IIk>JI3}_TD{t8kTHWhgLlq0*Ox4+ za;@#IbmB93MuMedp!4sq?B8Nt{@0Z8~c2sqAvW ze;4lVF~`1spQUeUHqu_RT&${%2=CPA)AFyLm&Lbo+!J#9M;rsidXAc6Xd$|J7zPIb zcdmMvM`NOvF*)dV*7r6^c*%6-0sG9K{c7!eFX6pE#Tuy7CDj(vBm8W*KgztiDb-^V zFUsWlRfzBBkC6QO@%<~wuSuTe3drgCS8r(?60?i32@tmJ>JD!+T{>s8?kG|eH9 z4mx{!RSiGHbLn={Ex`q(^aG_L-N7x6&g7y-_JB_06?Hs-GlBY6 z@Ak!=y@Z-w)uVl(@wKtS*Ar=b`bNCi*U*?L)V>Jhhx*j)aKCV zVZEBxCb*5vPZmCP`@cdx`d6fQemy5#)oksx+vU>jZhzzIW%;&ysKLkOTn){&@Ok7m z<~U>{a9_K}PSpq6{{V76&kDrDg33Pk*Eto<^>%H9#VAh9^sf_Hcs}1y^1K_VU29Me z`D;8v9DV^;`Bo{4X>9zPV=U^wz<{U$C1T+9Ce2=(Q);dR>!ZB`h?e6wO?TX?|!CF|b4v+)k2sOvgB(q1UZ ziTt)B8}p3g80p1)gz0f(v2QaNQNRN^&mAl2@-K#G%kdr5;Ei) z=LdEp)2H}X*kNMfM$GwaRc>m!9{BpKrU(#(V~)5L%xaMs%Uzgv7XBg9lE&uRX}-Z5 
z0p$)EyVYZNKbp`ml5h{)jDQE`4SAGTL(o-Gjco5DGj37r-_ouukhH<>er=%gJ8{~T ztn}oLOS!&6nEl&*^Ys4!^;T5(aUfQd>tCp8C;4F{1nS#2fP(S@uUP}uXx4OM+c`hS_fw@Q=!{n~2`7=# z*?#vNgI4c!xou(+wbXM)-*gVR?cTaEI~@3G>gS_f-~2jn3BlqEEmT};*9_iukdJD? zj9`7yob<3nk(i{Pd zmK1uUrlu+1b7uDO8T_OoBN*r9#a+DAB(}1O&F$k65yXK;#|>Jl=WS~gzj&ywFXQ#xdSZMhmNzJeQM)7#1{!Dw>zX3*jmjONS076A z@7jA*<(2%aN;Iunn)Ckv+ga#faaN;QE%|>`e9QYoY5xG( zKN|JRe0;4U%D+h$aQzK=j+-(q&nGHJO8OJ_y|Z|JFG+Y80`p9>pZ)|OOykz0RdAh0prA17UuHBNf72MK!xTM}rwZ zd(yYAi$!#kz~x9iMN!tISv4_mz&7AAJ6E%ZdLC9bqU16?swN~JDq4lU^>>~X$E7QM z+Ke5&DZuXeO#&mLn&)Z2r%(Y!6i@+06i@+06i@+06i@+DMgct3bmd~bC;?;uIRcqV z$8pJ|pa!=CBBY3u)}wE`Plu%dOB*pfnzdDwZRt{PGyvxuX127ut&XdlXSHD#Bw~o< zFfei2wrbHT4^z0gwlaYyr%!78AK<@;-(T>u-d#%^w>Agt`yD~g`&^G|`34Kv<+B4F ztFZWa;x_QljxCtU<=bB_#~;F{vHI78nBl#oZ`k*67_VnEkFg}WV|3S2Mp;)N#|&6e z`3jA{v^_4q)T3btTcCk1v-fzl3y1oa?#*9Wd*c|vFXU(28Oc&(2Wc<;no)}Z$n$IIXMV~>Q7 z*=vl{bvbU`W0eGCW1m{&!ccH}m5Z+#D@3Pq25H9eEtkZ$ z9utpH5)bW7QA^s}giY9WR#Wq1YT%B4D)XDmEn?otUJGXtcYemX`}mgT-9^MHA~?p^ zKP=~aHwD6WSk1gZPX_MeRZ01EBC3w&Sk2f|b0TR#<_4^IWOTDG4bm1%e; zwua*?$v+{NXI`sbN|!9{nL)>;WLU)&vVEEeQaDqTi5F@q{_q1F*3i~c={&zBS3cC} zP_&iVrAm>U*D1ej)|XJEiz$fOyi(_2z{B(&wfnJS@JGPDJ^s@1?~HU^U&M1?YW838 zF^5!GB=baIMx|cjBZ0RV0R1cQGfo;{y@pLqNs1{VX9FYT-_9|`ev99J(_ad7--4H4 z4lH5T^v@Dpc#6|ej{gA6{m6#ggTN|AF~$Zf^Nuyj@fE7CV1AF`{&k1L#%mC?K6Cx0 z?Pk)xIQYv$5Iv6FUQjKje>Ol9BXMw8XN+~Poh2_mAlL@p_^#XHCy3X@9zNGRQ62fe zTl?3W?G8S28z<%+`QTs*;9B7un42!WQ@wqrONGQk2%%EP!)LXy)v<1LwlxcU_)M;S zX*b+N{PKsOUX@f`EQ`8E-=3HpQeDi;V7DkXC_Fc6?L7Ygtjn=-SYwqpEJZ!fddoJU z6u#gH_32X`PVFM{{_?OJN&c0Xw-C<%05~TdRl)L_wRgJ`i1slxW zcCY^cTCXkSa3_}-kw>;a!lu)&^*bre)u;;+LK$Q!=BUmym0hIZdK#{NU6G98c8nsW zZWu|tk;gvY;Znyse86zsgLkh+_?u(!;>*Nv4~6>Vn#F|B#Uzb1WhD}{V9GFYfUl4^ z=qt(QmI00n5=i%4R+Or{+Qj0h#Y*Jw^GUW)Lw3jAr!0s4Ty96wsu|b}1D~&2%UNYf zi$Q-D3DUs?I0u{9iryktVVJgR?>3$)! zitP>lB#iLfbgfNt^TIazRo11XTs(7x`F5Oj;Qp25+pd^|VT}58{A(XxzP1*R1)PXv z0}O__B}(!=95yjVI-Wgz>m!zdvKh{4kX}ir+(odEszyNRRi|s12c;%Vue%lM zP=%55RnzvelR6&}>ac1z?Qsey$Wg%bP%8=%CzzpdGv2LRX_q%({f`IB>Kpo3S8rpd zrH7lSut`6JR!m;*Dg_t!rD!b0j9> z07Prru@Y9;|J43UqM9i0X12M6qp0b@{zjp))phG_Cg#IWyOIk#+=(Mm!w_-E>OU&) z{6VOAOUB+Mirc{c8lO#^sodUhU~oO1tCrGq4M*)45-bgvn1|=)8+iIx=ib%|9%t#E z%3R(@LkEZTi3?tfn!Ze zQOVqRU*i0$W;(nm!sROx90(sZvGS(to6+W!EDf3|P6IMhL?XPYP98H{PSA3xpu z%fYU5O7O3ZwVfXCSkkoCy|U7QlJZQdqzl*P$p@h94P3I9#?i(y~Fbdm$Azo*DeQfuP zmVw$IHNrFXE4bkPHP=dQ?r~L7TNcQ=c~#Cf`y0-YtUQ4`kk+bE<8hY@uI}uNw?lN?)Br0ab8($ zB?t&%n(MVaENNR6BwsRkyywnX1IHD?RZi(cW4eV{*|`zwI-)X1b1H~hCBWIvb}oNj zg0-}-66+d$%dVX}T8UHlfiZ^v0C;vHv7>0`j2U4P;0O7Cca5MP_|AG)xpV7hubeS*kTdQrN~uCig(G}$EGj{+IXy7`CqfK(M`(U*83g-;yqq@ zWP;D_5|V(pF5fRB9X}fL>2(XJvjj|}cH=eEYtL(SuOdXvEOEzyh2Ym4aUR*5%2K1= zy-YM!wmht5dCRHZ>0c7OS)u8_*w#~|wvzqmW?jn<{c`pEaa|{cylZoDa}+mmM#T@! 
zfwfOOV0|mcAXQ`g#3S&omqW~Yf(v1vVOe4*MJ)~R`K?thPKT>&x9fMR8PF^sbpHS% zsk2LRsU*2obAZ_5t?Dqyu_?yg7Jfj7*>3h4+|Py=9w_I5U^h5)Q{&{`fa>|?b0oy{qCJTtLiU|ULgL_(}l$L zF~^@p3H}xHbTWvePbn1v_s{EG7z})CMC-=oZx1gcp49E7f_7%-*KbOwE!gd3Anw2w z-CW$-O9&H%+CKIxHdrj)?%gc-Mq$Nv)T_x9dGMmAQJ6*>h%vkYmf9;q+xM2T#U~O0 z-nBKShy}M81WGEs^if(|JaT>VIIf6F>g7ugqG5b9hrsiA;cR{)YFGMIr z-LXru3oqU06?)Xk<;MojlV=(2S#Gpv-eofeKPr|aLpAv*&*Bt@> z01EAN`^!rYv_7!y6SP4=132kjH;Mc|r)Yi@wAC$VH#^iHH*a&>r*4(tR>pEqkovk< z9%yr(Uuy0c#_5o8+PNE&(rWiOVw(p5iuB(MY18Z4%33_j5Oiag!hyFHD363Klb$izZN=Zp0vkzWew>sNBLS1hCEf}a`Fq!X;wWDKarZUSc*F+U!g+0Y` zo)4QvI^DD$B$e&#!NWQaoP8@|5TuuD(7F+123f&a(irLeIKWS5QD2m!J3$zcA z0reueYSfC+8%lH1#+uz5Io94~DYTGtjBtIcc0E?uOjapJ1Gayqao5slcK0v+p3+MI zASiyLKEKo2y>8RtUHWMj+Ptu^7r*M4I|;|2J$m-88kK3SB#&1FtG{|D6LYLj32pPS zL)dhrwvzgE*%4;Xaf<4u_<5;VUdIljrQO2vWG9>mLO}ljII1h8YaSSl=hp37828V=Bse{2yYZhh8 zd3CAyc3>EJamKQt5K9c7i@fHfT=_DyDa?OIkaM$pVSX6?mN@g{`Y4W)*)eSd9mF9+`8w{>T?9+Cz7zm0R2 z+QyL&pX3&8{oA%U{Hu}}TWpS>Zf?aaa^A@zZEd6HladGNT6VgP&xNlADTa~VP(yJ zg}h8TY*)VV7xtXybWs!f0mWs(a>bhIob|4+tFBWyVizIQUg|n%MTQtmdj}h9kZnWC46}oi z+Z>A9i&3;2U2q*pJo|H1H7#7;-b;NEK$3vCz~dD#q~o#Y%A$LdXtz3q*K1(10xnl9 z9AomuExKJ=-5Yy|$sn8)=qo+_#Z>@1)cG@)jr8_7Ce>UhbKHA%D5X1 zCM(YnN2Fa`DJbXnpn!l6?{@aC2x8o2$a+)3MaP*O(6^UMkYE;P^yyib@=CB6TsA+4 z0=o|wY1cYtr6XHNvPPh6W#k{FayMF)^nd{W0Ql{Ce_2trk>gaOEAL}F!;{@XcrR{x z#rHw!T3XJfe|L1(@stF;c#2rYaaAnE(jT>35r*JXH5Sw@CsA|DWB&lvSkjc4X`#1w z_N4?+gGv07dSq( z^ykG-^xuZQ0J&x?O_a;eUzS2K`d7=c${ort@bO<|!;~Pf&Xm5&vGAN@E^6_b>#46K zs?Zki#b(M(i_cnUl;dgXMn!tDqL#p&Bcieom_%}7WfVxwV|#aA|#8qnzUyT@e-fmd$O$rB9Wq+slQ2>$JJ z7E<|dj+NEF&c7!jv6A~J>ZI@QK8M*}2=UqRXN2MNV_7dOz=m9&FgRBK0KRcruP|?n+>dh05a~p-)c<6dp%D)YD^ziSBQW+d98iZykxyIhWpH3_3 z`84Q&j5nPM;|Q#zZc+S0J&k;xI|`}K-9F0!jg;Z3>s~hapWu%j{6xR;&Yj>*6T#M3 zu-n8mT^SNPINA*El|sc3-zI)0CAXNgS$?FPNFd zYRh#LR-!3wf|18370&Rn_~7+ARz80O)`SZcwZIQ>Sd-HgcH~Q@+AP~QM!$5oPr|65 zwrpQ5RQ)Q(t*BkkrvCtD-GC7>j8^Sj=TS)7u<`D%ZKltpPl;f-+t;FFJu4}viQ@g) zUOEcr#7Gp7;{Xf{)b>-@M10ACY3ffwB~+I*c^P9DA-4^ptxs&q!dU{S^c7w> zbO)2p)MS@u_f!7>tyeX#_(LTM`UI2i9i#+*FesnBB-xs@RpZdA_Xw`YtietI7#t7( z09{$v^}TCZw}9$bO(&JW+N0%B>GiBeZR$54$F*xorcYsOzU>(qfGN0RBCit{@WhH0 zCCKg6m&223I;OjCZ{oYAwSq^)lA__3CFkWGH*t#fUl!T?4$*FY$?(?Z_WI6c9&ESh zsW5K)0<#Qyax2WAQMCTZXkOhKYz?jH&-hn9EC%WsP#QR$gMq3MboUjrDpqMOP!&AhaXQ`>viMI zlul_-mF~|yxwN}rE;j66TBi!_f0@m6^JrR>){<%^)h*&U+q;w70DD(Dmgpar6{@?~ z=BYkXR)jeDNcvSdEu@5ra-%(}$hXA4VR7>H6_Mf{O(9ugVX?3_=Zx{%v^lE9M`Eq^ zvpu|Qu@D$nm+BX5JiFptN_w?PB)XKbw3CLw_UT#QVw9knfg8T>ztXyCQ&zdm>Nc!X zc>Lq@9%*943FW^h+MV`kB5cW(9`ub9z~Hd0%xW^lh5K3FM#SygUWu$hVKtRi zmCHjV!!&VQorTC;o&X1m=q1uFbbGe67XoPF0fVFEPJ03>c@Q7})&5VsHLvLwCfHr0 zG||a|>+0cxD81rAD z_m3McTN?JMsOqyJ)7Zzm`{x3q)U-`P(^Y8iM03hV%dqiT2UM0wmv=sxqDZdn7wuN^ zDQP_hSPyE}bXPUwvCS%t(@>Fj=MUxHDx!~i^QlmdI(*kW!s6;%>)CH%wuS|gMt!gY z5wR>;C{{R*EL&Lrs@s^vf z=(>KXCZ(qst-jPsNgTZcLRCrK?l47RkHq^5eC-i{!~1%QD?{h~1H@X#hNNr#GC>rK z8AB0}0}LKMm5pO|sc;U_I~FY4h8HHf>HIw=m0+=I7VchEMor5kVLnlxnZONPz_huR zUoURu^gl4KM=z~Tq&bd!&ka(HtXoTIZ)RrsYYO@!1nCoFgW9?%tR?$=nsJlK*fXA$ z%U`Re#t*zBGL{|qsf{jG%yY^OzEp1RD2pe!TzzW(rK88DTrIp!=1C~rKiRIT**+iH z$EBxw@PEyyg$HbRVYqFqvCV!=3T)V8H znL$!9^7>S?+_dOKQc=|%2AOhg)gDp`6yl(Y*3;~=+-z94oa7F*ZV3MX(2;H7-Lw6i z;8$M@>3&3ycd`>2jggfA5_;fN)#P&6(v>O3Hok|FOCXBrHaIJ>x3xojd1l*NikgoU5WLH9?}yVaTC5 zT}wH#xtUjIJD0s^_=46;E0JLr002O5;rdnGE5X{nrR3Zu*plIM&+zd|l$EbzI?}$2 zp_gRVR`(29UB^rgoK*UawwP_&Xqd)ZvG%7yZ>VV3j}+GkHsoZCsmG;KJGE3FZXuT^ z=Eom}V>mWnGHaB^aEk^yk6-0mJ{P{S(>zIIe{;LaU-jF4D=zBJ8zutXjFI;TrB;&c z$0$g~(A_|;-(;r_Q<{{$l<#xtUyUCSyc6SFNJoV%CA8=+3O3{KFV-~;%0}Ie`UOgHx!>@H7?QBG= 
z%JOFmaRuZ^{&Zn+)B|1DgP`!_rYSr_el1S*3bHTB*ZktPb^ibk{4s~YvG|Wh)oe0I8JaXVfH778G8fAb|c^IxFx;)Le=LWm$+erS>(^|+R z>%p%!2^&iy#;1OH{{ZV(zd-psd*x=lCNYLywYg^}`cfL^?QNzkB2af9THUu+gHc4A z^sbAbp-}oh?U2}+q?dM8dwnbE--!43kbD5sH2Y&Wl%;SL7iJ012d#Xs9-Xd@HDHp? za;%C(m^cH9`j_ISp`(w1T0NGRIF|WJ`RnO}$LWgv$24+yq|y3jQ&K-wYDdps4>b)t zNY?(^HgoDd8PT%uT3nba)pi$YRa5qabCP4&l0s{ z(mL6r4GKuVd6vCD!*+HdX2pv6Z+I%}T#M(`i4rN3#0OdwX zr|}R!!o19V;;egFJX4aB(HijET`%tn;0nKQI=sR({{UCsyFV1&coyGRO+UexHy5^$ z#tXd5w=Q~)*~M~lrM2Aou#879_W{D6z*m&HyB|pT7uq*8yA2~zxJzwG{DmWOk>3@f zbqHn9E)pw!$!x_Dk6PugwgRKijO_cnc*oL>J+E%18f~MnGZ1$c`A_@?xo1XC-!`LC zDmc5`x{;^;UyMe1-j zsdsKXn8P12x8RHrJ8_fRy#gN+e`{P=!{Tj9=GxZDfQTo*Tz*2hjZa(AS5af9MKo}H zp?7fI2V7R2mb2kSkuEh0X|DXfK=YXZKR`e~(!Fe64ik!YvBQSN)TQpJ9UsKc8Q4#z zG+KVGcX4kM@wkxQ*frvEYtzkb{{RySuC7P$t4L23)O=I%9+7#Y#U7VtEmiP-@o&<+ zDYZFvhVxGyIIirsD~m^wnCA7Pvp%c%AMpy`!agl&wd*3}~I9S!nm}4^htBwkQaJ>BmdD}}OG>Zo3D0i~~h}BO6 z2ci0QuR|%N2k?Mk8|4|)s4Lj_S^gp3+uF@8 zn>F3Ea*}pPOoS1@%L2LS>0H13EX{J}KQ~Zz^Pl8qVS~4~b6#b4cxR8xkmMf5n{GgD z%%S>lN2PMCf|M6EwL5Vz2Z4-nio15Ha$Q}PECKg;uJ30KURxYg@l#T}HR3*J-EP0$t7T_}gjV}qri`3@ zIIddW?s+asuGf#=wCye|#LXhgQKJ~(cJ`^AE>fY1Qa7xkytvk3wrAR8K)iOX&3{0j z!tPK_``mDP<98iD8szjzu5}GOo#H(DiU?guW?iKSuCop{{sOSn(`3F_zN2qvf&N+Pt>eB* z2U8I#h-2gx++Nyh4WeIaXks?<#3Q(g2}PKWm^=bNuO_^bDC3Ea2w_!~UUzjJj!fD~ zSwbUkuOXTgZM(rIlgE0*GDQrxnOpFBx3szJgq;2hC39 zi$NXF%)No9MQMvC$E{e>^xMU-TeLtBF!}TE=~_C2%`L-O!V)pJ_(m(6+CbKx9&$%1 z1cEs|F?89)$bDC8RRNrRdbBC?l3FtpWDa& z3Gd?1*biILJU!wM7Fc+iO?KGcIf0|y6JvJFCjbG!;<+)or5IH9M{W-TR#4`89}npI z2Z%Kh4x^w;`}pNhk#E?}TOe)E6}p)=nvCDM zxSQpMwzXL-e$Q|AY2D|?-BDbAEoPDsdEeLGy(hyESy=dr(CdY6e6c1L*x>Lu-Hddu zoJa2By_v^{~A zierVhXL6}wgP!=WU%meTf}MWRzCG~l9trrttlaC`To-WMtZlJ|V>!tV_$Q`5mGU_3 z?->;17+=%=82el%LzUHni>i_N*A|VeTwJc7Z((x<(hc5Ki-J`%5oFEu#t zUR&8t;Dh){^c}0WF3oGt_>`H+jNyC?e}`EgG!-idhkN|JmZ@J~?v+Av__bBJJoAci~5%9ffw_?L0(j`G1i8x~GMmN71jzoCW0X z;xB`mHL48FrmU+U)68o5x^+~LfG~5<*1n4PXFJ_^VQqloKRyr-_naNSkvvz=7qQJ9 z%!@w^7>s=mE8EIq)moA4e71W}nv5!yXrhYu50op^$@gh+Jt{Wf;81gl{i-@zN&qOL ziU29=(9A!tp=|pX)2C)92q2#d0|#rE1wuL+w+6=~Whyaavg#^Kaed zMJCeYgN}rDuh!pyaCmd#-@wK3rl6vIQ%+^Gxl#WBEI`Op{{REQBig?wONJXqW0CA@ z>L1xB<1N?2Pa9nLI{ovb_@E1WgeQfY{#nRB+8n4p_46E6gq=(@ogEqV{3D5{3{)Xk zWAtysf3oZN`u)G*7S!X7?Z9IbkC*X1tH{19{>(l&_-|tR=B;HH*T^3Z?4 za0Px7fZ`hTvAMLSh9FCP| z`yI2=i>S#kr5A2fope51GNv$@-Jeft?Dc(eeQwIo%H}~7VL$^IsWkrp47|5fC!3X#qG1v?YevgR)=VB`cFxfE;=9MwRdqPf zbpGm`cCVpyh-0+y_(<)u$3kn%#AZ^auFroe$fI9&d_wR~5tq%aH9JCpzk3zj_`6Mm z`%Ym13c2W`0&rhe=!L-WCun!zK~CxyL+L$DR>|2C55N2}%Ae?JG|4 zZl48({{WVX1CVo`dvqqg7dFacR}(6T`|PVetB1?}tkhb4(e-D=KMQ`%x~IS^zZ!TN z(kpFd28@Bz9X$nnh2X6pM!(c;EN|u;8-m!b#_Ptq9k;>xyd~x=nvLneQ_kFU{HxD2 zEopSisM<3N`FQFDdbv(tI&qe#9}{NuF?fnq6*U_xXnMEB4+Hpf!tz^<3g_(}Z#Yq! z&$9SwKCz-ft=L@v`@m&xFe#eXieJQ1J?u!42owd_03AK6)VvR^YGcE3!*i!WdjlVx zfmL+ROnqp?SE)`7EzN1s!`7)7rDNw`5&SpRZA{k?-0y?T`9SHK^NBReojOgjF6>v_ z9yyy%(!4QnTEpxS#+#MgWCD22et2qLEYvPz+LNogx&AG=73t)7nzeM6hs|*wSx%0z z)aYK>zVxSOnaz2>iQ~4sz4IfKvqz7o6?;*=*R>r#%Z^eK-dq+?0mrA(xMq8aFA7_H z-1EA=k`-eu5%HLa&f3K2(hIBP+a>}5jw)HaIjGxM!!^vSlwPq;F@$!M`T2VAH$DGhlIzRu`{ybu}c_a;tdkVEX zTU&+`MU(0(XiLO?ek$d?j9Q)A$0KdeJn_=Lb;&KyuuA(Bt^UUhNaafI?MPY$g~F&i zhXnN>OjWzv0K2vVRX7#Pw2jUV0ON0Zni5i*L}FhoQQZBu9e(Bku0HM+K>^OO-1b z*2{4;g=KGK$F+6-2}>O>#`Jw0)7yfX2$4xz|JL&k%T~!{T>~VDXQP^vS>B9MdjD4R0KZlN(&% z>;PkaKItHV>0VXw4^a4ad|i0o2`z6l{StMSbBqDSL-6avmO72? 
zxW#P|ZsytSAA2@KbjP()iNNKbU^vMmBOg!YT2E@vWf75sRlOSi&q%$B`bZM%%3yq_ z`>l%XthGGONXl1i#<#d-l~Tx*628FRpIY6DZFbVn>GsLoO>KqH@CuhiMB<@B}n7joPL#GXhDVrTl>HLexH?gQ=?>gv&3p^Ur!u)#^bw= zwXbEY?zzZu4r21L?dm=2l@dD-EcErQs}@w5Wb#$<>s(lfBzhQJT6;6Ke-wR^GhsC9 zca5xFerET`$3IGkQt>7A-klDUdwBOYur~POWl)R8INAkfYFdO^&A_|3iB-qtIJY$~T zzSY`X=(Z9MHYJ&WAP}lM{S9mAS{H*P*CVmpwD3tz z_dLOk^n-oKXJi-{K%ob06~W`RJv_acFJ7JEZ7;#L`nA(|6=!`mEZq6kE3t@ z0BIkITDY`=Nn)KWAX|8nTX;gEra8(Yl1@9C^Nmx+o-6SM#M*`Tk-A<8Sin*AuRDAB zS3)GPn0g=Aw>O3QKdMzHKVK4x%er^*iXS5FvR#y!ao= zyGY`@@N9GG*Y*~d6FC4#31WyqCuszKD)Tq-4aTJ+wd7ImLDf?ql~#{ih9~)>4oTaV zCj&jYQmqE|HlxU+d+K)nEYxAt^&cxl3u|jELN#8h2>ZvLhoxp}t*H2a!MAQN65zqQ z)=%F~H~hY%{n1~K zo(XN5@+yBEaz~FzCbgazbZiJ2Lik^z{7uw ztRqv$?-c!fr^R~N9tu=wB_?^CO^Bb?yXFD8D|@S%>e+RNFxKaDL51g!qZiDoxqw+ z;-6O(t*I{k4J&Dhz$XNHR@@q_=t51Mqor~K%tzlHY_6p8qQx?d4@%^f4Nj_cbkyx_ z=UG?HiSv%eq@G}-Cb?(|sRjne*0=VDj2Q?&N}pi68grr{BX8SRv4M)BB4GQf@HrW- zCXHhj_88wN$E`!OElmBYq>>D66|8m>jkIJEq#p8Rnk-sw2~i0GDcd)AA~&*7c@j z)JYct;FyQm#wyWRn65@^CagqUgO1gV 1 else output_file + + with open(output, "wb") as f: + f.write(image_data) + + print(f" āœ“ Saved image {i + 1} to: {output} ({len(image_data)} bytes)") + else: + print(f" Image {i + 1} URL: {image.url}") + + print("\n" + "=" * 80) + print("āœ“ Image generation test completed successfully!") + print("=" * 80) + return True + + except Exception as e: + print(f"\nāŒ Error: {e}") + import traceback + + traceback.print_exc() + return False + + +if __name__ == "__main__": + # Parse command line arguments + base_url = sys.argv[1] if len(sys.argv) > 1 else "http://localhost:8000/v1" + + print("\n" + "=" * 80) + print("OpenAI SDK - Image Generation Tests") + print("=" * 80) + print(f"Base URL: {base_url}") + print() + + # Test image generation + test_image_generation(base_url=base_url) diff --git a/examples/visual_gen/serve/sync_video_gen.py b/examples/visual_gen/serve/sync_video_gen.py new file mode 100755 index 0000000000..2f349e47b6 --- /dev/null +++ b/examples/visual_gen/serve/sync_video_gen.py @@ -0,0 +1,224 @@ +#!/usr/bin/env python +"""Test script for synchronous video generation endpoint. + +Tests POST /v1/videos/generations endpoint which waits for completion and returns video data. +The video is generated synchronously and the response contains the video file. + +Supports two modes: + - Text-to-Video (T2V): Generate video from text prompt only + - Text+Image-to-Video (TI2V): Generate video from text prompt + reference image + +Examples: + # Text-to-Video (T2V) + python sync_video_gen.py --mode t2v --prompt "A cool cat on a motorcycle" + + # Text+Image-to-Video (TI2V) + python sync_video_gen.py --mode ti2v --prompt "She turns and smiles" --image ./media/woman.jpg +""" + +import argparse +import sys +from pathlib import Path + +import requests + + +def test_sync_video_generation( + base_url: str = "http://localhost:8000/v1", + model: str = "wan", + prompt: str = "A video of a cute cat playing with a ball in the park", + input_reference: str = None, + duration: float = 4.0, + fps: int = 24, + size: str = "256x256", + output_file: str = "output_sync.mp4", +): + """Test synchronous video generation with direct HTTP requests. 
+
+    Args:
+        base_url: Base URL of the API server
+        model: Model name to use
+        prompt: Text prompt for generation
+        input_reference: Path to reference image (optional, for TI2V mode)
+        duration: Video duration in seconds
+        fps: Frames per second
+        size: Video resolution (WxH format)
+        output_file: Output video file path
+    """
+    mode = "TI2V" if input_reference else "T2V"
+    print("=" * 80)
+    print(f"Testing Sync Video Generation API - {mode} Mode")
+    print("=" * 80)
+
+    print("\n1. Generating video (waiting for completion)...")
+    print(f"   Mode: {mode}")
+    print(f"   Prompt: {prompt}")
+    if input_reference:
+        print(f"   Input Reference: {input_reference}")
+    print(f"   Duration: {duration}s")
+    print(f"   FPS: {fps}")
+    print(f"   Size: {size}")
+
+    try:
+        endpoint = f"{base_url}/videos/generations"
+
+        if input_reference:
+            # TI2V mode - upload the reference image via multipart/form-data
+            if not Path(input_reference).exists():
+                print(f"\nāŒ Error: Input reference image not found: {input_reference}")
+                return False
+
+            # Prepare form data (all values as strings for multipart)
+            form_data = {
+                "model": model,
+                "prompt": prompt,
+                "size": size,
+                "seconds": str(duration),
+                "fps": str(fps),
+            }
+
+            # Attach the file. The request itself is sent as multipart/form-data;
+            # the third tuple element sets the Content-Type of the file *part*,
+            # so use the image's own MIME type rather than "multipart/form-data".
+            mime_type = mimetypes.guess_type(input_reference)[0] or "application/octet-stream"
+
+            print("\n   Uploading reference image and generating video...")
+            with open(input_reference, "rb") as image_file:
+                files = {
+                    "input_reference": (
+                        Path(input_reference).name,
+                        image_file,
+                        mime_type,
+                    )
+                }
+                response_video = requests.post(endpoint, data=form_data, files=files)
+        else:
+            # T2V mode - Use JSON
+            response_video = requests.post(
+                endpoint,
+                json={
+                    "model": model,
+                    "prompt": prompt,
+                    "size": size,
+                    "seconds": duration,
+                    "fps": fps,
+                },
+            )
+
+        print(f"\nStatus code: {response_video.status_code}")
+
+        if response_video.status_code == 200:
+            with open(output_file, "wb") as f:
+                f.write(response_video.content)
+            print(f"āœ“ Video saved to: {output_file}")
+
+            print("\n" + "=" * 80)
+            print("āœ“ Sync video generation test completed successfully!")
+            print("=" * 80)
+            return True
+        else:
+            print(f"\nāŒ Error: Server returned status {response_video.status_code}")
+            print(f"Response: {response_video.text}")
+            return False
+
+    except Exception as e:
+        print(f"\nāŒ Error: {e}")
+        import traceback
+
+        traceback.print_exc()
+        return False
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="Test synchronous video generation API with T2V and TI2V modes",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+    # Text-to-Video (T2V)
+    python sync_video_gen.py --mode t2v --prompt "A cool cat on a motorcycle"
+
+    # Text+Image-to-Video (TI2V)
+    python sync_video_gen.py --mode ti2v \\
+        --prompt "She turns around and smiles, then slowly walks out of the frame" \\
+        --image ./media/woman_skyline_original_720p.jpeg
+
+    # Custom parameters
+    python sync_video_gen.py --mode t2v \\
+        --prompt "A serene sunset over the ocean" \\
+        --duration 5.0 --fps 30 --size 512x512 \\
+        --output my_video.mp4
+        """,
+    )
+
+    # Mode selection
+    parser.add_argument(
+        "--mode",
+        choices=["t2v", "ti2v"],
+        default="t2v",
+        help="Generation mode: t2v (Text-to-Video) or ti2v (Text+Image-to-Video)",
+    )
+
+    # Required parameters
+    parser.add_argument(
+        "--prompt",
+        type=str,
+        default="A video of a cute cat playing with a ball in the park",
+        help="Text prompt for video generation",
+    )
+
+    # TI2V mode parameters
+    parser.add_argument(
+        "--image",
+        "--input-reference",
+        type=str,
+        default=None,
+        help="Path to
reference image (required for ti2v mode)", + ) + + # Optional parameters + parser.add_argument( + "--base-url", + type=str, + default="http://localhost:8000/v1", + help="Base URL of the API server", + ) + parser.add_argument("--model", type=str, default="wan", help="Model name to use") + parser.add_argument( + "--duration", "--seconds", type=float, default=4.0, help="Video duration in seconds" + ) + parser.add_argument("--fps", type=int, default=24, help="Frames per second") + parser.add_argument( + "--size", + type=str, + default="256x256", + help="Video resolution in WxH format (e.g., 1280x720)", + ) + parser.add_argument( + "--output", type=str, default="output_sync.mp4", help="Output video file path" + ) + + args = parser.parse_args() + + # Validate ti2v mode requirements + if args.mode == "ti2v" and not args.image: + parser.error("--image is required when using --mode ti2v") + + # Display configuration + print("\n" + "=" * 80) + print("Synchronous Video Generation Test") + print("=" * 80) + print(f"Base URL: {args.base_url}") + print(f"Mode: {args.mode.upper()}") + print() + + # Test sync video generation + success = test_sync_video_generation( + base_url=args.base_url, + model=args.model, + prompt=args.prompt, + input_reference=args.image, + duration=args.duration, + fps=args.fps, + size=args.size, + output_file=args.output, + ) + + sys.exit(0 if success else 1) diff --git a/examples/visual_gen/visual_gen_examples.sh b/examples/visual_gen/visual_gen_examples.sh new file mode 100755 index 0000000000..b769760203 --- /dev/null +++ b/examples/visual_gen/visual_gen_examples.sh @@ -0,0 +1,238 @@ +#!/bin/bash +# Visual Generation Examples - Test different models and configurations +# +# This script runs a comprehensive suite of visual generation examples including: +# - WAN T2V: Baseline, TeaCache, CFG parallelism, Ulysses parallelism, and combinations +# - WAN I2V: Baseline, TeaCache, CFG parallelism, Ulysses parallelism, and combinations +# +# The script automatically detects GPU count and runs appropriate examples: +# - 1 GPU: Single-GPU examples only +# - 2 GPUs: + CFG parallelism, Ulysses parallelism +# - 4 GPUs: + CFG + Ulysses combined +# - 8 GPUs: + Large-scale high-resolution examples +# +# Usage: +# export MODEL_ROOT=/path/to/models # required +# # Optional: PROJECT_ROOT auto-detected when run from examples/visual_gen +# cd examples/visual_gen && ./visual_gen_examples.sh +# +# Or inline: +# MODEL_ROOT=/llm-models ./visual_gen_examples.sh + +set -e # Exit on error + +# Environment variables with defaults +# PROJECT_ROOT: auto-detect repo root when run from examples/visual_gen +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT=${PROJECT_ROOT:-"$(cd "${SCRIPT_DIR}/../.." 
&& pwd)"} +MODEL_ROOT=${MODEL_ROOT:-"/llm-models"} + +# Log configuration +export TLLM_LOG_LEVEL=${TLLM_LOG_LEVEL:-"INFO"} + +echo "============================================" +echo "Visual Generation Examples" +echo "============================================" +echo "PROJECT_ROOT: $PROJECT_ROOT" +echo "MODEL_ROOT: $MODEL_ROOT" +echo "LOG_LEVEL: $TLLM_LOG_LEVEL" +echo "============================================" +echo "" + + +# Detect GPU count +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "Detected $GPU_COUNT GPU(s)" + if [ "$GPU_COUNT" -lt 2 ]; then + echo "Note: Multi-GPU examples will be skipped" + SKIP_MULTI_GPU=1 + elif [ "$GPU_COUNT" -ge 8 ]; then + echo "Note: Will run all examples including 8-GPU configurations" + elif [ "$GPU_COUNT" -ge 4 ]; then + echo "Note: Will run examples up to 4-GPU configurations" + else + echo "Note: Will run 2-GPU examples only" + fi +else + echo "WARNING: nvidia-smi not found. Assuming single GPU." + GPU_COUNT=1 + SKIP_MULTI_GPU=1 +fi +echo "" + +############################################# +# WAN (Wan2.1) Text-to-Video Examples +############################################# +# Demonstrates: +# - Single GPU: Baseline and TeaCache +# - 2 GPUs: CFG only, Ulysses only +# - 4 GPUs: CFG + Ulysses combined +# - 8 GPUs: Large-scale parallelism +############################################# + +echo "=== WAN Example 1: Baseline (no optimization) ===" +python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_t2v.py \ + --height 480 \ + --width 832 \ + --num_frames 33 \ + --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers/ \ + --prompt "A cute cat playing piano" \ + --output_path wan_cat_piano.png + +echo "" +echo "=== WAN Example 2: With TeaCache ===" +python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_t2v.py \ + --height 480 \ + --width 832 \ + --num_frames 33 \ + --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers \ + --prompt "A cute cat playing piano" \ + --output_path wan_cat_piano_teacache.png \ + --enable_teacache + +if [ -z "$SKIP_MULTI_GPU" ]; then + echo "" + echo "=== WAN Example 3: CFG Only (2 GPUs) ===" + python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_t2v.py \ + --height 480 \ + --width 832 \ + --num_frames 33 \ + --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers/ \ + --prompt "A cute cat playing piano" \ + --output_path wan_cfg_2gpu.mp4 \ + --attention_backend TRTLLM \ + --cfg_size 2 \ + --ulysses_size 1 +else + echo "" + echo "=== WAN Example 3: Skipped (requires 2 GPUs) ===" +fi + +if [ -z "$SKIP_MULTI_GPU" ]; then + echo "" + echo "=== WAN Example 4: Ulysses Only (2 GPUs) ===" + python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_t2v.py \ + --height 480 \ + --width 832 \ + --num_frames 33 \ + --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers/ \ + --prompt "A cute cat playing piano" \ + --output_path wan_ulysses_2gpu.mp4 \ + --attention_backend TRTLLM \ + --cfg_size 1 \ + --ulysses_size 2 +else + echo "" + echo "=== WAN Example 4: Skipped (requires 2 GPUs) ===" +fi + +if [ "$GPU_COUNT" -ge 4 ]; then + echo "" + echo "=== WAN Example 5: CFG + Ulysses (4 GPUs) ===" + python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_t2v.py \ + --height 480 \ + --width 832 \ + --num_frames 33 \ + --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers/ \ + --prompt "A cute cat playing piano" \ + --output_path wan_cfg_ulysses_4gpu.mp4 \ + --attention_backend TRTLLM \ + --cfg_size 2 \ + --ulysses_size 2 +else + echo "" + echo "=== WAN Example 5: 
Skipped (requires 4 GPUs) ==="
+fi
+
+if [ "$GPU_COUNT" -ge 8 ]; then
+    echo ""
+    echo "=== WAN Example 6: Large-Scale (8 GPUs) ==="
+    python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_t2v.py \
+        --height 480 \
+        --width 832 \
+        --num_frames 33 \
+        --model_path ${MODEL_ROOT}/Wan2.1-T2V-1.3B-Diffusers/ \
+        --prompt "A cute cat playing piano" \
+        --output_path wan_cfg_ulysses_8gpu.mp4 \
+        --attention_backend TRTLLM \
+        --cfg_size 2 \
+        --ulysses_size 4
+else
+    echo ""
+    echo "=== WAN Example 6: Skipped (requires 8 GPUs) ==="
+fi
+
+#############################################
+# WAN 2.2 (Two-Stage) Text-to-Video Examples
+#############################################
+
+echo ""
+echo "=== WAN 2.2 T2V Example: Two-stage with optimizations (FP8 + TRT-LLM + TeaCache) ==="
+python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_t2v.py \
+    --height 720 \
+    --width 1280 \
+    --num_frames 81 \
+    --model_path ${MODEL_ROOT}/Wan2.2-T2V-A14B-Diffusers \
+    --prompt "A cute cat playing piano" \
+    --output_path wan22_t2v_cat_piano_optimized.gif \
+    --linear_type trtllm-fp8-blockwise \
+    --attention_backend TRTLLM \
+    --enable_teacache \
+    --teacache_thresh 0.2 \
+    --guidance_scale 3.0 \
+    --guidance_scale_2 2.5 \
+    --boundary_ratio 0.85
+
+#############################################
+# WAN 2.1 Image-to-Video Examples
+#############################################
+
+echo ""
+echo "=== WAN 2.1 I2V Example: Single-stage with optimizations (FP8 + TRT-LLM + TeaCache) ==="
+python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_i2v.py \
+    --height 480 \
+    --width 832 \
+    --num_frames 33 \
+    --model_path ${MODEL_ROOT}/Wan2.1-I2V-14B-480P-Diffusers \
+    --image_path ${PROJECT_ROOT}/examples/visual_gen/cat_piano.png \
+    --prompt "It snows as the cat plays piano, lots of snow \
+appearing all over the screen, snowflakes, blizzard, \
+gradually more snow" \
+    --negative_prompt "blurry, low quality" \
+    --output_path wan21_i2v_cat_piano_optimized.gif \
+    --linear_type trtllm-fp8-per-tensor \
+    --attention_backend TRTLLM \
+    --enable_teacache \
+    --teacache_thresh 0.2 \
+    --guidance_scale 6.0
+
+#############################################
+# WAN 2.2 (Two-Stage) Image-to-Video Examples
+#############################################
+
+echo ""
+echo "=== WAN 2.2 I2V Example: Two-stage with optimizations (FP8 + TRT-LLM + TeaCache) ==="
+python ${PROJECT_ROOT}/examples/visual_gen/visual_gen_wan_i2v.py \
+    --height 480 \
+    --width 832 \
+    --num_frames 81 \
+    --model_path ${MODEL_ROOT}/Wan2.2-I2V-A14B-Diffusers \
+    --image_path ${PROJECT_ROOT}/examples/visual_gen/cat_piano.png \
+    --prompt "It snows as the cat plays piano, lots of snow \
+appearing all over the screen, snowflakes, blizzard, \
+gradually more snow" \
+    --negative_prompt "blurry, low quality" \
+    --output_path wan22_i2v_cat_piano_optimized.gif \
+    --linear_type trtllm-fp8-blockwise \
+    --attention_backend TRTLLM \
+    --enable_teacache \
+    --teacache_thresh 0.2 \
+    --guidance_scale 6.0 \
+    --guidance_scale_2 5.0 \
+    --boundary_ratio 0.85
+
+echo ""
+echo "============================================"
+echo "All examples completed successfully!"
+echo "============================================"
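The example ladder above is driven entirely by the detected GPU count. For readers scripting their own launches, a minimal Python sketch of the same selection rule follows; pick_parallel_sizes and its heuristic are illustrative only (not an API in this PR), and the one hard constraint taken from these examples is world_size = cfg_size * ulysses_size.

import torch


def pick_parallel_sizes(gpu_count: int):
    """Illustrative heuristic mirroring visual_gen_examples.sh: spend up to
    two GPUs on CFG parallelism (one group per prompt sign), then put the
    remaining factor into Ulysses sequence parallelism."""
    cfg_size = 2 if gpu_count >= 2 else 1
    ulysses_size = max(1, gpu_count // cfg_size)
    assert cfg_size * ulysses_size <= gpu_count
    return cfg_size, ulysses_size


if __name__ == "__main__":
    n = torch.cuda.device_count()
    cfg, uly = pick_parallel_sizes(n)
    print(f"{n} GPU(s) -> --cfg_size {cfg} --ulysses_size {uly} (world_size={cfg * uly})")

Note that a real launcher must also keep ulysses_size a divisor of the model's attention head count (12 for the WAN examples here), which this sketch does not enforce.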
+echo "============================================" diff --git a/examples/visual_gen/visual_gen_wan_i2v.py b/examples/visual_gen/visual_gen_wan_i2v.py new file mode 100644 index 0000000000..3b76470bb6 --- /dev/null +++ b/examples/visual_gen/visual_gen_wan_i2v.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python3 +"""WAN Image-to-Video generation using TensorRT-LLM Visual Generation.""" + +import argparse +import time + +from output_handler import OutputHandler + +from tensorrt_llm import logger +from tensorrt_llm.llmapi.visual_gen import VisualGen, VisualGenParams + +# Set logger level to ensure timing logs are printed +logger.set_level("info") + + +def parse_args(): + parser = argparse.ArgumentParser( + description="TRTLLM VisualGen - Wan Image-to-Video Inference Example (supports Wan 2.1 and Wan 2.2)" + ) + + # Model & Input + parser.add_argument( + "--model_path", + type=str, + required=True, + help="Path to Wan I2V Diffusers model directory (2.1 or 2.2)", + ) + parser.add_argument( + "--image_path", + type=str, + required=True, + help="Path to input image for I2V conditioning", + ) + parser.add_argument( + "--last_image_path", + type=str, + default=None, + help="Optional path to last frame image for interpolation (Wan 2.1 only)", + ) + parser.add_argument("--prompt", type=str, required=True, help="Text prompt for generation") + parser.add_argument( + "--negative_prompt", + type=str, + default=None, + help="Negative prompt. Default is model-specific.", + ) + parser.add_argument( + "--output_path", + type=str, + default="output.png", + help="Path to save the output image/video frame", + ) + + # Generation Params + parser.add_argument("--height", type=int, default=720, help="Video height") + parser.add_argument("--width", type=int, default=1280, help="Video width") + parser.add_argument("--num_frames", type=int, default=81, help="Number of frames to generate") + parser.add_argument( + "--steps", + type=int, + default=None, + help="Number of denoising steps (default: auto-detect, 50 for Wan2.1, 40 for Wan2.2)", + ) + parser.add_argument( + "--guidance_scale", + type=float, + default=None, + help="Guidance scale (default: auto-detect, 5.0 for Wan2.1, 4.0 for Wan2.2)", + ) + parser.add_argument( + "--guidance_scale_2", + type=float, + default=None, + help="Second-stage guidance scale for Wan2.2 two-stage denoising (default: 3.0)", + ) + parser.add_argument( + "--boundary_ratio", + type=float, + default=None, + help="Custom boundary ratio for two-stage denoising (default: auto-detect)", + ) + parser.add_argument("--seed", type=int, default=42, help="Random seed") + + # TeaCache Arguments + parser.add_argument( + "--enable_teacache", action="store_true", help="Enable TeaCache acceleration" + ) + parser.add_argument( + "--teacache_thresh", + type=float, + default=0.2, + help="TeaCache similarity threshold (rel_l1_thresh)", + ) + + # Quantization + parser.add_argument( + "--linear_type", + type=str, + default="default", + choices=["default", "trtllm-fp8-per-tensor", "trtllm-fp8-blockwise", "svd-nvfp4"], + help="Linear layer quantization type", + ) + + # Attention Backend + parser.add_argument( + "--attention_backend", + type=str, + default="VANILLA", + choices=["VANILLA", "TRTLLM"], + help="Attention backend (VANILLA: PyTorch SDPA, TRTLLM: optimized kernels). " + "Note: TRTLLM automatically falls back to VANILLA for cross-attention.", + ) + + # Parallelism + parser.add_argument( + "--cfg_size", + type=int, + default=1, + choices=[1, 2], + help="CFG parallel size (1 or 2). 
+    parser.add_argument(
+        "--ulysses_size",
+        type=int,
+        default=1,
+        help="Ulysses (sequence) parallel size within each CFG group.",
+    )
+
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+
+    # world_size = cfg_size * ulysses_size
+    # Example: cfg_size=2, ulysses_size=4 -> 8 GPUs
+    #   GPU 0-3: CFG group 0 (positive prompt), internal Ulysses parallel
+    #   GPU 4-7: CFG group 1 (negative prompt), internal Ulysses parallel
+    n_workers = args.cfg_size * args.ulysses_size
+
+    # Convert linear_type to quant_config
+    quant_config = None
+    if args.linear_type == "trtllm-fp8-per-tensor":
+        quant_config = {"quant_algo": "FP8", "dynamic": True}
+    elif args.linear_type == "trtllm-fp8-blockwise":
+        quant_config = {"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}
+    elif args.linear_type == "svd-nvfp4":
+        quant_config = {"quant_algo": "NVFP4", "dynamic": True}
+
+    # 1. Setup Configuration
+    diffusion_config = {
+        "model_type": "wan2",
+        "attention": {
+            "backend": args.attention_backend,
+        },
+        "teacache": {
+            "enable_teacache": args.enable_teacache,
+            "teacache_thresh": args.teacache_thresh,
+        },
+        "parallel": {
+            "dit_cfg_size": args.cfg_size,
+            "dit_ulysses_size": args.ulysses_size,
+        },
+    }
+
+    # Add quant_config if specified
+    if quant_config is not None:
+        diffusion_config["quant_config"] = quant_config
+
+    # 2. Initialize VisualGen
+    logger.info(
+        f"Initializing VisualGen: world_size={n_workers} "
+        f"(cfg_size={args.cfg_size}, ulysses_size={args.ulysses_size})"
+    )
+    visual_gen = VisualGen(
+        model_path=args.model_path,
+        n_workers=n_workers,
+        diffusion_config=diffusion_config,
+    )
+
+    try:
+        # 3. Run Inference
+        logger.info(f"Generating video for prompt: '{args.prompt}'")
+        logger.info(f"Negative prompt: '{args.negative_prompt}'")
+        logger.info(f"Input image: {args.image_path}")
+        if args.last_image_path:
+            logger.info(f"Last frame image: {args.last_image_path}")
+        logger.info(
+            f"Resolution: {args.height}x{args.width}, Frames: {args.num_frames}, Steps: {args.steps}"
+        )
+
+        start_time = time.time()
+
+        # Build parameters with explicit I2V and Wan 2.2 support
+        output = visual_gen.generate(
+            inputs={
+                "prompt": args.prompt,
+                "negative_prompt": args.negative_prompt,
+            },
+            params=VisualGenParams(
+                height=args.height,
+                width=args.width,
+                num_inference_steps=args.steps,
+                guidance_scale=args.guidance_scale,
+                seed=args.seed,
+                num_frames=args.num_frames,
+                input_reference=args.image_path,
+                last_image=args.last_image_path if args.last_image_path else None,
+                guidance_scale_2=args.guidance_scale_2,
+                boundary_ratio=args.boundary_ratio,
+            ),
+        )
+
+        end_time = time.time()
+        logger.info(f"Generation completed in {end_time - start_time:.2f}s")
+
+        # 4. Save Output
+        OutputHandler.save(output, args.output_path, frame_rate=16.0)
+
+    finally:
+        # 5.
Shutdown + visual_gen.shutdown() + + +if __name__ == "__main__": + main() diff --git a/examples/visual_gen/visual_gen_wan_t2v.py b/examples/visual_gen/visual_gen_wan_t2v.py new file mode 100755 index 0000000000..30c55e4a17 --- /dev/null +++ b/examples/visual_gen/visual_gen_wan_t2v.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 +"""WAN Text-to-Video generation using TensorRT-LLM Visual Generation.""" + +import argparse +import time + +from output_handler import OutputHandler + +from tensorrt_llm import logger +from tensorrt_llm.llmapi.visual_gen import VisualGen, VisualGenParams + +# Set logger level to ensure timing logs are printed +logger.set_level("info") + + +def parse_args(): + parser = argparse.ArgumentParser( + description="TRTLLM VisualGen - Wan Text-to-Video Inference Example (supports Wan 2.1 and Wan 2.2)" + ) + + # Model & Input + parser.add_argument( + "--model_path", + type=str, + required=True, + help="Local path or HuggingFace Hub model ID (e.g., Wan-AI/Wan2.1-T2V-1.3B-Diffusers)", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + help="HuggingFace Hub revision (branch, tag, or commit SHA)", + ) + parser.add_argument("--prompt", type=str, required=True, help="Text prompt for generation") + parser.add_argument( + "--negative_prompt", + type=str, + default=None, + help="Negative prompt. Default is model-specific.", + ) + parser.add_argument( + "--output_path", + type=str, + default="output.png", + help="Path to save the output image/video frame", + ) + + # Generation Params + parser.add_argument("--height", type=int, default=720, help="Video height") + parser.add_argument("--width", type=int, default=1280, help="Video width") + parser.add_argument("--num_frames", type=int, default=81, help="Number of frames to generate") + parser.add_argument( + "--steps", + type=int, + default=None, + help="Number of denoising steps (default: auto-detect, 50 for Wan2.1, 40 for Wan2.2)", + ) + parser.add_argument( + "--guidance_scale", + type=float, + default=None, + help="Guidance scale (default: auto-detect, 5.0 for Wan2.1, 4.0 for Wan2.2)", + ) + parser.add_argument( + "--guidance_scale_2", + type=float, + default=None, + help="Second-stage guidance scale for Wan2.2 two-stage denoising (default: 3.0)", + ) + parser.add_argument( + "--boundary_ratio", + type=float, + default=None, + help="Custom boundary ratio for two-stage denoising (default: auto-detect)", + ) + parser.add_argument("--seed", type=int, default=42, help="Random seed") + + # TeaCache Arguments + parser.add_argument( + "--enable_teacache", action="store_true", help="Enable TeaCache acceleration" + ) + parser.add_argument( + "--teacache_thresh", + type=float, + default=0.2, + help="TeaCache similarity threshold (rel_l1_thresh)", + ) + + # Quantization + parser.add_argument( + "--linear_type", + type=str, + default="default", + choices=["default", "trtllm-fp8-per-tensor", "trtllm-fp8-blockwise", "svd-nvfp4"], + help="Linear layer quantization type", + ) + + # Attention Backend + parser.add_argument( + "--attention_backend", + type=str, + default="VANILLA", + choices=["VANILLA", "TRTLLM"], + help="Attention backend (VANILLA: PyTorch SDPA, TRTLLM: optimized kernels). " + "Note: TRTLLM automatically falls back to VANILLA for cross-attention.", + ) + + # Parallelism + parser.add_argument( + "--cfg_size", + type=int, + default=1, + choices=[1, 2], + help="CFG parallel size (1 or 2). " + "Distributes positive/negative prompts across GPUs. 
" + "Example: cfg_size=2 on 4 GPUs -> 2 GPUs per prompt.", + ) + parser.add_argument( + "--ulysses_size", + type=int, + default=1, + help="Ulysses sequence parallel size within each CFG group. " + "Distributes sequence across GPUs for longer sequences. " + "Requirements: num_heads (12) and sequence length must both be divisible by ulysses_size. " + "Example: ulysses_size=2 on 4 GPUs with cfg_size=2 -> " + "2 CFG groups Ɨ 2 Ulysses ranks = 4 GPUs total.", + ) + + return parser.parse_args() + + +def main(): + args = parse_args() + + # Total workers: cfg_size Ɨ ulysses_size + # See ParallelConfig in config.py for detailed parallelism strategy and examples + n_workers = args.cfg_size * args.ulysses_size + + # Log Ulysses configuration (validation happens in setup_sequence_parallelism) + if args.ulysses_size > 1: + num_heads = 12 # WAN has 12 attention heads + logger.info( + f"Using Ulysses sequence parallelism: " + f"{num_heads} heads / {args.ulysses_size} ranks = " + f"{num_heads // args.ulysses_size} heads per GPU" + ) + + # Convert linear_type to quant_config + quant_config = None + if args.linear_type == "trtllm-fp8-per-tensor": + quant_config = {"quant_algo": "FP8", "dynamic": True} + elif args.linear_type == "trtllm-fp8-blockwise": + quant_config = {"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True} + elif args.linear_type == "svd-nvfp4": + quant_config = {"quant_algo": "NVFP4", "dynamic": True} + + # 1. Setup Configuration + diffusion_config = { + "model_type": "wan2", + "revision": args.revision, + "attention": { + "backend": args.attention_backend, + }, + "teacache": { + "enable_teacache": args.enable_teacache, + "teacache_thresh": args.teacache_thresh, + }, + "parallel": { + "dit_cfg_size": args.cfg_size, + "dit_ulysses_size": args.ulysses_size, + }, + } + + # Add quant_config if specified + if quant_config is not None: + diffusion_config["quant_config"] = quant_config + + # 2. Initialize VisualGen + logger.info( + f"Initializing VisualGen: world_size={n_workers} " + f"(cfg_size={args.cfg_size}, ulysses_size={args.ulysses_size})" + ) + visual_gen = VisualGen( + model_path=args.model_path, + n_workers=n_workers, + diffusion_config=diffusion_config, + ) + + try: + # 2. Run Inference + logger.info(f"Generating video for prompt: '{args.prompt}'") + logger.info(f"Negative prompt: '{args.negative_prompt}'") + logger.info( + f"Resolution: {args.height}x{args.width}, Frames: {args.num_frames}, Steps: {args.steps}" + ) + + start_time = time.time() + + output = visual_gen.generate( + inputs={ + "prompt": args.prompt, + "negative_prompt": args.negative_prompt, + }, + params=VisualGenParams( + height=args.height, + width=args.width, + num_inference_steps=args.steps, + guidance_scale=args.guidance_scale, + seed=args.seed, + num_frames=args.num_frames, + guidance_scale_2=args.guidance_scale_2, + boundary_ratio=args.boundary_ratio, + ), + ) + + end_time = time.time() + logger.info(f"Generation completed in {end_time - start_time:.2f}s") + + # 3. Save Output + OutputHandler.save(output, args.output_path, frame_rate=16.0) + + finally: + # 4. 
diff --git a/requirements.txt b/requirements.txt
index 0bada4a2d4..d2aa81843d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -83,3 +83,4 @@ llist
 cuda-tile>=1.0.1
 nvidia-cuda-tileiras>=13.1
 etcd-sdk-python==0.0.7
+python-multipart
diff --git a/tensorrt_llm/_torch/distributed/__init__.py b/tensorrt_llm/_torch/distributed/__init__.py
index b8bfe4ffdf..2dafa88bf1 100644
--- a/tensorrt_llm/_torch/distributed/__init__.py
+++ b/tensorrt_llm/_torch/distributed/__init__.py
@@ -4,10 +4,11 @@ from .communicator import Distributed, MPIDist, TorchDist
 from .moe_alltoall import MoeAlltoAll
 from .ops import (AllReduce, AllReduceParams, AllReduceStrategy,
                   HelixAllToAllNative, MoEAllReduce, MoEAllReduceParams,
-                  allgather, alltoall_helix, cp_allgather, reducescatter,
-                  userbuffers_allreduce_finalize)
+                  all_to_all_4d, allgather, alltoall_helix, cp_allgather,
+                  reducescatter, userbuffers_allreduce_finalize)

 __all__ = [
+    "all_to_all_4d",
     "allgather",
     "alltoall_helix",
     "cp_allgather",
diff --git a/tensorrt_llm/_torch/distributed/ops.py b/tensorrt_llm/_torch/distributed/ops.py
index 84468dc612..525a825a3f 100644
--- a/tensorrt_llm/_torch/distributed/ops.py
+++ b/tensorrt_llm/_torch/distributed/ops.py
@@ -959,3 +959,126 @@ class MoEAllReduce(nn.Module):
             nranks=self.mapping.tp_size,
             eps=all_reduce_params.eps,
         )
+
+
+def all_to_all_4d(
+    input: torch.Tensor,
+    scatter_dim: int,
+    gather_dim: int,
+    process_group: Optional[torch.distributed.ProcessGroup] = None,
+) -> torch.Tensor:
+    """
+    All-to-all for 4D tensors (batch, seq, heads, head_dim).
+
+    Redistributes a 4D tensor along two dimensions using all-to-all communication.
+    This is used for Ulysses-style sequence parallelism to transform between:
+    - Sequence sharding [B, S/P, H, D] → Head sharding [B, S, H/P, D]
+    - Head sharding [B, S, H/P, D] → Sequence sharding [B, S/P, H, D]
+
+    Args:
+        input: Input tensor with shape [batch, seq, heads, head_dim]
+        scatter_dim: Dimension to split and scatter (1 for seq, 2 for heads)
+        gather_dim: Dimension to gather (1 for seq, 2 for heads)
+        process_group: PyTorch distributed process group. If None, uses the default process group.
+
+    Returns:
+        Redistributed tensor with the same rank and element count as the input;
+        the scatter dimension shrinks by a factor of P while the gather
+        dimension grows by a factor of P.
+
+    Example:
+        # Transform from sequence sharding to head sharding
+        # Input: [B, S/P, H, D] (each rank has S/P of the sequence)
+        output = all_to_all_4d(input, scatter_dim=2, gather_dim=1, process_group=pg)
+        # Output: [B, S, H/P, D] (each rank has H/P heads)
+
+        # Transform back from head sharding to sequence sharding
+        output = all_to_all_4d(input, scatter_dim=1, gather_dim=2, process_group=pg)
+    """
+    # Only support PyTorch distributed mode (not MPI mode)
+    if not mpi_disabled():
+        raise NotImplementedError(
+            "all_to_all_4d currently only supports PyTorch distributed mode.
" + "MPI mode is not supported.") + + # Get world size from process group + world_size = torch.distributed.get_world_size(group=process_group) + + # If world_size is 1, no communication needed + if world_size == 1: + return input + + # Validate dimensions + assert scatter_dim in [1, 2], "scatter_dim must be 1 (seq) or 2 (heads)" + assert gather_dim in [1, 2], "gather_dim must be 1 (seq) or 2 (heads)" + assert scatter_dim != gather_dim, "scatter_dim and gather_dim must be different" + + batch, seq, heads, head_dim = input.shape + + # Validate that the scatter dimension is divisible by world_size + scatter_size = input.shape[scatter_dim] + assert scatter_size % world_size == 0, \ + f"Dimension {scatter_dim} size {scatter_size} must be divisible by world_size {world_size}" + + # For all-to-all, we need to: + # 1. Split input along scatter_dim into world_size chunks + # 2. Send chunk i to rank i + # 3. Receive chunk from each rank and concatenate along gather_dim + + # Reshape for all-to-all: move scatter_dim chunks to a new dimension + if scatter_dim == 1: # Scatter along seq dimension + # [B, S, H, D] -> [B, P, S/P, H, D] where P = world_size + input_reshaped = input.view(batch, world_size, seq // world_size, heads, + head_dim) + # Transpose to group by destination rank: [B, P, S/P, H, D] -> [P, B, S/P, H, D] + input_transposed = input_reshaped.permute(1, 0, 2, 3, 4).contiguous() + else: # scatter_dim == 2, scatter along heads dimension + # [B, S, H, D] -> [B, S, P, H/P, D] where P = world_size + input_reshaped = input.view(batch, seq, world_size, heads // world_size, + head_dim) + # Transpose to group by destination rank: [B, S, P, H/P, D] -> [P, B, S, H/P, D] + input_transposed = input_reshaped.permute(2, 0, 1, 3, 4).contiguous() + + # Flatten to [P * ...] for all-to-all communication + # Shape: [P, B, ...] -> [P * B * ...] + input_flat = input_transposed.flatten() + output_flat = torch.empty_like(input_flat) + + # Perform all-to-all communication using PyTorch distributed + # all_to_all_single splits input into world_size chunks and exchanges them + torch.distributed.all_to_all_single(output_flat, + input_flat, + group=process_group) + + # Reshape output back to [P, B, ...] 
form
+    output_transposed = output_flat.view_as(input_transposed)
+
+    # Transpose back and reshape to final form
+    if gather_dim == 1:  # Gather along seq dimension
+        # [P, B, S/P, H, D] -> [B, P, S/P, H, D]
+        output_reshaped = output_transposed.permute(1, 0, 2, 3, 4).contiguous()
+        # [B, P, S/P, H, D] -> [B, S, H, D] where S = P * (S/P)
+        # When scattering heads and gathering seq: seq is multiplied by P, heads is divided by P
+        if scatter_dim == 2:
+            # Scattered heads, so we keep H/P heads and gather S/P -> S sequence
+            gathered_seq = seq * world_size
+            sharded_heads = heads // world_size
+            output = output_reshaped.view(batch, gathered_seq, sharded_heads,
+                                          head_dim)
+        else:
+            # Scattered seq (unreachable: scatter_dim must differ from gather_dim), keep as is
+            output = output_reshaped.view(batch, seq, heads, head_dim)
+    else:  # gather_dim == 2, gather along heads dimension
+        # [P, B, S, H/P, D] -> [B, S, P, H/P, D]
+        output_reshaped = output_transposed.permute(1, 2, 0, 3, 4).contiguous()
+        # [B, S, P, H/P, D] -> [B, S, H, D] where H = P * (H/P)
+        # When scattering seq and gathering heads: heads is multiplied by P, seq is divided by P
+        if scatter_dim == 1:
+            # Scattered seq, so we keep S/P sequence and gather H/P -> H heads
+            gathered_heads = heads * world_size
+            sharded_seq = seq // world_size
+            output = output_reshaped.view(batch, sharded_seq, gathered_heads,
+                                          head_dim)
+        else:
+            # Scattered heads (unreachable: scatter_dim must differ from gather_dim), keep as is
+            output = output_reshaped.view(batch, seq, heads, head_dim)
+
+    return output
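Since the function above is pure shape algebra around a chunk exchange, it can be sanity-checked on one process by emulating what all_to_all_single does across a P-rank group. In the sketch below, simulate_group is an illustrative stand-in for the collective, not part of this PR; it mirrors the scatter/gather semantics (rank r splits along scatter_dim, sends chunk j to rank j, concatenates what it receives along gather_dim):

import torch


def simulate_group(inputs, scatter_dim, gather_dim):
    """Emulate all_to_all_4d over a list of per-rank tensors on one process."""
    P = len(inputs)
    # chunks[src][dst] is the piece rank src would send to rank dst.
    chunks = [x.chunk(P, dim=scatter_dim) for x in inputs]
    # Each destination rank concatenates the received pieces in source-rank order.
    return [torch.cat([chunks[src][dst] for src in range(P)], dim=gather_dim)
            for dst in range(P)]


B, S, H, D, P = 1, 8, 4, 16, 2
full = torch.randn(B, S, H, D)
seq_shards = list(full.chunk(P, dim=1))            # per rank: [B, S/P, H, D]
head_shards = simulate_group(seq_shards, scatter_dim=2, gather_dim=1)
assert head_shards[0].shape == (B, S, H // P, D)   # full sequence, H/P heads
back = simulate_group(head_shards, scatter_dim=1, gather_dim=2)
assert torch.equal(torch.cat(back, dim=1), full)   # round trip restores the input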
diff --git a/tensorrt_llm/_torch/modules/linear.py b/tensorrt_llm/_torch/modules/linear.py
index 65811569ca..34581ff224 100644
--- a/tensorrt_llm/_torch/modules/linear.py
+++ b/tensorrt_llm/_torch/modules/linear.py
@@ -556,6 +556,13 @@ class FP8QDQLinearMethod(UnquantizedLinearMethod):

     def apply(self, module: Linear, input: torch.Tensor,
               bias: Optional[torch.Tensor]):
+
+        # Handle multi-dimensional inputs (e.g., 3D: batch, seq, hidden)
+        # GEMM ops require 2D matrices
+        original_shape = input.shape
+        if input.dim() > 2:
+            input = input.reshape(-1, input.shape[-1])
+
         cur_input_scale = module.input_scale
         if input.dtype != torch.float8_e4m3fn:
             if module.input_scale is not None and not module.force_dynamic_quantization:
@@ -591,6 +598,11 @@ class FP8QDQLinearMethod(UnquantizedLinearMethod):
             bias=None,
             out_dtype=module.dtype or input.dtype,
         )
+
+        # Reshape output back to original shape (with out_features as last dim)
+        if len(original_shape) > 2:
+            output = output.reshape(*original_shape[:-1], output.shape[-1])
+
         if bias is not None:
             output = output + bias
         return output
@@ -975,6 +987,12 @@ class FP8BlockScalesLinearMethod(UnquantizedLinearMethod):

     def apply(self, module: Linear, input: torch.Tensor,
               bias: Optional[torch.Tensor]):
+        # Handle multi-dimensional inputs (e.g., 3D: batch, seq, hidden)
+        # GEMM ops require 2D matrices
+        original_shape = input.shape
+        if input.dim() > 2:
+            input = input.reshape(-1, input.shape[-1])
+
         if input.dtype == torch.float8_e4m3fn:
             input = input.to(torch.bfloat16) * module.input_scale
         assert input.dtype == torch.bfloat16
@@ -1003,6 +1021,10 @@ class FP8BlockScalesLinearMethod(UnquantizedLinearMethod):
         output = torch.ops.trtllm.fp8_block_scaling_gemm(
             act_input_fp8, module.weight, act_input_sf, module.weight_scale)

+        # Reshape output back to original shape (with out_features as last dim)
+        if len(original_shape) > 2:
+            output = output.reshape(*original_shape[:-1], output.shape[-1])
+
         if bias is not None:
             output = output + bias
         return output
@@ -1212,6 +1234,15 @@ class NVFP4LinearMethod(LinearMethodBase):

     def apply(self, module: Linear, input: torch.Tensor,
               bias: Optional[torch.Tensor]):
+        # Handle multi-dimensional inputs (e.g., 3D: batch, seq, hidden).
+        # GEMM requires 2D. Only plain tensors are supported for now; skip for
+        # tuple and Fp4QuantizedTensor.
+        original_shape = None
+        if not isinstance(input,
+                          (tuple, Fp4QuantizedTensor)) and input.dim() > 2:
+            original_shape = input.shape
+            input = input.reshape(-1, input.shape[-1])
+
         act_fp4, act_sf = self._input_prepare(module, input)
         # Use unified interface - supports CUTLASS, cuBLASLt, CuteDSL
         # Convert list to comma-separated string for torch.compile compatibility
@@ -1229,6 +1260,9 @@ class NVFP4LinearMethod(LinearMethodBase):
         if output.shape[-1] > module.out_features:
             output = output[..., :module.out_features].contiguous()

+        if original_shape is not None:
+            output = output.reshape(*original_shape[:-1], output.shape[-1])
+
         if bias is not None:
             output = output + bias
         return output
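All three hunks above apply the same flatten-and-restore pattern so that 2D-only GEMM kernels can consume the [batch, seq, hidden] activations produced by the diffusion pipeline. A minimal sketch of the pattern follows, with torch.nn.functional.linear standing in for the quantized GEMM (the real callees are the trtllm ops in the hunks above):

import torch
import torch.nn.functional as F


def apply_2d_gemm(x, weight):
    """Flatten leading dims for a 2D-only GEMM, then restore them on the output."""
    original_shape = x.shape
    if x.dim() > 2:
        x = x.reshape(-1, x.shape[-1])  # [B*S, hidden]
    out = F.linear(x, weight)           # stand-in for the quantized GEMM
    if len(original_shape) > 2:
        out = out.reshape(*original_shape[:-1], out.shape[-1])
    return out


x = torch.randn(2, 16, 64)  # [batch, seq, hidden] activations
w = torch.randn(128, 64)    # [out_features, in_features]
assert apply_2d_gemm(x, w).shape == (2, 16, 128)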
+""" + +from .interface import AttentionTensorLayout +from .parallel import UlyssesAttention +from .trtllm import TrtllmAttention, TrtllmAttentionMetadata +from .utils import create_attention, get_visual_gen_attention_backend +from .vanilla import VanillaAttention + +__all__ = [ + "AttentionTensorLayout", + "get_visual_gen_attention_backend", + "create_attention", + "TrtllmAttention", + "TrtllmAttentionMetadata", + "UlyssesAttention", + "VanillaAttention", +] diff --git a/tensorrt_llm/_torch/visual_gen/attention_backend/interface.py b/tensorrt_llm/_torch/visual_gen/attention_backend/interface.py new file mode 100644 index 0000000000..a32c3712b4 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/attention_backend/interface.py @@ -0,0 +1,33 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Visual Generation Attention Backend Interface + +Defines shared types and enums for attention backends. +""" + +from enum import Enum + + +class AttentionTensorLayout(str, Enum): + """ + Tensor layout for attention backend input/output. + + Backends declare their preferred layout so the attention module + can reshape tensors optimally before calling the backend. + """ + + NHD = "NHD" # [B, S, H, D] - batch, seq, heads, dim + HND = "HND" # [B, H, S, D] - batch, heads, seq, dim diff --git a/tensorrt_llm/_torch/visual_gen/attention_backend/parallel.py b/tensorrt_llm/_torch/visual_gen/attention_backend/parallel.py new file mode 100644 index 0000000000..a7e466423f --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/attention_backend/parallel.py @@ -0,0 +1,162 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Ulysses Sequence Parallelism Wrapper + +Wraps any attention backend with sequence parallelism via all-to-all +communication. Not a standalone backend — compose around a real backend +(VANILLA/TRTLLM). 
+ +Architecture: + Input: [B, S/P, H, D] (sequence sharded across P processes) + Step 1: All-to-All → [B, S, H/P, D] (gather sequence, shard heads) + Step 2: Compute attention with wrapped backend (VANILLA or TRTLLM) + Step 3: All-to-All → [B, S/P, H, D] (restore sequence sharding) + Output: [B, S/P, H, D] (sequence sharded) +""" + +from typing import Optional + +import torch +import torch.nn as nn + +from tensorrt_llm._torch.distributed import all_to_all_4d + +from .interface import AttentionTensorLayout + + +class UlyssesAttention(nn.Module): + """ + Ulysses Sequence Parallelism wrapper. + + Wraps any attention backend with sequence parallelism via all-to-all. + Not a standalone backend — compose around a real backend (VANILLA/TRTLLM). + """ + + def __init__( + self, + inner_backend: nn.Module, + process_group: Optional[torch.distributed.ProcessGroup] = None, + ): + super().__init__() + self.inner_backend = inner_backend + self.process_group = process_group + self._preferred_layout = AttentionTensorLayout.NHD + + # Derive head info from inner backend + self.head_dim = inner_backend.head_dim + self.sharded_num_heads = inner_backend.num_heads + self.sharded_num_kv_heads = getattr(inner_backend, "num_kv_heads", self.sharded_num_heads) + + # Get world size from process group + try: + self.world_size = torch.distributed.get_world_size(group=process_group) + except (RuntimeError, ValueError): + self.world_size = 1 + + # Full (unsharded) head counts for external interface + self.num_heads = self.sharded_num_heads * self.world_size + self.num_kv_heads = self.sharded_num_kv_heads * self.world_size + + def forward( + self, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + batch_size: int, + attention_mask: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + Forward pass with Ulysses sequence parallelism. + + Input/Output: [B, S/P, H, D] (sequence sharded) + + Args: + q: Query tensor [B, S/P, H, D] + k: Key tensor [B, S/P, H, D] + v: Value tensor [B, S/P, H, D] + batch_size: Batch size + attention_mask: Optional attention mask + + Returns: + Output tensor [B, S/P, H, D] (sequence sharded) + + Note: + seq_len is computed from tensor shape after all-to-all, not passed as parameter. 
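+
+        Example (illustrative sketch; assumes torch.distributed is initialized
+        and q/k/v are already sequence-sharded to [B, S/P, H, D]):
+
+            inner = VanillaAttention(num_heads=8, head_dim=64)  # per-rank (sharded) head count
+            ulysses = UlyssesAttention(inner)
+            out = ulysses(q, k, v, batch_size=q.shape[0])  # -> [B, S/P, H, D]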
+ """ + # Step 1: All-to-All to gather full sequence, shard heads + # [B, S/P, H, D] -> [B, S, H/P, D] + if self.world_size > 1: + q = all_to_all_4d(q, scatter_dim=2, gather_dim=1, process_group=self.process_group) + k = all_to_all_4d(k, scatter_dim=2, gather_dim=1, process_group=self.process_group) + v = all_to_all_4d(v, scatter_dim=2, gather_dim=1, process_group=self.process_group) + + seq_len_full = q.shape[1] + inner_layout = self.inner_backend.preferred_layout + + # Step 2: Call wrapped backend for attention + # Transpose only if inner backend expects HND layout + if inner_layout == AttentionTensorLayout.HND: + # VANILLA expects [B, H/P, S, D] + q = q.transpose(1, 2) + k = k.transpose(1, 2) + v = v.transpose(1, 2) + # NHD backends (TRTLLM) keep [B, S, H/P, D] as-is + + inner_kwargs = dict( + q=q, + k=k, + v=v, + batch_size=batch_size, + seq_len=seq_len_full, + ) + if attention_mask is not None: + inner_kwargs["attention_mask"] = attention_mask + output = self.inner_backend.forward(**inner_kwargs) + + # Convert output back to [B, S, H/P, D] for the reverse all-to-all + if inner_layout == AttentionTensorLayout.HND: + # VANILLA returns [B, H/P, S, D] -> transpose to [B, S, H/P, D] + output = output.transpose(1, 2).contiguous() + else: + # TRTLLM returns [B, S, (H/P)*D] (3D) -> reshape to [B, S, H/P, D] + if output.dim() == 3: + output = output.view( + batch_size, seq_len_full, self.sharded_num_heads, self.head_dim + ) + output = output.contiguous() + + # Step 3: All-to-All to restore sequence sharding + # [B, S, H/P, D] -> [B, S/P, H, D] + if self.world_size > 1: + output = all_to_all_4d( + output, + scatter_dim=1, + gather_dim=2, + process_group=self.process_group, + ) + + return output + + @property + def preferred_layout(self) -> AttentionTensorLayout: + """Preferred tensor layout: [B, S, H, D]""" + return self._preferred_layout + + @classmethod + def support_fused_qkv(cls) -> bool: + """This backend does not support fused QKV.""" + return False diff --git a/tensorrt_llm/_torch/visual_gen/attention_backend/trtllm.py b/tensorrt_llm/_torch/visual_gen/attention_backend/trtllm.py new file mode 100644 index 0000000000..47b92ca27f --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/attention_backend/trtllm.py @@ -0,0 +1,244 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Diffusion TRTLLM Attention Backend + +Wraps TrtllmAttention with simplified metadata for visual generation (diffusion) models. +Handles the specifics of no-KV-cache operation and fused QKV requirements. 
+""" + +from typing import Optional, Union + +import torch + +from tensorrt_llm.mapping import Mapping +from tensorrt_llm.models.modeling_utils import QuantConfig + +from ...attention_backend.interface import AttentionRuntimeFeatures, PredefinedAttentionMask +from ...attention_backend.trtllm import TrtllmAttention as BaseTrtllmAttention +from ...attention_backend.trtllm import TrtllmAttentionMetadata as BaseTrtllmAttentionMetadata +from .interface import AttentionTensorLayout + + +class TrtllmAttentionMetadata: + """ + Simplified metadata adapter for diffusion models using TRTLLM backend. + + Lazy initialization with auto-growing capacity: + - Metadata created only when capacity needs increase + - prepare() called only when seq_lens actually change + - Automatically reallocates when batch_size or seq_len exceeds current capacity + + Args: + max_batch_size: Initial batch size hint. Will grow automatically if exceeded. + max_seq_len: Initial sequence length hint. Will grow automatically if exceeded. + device: Target device for tensors. + """ + + def __init__( + self, + max_batch_size: int = 16, + max_seq_len: int = 4096, + device: Optional[torch.device] = None, + ): + # These are initial hints, not hard limits - capacity grows as needed + self.max_batch_size = max_batch_size + self.max_seq_len = max_seq_len + self.device = device or torch.device("cuda") + + # Lazily created BaseTrtllmAttentionMetadata + self._metadata: Optional[BaseTrtllmAttentionMetadata] = None + + # Track allocated capacity + self._allocated_batch_size = 0 + self._allocated_max_seq_len = 0 + + # Track prepared state + self._cached_seq_lens: Optional[torch.Tensor] = None + self._prepared = False + + def _needs_new_metadata(self, batch_size: int, max_seq_len: int) -> bool: + """Check if we need to create new metadata (capacity change).""" + return ( + self._metadata is None + or batch_size > self._allocated_batch_size + or max_seq_len > self._allocated_max_seq_len + ) + + def _needs_prepare(self, batch_size: int, seq_lens: torch.Tensor) -> bool: + """Check if we need to call prepare() (seq_lens changed).""" + if not self._prepared: + return True + if self._cached_seq_lens is None: + return True + if self._cached_seq_lens.shape[0] != batch_size: + return True + return not torch.equal(self._cached_seq_lens[:batch_size], seq_lens) + + def _create_metadata(self, batch_size: int, max_seq_len: int) -> None: + """Create new metadata with given capacity.""" + # Allocate with some headroom to avoid frequent reallocation + alloc_batch = max(batch_size, self._allocated_batch_size) + alloc_seq_len = max(max_seq_len, self._allocated_max_seq_len) + + self._metadata = BaseTrtllmAttentionMetadata( + max_num_requests=alloc_batch, + max_num_tokens=alloc_batch * alloc_seq_len, + max_num_sequences=alloc_batch, + kv_cache_manager=None, # No KV cache for diffusion + mapping=Mapping(), + runtime_features=AttentionRuntimeFeatures(), + ) + + self._allocated_batch_size = alloc_batch + self._allocated_max_seq_len = alloc_seq_len + self._prepared = False # Reset prepare state on new metadata + + def prepare( + self, + batch_size: int, + seq_lens: Union[int, torch.Tensor], + ) -> BaseTrtllmAttentionMetadata: + """ + Prepare metadata for a forward pass. 
+ + Lazy behavior: + - Creates metadata only when capacity needs increase + - Calls prepare() only when seq_lens actually change + """ + if isinstance(seq_lens, int): + seq_lens_tensor = torch.full((batch_size,), seq_lens, dtype=torch.int32) + else: + seq_lens_tensor = seq_lens.to(dtype=torch.int32) + + max_seq_len = seq_lens_tensor.max().item() + + if self._needs_new_metadata(batch_size, max_seq_len): + self._create_metadata(batch_size, max_seq_len) + + if self._needs_prepare(batch_size, seq_lens_tensor): + self._metadata.seq_lens = seq_lens_tensor + self._metadata.num_contexts = batch_size + self._metadata.max_seq_len = max_seq_len + self._metadata.request_ids = list(range(batch_size)) + self._metadata.prepare() + + # Cache for next comparison + if self._cached_seq_lens is None or self._cached_seq_lens.shape[0] < batch_size: + self._cached_seq_lens = seq_lens_tensor.clone() + else: + self._cached_seq_lens[:batch_size].copy_(seq_lens_tensor) + self._prepared = True + + return self._metadata + + +class TrtllmAttention(BaseTrtllmAttention): + """ + TRTLLM Attention wrapper for diffusion models. + + Handles: + - Fused QKV requirement for TRTLLM kernel + - Metadata creation and preparation + - No KV cache operation + """ + + def __init__( + self, + layer_idx: int = 0, + num_heads: int = 8, + head_dim: int = 64, + num_kv_heads: Optional[int] = None, + quant_config: Optional[QuantConfig] = None, + dtype: Optional[torch.dtype] = None, + max_batch_size: int = 16, + max_seq_len: int = 4096, + ): + num_kv_heads = num_kv_heads or num_heads + + super().__init__( + layer_idx=layer_idx, + num_heads=num_heads, + num_kv_heads=num_kv_heads, + head_dim=head_dim, + quant_config=quant_config, + dtype=dtype, + ) + + # TRTLLM expects flat [B*S, H*D] format + self._preferred_layout = AttentionTensorLayout.NHD + + self.metadata = TrtllmAttentionMetadata( + max_batch_size=max_batch_size, + max_seq_len=max_seq_len, + ) + + def forward( + self, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + batch_size: int, + seq_len: int, + attention_mask: PredefinedAttentionMask = PredefinedAttentionMask.FULL, + seq_len_kv: Optional[int] = None, + **kwargs, + ) -> torch.Tensor: + """ + Forward pass with automatic metadata handling. 
+ + For diffusion models, expects separate Q, K, V tensors; they are fused + into a single QKV tensor internally, as required by the fused-QKV + TRTLLM kernel. + + Args: + q: Query tensor [num_tokens, hidden] + k: Key tensor [num_kv_tokens, kv_hidden] + v: Value tensor [num_kv_tokens, kv_hidden] + batch_size: Batch size + seq_len: Sequence length for Q + attention_mask: Attention mask type + seq_len_kv: Sequence length for K/V (for cross-attention, defaults to seq_len) + + Returns: + Output tensor [num_tokens, q_hidden] + """ + # Handle cross-attention where K/V have different sequence length than Q + kv_seq_len = seq_len_kv if seq_len_kv is not None else seq_len + + # Fuse the separate Q, K, V tensors into a single QKV tensor + q = q.view(batch_size * seq_len, -1) + k = k.view(batch_size * kv_seq_len, -1) + v = v.view(batch_size * kv_seq_len, -1) + qkv = torch.cat([q, k, v], dim=-1) + prepared_metadata = self.metadata.prepare(batch_size, seq_len) + output = super().forward( + q=qkv, + k=None, + v=None, + metadata=prepared_metadata, + attention_mask=attention_mask, + ) + output = output.view(batch_size, seq_len, -1) + return output + + @property + def preferred_layout(self) -> AttentionTensorLayout: + """Return the preferred tensor layout for this backend.""" + return self._preferred_layout + + @classmethod + def support_fused_qkv(cls) -> bool: + return True diff --git a/tensorrt_llm/_torch/visual_gen/attention_backend/utils.py b/tensorrt_llm/_torch/visual_gen/attention_backend/utils.py new file mode 100644 index 0000000000..835e113c55 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/attention_backend/utils.py @@ -0,0 +1,118 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Visual Generation Attention Backend Utilities + +Factory functions for creating attention backends for visual generation models. +Uses diffusion-specific wrappers (TrtllmAttention, VanillaAttention) +that handle metadata preparation internally for simplified usage. +""" + +from typing import TYPE_CHECKING, Optional, Type, Union + +import torch + +from tensorrt_llm.models.modeling_utils import QuantConfig + +# Lazy imports to avoid circular dependency +if TYPE_CHECKING: + from .trtllm import TrtllmAttention + from .vanilla import VanillaAttention + + # Type alias for diffusion attention backends + DiffusionAttentionBackend = Union[TrtllmAttention, VanillaAttention] + + +def get_visual_gen_attention_backend( + backend_name: str, +) -> Type["DiffusionAttentionBackend"]: + """ + Get diffusion attention backend class by name.
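+
+    Example (illustrative):
+
+        cls = get_visual_gen_attention_backend("TRTLLM")  # -> TrtllmAttention class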
+ + Args: + backend_name: Backend identifier ("VANILLA", "TRTLLM") + + Returns: + Diffusion attention backend class + + Backend Selection Guide: + - "VANILLA": Full support for cross-attention (different Q/KV seq lengths) + Uses torch SDPA backend + - "TRTLLM": Optimized for self-attention (requires same Q/KV seq lengths) + Better performance but requires fused QKV + """ + # Lazy imports to avoid circular dependency + from .trtllm import TrtllmAttention + from .vanilla import VanillaAttention + + backend_name = backend_name.upper() + + if backend_name == "VANILLA": + return VanillaAttention + elif backend_name == "TRTLLM": + return TrtllmAttention + else: + # Default to VANILLA for maximum compatibility + return VanillaAttention + + +def create_attention( + backend: str, + layer_idx: int, + num_heads: int, + head_dim: int, + num_kv_heads: Optional[int] = None, + quant_config: Optional[QuantConfig] = None, + dtype: Optional[torch.dtype] = None, + max_batch_size: int = 16, + max_seq_len: int = 4096, + **kwargs, +) -> "DiffusionAttentionBackend": + """ + Factory function to create attention backend instance for visual generation. + + Creates diffusion-specific attention backends that handle metadata preparation + internally, simplifying the forward() call. + + Args: + backend: Backend identifier ("VANILLA", "TRTLLM") + layer_idx: Layer index in the model + num_heads: Number of attention heads + head_dim: Dimension per head + num_kv_heads: Number of KV heads (for GQA/MQA, defaults to num_heads) + quant_config: Optional quantization configuration + dtype: Data type for the attention + max_batch_size: Initial batch size for metadata pre-allocation. The backend + will automatically reallocate if larger batches are encountered. + max_seq_len: Initial sequence length for metadata pre-allocation. The backend + will automatically reallocate if longer sequences are encountered. + **kwargs: Additional backend-specific arguments + + Returns: + Diffusion attention backend instance (TrtllmAttention or VanillaAttention) + """ + attn_cls = get_visual_gen_attention_backend(backend) + + return attn_cls( + layer_idx=layer_idx, + num_heads=num_heads, + head_dim=head_dim, + num_kv_heads=num_kv_heads, + quant_config=quant_config, + dtype=dtype, + max_batch_size=max_batch_size, + max_seq_len=max_seq_len, + **kwargs, + ) diff --git a/tensorrt_llm/_torch/visual_gen/attention_backend/vanilla.py b/tensorrt_llm/_torch/visual_gen/attention_backend/vanilla.py new file mode 100644 index 0000000000..d9eb41ca55 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/attention_backend/vanilla.py @@ -0,0 +1,126 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Diffusion Vanilla Attention Backend + +Simple attention implementation for visual generation (diffusion) models using +torch.nn.functional.scaled_dot_product_attention (SDPA). 
+ +Supports both self-attention and cross-attention (different Q/KV sequence lengths). +No KV cache - full recompute each diffusion step. +""" + +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ...attention_backend.interface import PredefinedAttentionMask +from .interface import AttentionTensorLayout + + +class VanillaAttention(nn.Module): + """ + Vanilla Attention for diffusion models using torch SDPA. + + Uses torch.nn.functional.scaled_dot_product_attention which: + - Properly handles cross-attention (different Q/KV sequence lengths) + - Uses Flash Attention 2 when available (via SDPA backend selection) + - No KV cache needed for diffusion models + + This is simpler than the LLM VanillaAttention which has complex + KV cache handling and uses flash_attn_varlen_func. + """ + + def __init__( + self, + layer_idx: int = 0, + num_heads: int = 8, + head_dim: int = 64, + num_kv_heads: Optional[int] = None, + dtype: Optional[torch.dtype] = None, + **kwargs, + ): + super().__init__() + + self.layer_idx = layer_idx + self.num_heads = num_heads + self.head_dim = head_dim + self.num_kv_heads = num_kv_heads or num_heads + self.dtype = dtype + self.scale = 1.0 / math.sqrt(head_dim) + + # SDPA expects [B, H, S, D] format + self._preferred_layout = AttentionTensorLayout.HND + + def forward( + self, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + batch_size: int, + seq_len: int, + seq_len_kv: Optional[int] = None, + attention_mask: PredefinedAttentionMask = PredefinedAttentionMask.FULL, + **kwargs, + ) -> torch.Tensor: + """ + Forward pass using torch SDPA. + + Args: + q: Query tensor [num_tokens, num_heads * head_dim] + k: Key tensor [num_kv_tokens, num_kv_heads * head_dim] + v: Value tensor [num_kv_tokens, num_kv_heads * head_dim] + batch_size: Batch size + seq_len: Query sequence length + seq_len_kv: KV sequence length (for cross-attention) + attention_mask: Attention mask type (CAUSAL or FULL) + + Returns: + Output tensor [num_tokens, num_heads * head_dim] + """ + is_causal = attention_mask == PredefinedAttentionMask.CAUSAL + + # Validate tensor shapes - flexible for Ulysses head sharding + # Expected: [batch_size, num_heads, seq_len, head_dim] + # Note: num_heads may be sharded (num_heads // ulysses_size) when using Ulysses + assert ( + q.dim() == 4 + and q.shape[0] == batch_size + and q.shape[2] == seq_len + and q.shape[3] == self.head_dim + ), ( + f"Invalid q shape: expected [B={batch_size}, H, S={seq_len}, D={self.head_dim}], got {q.shape}" + ) + assert k.dim() == 4 and k.shape[0] == batch_size and k.shape[3] == self.head_dim, ( + f"Invalid k shape: expected [B={batch_size}, H_kv, S_kv, D={self.head_dim}], got {k.shape}" + ) + assert v.dim() == 4 and v.shape[0] == batch_size and v.shape[3] == self.head_dim, ( + f"Invalid v shape: expected [B={batch_size}, H_kv, S_kv, D={self.head_dim}], got {v.shape}" + ) + + # TODO: Maybe we need to enforce cuDNN backend here + return F.scaled_dot_product_attention(q, k, v, is_causal=is_causal, scale=self.scale) + + @property + def preferred_layout(self) -> AttentionTensorLayout: + """Return the preferred tensor layout for this backend.""" + return self._preferred_layout + + @classmethod + def support_fused_qkv(cls) -> bool: + return False diff --git a/tensorrt_llm/_torch/visual_gen/checkpoints/__init__.py b/tensorrt_llm/_torch/visual_gen/checkpoints/__init__.py new file mode 100644 index 0000000000..6d3b138f90 --- /dev/null +++ 
b/tensorrt_llm/_torch/visual_gen/checkpoints/__init__.py @@ -0,0 +1,7 @@ +"""Diffusion model checkpoint loading utilities.""" + +from .weight_loader import WeightLoader + +__all__ = [ + "WeightLoader", +] diff --git a/tensorrt_llm/_torch/visual_gen/checkpoints/weight_loader.py b/tensorrt_llm/_torch/visual_gen/checkpoints/weight_loader.py new file mode 100644 index 0000000000..77067fe9c9 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/checkpoints/weight_loader.py @@ -0,0 +1,152 @@ +"""Weight loader for diffusion models.""" + +import json +from pathlib import Path +from typing import Any, Dict, List, Union + +import torch +import tqdm + +from tensorrt_llm._torch.models.checkpoints.base_weight_loader import BaseWeightLoader +from tensorrt_llm._torch.visual_gen.config import PipelineComponent +from tensorrt_llm.logger import logger +from tensorrt_llm.mapping import Mapping + + +class WeightLoader(BaseWeightLoader): + """ + Weight loader for diffusion models. + + Loads weights from safetensors/bin files, similar to HfWeightLoader + but simpler (no parallel loading optimization for now). + + Supports loading multiple components (e.g., transformer and transformer_2): + loader = WeightLoader(components=["transformer", "transformer_2"]) + weights = loader.load_weights(ckpt_dir, mapping) + # Returns: {"transformer": {...}, "transformer_2": {...}} + """ + + def __init__(self, components: Union[str, List[str]] = PipelineComponent.TRANSFORMER): + """ + Args: + components: Component(s) to load weights for. Can be: + - Single string: "transformer" (returns flat dict) + - List of strings: ["transformer", "transformer_2"] (returns nested dict) + """ + if isinstance(components, str): + self.components = [components] + self.single_component = True + else: + self.components = components + self.single_component = False + + def load_weights( + self, + checkpoint_dir: str, + mapping: Mapping, + **kwargs, + ) -> Dict[str, Any]: + """ + Load weights from checkpoint directory. 
+ + Args: + checkpoint_dir: Path to checkpoint (pipeline root or component dir) + mapping: Distributed mapping (for future TP/PP support) + + Returns: + - If single component: Dict mapping weight names to tensors + - If multiple components: Dict mapping component names to weight dicts + Example: {"transformer": {...}, "transformer_2": {...}} + """ + checkpoint_path = Path(checkpoint_dir) + + # Check if this is a pipeline (has model_index.json) + model_index = checkpoint_path / "model_index.json" + is_pipeline = model_index.exists() + + # Load weights for each component + all_weights = {} + for component in self.components: + if is_pipeline: + # Pipeline format: load from component subdirectory + component_dir = checkpoint_path / component + if not component_dir.exists(): + raise ValueError(f"Component '{component}' not found in {checkpoint_dir}") + weight_dir = component_dir + else: + # Standalone model (only valid for single component) + if len(self.components) > 1: + raise ValueError( + f"Multiple components specified but {checkpoint_dir} is not a pipeline " + "(no model_index.json found)" + ) + weight_dir = checkpoint_path + + # Find weight files + weight_files = self._find_weight_files(weight_dir) + if not weight_files: + raise ValueError(f"No weight files found in {weight_dir}") + + # Load all weights with progress bar + component_weights = {} + desc = f"Loading {component}" if is_pipeline else "Loading checkpoint" + for wf in tqdm.tqdm(weight_files, desc=desc): + component_weights.update(self._load_file(wf)) + + all_weights[component] = component_weights + + # Return flat dict for single component (backward compatibility) + if self.single_component: + return all_weights[self.components[0]] + + # Return nested dict for multiple components + return all_weights + + def _find_weight_files(self, weight_dir) -> List[str]: + """Find safetensors or bin weight files. 
+ + Handles: + - Single safetensors file + - Sharded safetensors with index.json + - PyTorch bin/pth files + """ + weight_dir = Path(weight_dir) + + # Check for sharded safetensors index + index_file = weight_dir / "diffusion_pytorch_model.safetensors.index.json" + if not index_file.exists(): + index_file = weight_dir / "model.safetensors.index.json" + + if index_file.exists(): + # Sharded safetensors: read index to get all shard files + with open(index_file) as f: + index = json.load(f) + shard_files = set(index.get("weight_map", {}).values()) + return sorted([str(weight_dir / f) for f in shard_files]) + + # Single safetensors file + files = list(weight_dir.glob("*.safetensors")) + if files: + # Filter out consolidated if multiple files exist + if len(files) > 1: + files = [f for f in files if "consolidated" not in f.name] + return sorted([str(f) for f in files]) + + # Fallback to bin + files = list(weight_dir.glob("*.bin")) + if files: + return sorted([str(f) for f in files]) + + # Fallback to pth + files = list(weight_dir.glob("*.pth")) + return sorted([str(f) for f in files]) + + def _load_file(self, filepath: str) -> Dict[str, Any]: + """Load weights from a single file.""" + logger.debug(f"Loading {filepath}") + if filepath.endswith(".safetensors"): + from safetensors.torch import load_file + + return load_file(filepath) + else: + return torch.load(filepath, map_location="cpu", weights_only=True) diff --git a/tensorrt_llm/_torch/visual_gen/config.py b/tensorrt_llm/_torch/visual_gen/config.py new file mode 100644 index 0000000000..c5666ae0b3 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/config.py @@ -0,0 +1,565 @@ +import json +import os +from enum import Enum +from pathlib import Path +from types import SimpleNamespace +from typing import Any, Dict, List, Literal, Optional, Tuple + +import torch +from pydantic import BaseModel, ConfigDict, model_validator +from pydantic import Field as PydanticField + +from tensorrt_llm.functional import AllReduceStrategy +from tensorrt_llm.mapping import Mapping +from tensorrt_llm.models.modeling_utils import QuantConfig +from tensorrt_llm.quantization.mode import QuantAlgo + +# ============================================================================= +# Pipeline component identifiers +# ============================================================================= + + +class PipelineComponent(str, Enum): + """Identifiers for pipeline components that can be loaded or skipped. + + Inherits from str so values compare equal to plain strings, + e.g. ``PipelineComponent.VAE == "vae"`` is ``True``. + """ + + TRANSFORMER = "transformer" + VAE = "vae" + TEXT_ENCODER = "text_encoder" + TOKENIZER = "tokenizer" + SCHEDULER = "scheduler" + IMAGE_ENCODER = "image_encoder" + IMAGE_PROCESSOR = "image_processor" + + +# ============================================================================= +# Sub-configuration classes for DiffusionArgs +# ============================================================================= + + +class AttentionConfig(BaseModel): + """Configuration for Attention layers.""" + + backend: Literal["VANILLA", "TRTLLM"] = PydanticField( + "VANILLA", description="Attention backend: VANILLA (PyTorch SDPA), TRTLLM" + ) + + +class ParallelConfig(BaseModel): + """Configuration for distributed parallelism. 
+ + Currently Supported: + - dit_cfg_size: CFG (Classifier-Free Guidance) parallelism + - dit_ulysses_size: Ulysses sequence parallelism + + Not Yet Supported: + - dit_tp_size: Tensor parallelism (not implemented) + - dit_ring_size: Ring attention (not implemented) + - dit_cp_size, dit_dp_size, dit_fsdp_size: Other parallelism types + + Total world_size = dit_cfg_size × dit_ulysses_size + + Parallelism Strategy: + - CFG Parallelism: Distributes positive/negative prompts across GPUs + - Ulysses Parallelism: Distributes sequence within each CFG group + + Example Configurations: + 1. cfg_size=1, ulysses_size=2 -> 2 GPUs (Ulysses only) + GPU 0-1: Single prompt, sequence parallelism across 2 GPUs + + 2. cfg_size=2, ulysses_size=1 -> 2 GPUs (CFG only) + GPU 0: Positive prompt + GPU 1: Negative prompt + + 3. cfg_size=2, ulysses_size=2 -> 4 GPUs (CFG + Ulysses) + GPU 0-1: CFG group 0 (positive), Ulysses parallel + GPU 2-3: CFG group 1 (negative), Ulysses parallel + + 4. cfg_size=2, ulysses_size=4 -> 8 GPUs (CFG + Ulysses) + GPU 0-3: CFG group 0 (positive), Ulysses parallel + GPU 4-7: CFG group 1 (negative), Ulysses parallel + """ + + disable_parallel_vae: bool = False + parallel_vae_split_dim: Literal["width", "height"] = "width" + + # DiT Parallelism + dit_dp_size: int = PydanticField(1, ge=1) + dit_tp_size: int = PydanticField(1, ge=1) # Not yet supported + dit_ulysses_size: int = PydanticField(1, ge=1) # Supported + dit_ring_size: int = PydanticField(1, ge=1) # Not yet supported + dit_cp_size: int = PydanticField(1, ge=1) + dit_cfg_size: int = PydanticField(1, ge=1) # Supported + dit_fsdp_size: int = PydanticField(1, ge=1) + + # Refiner Parallelism (Optional) + refiner_dit_dp_size: int = 1 + refiner_dit_tp_size: int = 1 + refiner_dit_ulysses_size: int = 1 + refiner_dit_ring_size: int = 1 + refiner_dit_cp_size: int = 1 + refiner_dit_cfg_size: int = 1 + refiner_dit_fsdp_size: int = 1 + + t5_fsdp_size: int = 1 + + def to_mapping(self) -> Mapping: + """Convert to TRT-LLM Mapping.""" + world_size = self.dit_tp_size * self.dit_cp_size + return Mapping( + world_size=world_size, + tp_size=self.dit_tp_size, + pp_size=1, + cp_size=self.dit_cp_size, + ) + + @model_validator(mode="after") + def validate_parallel_sizes(self) -> "ParallelConfig": + """Validate configuration against current environment.""" + if torch.cuda.is_available(): + world_size = int(os.environ.get("WORLD_SIZE", 1)) + total_parallel = ( + self.dit_tp_size + * self.dit_ulysses_size + * self.dit_ring_size + * self.dit_cp_size + * self.dit_dp_size + * self.dit_cfg_size + ) + if total_parallel > world_size: + raise ValueError( + f"Total DiT parallel size ({total_parallel}) exceeds WORLD_SIZE ({world_size})" + ) + return self + + +class TeaCacheConfig(BaseModel): + """Configuration for TeaCache runtime optimization. + + TeaCache speeds up diffusion by caching transformer outputs when timestep + embeddings change slowly. It monitors embedding distances and reuses cached + residuals when changes are below a threshold.
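+
+    Example (illustrative threshold; model-specific rescaling coefficients omitted):
+
+        teacache = TeaCacheConfig(enable_teacache=True, teacache_thresh=0.2)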
+ + Attributes: + enable_teacache: Enable TeaCache optimization + teacache_thresh: Distance threshold for cache decisions (lower = more caching) + use_ret_steps: Use aggressive warmup mode (5 steps) vs minimal (1 step) + coefficients: Polynomial coefficients for rescaling embedding distances + Applied as: rescaled_distance = poly(raw_distance) + ret_steps: Number of warmup steps (always compute, initialized at runtime) + cutoff_steps: Step to stop caching (always compute after, initialized at runtime) + num_steps: Total inference steps (set at runtime) + _cnt: Internal step counter (reset per generation) + """ + + enable_teacache: bool = False + teacache_thresh: float = PydanticField(0.2, gt=0.0) + use_ret_steps: bool = True + + coefficients: List[float] = PydanticField(default_factory=lambda: [1.0, 0.0]) + + # Runtime state fields (initialized by TeaCacheBackend.refresh) + ret_steps: Optional[int] = None + cutoff_steps: Optional[int] = None + num_steps: Optional[int] = None + + # State tracking (reset per generation) + _cnt: int = 0 + + model_config = ConfigDict(arbitrary_types_allowed=True) + + @model_validator(mode="after") + def validate_teacache(self) -> "TeaCacheConfig": + """Validate TeaCache configuration.""" + # Validate coefficients + if len(self.coefficients) == 0: + raise ValueError("TeaCache coefficients list cannot be empty") + + # Validate ret_steps if set + if self.ret_steps is not None and self.ret_steps < 0: + raise ValueError(f"ret_steps must be non-negative, got {self.ret_steps}") + + # Validate cutoff_steps vs num_steps if both set + if self.cutoff_steps is not None and self.num_steps is not None: + if self.cutoff_steps > self.num_steps: + raise ValueError( + f"cutoff_steps ({self.cutoff_steps}) cannot exceed num_steps ({self.num_steps})" + ) + + return self + + +class PipelineConfig(BaseModel): + """General pipeline configuration.""" + + enable_torch_compile: bool = True + torch_compile_models: str = PipelineComponent.TRANSFORMER + torch_compile_mode: str = "default" + fuse_qkv: bool = True + + # Offloading Config + enable_offloading: bool = False + offload_device: Literal["cpu", "cuda"] = "cpu" + offload_param_pin_memory: bool = True + + +# ============================================================================= +# DiffusionArgs - User-facing configuration (CLI / YAML) +# ============================================================================= + + +class DiffusionArgs(BaseModel): + """User-facing configuration for diffusion model loading and inference. + + This is the main config class used in CLI args and YAML config files. + PipelineLoader converts this to DiffusionModelConfig internally. + + Example: + args = DiffusionArgs( + checkpoint_path="/path/to/model", + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + parallel=ParallelConfig(dit_tp_size=2), + ) + loader = PipelineLoader() + pipeline = loader.load(args) + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + # Required: Path to checkpoint or HuggingFace Hub model ID + checkpoint_path: str = PydanticField( + "", + description=( + "Local directory path or HuggingFace Hub model ID " + "(e.g., 'Wan-AI/Wan2.1-T2V-1.3B-Diffusers'). " + "Hub models are downloaded and cached automatically." 
+ ), + ) + + # HuggingFace Hub options + revision: Optional[str] = PydanticField( + None, + description="HuggingFace Hub revision (branch, tag, or commit SHA) to download.", + ) + + # Device/dtype options + device: str = "cuda" + dtype: str = "bfloat16" + + # Component loading options (use PipelineComponent enum values or plain strings) + skip_components: List[PipelineComponent] = PydanticField( + default_factory=list, + description=( + "Components to skip loading. " + "Accepts PipelineComponent enum values or equivalent strings " + "(e.g., [PipelineComponent.TEXT_ENCODER, PipelineComponent.VAE])" + ), + ) + + # Sub-configs (dict input for quant_config is coerced to QuantConfig in model_validator) + quant_config: QuantConfig = PydanticField(default_factory=QuantConfig) + pipeline: PipelineConfig = PydanticField(default_factory=PipelineConfig) + attention: AttentionConfig = PydanticField(default_factory=AttentionConfig) + parallel: ParallelConfig = PydanticField(default_factory=ParallelConfig) + teacache: TeaCacheConfig = PydanticField(default_factory=TeaCacheConfig) + + # Set by model_validator when quant_config is provided as a dict (ModelOpt format) + dynamic_weight_quant: bool = False + force_dynamic_quantization: bool = False + + @model_validator(mode="before") + @classmethod + def _parse_quant_config_dict(cls, data: Any) -> Any: + """Parse user-facing DiffusionArgs.quant_config (dict or None) into QuantConfig and dynamic flags. + + User input is ModelOpt-format dict (e.g. {"quant_algo": "FP8", "dynamic": True}). + We coerce it to QuantConfig + dynamic_weight_quant + force_dynamic_quantization so that + from_pretrained() can copy them into DiffusionModelConfig (internal) without parsing again. + """ + if not isinstance(data, dict): + return data + raw = data.get("quant_config") + if raw is None: + data = {**data, "quant_config": QuantConfig()} + return data + if not isinstance(raw, dict): + return data + qc, _, dwq, daq = DiffusionModelConfig.load_diffusion_quant_config(raw) + data = { + **data, + "quant_config": qc, + "dynamic_weight_quant": dwq, + "force_dynamic_quantization": daq, + } + return data + + def to_mapping(self) -> Mapping: + """Derive Mapping from ParallelConfig.""" + return self.parallel.to_mapping() + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary.""" + return self.model_dump() + + @classmethod + def from_dict(cls, config_dict: Dict[str, Any]) -> "DiffusionArgs": + """Create from dictionary with automatic nested config parsing. + + Pydantic automatically handles nested configs, but we keep this method + for backward compatibility and to filter unknown fields. + """ + # Get valid field names for DiffusionArgs + valid_fields = set(cls.model_fields.keys()) + + # Filter to only include valid fields (ignore unknown fields) + filtered_dict = {k: v for k, v in config_dict.items() if k in valid_fields} + + # Pydantic automatically converts nested dicts to their respective config classes + return cls(**filtered_dict) + + +# ============================================================================= +# Utilities +# ============================================================================= + + +def discover_pipeline_components(checkpoint_path: Path) -> Dict[str, Path]: + """ + Discover components from diffusers pipeline's model_index.json. + + Returns dict mapping component name to config.json path. 
+ """ + model_index_path = checkpoint_path / "model_index.json" + if not model_index_path.exists(): + return {} + + with open(model_index_path) as f: + model_index = json.load(f) + + components = {} + for key, value in model_index.items(): + if key.startswith("_") or value is None: + continue + config_path = checkpoint_path / key / "config.json" + if config_path.exists(): + components[key] = config_path + + return components + + +# ============================================================================= +# DiffusionModelConfig - Internal configuration (merged/parsed) +# ============================================================================= + + +class DiffusionModelConfig(BaseModel): + """Internal ModelConfig for diffusion models. + + This is created by PipelineLoader from DiffusionArgs + checkpoint. + Contains merged/parsed config from: + - pretrained_config: From checkpoint/config.json + - quant_config: From checkpoint or user quant config + - Sub-configs: From DiffusionArgs (pipeline, attention, parallel, teacache) + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + pretrained_config: Optional[Any] = None + mapping: Mapping = PydanticField(default_factory=Mapping) + skip_create_weights_in_init: bool = False + force_dynamic_quantization: bool = False + allreduce_strategy: AllReduceStrategy = PydanticField(default=AllReduceStrategy.AUTO) + extra_attrs: Dict = PydanticField(default_factory=dict) + + # Distributed process groups + ulysses_process_group: Optional[torch.distributed.ProcessGroup] = None + + dynamic_weight_quant: bool = False + + # Sub-configs from DiffusionArgs (merged during from_pretrained) + quant_config: QuantConfig = PydanticField(default_factory=QuantConfig) + # Per-layer quant (from load_diffusion_quant_config layer_quant_config; None until mixed-precision parsing exists) + quant_config_dict: Optional[Dict[str, QuantConfig]] = None + pipeline: PipelineConfig = PydanticField(default_factory=PipelineConfig) + attention: AttentionConfig = PydanticField(default_factory=AttentionConfig) + parallel: ParallelConfig = PydanticField(default_factory=ParallelConfig) + teacache: TeaCacheConfig = PydanticField(default_factory=TeaCacheConfig) + + @property + def torch_dtype(self) -> "torch.dtype": + """Get the torch dtype of the model (default: bfloat16).""" + return torch.bfloat16 + + def get_quant_config(self, name: Optional[str] = None) -> QuantConfig: + """Get quantization config for a layer or global. Resembles LLM ModelConfig.get_quant_config.""" + if name is None or self.quant_config_dict is None: + return self.quant_config + if name in self.quant_config_dict: + return self.quant_config_dict[name] + return self.quant_config + + @staticmethod + def load_diffusion_quant_config( + quant_config_dict: dict, + ) -> Tuple[QuantConfig, Optional[Dict], bool, bool]: + """ + Parse quantization config in ModelOpt format. 
+ + Returns: (quant_config, layer_quant_config, dynamic_weight_quant, dynamic_activation_quant) + - quant_config: Global QuantConfig + - layer_quant_config: Per-layer config dict (None if not using mixed precision) + - dynamic_weight_quant: Whether to quantize weights at load time + - dynamic_activation_quant: Whether to quantize activations dynamically + """ + quant_algo_str = quant_config_dict.get("quant_algo") + quant_algo = None + if quant_algo_str: + algo_map = { + "FP8": QuantAlgo.FP8, + "FP8_BLOCK_SCALES": QuantAlgo.FP8_BLOCK_SCALES, + "NVFP4": QuantAlgo.NVFP4, + "W4A16_AWQ": QuantAlgo.W4A16_AWQ, + "W4A8_AWQ": QuantAlgo.W4A8_AWQ, + "W8A8_SQ_PER_CHANNEL": QuantAlgo.W8A8_SQ_PER_CHANNEL, + } + quant_algo = algo_map.get(quant_algo_str) + if quant_algo is None: + raise ValueError(f"Unknown quant_algo: {quant_algo_str}") + + # Parse group_size and dynamic flags from config_groups + group_size = None + dynamic_weight_quant = False + dynamic_activation_quant = False + for group_config in quant_config_dict.get("config_groups", {}).values(): + weights_config = group_config.get("weights", {}) + activations_config = group_config.get("input_activations", {}) + dynamic_weight_quant = weights_config.get("dynamic", False) + dynamic_activation_quant = activations_config.get("dynamic", False) + # Extract group_size from weights config (e.g., NVFP4: group_size=16) + if group_size is None: + group_size = weights_config.get("group_size") + break + + # Set defaults based on quant_algo if group_size not specified + if group_size is None: + if quant_algo in (QuantAlgo.NVFP4,): + group_size = 16 # NVFP4 default + elif quant_algo == QuantAlgo.FP8_BLOCK_SCALES: + group_size = 128 # FP8 blockwise default + + # Auto-enable dynamic weight quantization if quant_algo is specified + # but no explicit config_groups setting is present. + # This allows simple configs like {"quant_algo": "FP8"} to work. + if quant_algo is not None and not quant_config_dict.get("config_groups"): + dynamic_weight_quant = quant_config_dict.get("dynamic", True) + + quant_config = QuantConfig( + quant_algo=quant_algo, + group_size=group_size, + exclude_modules=quant_config_dict.get("ignore"), + ) + + # TODO: Per-layer config (None for now - future: parse mixed precision settings) + layer_quant_config = None + + return quant_config, layer_quant_config, dynamic_weight_quant, dynamic_activation_quant + + @classmethod + def from_pretrained( + cls, + checkpoint_dir: str, + args: Optional["DiffusionArgs"] = None, + **kwargs, + ) -> "DiffusionModelConfig": + """ + Load config from pretrained checkpoint. 
+ + Called by PipelineLoader with DiffusionArgs: + config = DiffusionModelConfig.from_pretrained( + checkpoint_dir=args.checkpoint_path, + args=args, + ) + + Args: + checkpoint_dir: Path to checkpoint + args: DiffusionArgs containing user config (quant, pipeline, attention, parallel, teacache) + **kwargs: Additional config options (e.g., mapping) + """ + kwargs.pop("trust_remote_code", None) + + # Extract sub-configs from args or use defaults + pipeline_cfg = args.pipeline if args else PipelineConfig() + attention_cfg = args.attention if args else AttentionConfig() + parallel_cfg = args.parallel if args else ParallelConfig() + teacache_cfg = args.teacache if args else TeaCacheConfig() + + component = PipelineComponent.TRANSFORMER + checkpoint_path = Path(checkpoint_dir) + + # Discover pipeline components + components = discover_pipeline_components(checkpoint_path) + + # Determine config path + if components: + if component not in components: + raise ValueError( + f"Component '{component}' not found. Available: {list(components.keys())}" + ) + config_path = components[component] + else: + config_path = checkpoint_path / "config.json" + + if not config_path.exists(): + raise ValueError(f"Config not found at {config_path}") + + # Load pretrained_config from checkpoint + with open(config_path) as f: + config_dict = json.load(f) + pretrained_config = SimpleNamespace(**config_dict) + + model_index_path = checkpoint_path / "model_index.json" + if model_index_path.exists(): + with open(model_index_path) as f: + model_index = json.load(f) + if "boundary_ratio" in model_index and "transformer_2" in model_index: + transformer_2_spec = model_index.get("transformer_2") + if transformer_2_spec and transformer_2_spec[0] is not None: + pretrained_config.boundary_ratio = model_index["boundary_ratio"] + + # Resolve quant config: use args if user set quant (QuantConfig from dict), else checkpoint + if args and args.quant_config.quant_algo is not None: + quant_config = args.quant_config + quant_config_dict = ( + None # DiffusionArgs has no per-layer dict; only from checkpoint parse + ) + dynamic_weight_quant = args.dynamic_weight_quant + dynamic_activation_quant = args.force_dynamic_quantization + else: + quant_config = QuantConfig() + quant_config_dict = None + dynamic_weight_quant = False + dynamic_activation_quant = False + quant_dict = getattr(pretrained_config, "quantization_config", None) + if isinstance(quant_dict, dict): + quant_config, quant_config_dict, dynamic_weight_quant, dynamic_activation_quant = ( + cls.load_diffusion_quant_config(quant_dict) + ) + + return cls( + pretrained_config=pretrained_config, + quant_config=quant_config, + quant_config_dict=quant_config_dict, + dynamic_weight_quant=dynamic_weight_quant, + force_dynamic_quantization=dynamic_activation_quant, + # Sub-configs from DiffusionArgs + pipeline=pipeline_cfg, + attention=attention_cfg, + parallel=parallel_cfg, + teacache=teacache_cfg, + # Delay weight creation after apply_quant_config_exclude_modules() in __post_init__ + skip_create_weights_in_init=True, + **kwargs, + ) diff --git a/tensorrt_llm/_torch/visual_gen/executor.py b/tensorrt_llm/_torch/visual_gen/executor.py new file mode 100644 index 0000000000..d6e03bdcfe --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/executor.py @@ -0,0 +1,246 @@ +import os +import queue +import threading +import traceback +from dataclasses import dataclass +from typing import List, Optional, Union + +import torch +import torch.distributed as dist +import zmq + +from 
tensorrt_llm._torch.visual_gen.config import DiffusionArgs +from tensorrt_llm._torch.visual_gen.output import MediaOutput +from tensorrt_llm._torch.visual_gen.pipeline_loader import PipelineLoader +from tensorrt_llm.executor.ipc import ZeroMqQueue +from tensorrt_llm.logger import logger + + +@dataclass +class DiffusionRequest: + """Request for diffusion inference with explicit model-specific parameters.""" + + request_id: int + prompt: str + negative_prompt: Optional[str] = None + height: int = 720 + width: int = 1280 + num_inference_steps: int = 50 + guidance_scale: float = 5.0 + max_sequence_length: int = 512 + seed: int = 42 + + # Video-specific parameters + num_frames: int = 81 + frame_rate: float = 24.0 + + # Image-specific parameters + num_images_per_prompt: int = 1 + + # Advanced parameters + guidance_rescale: float = 0.0 + output_type: str = "pt" + + # Wan-specific parameters + image: Optional[Union[str, List[str]]] = None + guidance_scale_2: Optional[float] = None + boundary_ratio: Optional[float] = None + last_image: Optional[Union[str, List[str]]] = None + + +@dataclass +class DiffusionResponse: + """Response with model-specific output. + + Attributes: + request_id: Unique identifier for the request + output: Generated media as MediaOutput with model-specific fields populated + error_msg: Error message if generation failed + """ + + request_id: int + output: Optional[MediaOutput] = None + error_msg: Optional[str] = None + + +class DiffusionExecutor: + """Execution engine for diffusion models running in worker processes.""" + + def __init__( + self, + model_path: str, + request_queue_addr: str, + response_queue_addr: str, + device_id: int, + diffusion_config: Optional[dict] = None, + ): + self.model_path = model_path + self.request_queue_addr = request_queue_addr + self.response_queue_addr = response_queue_addr + self.device_id = device_id + self.diffusion_config = diffusion_config + + self.requests_ipc = None + self.rank = dist.get_rank() + self.response_queue = queue.Queue() + self.sender_thread = None + + # Only rank 0 handles IPC + if self.rank == 0: + logger.info(f"Worker {device_id}: Connecting to request queue") + self.requests_ipc = ZeroMqQueue( + (request_queue_addr, None), + is_server=False, + socket_type=zmq.PULL, + use_hmac_encryption=False, + ) + self.sender_thread = threading.Thread(target=self._sender_loop, daemon=True) + self.sender_thread.start() + + self._load_pipeline() + + def _sender_loop(self): + """Background thread for sending responses.""" + logger.info(f"Worker {self.device_id}: Connecting to response queue") + responses_ipc = ZeroMqQueue( + (self.response_queue_addr, None), + is_server=False, + socket_type=zmq.PUSH, + use_hmac_encryption=False, + ) + + while True: + try: + resp = self.response_queue.get() + if resp is None: + break + responses_ipc.put(resp) + except Exception as e: + logger.error(f"Worker {self.device_id}: Sender error: {e}") + + if responses_ipc.socket: + responses_ipc.socket.setsockopt(zmq.LINGER, 0) + responses_ipc.close() + + def _load_pipeline(self): + """ + Load pipeline using proper flow: + DiffusionArgs → PipelineLoader → DiffusionModelConfig → AutoPipeline → BasePipeline + """ + logger.info(f"Worker {self.device_id}: Loading pipeline") + + try: + # Convert diffusion_config dict to DiffusionArgs + config_dict = self.diffusion_config.copy() + config_dict["checkpoint_path"] = self.model_path + config_dict["device"] = f"cuda:{self.device_id}" + + # Create DiffusionArgs from dict (handles nested configs) + args = 
DiffusionArgs.from_dict(config_dict) + + # Use PipelineLoader for proper pipeline creation flow: + # PipelineLoader.load() internally: + # 1. Creates DiffusionModelConfig.from_pretrained() + # 2. Creates pipeline via AutoPipeline.from_config() + # 3. Loads weights with quantization support + # 4. Calls post_load_weights() + loader = PipelineLoader(args) + self.pipeline = loader.load() + + except Exception as e: + logger.error(f"Worker {self.device_id}: Failed to load pipeline: {e}") + raise + + logger.info(f"Worker {self.device_id}: Pipeline ready") + + # Sync all workers + dist.barrier() + + # Send READY signal + if self.rank == 0: + logger.info(f"Worker {self.device_id}: Sending READY") + self.response_queue.put(DiffusionResponse(request_id=-1, output="READY")) + + def serve_forever(self): + """Main execution loop.""" + while True: + req = None + if self.rank == 0: + req = self.requests_ipc.get() + logger.info(f"Worker {self.device_id}: Request available") + + # Broadcast to all ranks + obj_list = [req] + dist.broadcast_object_list(obj_list, src=0) + req = obj_list[0] + + if req is None: + logger.info(f"Worker {self.device_id}: Shutdown signal received") + if self.rank == 0 and self.sender_thread: + self.response_queue.put(None) + self.sender_thread.join() + break + + logger.info(f"Worker {self.device_id}: Processing request {req.request_id}") + self.process_request(req) + + def process_request(self, req: DiffusionRequest): + """Process a single request.""" + try: + output = self.pipeline.infer(req) + if self.rank == 0: + self.response_queue.put(DiffusionResponse(request_id=req.request_id, output=output)) + except Exception as e: + logger.error(f"Worker {self.device_id}: Error: {e}") + logger.error(traceback.format_exc()) + if self.rank == 0: + self.response_queue.put( + DiffusionResponse(request_id=req.request_id, error_msg=str(e)) + ) + + +def run_diffusion_worker( + rank: int, + world_size: int, + master_addr: str, + master_port: int, + model_path: str, + request_queue_addr: str, + response_queue_addr: str, + diffusion_config: Optional[dict] = None, +): + """Entry point for worker process.""" + try: + # Setup distributed env — use PyTorch distributed, not MPI + os.environ["TLLM_DISABLE_MPI"] = "1" + os.environ["MASTER_ADDR"] = master_addr + os.environ["MASTER_PORT"] = str(master_port) + os.environ["RANK"] = str(rank) + os.environ["WORLD_SIZE"] = str(world_size) + + # Calculate device_id before init_process_group + device_id = rank % torch.cuda.device_count() if torch.cuda.is_available() else 0 + if torch.cuda.is_available(): + torch.cuda.set_device(device_id) + + dist.init_process_group( + backend="nccl" if torch.cuda.is_available() else "gloo", + init_method="env://", + world_size=world_size, + rank=rank, + device_id=torch.device(f"cuda:{device_id}") if torch.cuda.is_available() else None, + ) + + executor = DiffusionExecutor( + model_path=model_path, + request_queue_addr=request_queue_addr, + response_queue_addr=response_queue_addr, + device_id=device_id, + diffusion_config=diffusion_config, + ) + executor.serve_forever() + dist.destroy_process_group() + + except Exception as e: + logger.error(f"Worker failed: {e}") + traceback.print_exc() diff --git a/tensorrt_llm/_torch/visual_gen/models/__init__.py b/tensorrt_llm/_torch/visual_gen/models/__init__.py new file mode 100644 index 0000000000..5f726b84ec --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/models/__init__.py @@ -0,0 +1,30 @@ +""" +Visual generation model pipelines. 
+
+Each model subdirectory contains:
+- pipeline_*.py: Main pipeline implementation inheriting from BasePipeline
+- __init__.py: Exports the pipeline class
+
+TeaCache extractors are registered inline in each pipeline's post_load_weights()
+method using register_extractor_from_config().
+
+Pipelines are registered in pipeline_registry.py's PipelineRegistry._REGISTRY dict.
+
+Example structure:
+    models/
+        my_model/
+            pipeline_my_model.py  # Pipeline class with inline extractor registration
+            __init__.py           # Exports: __all__ = ["MyModelPipeline"]
+"""
+
+from ..pipeline import BasePipeline
+from ..pipeline_registry import AutoPipeline, register_pipeline
+from .wan import WanImageToVideoPipeline, WanPipeline
+
+__all__ = [
+    "AutoPipeline",
+    "BasePipeline",
+    "WanPipeline",
+    "WanImageToVideoPipeline",
+    "register_pipeline",
+]
diff --git a/tensorrt_llm/_torch/visual_gen/models/wan/__init__.py b/tensorrt_llm/_torch/visual_gen/models/wan/__init__.py
new file mode 100644
index 0000000000..f177740809
--- /dev/null
+++ b/tensorrt_llm/_torch/visual_gen/models/wan/__init__.py
@@ -0,0 +1,5 @@
+from .pipeline_wan import WanPipeline
+from .pipeline_wan_i2v import WanImageToVideoPipeline
+from .transformer_wan import WanTransformer3DModel
+
+__all__ = ["WanPipeline", "WanImageToVideoPipeline", "WanTransformer3DModel"]
diff --git a/tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan.py b/tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan.py
new file mode 100644
index 0000000000..f5d0f4fce5
--- /dev/null
+++ b/tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan.py
@@ -0,0 +1,521 @@
+import time
+from typing import Optional
+
+import torch
+from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.video_processor import VideoProcessor
+from transformers import AutoTokenizer, UMT5EncoderModel
+
+from tensorrt_llm._torch.visual_gen.config import PipelineComponent
+from tensorrt_llm._torch.visual_gen.output import MediaOutput
+from tensorrt_llm._torch.visual_gen.pipeline import BasePipeline
+from tensorrt_llm._torch.visual_gen.pipeline_registry import register_pipeline
+from tensorrt_llm._torch.visual_gen.teacache import ExtractorConfig, register_extractor_from_config
+from tensorrt_llm._torch.visual_gen.utils import postprocess_video_tensor
+from tensorrt_llm.logger import logger
+
+from .transformer_wan import WanTransformer3DModel
+
+# Supported Wan T2V models:
+# - Wan2.1-T2V-14B: Single-stage text-to-video (14B parameters)
+# - Wan2.1-T2V-1.3B: Single-stage text-to-video (1.3B parameters)
+# - Wan2.2-T2V-A14B: Two-stage text-to-video (14B, boundary_ratio for high/low-noise stages; supports 480P & 720P)
+
+WAN_TEACACHE_COEFFICIENTS = {
+    "1.3B": {
+        "ret_steps": [
+            -5.21862437e04,
+            9.23041404e03,
+            -5.28275948e02,
+            1.36987616e01,
+            -4.99875664e-02,
+        ],
+        "standard": [2.39676752e03, -1.31110545e03, 2.01331979e02, -8.29855975e00, 1.37887774e-01],
+    },
+    "14B": {
+        "ret_steps": [
+            -3.03318725e05,
+            4.90537029e04,
+            -2.65530556e03,
+            5.87365115e01,
+            -3.15583525e-01,
+        ],
+        "standard": [-5784.54975374, 5449.50911966, -1811.16591783, 256.27178429, -13.02252404],
+    },
+}
+
+
+# Default negative prompt for Wan T2V models
+WAN_DEFAULT_NEGATIVE_PROMPT = (
+    "Vibrant colors, overexposed, static, blurry details, subtitles, style, artwork, painting, image, "
+    "still image, overall grayish tone, worst quality, low quality, JPEG compression artifacts, ugly, "
+    "incomplete, extra fingers, poorly drawn hands, poorly drawn face, 
deformed, disfigured, malformed limbs, " + "fused fingers, motionless image, cluttered background, three legs, many people in the background, walking backward" +) + + +@register_pipeline("WanPipeline") +class WanPipeline(BasePipeline): + def __init__(self, model_config): + # Wan2.2 two-stage denoising parameters + self.transformer_2 = None + self.boundary_ratio = getattr(model_config.pretrained_config, "boundary_ratio", None) + self.is_wan22 = self.boundary_ratio is not None + + super().__init__(model_config) + + @staticmethod + def _compute_wan_timestep_embedding(module, timestep, guidance=None): + """Compute timestep embedding for WAN transformer. + + WAN uses a condition_embedder with timesteps_proj and time_embedder layers. + Handles dtype casting to match the embedder's dtype. + + Args: + module: WanTransformer3DModel instance + timestep: Timestep tensor (shape: [batch_size]) + guidance: Unused for WAN (no guidance embedding) + + Returns: + Timestep embedding tensor used by TeaCache for distance calculation + """ + ce = module.condition_embedder + t_freq = ce.timesteps_proj(timestep) + + # Cast to embedder's dtype (avoid int8 quantized layers) + te_dtype = next(iter(ce.time_embedder.parameters())).dtype + if t_freq.dtype != te_dtype and te_dtype != torch.int8: + t_freq = t_freq.to(te_dtype) + + return ce.time_embedder(t_freq) + + @property + def dtype(self): + return self.model_config.torch_dtype + + @property + def device(self): + return self.transformer.device + + @property + def transformer_components(self) -> list: + """Return list of transformer components this pipeline needs.""" + if self.transformer_2 is not None: + return ["transformer", "transformer_2"] + return ["transformer"] + + def _init_transformer(self) -> None: + logger.info("Creating WAN transformer with quantization support...") + self.transformer = WanTransformer3DModel(model_config=self.model_config) + + # Wan2.2: create second transformer for two-stage denoising + if self.boundary_ratio is not None: + logger.info("Creating second transformer for Wan2.2 two-stage denoising...") + self.transformer_2 = WanTransformer3DModel(model_config=self.model_config) + + def load_standard_components( + self, + checkpoint_dir: str, + device: torch.device, + skip_components: Optional[list] = None, + ) -> None: + """Load VAE, text encoder, tokenizer, and scheduler from checkpoint.""" + skip_components = skip_components or [] + + if self.transformer_2 is not None and self.boundary_ratio is None: + raise RuntimeError( + "transformer_2 exists but boundary_ratio is not set. " + "This indicates an inconsistent pipeline configuration." 
+ ) + + # Detect model version + if self.is_wan22: + logger.info("Detected Wan 2.2 T2V (two-stage denoising)") + else: + logger.info("Detected Wan 2.1 T2V (single-stage denoising)") + + # Set default VAE scale factors (will be overridden if VAE is loaded) + self.vae_scale_factor_temporal = 4 + self.vae_scale_factor_spatial = 8 + + if PipelineComponent.TOKENIZER not in skip_components: + logger.info("Loading tokenizer...") + self.tokenizer = AutoTokenizer.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.TOKENIZER, + ) + + if PipelineComponent.TEXT_ENCODER not in skip_components: + logger.info("Loading text encoder...") + self.text_encoder = UMT5EncoderModel.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.TEXT_ENCODER, + torch_dtype=self.model_config.torch_dtype, + ).to(device) + + if PipelineComponent.VAE not in skip_components: + logger.info("Loading VAE...") + self.vae = AutoencoderKLWan.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.VAE, + torch_dtype=torch.bfloat16, # load VAE in BF16 for memory saving + ).to(device) + + self.vae_scale_factor_temporal = getattr(self.vae.config, "scale_factor_temporal", 4) + self.vae_scale_factor_spatial = getattr(self.vae.config, "scale_factor_spatial", 8) + + if PipelineComponent.SCHEDULER not in skip_components: + logger.info("Loading scheduler...") + self.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.SCHEDULER, + ) + if not hasattr(self.scheduler.config, "shift") or self.scheduler.config.shift == 1.0: + self.scheduler = FlowMatchEulerDiscreteScheduler.from_config( + self.scheduler.config, + shift=5.0, + ) + + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + + def load_weights(self, weights: dict) -> None: + # Store weights for later use (in case transformer_2 is created after this call) + self._weights_dict = weights + + has_separate_weights = "transformer" in weights and "transformer_2" in weights + + if self.transformer is not None and hasattr(self.transformer, "load_weights"): + logger.info("Loading transformer weights...") + transformer_weights = weights.get("transformer", weights) + self.transformer.load_weights(transformer_weights) + logger.info("Transformer weights loaded successfully.") + + # Wan2.2: Load weights for second transformer if it exists + if self.transformer_2 is not None and hasattr(self.transformer_2, "load_weights"): + logger.info("Loading transformer_2 weights for Wan2.2...") + if not has_separate_weights: + raise ValueError( + "Wan2.2 model requires separate 'transformer' and 'transformer_2' weights in checkpoint, " + f"but only found: {list(weights.keys())}. " + "Two-stage denoising requires distinct weights for high-noise and low-noise transformers." 
+ ) + transformer_2_weights = weights["transformer_2"] + self.transformer_2.load_weights(transformer_2_weights) + logger.info("Transformer_2 weights loaded successfully.") + + # Cache the target dtype from model config (default: bfloat16) + self._target_dtype = self.model_config.torch_dtype + + # Set model to eval mode + if self.transformer is not None: + self.transformer.eval() + if self.transformer_2 is not None: + self.transformer_2.eval() + + def post_load_weights(self) -> None: + super().post_load_weights() # Calls transformer.post_load_weights() for FP8 scale transformations + if self.transformer is not None: + # Register TeaCache extractor for this model type + # Tells TeaCache how to compute timestep embeddings for Wan + register_extractor_from_config( + ExtractorConfig( + model_class_name="WanTransformer3DModel", + timestep_embed_fn=self._compute_wan_timestep_embedding, + return_dict_default=False, # Wan returns raw tensors, not wrapped outputs + ) + ) + + # Enable TeaCache optimization with WAN-specific coefficients + self._setup_teacache(self.transformer, coefficients=WAN_TEACACHE_COEFFICIENTS) + # Save transformer backend before it gets overwritten + self.transformer_cache_backend = self.cache_backend + + # Wan2.2: Setup TeaCache for second transformer (low-noise stage) + if self.transformer_2 is not None: + if hasattr(self.transformer_2, "post_load_weights"): + self.transformer_2.post_load_weights() + + # Enable TeaCache for low-noise stage with same coefficients + self._setup_teacache(self.transformer_2, coefficients=WAN_TEACACHE_COEFFICIENTS) + # Save transformer_2 backend + self.transformer_2_cache_backend = self.cache_backend + + def infer(self, req): + """Run inference with request parameters.""" + return self.forward( + prompt=req.prompt, + negative_prompt=req.negative_prompt, + height=req.height, + width=req.width, + num_frames=req.num_frames, + num_inference_steps=req.num_inference_steps, + guidance_scale=req.guidance_scale, + guidance_scale_2=req.guidance_scale_2, + boundary_ratio=req.boundary_ratio, + seed=req.seed, + max_sequence_length=req.max_sequence_length, + ) + + @torch.no_grad() + def forward( + self, + prompt: str, + negative_prompt: Optional[str] = None, + height: int = 720, + width: int = 1280, + num_frames: int = 81, + num_inference_steps: Optional[int] = None, + guidance_scale: Optional[float] = None, + guidance_scale_2: Optional[float] = None, + boundary_ratio: Optional[float] = None, + seed: int = 42, + max_sequence_length: int = 226, + ): + pipeline_start = time.time() + generator = torch.Generator(device=self.device).manual_seed(seed) + + # Use user-provided boundary_ratio if given, otherwise fall back to model config + boundary_ratio = boundary_ratio if boundary_ratio is not None else self.boundary_ratio + + # Validate that Wan 2.2 models have boundary_ratio set + if self.transformer_2 is not None and boundary_ratio is None: + raise ValueError( + "Wan 2.2 models require boundary_ratio to be set. " + "boundary_ratio was not found in model config. " + "Please pass boundary_ratio as a parameter." 
+            )
+
+        # Set default negative prompt if not provided
+        if negative_prompt is None:
+            negative_prompt = WAN_DEFAULT_NEGATIVE_PROMPT
+
+        # Set model-specific defaults based on Wan version
+        logger.info(
+            f"Running {'Wan 2.2' if self.is_wan22 else 'Wan 2.1'} T2V inference "
+            f"(boundary_ratio={boundary_ratio}, has_transformer_2={self.transformer_2 is not None})"
+        )
+
+        if num_inference_steps is None:
+            num_inference_steps = 40 if self.is_wan22 else 50
+
+        if guidance_scale is None:
+            guidance_scale = 4.0 if self.is_wan22 else 5.0
+
+        if self.is_wan22 and guidance_scale_2 is None:
+            guidance_scale_2 = 3.0
+
+        # Validate two-stage denoising configuration
+        if guidance_scale_2 is not None and boundary_ratio is None:
+            logger.warning(
+                "guidance_scale_2 is specified but boundary_ratio is None. "
+                "guidance_scale_2 will be ignored. "
+                "Set boundary_ratio in config or pass as parameter to enable two-stage denoising."
+            )
+            guidance_scale_2 = None
+
+        # Encode Prompt
+        logger.info("Encoding prompts...")
+        encode_start = time.time()
+        prompt_embeds, neg_prompt_embeds = self._encode_prompt(
+            prompt, negative_prompt, max_sequence_length
+        )
+        logger.info(f"Prompt encoding completed in {time.time() - encode_start:.2f}s")
+
+        # Prepare Latents
+        latents = self._prepare_latents(height, width, num_frames, generator)
+        logger.info(f"Latents shape: {latents.shape}")
+
+        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
+
+        # Wan2.2: Calculate boundary timestep for two-stage denoising
+        boundary_timestep = None
+        if boundary_ratio is not None and self.transformer_2 is not None:
+            boundary_timestep = boundary_ratio * self.scheduler.config.num_train_timesteps
+            logger.info(
+                f"Wan2.2 two-stage denoising: boundary_timestep={boundary_timestep:.1f}, "
+                f"guidance_scale={guidance_scale}, guidance_scale_2={guidance_scale_2}"
+            )
+
+        # Denoising with two-stage support
+        # Track which model was used in last step (for logging model transitions)
+        last_model_used = [None]
+
+        def forward_fn(
+            latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors
+        ):
+            """Forward function for Wan transformer with two-stage support.
+
+            extra_stream_latents and extra_tensors are unused for WAN (single stream, no additional embeddings).
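+
+            Illustrative numbers (assumed for this example, not read from any
+            config): with boundary_ratio=0.875 and num_train_timesteps=1000,
+            boundary_timestep is 875, so steps with t >= 875 run `transformer`
+            (high-noise stage) and the remaining steps run `transformer_2`
+            (low-noise stage).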
+ """ + # Select model based on timestep (if two-stage denoising is enabled) + if boundary_timestep is not None and self.transformer_2 is not None: + # Extract scalar timestep for comparison + current_t = timestep if timestep.dim() == 0 else timestep[0] + if current_t >= boundary_timestep: + current_model = self.transformer + model_name = "transformer (high-noise)" + else: + current_model = self.transformer_2 + model_name = "transformer_2 (low-noise)" + + # Log when switching models + if last_model_used[0] != model_name: + if self.rank == 0: + logger.info(f"Switched to {model_name} at timestep {current_t:.1f}") + last_model_used[0] = model_name + else: + current_model = self.transformer + + return current_model( + hidden_states=latents, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + ) + + # Two-stage denoising: model switching in forward_fn, guidance scale switching in denoise() + latents = self.denoise( + latents=latents, + scheduler=self.scheduler, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + guidance_scale=guidance_scale, + forward_fn=forward_fn, + guidance_scale_2=guidance_scale_2, + boundary_timestep=boundary_timestep, + ) + + # Log TeaCache statistics - show stats for each transformer separately + if self.rank == 0 and self.model_config.teacache.enable_teacache: + logger.info("=" * 80) + logger.info("TeaCache Statistics:") + + # Stats for transformer (high-noise) + if hasattr(self, "transformer_cache_backend") and self.transformer_cache_backend: + stats = self.transformer_cache_backend.get_stats() + total_steps = stats.get("total_steps", 0) + cache_hits = stats.get("cached_steps", 0) + cache_misses = stats.get("compute_steps", 0) + hit_rate = (cache_hits / total_steps * 100) if total_steps > 0 else 0.0 + + logger.info(" Transformer (High-Noise):") + logger.info(f" Total steps: {total_steps}") + logger.info(f" Cache hits: {cache_hits}") + logger.info(f" Cache misses: {cache_misses}") + logger.info(f" Hit rate: {hit_rate:.1f}%") + + # Stats for transformer_2 (low-noise) + if hasattr(self, "transformer_2_cache_backend") and self.transformer_2_cache_backend: + stats = self.transformer_2_cache_backend.get_stats() + total_steps = stats.get("total_steps", 0) + cache_hits = stats.get("cached_steps", 0) + cache_misses = stats.get("compute_steps", 0) + hit_rate = (cache_hits / total_steps * 100) if total_steps > 0 else 0.0 + + logger.info(" Transformer_2 (Low-Noise):") + logger.info(f" Total steps: {total_steps}") + logger.info(f" Cache hits: {cache_hits}") + logger.info(f" Cache misses: {cache_misses}") + logger.info(f" Hit rate: {hit_rate:.1f}%") + + logger.info("=" * 80) + + # Decode + logger.info("Decoding video...") + decode_start = time.time() + video = self.decode_latents(latents, self._decode_latents) + + if self.rank == 0: + logger.info(f"Video decoded in {time.time() - decode_start:.2f}s") + logger.info(f"Total pipeline time: {time.time() - pipeline_start:.2f}s") + + return MediaOutput(video=video) + + def _encode_prompt(self, prompt, negative_prompt, max_sequence_length): + prompt = [prompt] if isinstance(prompt, str) else prompt + + def get_embeds(texts): + text_inputs = self.tokenizer( + texts, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + input_ids = text_inputs.input_ids.to(self.device) + attention_mask = text_inputs.attention_mask.to(self.device) + + embeds = self.text_encoder(input_ids, attention_mask=attention_mask).last_hidden_state + 
embeds = embeds.to(self.dtype) + + # Zero-out padded tokens based on mask + seq_lens = attention_mask.gt(0).sum(dim=1).long() + cleaned_embeds = [] + for u, v in zip(embeds, seq_lens): + real_content = u[:v] + pad_len = max_sequence_length - real_content.size(0) + if pad_len > 0: + padded = torch.cat( + [real_content, real_content.new_zeros(pad_len, real_content.size(1))] + ) + else: + padded = real_content + cleaned_embeds.append(padded) + + return torch.stack(cleaned_embeds, dim=0) + + prompt_embeds = get_embeds(prompt) + + if negative_prompt is None: + negative_prompt = "" + + neg_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + if len(neg_prompt) == 1 and len(prompt) > 1: + neg_prompt = neg_prompt * len(prompt) + + neg_embeds = get_embeds(neg_prompt) + + return prompt_embeds, neg_embeds + + def _prepare_latents(self, height, width, num_frames, generator): + num_channels_latents = 16 + num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 + + shape = ( + 1, + num_channels_latents, + num_latent_frames, + height // self.vae_scale_factor_spatial, + width // self.vae_scale_factor_spatial, + ) + return randn_tensor(shape, generator=generator, device=self.device, dtype=self.dtype) + + def _decode_latents(self, latents): + latents = latents.to(self.vae.dtype) + + # Denormalization + if hasattr(self.vae.config, "latents_mean") and hasattr(self.vae.config, "latents_std"): + if not hasattr(self, "_latents_mean"): + self._latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, -1, 1, 1, 1) + .to(self.device, self.vae.dtype) + ) + self._latents_std = ( + torch.tensor(self.vae.config.latents_std) + .view(1, -1, 1, 1, 1) + .to(self.device, self.vae.dtype) + ) + latents = (latents * self._latents_std) + self._latents_mean + else: + scaling_factor = self.vae.config.get("scaling_factor", 1.0) + latents = latents / scaling_factor + + # VAE decode: returns (B, C, T, H, W) + video = self.vae.decode(latents, return_dict=False)[0] + + # Post-process video tensor: (B, C, T, H, W) -> (T, H, W, C) uint8 + video = postprocess_video_tensor(video, remove_batch_dim=True) + + return video diff --git a/tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan_i2v.py b/tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan_i2v.py new file mode 100644 index 0000000000..d2a3fd629f --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/models/wan/pipeline_wan_i2v.py @@ -0,0 +1,736 @@ +import json +import os +import time +from typing import Optional, Tuple, Union + +import PIL.Image +import torch +from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler +from diffusers.utils.torch_utils import randn_tensor +from diffusers.video_processor import VideoProcessor +from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel + +from tensorrt_llm._torch.visual_gen.config import PipelineComponent +from tensorrt_llm._torch.visual_gen.output import MediaOutput +from tensorrt_llm._torch.visual_gen.pipeline import BasePipeline +from tensorrt_llm._torch.visual_gen.pipeline_registry import register_pipeline +from tensorrt_llm._torch.visual_gen.teacache import ExtractorConfig, register_extractor_from_config +from tensorrt_llm._torch.visual_gen.utils import postprocess_video_tensor +from tensorrt_llm.logger import logger + +# Supported Wan I2V 14B models: +# - Wan2.1-I2V-14B-480P: Single-stage image-to-video +# - Wan2.1-I2V-14B-720P: Single-stage image-to-video +# - Wan2.2-I2V-14B: Two-stage image-to-video (no CLIP, 
boundary_ratio for two-stage denoising) +# Note: Wan2.2-I2V-5B (expand_timesteps mode) is NOT supported by this pipeline +# Import shared coefficients from T2V pipeline +from .pipeline_wan import WAN_TEACACHE_COEFFICIENTS +from .transformer_wan import WanTransformer3DModel + +# Use same coefficients +WAN_I2V_TEACACHE_COEFFICIENTS = WAN_TEACACHE_COEFFICIENTS + +# Default negative prompt for Wan I2V models +WAN_DEFAULT_NEGATIVE_PROMPT = ( + "Vibrant colors, overexposed, static, blurry details, subtitles, style, artwork, painting, image, " + "still image, overall grayish tone, worst quality, low quality, JPEG compression artifacts, ugly, " + "incomplete, extra fingers, poorly drawn hands, poorly drawn face, deformed, disfigured, malformed limbs, " + "fused fingers, motionless image, cluttered background, three legs, many people in the background, walking backward" +) + + +def retrieve_latents( + encoder_output: torch.Tensor, + generator: Optional[torch.Generator] = None, + sample_mode: str = "argmax", +): + """Extract latents from VAE encoder output. + + For I2V, we use argmax mode to get deterministic encoding of the input image. + """ + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +@register_pipeline("WanImageToVideoPipeline") +class WanImageToVideoPipeline(BasePipeline): + def __init__(self, model_config): + # Wan2.2 14B two-stage denoising parameters + self.transformer_2 = None + self.boundary_ratio = getattr(model_config.pretrained_config, "boundary_ratio", None) + self.is_wan22 = self.boundary_ratio is not None + + super().__init__(model_config) + + @staticmethod + def _compute_wan_timestep_embedding(module, timestep, guidance=None): + """Compute timestep embedding for Wan I2V transformer.""" + ce = module.condition_embedder + t_freq = ce.timesteps_proj(timestep) + + # Cast to embedder's dtype (avoid int8 quantized layers) + te_dtype = next(iter(ce.time_embedder.parameters())).dtype + if t_freq.dtype != te_dtype and te_dtype != torch.int8: + t_freq = t_freq.to(te_dtype) + + return ce.time_embedder(t_freq) + + @property + def dtype(self): + return self.model_config.torch_dtype + + @property + def device(self): + return self.transformer.device + + @property + def transformer_components(self) -> list: + if self.transformer_2 is not None: + return ["transformer", "transformer_2"] + return ["transformer"] + + def _init_transformer(self) -> None: + logger.info("Creating WAN I2V transformer with quantization support...") + self.transformer = WanTransformer3DModel(model_config=self.model_config) + + # Wan2.2: Optionally create second transformer for two-stage denoising + if self.boundary_ratio is not None: + logger.info("Creating second transformer for Wan2.2 I2V two-stage denoising...") + self.transformer_2 = WanTransformer3DModel(model_config=self.model_config) + + def load_standard_components( + self, + checkpoint_dir: str, + device: torch.device, + skip_components: Optional[list] = None, + ) -> None: + """Load VAE, text encoder, tokenizer, scheduler, and I2V-specific components from checkpoint.""" + skip_components = skip_components or [] + + # Load boundary_ratio and transformer_2 info from model_index.json (pipeline-level config) + 
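+        # Illustrative (hypothetical) model_index.json fragment for a two-stage
+        # checkpoint; the field values here are assumptions, not from a real model:
+        #   {
+        #     "boundary_ratio": 0.875,
+        #     "transformer": ["diffusers", "WanTransformer3DModel"],
+        #     "transformer_2": ["diffusers", "WanTransformer3DModel"]
+        #   }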
# Wan 2.2 has both transformer_2 and boundary_ratio, Wan 2.1 doesn't + model_index_path = os.path.join(checkpoint_dir, "model_index.json") + has_transformer_2 = False + if os.path.exists(model_index_path): + with open(model_index_path) as f: + model_index = json.load(f) + # Check for boundary_ratio in model_index + if "boundary_ratio" in model_index: + self.boundary_ratio = model_index["boundary_ratio"] + logger.info(f"Found boundary_ratio in model_index.json: {self.boundary_ratio}") + else: + logger.info("No boundary_ratio found in model_index.json") + # Check for transformer_2 component + transformer_2_spec = model_index.get("transformer_2", None) + has_transformer_2 = ( + transformer_2_spec is not None and transformer_2_spec[0] is not None + ) + logger.info(f"transformer_2 in model_index.json: {has_transformer_2}") + + # Set default VAE scale factors (will be overridden if VAE is loaded) + self.vae_scale_factor_temporal = 4 + self.vae_scale_factor_spatial = 8 + + if PipelineComponent.TOKENIZER not in skip_components: + logger.info("Loading tokenizer...") + self.tokenizer = AutoTokenizer.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.TOKENIZER, + ) + + if PipelineComponent.TEXT_ENCODER not in skip_components: + logger.info("Loading text encoder...") + self.text_encoder = UMT5EncoderModel.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.TEXT_ENCODER, + torch_dtype=self.model_config.torch_dtype, + ).to(device) + + if PipelineComponent.VAE not in skip_components: + logger.info("Loading VAE...") + self.vae = AutoencoderKLWan.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.VAE, + torch_dtype=torch.bfloat16, # load VAE in BF16 for memory saving + ).to(device) + + self.vae_scale_factor_temporal = getattr(self.vae.config, "scale_factor_temporal", 4) + self.vae_scale_factor_spatial = getattr(self.vae.config, "scale_factor_spatial", 8) + + if PipelineComponent.SCHEDULER not in skip_components: + logger.info("Loading scheduler...") + self.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.SCHEDULER, + ) + if not hasattr(self.scheduler.config, "shift") or self.scheduler.config.shift == 1.0: + self.scheduler = FlowMatchEulerDiscreteScheduler.from_config( + self.scheduler.config, + shift=5.0, + ) + + if self.transformer_2 is not None and self.boundary_ratio is None: + raise RuntimeError( + "transformer_2 exists but boundary_ratio is not set. " + "This indicates an inconsistent pipeline configuration." 
+ ) + + # Load image encoder and processor (only for Wan 2.1) + # Wan 2.2: Has both transformer_2 and boundary_ratio (two-stage denoising) + if self.is_wan22: + logger.info("Detected Wan 2.2 I2V (two-stage, no CLIP)") + else: + logger.info("Detected Wan 2.1 I2V (single-stage, uses CLIP)") + + if PipelineComponent.IMAGE_ENCODER not in skip_components and not self.is_wan22: + logger.info("Loading CLIP image encoder for I2V conditioning (Wan 2.1 only)...") + self.image_encoder = CLIPVisionModel.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.IMAGE_ENCODER, + torch_dtype=torch.float32, # Keep CLIP in FP32 for stability + ).to(device) + + if PipelineComponent.IMAGE_PROCESSOR not in skip_components and not self.is_wan22: + logger.info("Loading CLIP image processor...") + self.image_processor = CLIPImageProcessor.from_pretrained( + checkpoint_dir, + subfolder=PipelineComponent.IMAGE_PROCESSOR, + ) + + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + + def load_weights(self, weights: dict) -> None: + # Store weights for later use + self._weights_dict = weights + + # Check if weights dict has separate transformer/transformer_2 keys (Wan2.2) + has_separate_weights = "transformer" in weights and "transformer_2" in weights + + if self.transformer is not None and hasattr(self.transformer, "load_weights"): + logger.info("Loading transformer weights...") + transformer_weights = weights.get("transformer", weights) + self.transformer.load_weights(transformer_weights) + logger.info("Transformer weights loaded successfully.") + + # Wan2.2: Load weights for second transformer if it exists + if self.transformer_2 is not None and hasattr(self.transformer_2, "load_weights"): + logger.info("Loading transformer_2 weights for Wan2.2 I2V...") + if has_separate_weights: + transformer_2_weights = weights["transformer_2"] + logger.info("Using separate transformer_2 weights from checkpoint") + else: + # For Wan 2.2, transformer_2 weights must exist + raise ValueError( + "Wan2.2 model requires separate 'transformer' and 'transformer_2' weights in checkpoint, " + f"but only found: {list(weights.keys())}" + ) + self.transformer_2.load_weights(transformer_2_weights) + logger.info("Transformer_2 weights loaded successfully.") + + # Cache the target dtype from model config (default: bfloat16) + self._target_dtype = self.model_config.torch_dtype + + # Set model to eval mode + if self.transformer is not None: + self.transformer.eval() + if self.transformer_2 is not None: + self.transformer_2.eval() + if hasattr(self, "image_encoder") and self.image_encoder is not None: + self.image_encoder.eval() + + def post_load_weights(self) -> None: + super().post_load_weights() # Calls transformer.post_load_weights() for FP8 scale transformations + if self.transformer is not None: + # Register TeaCache extractor for this model type + register_extractor_from_config( + ExtractorConfig( + model_class_name="WanTransformer3DModel", + timestep_embed_fn=self._compute_wan_timestep_embedding, + return_dict_default=False, # Wan returns raw tensors, not wrapped outputs + ) + ) + + # Enable TeaCache optimization with Wan I2V-specific coefficients + self._setup_teacache(self.transformer, coefficients=WAN_I2V_TEACACHE_COEFFICIENTS) + # Save transformer backend before it gets overwritten + self.transformer_cache_backend = self.cache_backend + + # Wan2.2: Setup TeaCache for second transformer (low-noise stage) + if self.transformer_2 is not None: + if hasattr(self.transformer_2, "post_load_weights"): 
+                self.transformer_2.post_load_weights()
+
+            # Enable TeaCache for low-noise stage with same coefficients
+            self._setup_teacache(self.transformer_2, coefficients=WAN_I2V_TEACACHE_COEFFICIENTS)
+            # Save transformer_2 backend
+            self.transformer_2_cache_backend = self.cache_backend
+
+    def infer(self, req):
+        """Run inference with request parameters."""
+        # Extract image from request (can be path, PIL Image, or torch.Tensor)
+        if req.image is None:
+            raise ValueError("I2V pipeline requires 'image' parameter")
+
+        image = req.image[0] if isinstance(req.image, list) else req.image
+        last_image = req.last_image
+
+        if last_image is not None and isinstance(last_image, list):
+            last_image = last_image[0] if last_image else None
+
+        return self.forward(
+            image=image,
+            prompt=req.prompt,
+            negative_prompt=req.negative_prompt,
+            height=req.height,
+            width=req.width,
+            num_frames=req.num_frames,
+            num_inference_steps=req.num_inference_steps,
+            guidance_scale=req.guidance_scale,
+            guidance_scale_2=req.guidance_scale_2,
+            boundary_ratio=req.boundary_ratio,
+            seed=req.seed,
+            max_sequence_length=req.max_sequence_length,
+            last_image=last_image,
+        )
+
+    @torch.no_grad()
+    def forward(
+        self,
+        image: Union[PIL.Image.Image, torch.Tensor, str],
+        prompt: str,
+        negative_prompt: Optional[str] = None,
+        height: int = 480,
+        width: int = 832,
+        num_frames: int = 81,
+        num_inference_steps: Optional[int] = None,
+        guidance_scale: Optional[float] = None,
+        guidance_scale_2: Optional[float] = None,
+        boundary_ratio: Optional[float] = None,
+        seed: int = 42,
+        max_sequence_length: int = 512,
+        last_image: Optional[Union[PIL.Image.Image, torch.Tensor, str]] = None,
+    ):
+        pipeline_start = time.time()
+        generator = torch.Generator(device=self.device).manual_seed(seed)
+
+        # Use user-provided boundary_ratio if given, otherwise fall back to model config
+        boundary_ratio = boundary_ratio if boundary_ratio is not None else self.boundary_ratio
+
+        if self.transformer_2 is not None and boundary_ratio is None:
+            raise ValueError(
+                "Wan 2.2 models require boundary_ratio to be set. "
+                "boundary_ratio was not found in model config. "
+                "Please pass boundary_ratio as a parameter."
+            )
+
+        # Set default negative prompt if not provided
+        if negative_prompt is None:
+            negative_prompt = WAN_DEFAULT_NEGATIVE_PROMPT
+
+        # Set model-specific defaults based on Wan version
+        if num_inference_steps is None:
+            num_inference_steps = 40 if self.is_wan22 else 50
+
+        if guidance_scale is None:
+            guidance_scale = 4.0 if self.is_wan22 else 5.0
+
+        if self.is_wan22 and guidance_scale_2 is None:
+            guidance_scale_2 = 3.0  # Wan2.2 recommended default
+
+        # Validate two-stage denoising configuration
+        if guidance_scale_2 is not None and boundary_ratio is None:
+            logger.warning(
+                "guidance_scale_2 is specified but boundary_ratio is None. "
+                "guidance_scale_2 will be ignored. "
+                "Set boundary_ratio in config or pass as parameter to enable two-stage denoising."
+            )
+            guidance_scale_2 = None
+
+        # Validate and adjust frame count for VAE compatibility
+        if num_frames % self.vae_scale_factor_temporal != 1:
+            logger.warning(
+                f"`num_frames - 1` must be divisible by {self.vae_scale_factor_temporal}. "
+                f"Rounding {num_frames} to nearest valid value."
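+                # Worked example (assuming vae_scale_factor_temporal=4):
+                # num_frames=80 fails the check above (80 % 4 == 0 != 1) and is
+                # adjusted below to 80 // 4 * 4 + 1 = 81.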
+ ) + num_frames = ( + num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 + ) + num_frames = max(num_frames, 1) + + # Validate and adjust resolution for transformer patchification + patch_size = ( + self.transformer.config.patch_size + if self.transformer is not None + else self.transformer_2.config.patch_size + ) + h_multiple_of = self.vae_scale_factor_spatial * patch_size[1] + w_multiple_of = self.vae_scale_factor_spatial * patch_size[2] + calc_height = height // h_multiple_of * h_multiple_of + calc_width = width // w_multiple_of * w_multiple_of + if height != calc_height or width != calc_width: + logger.warning( + f"Height and width must be multiples of ({h_multiple_of}, {w_multiple_of}) for patchification. " + f"Adjusting ({height}, {width}) -> ({calc_height}, {calc_width})." + ) + height, width = calc_height, calc_width + + # Encode Prompt + logger.info("Encoding prompts...") + encode_start = time.time() + prompt_embeds, neg_prompt_embeds = self._encode_prompt( + prompt, negative_prompt, max_sequence_length + ) + logger.info(f"Prompt encoding completed in {time.time() - encode_start:.2f}s") + + # Encode Image (I2V-specific) + logger.info("Encoding input image...") + image_encode_start = time.time() + + # Determine model version + model_version = "Wan 2.2" if self.is_wan22 else "Wan 2.1" + logger.info( + f"Running {model_version} I2V inference " + f"(boundary_ratio={boundary_ratio}, has_transformer_2={self.transformer_2 is not None})" + ) + + if not self.is_wan22: + # Wan 2.1 I2V: Compute CLIP image embeddings + image_embeds = self._encode_image(image, last_image) + image_embeds = image_embeds.to(self.dtype) + else: + # Wan 2.2 I2V: No image embeddings needed + image_embeds = None + + logger.info(f"Image encoding completed in {time.time() - image_encode_start:.2f}s") + + # Prepare Latents with image conditioning (I2V-specific) + latents, condition_data = self._prepare_latents( + image, height, width, num_frames, generator, last_image + ) + + self.scheduler.set_timesteps(num_inference_steps, device=self.device) + + # Wan2.2: Calculate boundary timestep for two-stage denoising + boundary_timestep = None + if boundary_ratio is not None and self.transformer_2 is not None: + boundary_timestep = boundary_ratio * self.scheduler.config.num_train_timesteps + logger.info( + f"Wan2.2 I2V two-stage denoising: boundary_timestep={boundary_timestep:.1f}, " + f"guidance_scale={guidance_scale}, guidance_scale_2={guidance_scale_2}" + ) + + # Denoising with two-stage support + # Track which model was used in last step (for logging model transitions) + last_model_used = [None] + + def forward_fn( + latents_input, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors + ): + """Forward function for WAN I2V transformer with two-stage support. + + Both Wan 2.1 and Wan 2.2 14B use concatenation approach: [latents, condition]. + Difference: Wan 2.1 passes image_embeds, Wan 2.2 passes None. 
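+
+            Shape sketch (illustrative, assuming the default 16 latent channels
+            and vae_scale_factor_temporal=4): latents_input has 16 channels and
+            condition_data has 20 (4 mask + 16 VAE-encoded frames), so the
+            concatenated latent_model_input carries 36 channels.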
+ """ + # Select model based on timestep (if two-stage denoising is enabled) + if boundary_timestep is not None and self.transformer_2 is not None: + # Extract scalar timestep for comparison + current_t = timestep if timestep.dim() == 0 else timestep[0] + if current_t >= boundary_timestep: + current_model = self.transformer + model_name = "transformer" + else: + current_model = self.transformer_2 + model_name = "transformer_2" + + # Log when switching models + if last_model_used[0] != model_name: + if self.rank == 0: + logger.info( + f"[TRTLLM] Switched to {model_name} at timestep {current_t:.1f}" + ) + last_model_used[0] = model_name + else: + current_model = self.transformer + + # Wan 2.1 & Wan 2.2 14B: concatenate latents and condition + # Handle CFG: duplicate condition if batch dimension doubled + if latents_input.shape[0] != condition_data.shape[0]: + condition_to_use = torch.cat([condition_data] * 2) + else: + condition_to_use = condition_data + + latent_model_input = torch.cat([latents_input, condition_to_use], dim=1).to(self.dtype) + timestep_input = timestep.expand(latents_input.shape[0]) + + # Forward pass with I2V conditioning + # Wan 2.1: image_embeds is not None (CLIP embeddings) + # Wan 2.2 14B: image_embeds is None (no CLIP) + return current_model( + hidden_states=latent_model_input, + timestep=timestep_input, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_states_image=image_embeds, + ) + + # Two-stage denoising: model switching in forward_fn, guidance scale switching in denoise() + latents = self.denoise( + latents=latents, + scheduler=self.scheduler, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + guidance_scale=guidance_scale, + forward_fn=forward_fn, + guidance_scale_2=guidance_scale_2, + boundary_timestep=boundary_timestep, + ) + + # Log TeaCache statistics - show stats for each transformer separately + if self.rank == 0 and self.model_config.teacache.enable_teacache: + logger.info("=" * 80) + logger.info("TeaCache Statistics:") + + # Stats for transformer (high-noise) + if hasattr(self, "transformer_cache_backend") and self.transformer_cache_backend: + stats = self.transformer_cache_backend.get_stats() + total_steps = stats.get("total_steps", 0) + cache_hits = stats.get("cached_steps", 0) + cache_misses = stats.get("compute_steps", 0) + hit_rate = (cache_hits / total_steps * 100) if total_steps > 0 else 0.0 + + logger.info(" Transformer (High-Noise):") + logger.info(f" Total steps: {total_steps}") + logger.info(f" Cache hits: {cache_hits}") + logger.info(f" Cache misses: {cache_misses}") + logger.info(f" Hit rate: {hit_rate:.1f}%") + + # Stats for transformer_2 (low-noise) + if hasattr(self, "transformer_2_cache_backend") and self.transformer_2_cache_backend: + stats = self.transformer_2_cache_backend.get_stats() + total_steps = stats.get("total_steps", 0) + cache_hits = stats.get("cached_steps", 0) + cache_misses = stats.get("compute_steps", 0) + hit_rate = (cache_hits / total_steps * 100) if total_steps > 0 else 0.0 + + logger.info(" Transformer_2 (Low-Noise):") + logger.info(f" Total steps: {total_steps}") + logger.info(f" Cache hits: {cache_hits}") + logger.info(f" Cache misses: {cache_misses}") + logger.info(f" Hit rate: {hit_rate:.1f}%") + + logger.info("=" * 80) + + # Decode + logger.info("Decoding video...") + decode_start = time.time() + video = self.decode_latents(latents, self._decode_latents) + + if self.rank == 0: + logger.info(f"Video decoded in {time.time() - decode_start:.2f}s") + logger.info(f"Total pipeline 
time: {time.time() - pipeline_start:.2f}s") + + return MediaOutput(video=video) + + def _encode_prompt(self, prompt, negative_prompt, max_sequence_length): + """Encode text prompts to embeddings (same as T2V).""" + prompt = [prompt] if isinstance(prompt, str) else prompt + + def get_embeds(texts): + text_inputs = self.tokenizer( + texts, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + input_ids = text_inputs.input_ids.to(self.device) + attention_mask = text_inputs.attention_mask.to(self.device) + + embeds = self.text_encoder(input_ids, attention_mask=attention_mask).last_hidden_state + embeds = embeds.to(self.dtype) + + # Zero-out padded tokens based on mask + seq_lens = attention_mask.gt(0).sum(dim=1).long() + cleaned_embeds = [] + for u, v in zip(embeds, seq_lens): + real_content = u[:v] + pad_len = max_sequence_length - real_content.size(0) + if pad_len > 0: + padded = torch.cat( + [real_content, real_content.new_zeros(pad_len, real_content.size(1))] + ) + else: + padded = real_content + cleaned_embeds.append(padded) + + return torch.stack(cleaned_embeds, dim=0) + + prompt_embeds = get_embeds(prompt) + + if negative_prompt is None: + negative_prompt = "" + + neg_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + if len(neg_prompt) == 1 and len(prompt) > 1: + neg_prompt = neg_prompt * len(prompt) + + neg_embeds = get_embeds(neg_prompt) + + return prompt_embeds, neg_embeds + + def _encode_image( + self, + image: Union[PIL.Image.Image, torch.Tensor, str], + last_image: Optional[Union[PIL.Image.Image, torch.Tensor, str]] = None, + ) -> torch.Tensor: + """Encode image(s) using CLIP image encoder (Wan 2.1 I2V only).""" + if isinstance(image, str): + image = PIL.Image.open(image).convert("RGB") + if isinstance(last_image, str): + last_image = PIL.Image.open(last_image).convert("RGB") + + images_to_encode = [image] if last_image is None else [image, last_image] + + image_inputs = self.image_processor(images=images_to_encode, return_tensors="pt").to( + self.device + ) + image_embeds = self.image_encoder(**image_inputs, output_hidden_states=True) + + return image_embeds.hidden_states[-2] + + def _prepare_latents( + self, + image: Union[PIL.Image.Image, torch.Tensor, str], + height: int, + width: int, + num_frames: int, + generator: torch.Generator, + last_image: Optional[Union[PIL.Image.Image, torch.Tensor, str]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Prepare latents with image conditioning for I2V generation.""" + num_channels_latents = 16 + num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 + latent_height = height // self.vae_scale_factor_spatial + latent_width = width // self.vae_scale_factor_spatial + + # Create random noise latents + shape = (1, num_channels_latents, num_latent_frames, latent_height, latent_width) + latents = randn_tensor(shape, generator=generator, device=self.device, dtype=self.dtype) + + # Load and preprocess image(s) + if isinstance(image, str): + image = PIL.Image.open(image).convert("RGB") + image = self.video_processor.preprocess(image, height=height, width=width).to( + self.device, dtype=torch.float32 + ) + + if last_image is not None: + if isinstance(last_image, str): + last_image = PIL.Image.open(last_image).convert("RGB") + last_image = self.video_processor.preprocess(last_image, height=height, width=width).to( + self.device, dtype=torch.float32 + ) + + image = image.unsqueeze(2) + + # Create video 
conditioning tensor (same for both Wan 2.1 and Wan 2.2 14B) + if last_image is None: + # First frame + zeros + video_condition = torch.cat( + [ + image, + image.new_zeros(image.shape[0], image.shape[1], num_frames - 1, height, width), + ], + dim=2, + ) + else: + # First frame + zeros + last frame (interpolation) + last_image = last_image.unsqueeze(2) + video_condition = torch.cat( + [ + image, + image.new_zeros(image.shape[0], image.shape[1], num_frames - 2, height, width), + last_image, + ], + dim=2, + ) + + # Encode video condition through VAE + video_condition = video_condition.to(device=self.device, dtype=self.vae.dtype) + latent_condition = retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax") + latent_condition = latent_condition.to(self.dtype) + + # Normalize latents to match diffusion model's latent space + latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, self.vae.config.z_dim, 1, 1, 1) + .to(latents.device, latents.dtype) + ) + latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view( + 1, self.vae.config.z_dim, 1, 1, 1 + ).to(latents.device, latents.dtype) + latent_condition = (latent_condition - latents_mean) * latents_std + + # Create mask in video frame space + # Reshaping is required to match the transformer's expected input format + mask_lat_size = torch.ones(1, 1, num_frames, latent_height, latent_width) + + if last_image is None: + mask_lat_size[:, :, list(range(1, num_frames))] = 0 + else: + mask_lat_size[:, :, list(range(1, num_frames - 1))] = 0 + + first_frame_mask = mask_lat_size[:, :, 0:1] + first_frame_mask = torch.repeat_interleave( + first_frame_mask, dim=2, repeats=self.vae_scale_factor_temporal + ) + + mask_lat_size = torch.cat([first_frame_mask, mask_lat_size[:, :, 1:, :]], dim=2) + + mask_lat_size = mask_lat_size.view( + 1, -1, self.vae_scale_factor_temporal, latent_height, latent_width + ) + + mask_lat_size = mask_lat_size.transpose(1, 2) + + mask_lat_size = mask_lat_size.to(self.device, dtype=self.dtype) + + # Concatenate mask and condition along channel dimension + condition = torch.cat([mask_lat_size, latent_condition], dim=1) + return latents, condition + + def _decode_latents(self, latents): + """Decode latents to video (same as T2V).""" + latents = latents.to(self.vae.dtype) + + # Denormalization + if hasattr(self.vae.config, "latents_mean") and hasattr(self.vae.config, "latents_std"): + if not hasattr(self, "_latents_mean"): + self._latents_mean = ( + torch.tensor(self.vae.config.latents_mean) + .view(1, -1, 1, 1, 1) + .to(self.device, self.vae.dtype) + ) + self._latents_std = ( + torch.tensor(self.vae.config.latents_std) + .view(1, -1, 1, 1, 1) + .to(self.device, self.vae.dtype) + ) + latents = (latents * self._latents_std) + self._latents_mean + else: + scaling_factor = self.vae.config.get("scaling_factor", 1.0) + latents = latents / scaling_factor + + # VAE decode: returns (B, C, T, H, W) + video = self.vae.decode(latents, return_dict=False)[0] + + # Post-process video tensor: (B, C, T, H, W) -> (T, H, W, C) uint8 + video = postprocess_video_tensor(video, remove_batch_dim=True) + + return video diff --git a/tensorrt_llm/_torch/visual_gen/models/wan/transformer_wan.py b/tensorrt_llm/_torch/visual_gen/models/wan/transformer_wan.py new file mode 100644 index 0000000000..bb19a78541 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/models/wan/transformer_wan.py @@ -0,0 +1,756 @@ +import math +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from 
diffusers.models.embeddings import PixArtAlphaTextProjection, TimestepEmbedding, Timesteps +from tqdm import tqdm +from transformers.modeling_utils import get_parameter_device + +from tensorrt_llm._torch.modules.layer_norm import LayerNorm +from tensorrt_llm._torch.modules.linear import Linear +from tensorrt_llm._torch.modules.mlp import MLP +from tensorrt_llm._torch.modules.rms_norm import RMSNorm +from tensorrt_llm._torch.visual_gen.config import DiffusionModelConfig +from tensorrt_llm._torch.visual_gen.modules.attention import Attention, QKVMode +from tensorrt_llm._torch.visual_gen.parallelism import setup_sequence_parallelism +from tensorrt_llm._torch.visual_gen.quantization.loader import DynamicLinearWeightLoader +from tensorrt_llm.logger import logger +from tensorrt_llm.models.modeling_utils import QuantConfig + +# ========================================================================= +# 1. Rotary Positional Embeddings +# ========================================================================= + + +class WanRotaryPosEmbed(nn.Module): + def __init__( + self, + attention_head_dim: int, + patch_size: Tuple[int, int, int], + max_seq_len: int, + theta: float = 10000.0, + ): + super().__init__() + self.patch_size = patch_size + + # Split logic matches Hugging Face exactly + self.h_dim = 2 * (attention_head_dim // 6) + self.w_dim = 2 * (attention_head_dim // 6) + self.t_dim = attention_head_dim - self.h_dim - self.w_dim + + freqs_cos, freqs_sin = [], [] + + # Order: Time, Height, Width + for dim in [self.t_dim, self.h_dim, self.w_dim]: + # High precision generation + freqs = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float64) / dim)) + t = torch.arange(max_seq_len, dtype=torch.float64) + freqs = torch.outer(t, freqs) + + # Interleaved Pattern [c0, c0, c1, c1] + freqs_cos.append(freqs.cos().repeat_interleave(2, dim=-1).float()) + freqs_sin.append(freqs.sin().repeat_interleave(2, dim=-1).float()) + + self.register_buffer("freqs_cos", torch.cat(freqs_cos, dim=1), persistent=False) + self.register_buffer("freqs_sin", torch.cat(freqs_sin, dim=1), persistent=False) + + def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + # Robust shape unpacking + b, c, f, h, w = hidden_states.shape + p_t, p_h, p_w = self.patch_size + ppf, pph, ppw = f // p_t, h // p_h, w // p_w + + split_sizes = [self.t_dim, self.h_dim, self.w_dim] + freqs_cos = self.freqs_cos.split(split_sizes, dim=1) + freqs_sin = self.freqs_sin.split(split_sizes, dim=1) + + # Broadcast frequencies to 3D grid: [Time, Height, Width] + f_cos_t = freqs_cos[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) + f_sin_t = freqs_sin[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) + + f_cos_h = freqs_cos[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) + f_sin_h = freqs_sin[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) + + f_cos_w = freqs_cos[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) + f_sin_w = freqs_sin[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) + + # Concatenate and flatten for Attention [1, SeqLen, 1, Dim] (SHD format) + # New Attention module applies RoPE in [B, S, H, D] layout before reshaping to [B, H, S, D] + return ( + torch.cat([f_cos_t, f_cos_h, f_cos_w], dim=-1).flatten(0, 2).unsqueeze(0).unsqueeze(2), + torch.cat([f_sin_t, f_sin_h, f_sin_w], dim=-1).flatten(0, 2).unsqueeze(0).unsqueeze(2), + ) + + +# ========================================================================= +# 2. 
Embeddings & Attention +# ========================================================================= + + +class WanImageEmbedding(nn.Module): + """Image embedding for I2V models (Wan 2.1/2.2).""" + + def __init__( + self, + in_features: int, + out_features: int, + pos_embed_seq_len: int = None, + model_config: DiffusionModelConfig = None, + ): + super().__init__() + dtype = model_config.torch_dtype if model_config else None + # LayerNorm weights in fp32 (matches internal float32 normalization; avoids bf16/fp32 mismatch). + self.norm1 = LayerNorm( + hidden_size=in_features, eps=1e-6, dtype=torch.float32, has_weights=True, has_bias=True + ) + + # Match HF FeedForward structure: Linear(in, in) → GELU → Linear(in, out) + self.ff_in = Linear( + in_features, + in_features, + bias=True, + dtype=dtype, + mapping=model_config.mapping if model_config else None, + quant_config=model_config.quant_config if model_config else None, + skip_create_weights_in_init=model_config.skip_create_weights_in_init + if model_config + else False, + force_dynamic_quantization=model_config.force_dynamic_quantization + if model_config + else False, + ) + self.ff_out = Linear( + in_features, + out_features, + bias=True, + dtype=dtype, + mapping=model_config.mapping if model_config else None, + quant_config=model_config.quant_config if model_config else None, + skip_create_weights_in_init=model_config.skip_create_weights_in_init + if model_config + else False, + force_dynamic_quantization=model_config.force_dynamic_quantization + if model_config + else False, + ) + + self.norm2 = LayerNorm( + hidden_size=out_features, eps=1e-6, dtype=torch.float32, has_weights=True, has_bias=True + ) + + if pos_embed_seq_len is not None: + self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_seq_len, in_features)) + else: + self.pos_embed = None + + def forward(self, encoder_hidden_states_image: torch.Tensor) -> torch.Tensor: + if self.pos_embed is not None: + batch_size, seq_len, embed_dim = encoder_hidden_states_image.shape + encoder_hidden_states_image = encoder_hidden_states_image.view( + -1, 2 * seq_len, embed_dim + ) + encoder_hidden_states_image = encoder_hidden_states_image + self.pos_embed + + hidden_states = self.norm1(encoder_hidden_states_image) + hidden_states = self.ff_in(hidden_states) + hidden_states = F.gelu(hidden_states) + hidden_states = self.ff_out(hidden_states) + hidden_states = self.norm2(hidden_states) + return hidden_states + + +class WanTimeTextImageEmbedding(nn.Module): + def __init__( + self, + dim, + time_freq_dim, + time_proj_dim, + text_embed_dim, + model_config: DiffusionModelConfig, + image_embed_dim: int = None, + pos_embed_seq_len: int = None, + ): + super().__init__() + dtype = model_config.torch_dtype + quant_config = model_config.quant_config + skip_create_weights = model_config.skip_create_weights_in_init + force_dynamic_quant = model_config.force_dynamic_quantization + + self.timesteps_proj = Timesteps( + num_channels=time_freq_dim, flip_sin_to_cos=True, downscale_freq_shift=0 + ) + self.time_embedder = TimestepEmbedding(in_channels=time_freq_dim, time_embed_dim=dim) + self.act_fn = nn.SiLU() + + self.time_proj = Linear( + dim, + time_proj_dim, + dtype=dtype, + mapping=model_config.mapping, + quant_config=quant_config, + skip_create_weights_in_init=skip_create_weights, + force_dynamic_quantization=force_dynamic_quant, + ) + self.text_embedder = PixArtAlphaTextProjection(text_embed_dim, dim, act_fn="gelu_tanh") + + self.image_embedder = None + if image_embed_dim is not None: + self.image_embedder = 
WanImageEmbedding( + image_embed_dim, dim, pos_embed_seq_len=pos_embed_seq_len, model_config=model_config + ) + + def forward(self, timestep, encoder_hidden_states, encoder_hidden_states_image=None): + timestep = self.timesteps_proj(timestep) + + # Get time_embedder dtype + time_embedder_dtype = next(iter(self.time_embedder.parameters())).dtype + if timestep.dtype != time_embedder_dtype and time_embedder_dtype not in [ + torch.float8_e4m3fn, + torch.float8_e5m2, + ]: + timestep = timestep.to(time_embedder_dtype) + + temb = self.time_embedder(timestep).type_as(encoder_hidden_states) + + temb_proj = self.time_proj(self.act_fn(temb)) + + encoder_hidden_states = self.text_embedder(encoder_hidden_states) + + if encoder_hidden_states_image is not None and self.image_embedder is not None: + encoder_hidden_states_image = self.image_embedder(encoder_hidden_states_image) + + return temb, temb_proj, encoder_hidden_states, encoder_hidden_states_image + + +class WanBlock(nn.Module): + def __init__( + self, + model_config: DiffusionModelConfig, + _layer_idx: int, + added_kv_proj_dim: int = None, + ): + super().__init__() + config = model_config.pretrained_config + + if hasattr(config, "hidden_size"): + hidden_size = config.hidden_size + elif hasattr(config, "attention_head_dim") and hasattr(config, "num_attention_heads"): + hidden_size = config.attention_head_dim * config.num_attention_heads + else: + hidden_size = 1536 + + # Wan 2.1 1.3B defaults + num_heads = getattr(config, "num_attention_heads", 12) + head_dim = getattr(config, "attention_head_dim", 128) + ffn_dim = getattr(config, "ffn_dim", 8960) + eps = getattr(config, "eps", 1e-6) + + dtype = model_config.torch_dtype + quant_config = model_config.quant_config + skip_create_weights = model_config.skip_create_weights_in_init + force_dynamic_quant = model_config.force_dynamic_quantization + + # Store for I2V reshaping logic + self.num_heads = num_heads + self.head_dim = head_dim + + # LayerNorm weights in fp32 (matches internal float32 normalization; avoids bf16/fp32 mismatch). 
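+        # norm1/norm3 are weightless (has_weights=False): their scale and shift
+        # come from the per-step modulation computed in forward(), where
+        # scale_shift_table + temb is chunked into 6 pieces (shift/scale/gate
+        # for self-attention and for the FFN).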
+ self.norm1 = LayerNorm( + hidden_size=hidden_size, eps=eps, dtype=torch.float32, has_weights=False, has_bias=False + ) + + # Self-attention with fused QKV + self.attn1 = Attention( + hidden_size=hidden_size, + num_attention_heads=num_heads, + head_dim=head_dim, + qkv_mode=QKVMode.FUSE_QKV, + qk_norm=True, + eps=eps, + config=model_config, + layer_idx=_layer_idx, + ) + + # Cross-attention with separate Q, K, V + self.attn2 = Attention( + hidden_size=hidden_size, + num_attention_heads=num_heads, + head_dim=head_dim, + qkv_mode=QKVMode.SEPARATE_QKV, + qk_norm=True, + eps=eps, + config=model_config, + layer_idx=_layer_idx, + ) + + self.norm2 = LayerNorm( + hidden_size=hidden_size, eps=eps, dtype=torch.float32, has_weights=True, has_bias=True + ) + self.norm3 = LayerNorm( + hidden_size=hidden_size, eps=eps, dtype=torch.float32, has_weights=False, has_bias=False + ) + + self.ffn = MLP( + hidden_size=hidden_size, + intermediate_size=ffn_dim, + bias=True, + activation=lambda x: F.gelu(x, approximate="tanh"), + dtype=dtype, + config=model_config, + layer_idx=_layer_idx, + reduce_output=False, + ) + + # I2V: Additional K/V projections for image embeddings + self.add_k_proj = self.add_v_proj = None + self.norm_added_k = None + if added_kv_proj_dim is not None: + self.add_k_proj = Linear( + added_kv_proj_dim, + hidden_size, + dtype=dtype, + mapping=model_config.mapping, + quant_config=quant_config, + skip_create_weights_in_init=skip_create_weights, + force_dynamic_quantization=force_dynamic_quant, + ) + self.add_v_proj = Linear( + added_kv_proj_dim, + hidden_size, + dtype=dtype, + mapping=model_config.mapping, + quant_config=quant_config, + skip_create_weights_in_init=skip_create_weights, + force_dynamic_quantization=force_dynamic_quant, + ) + self.norm_added_k = RMSNorm( + hidden_size=hidden_size, eps=eps, dtype=dtype, has_weights=True + ) + + # Use torch.empty().normal_(std=...) 
instead of torch.randn()/scale for MetaInitMode compatibility + self.scale_shift_table = nn.Parameter( + torch.empty(1, 6, hidden_size).normal_(std=hidden_size**-0.5) + ) + + def forward( + self, + x, + encoder_hidden_states, + temb, + freqs_cos, + freqs_sin, + ): + shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = ( + self.scale_shift_table.float() + temb.float() + ).chunk(6, dim=1) + + normed = self.norm1(x.float()) * (1 + scale_msa) + shift_msa + normed = normed.to(x.dtype) + + # Prepare frequencies for Attention + freqs = (freqs_cos, freqs_sin) if freqs_cos is not None and freqs_sin is not None else None + + # Self-attention with RoPE + x = ( + x.float() + + self.attn1( + normed, + freqs=freqs, + ).float() + * gate_msa + ).to(x.dtype) + + norm_x = self.norm2(x.float()).to(x.dtype) + + # I2V: Split encoder_hidden_states into image and text parts if needed + encoder_hidden_states_img = None + encoder_hidden_states_text = encoder_hidden_states + if self.add_k_proj is not None: + image_context_length = encoder_hidden_states.shape[1] - 512 + encoder_hidden_states_img = encoder_hidden_states[:, :image_context_length] + encoder_hidden_states_text = encoder_hidden_states[:, image_context_length:] + + # Text cross-attention + attn2_output = self.attn2(norm_x, encoder_hidden_states=encoder_hidden_states_text) + + # I2V: Additional image cross-attention if image embeddings are present + if encoder_hidden_states_img is not None: + batch_size, seq_len = norm_x.shape[:2] + + query = self.attn2.get_qkv(norm_x, None)[0] # Q only + query, _ = self.attn2.apply_qk_norm(query, query) + + key_img = self.add_k_proj(encoder_hidden_states_img) + value_img = self.add_v_proj(encoder_hidden_states_img) + key_img = self.norm_added_k(key_img) + + query = query.view(batch_size, seq_len, self.num_heads, self.head_dim) + key_img = key_img.view( + batch_size, encoder_hidden_states_img.shape[1], self.num_heads, self.head_dim + ) + value_img = value_img.view( + batch_size, encoder_hidden_states_img.shape[1], self.num_heads, self.head_dim + ) + + attn_img_output = self.attn2._attn_impl( + query, + key_img, + value_img, + batch_size=batch_size, + seq_len=seq_len, + kv_seq_len=encoder_hidden_states_img.shape[1], + ) + + attn2_output = attn2_output + attn_img_output + + x = x + attn2_output + + # 3. Feed-forward + normed = self.norm3(x.float()) * (1 + c_scale_msa) + c_shift_msa + normed = normed.to(x.dtype) + + x = (x.float() + self.ffn(normed).float() * c_gate_msa).to(x.dtype) + + return x + + +class WanTransformer3DModel(nn.Module): + _supports_gradient_checkpointing = True + + def __init__( + self, + model_config: DiffusionModelConfig, + ): + super().__init__() + + self.model_config = model_config + + # Validate no tensor parallelism + if model_config.parallel.dit_tp_size > 1: + raise ValueError( + f"WAN does not support tensor parallelism. 
" + f"Got dit_tp_size={model_config.parallel.dit_tp_size}" + ) + + # Setup sequence parallelism (Ulysses) + num_heads = getattr(model_config.pretrained_config, "num_attention_heads", 12) + self.use_ulysses, self.ulysses_size, self.ulysses_pg, self.ulysses_rank = ( + setup_sequence_parallelism( + model_config=model_config, + num_attention_heads=num_heads, + ) + ) + + config = model_config.pretrained_config + + dtype = model_config.torch_dtype + quant_config = model_config.quant_config + skip_create_weights = model_config.skip_create_weights_in_init + force_dynamic_quant = model_config.force_dynamic_quantization + + if hasattr(config, "hidden_size"): + hidden_size = config.hidden_size + elif hasattr(config, "attention_head_dim") and hasattr(config, "num_attention_heads"): + hidden_size = config.attention_head_dim * config.num_attention_heads + else: + hidden_size = 1536 # Wan 1.3B default + + num_layers = getattr(config, "num_layers", 30) + attention_head_dim = getattr(config, "attention_head_dim", 128) + in_channels = getattr(config, "in_channels", 16) + out_channels = getattr(config, "out_channels", 16) + text_dim = getattr(config, "text_dim", 4096) + freq_dim = getattr(config, "freq_dim", 256) + patch_size = getattr(config, "patch_size", [1, 2, 2]) + image_embed_dim = getattr(config, "image_dim", None) # e.g., 1280 for I2V + added_kv_proj_dim = getattr(config, "added_kv_proj_dim", None) + pos_embed_seq_len = getattr(config, "pos_embed_seq_len", None) + + # Calculate FFN Dim + ffn_dim = getattr(config, "ffn_dim", None) + if ffn_dim is None: + ffn_dim = ( + 13824 + if hidden_size == 5120 + else (8960 if hidden_size == 1536 else int(hidden_size * 4)) + ) + + # Store config for unpatchify and pipeline compatibility + self.config = type( + "Config", + (), + { + "patch_size": patch_size, + "hidden_size": hidden_size, + "image_dim": image_embed_dim, + "in_channels": in_channels, + "out_channels": out_channels, + "num_layers": num_layers, + }, + )() + + self.patch_embedding = nn.Conv3d( + in_channels, + hidden_size, + kernel_size=patch_size, + stride=patch_size, + dtype=dtype, # use model's target dtype (bf16) + ) + + self.condition_embedder = WanTimeTextImageEmbedding( + dim=hidden_size, + time_freq_dim=freq_dim, + time_proj_dim=hidden_size * 6, + text_embed_dim=text_dim, + model_config=model_config, + image_embed_dim=image_embed_dim, + pos_embed_seq_len=pos_embed_seq_len, + ) + + self.blocks = nn.ModuleList( + [ + WanBlock( + model_config=model_config, + _layer_idx=i, + added_kv_proj_dim=added_kv_proj_dim, + ) + for i in range(num_layers) + ] + ) + + self.rope = WanRotaryPosEmbed(attention_head_dim, patch_size, max_seq_len=1024) + + # LayerNorm weights in fp32 (matches internal float32 normalization; avoids bf16/fp32 mismatch). + self.norm_out = LayerNorm( + hidden_size=hidden_size, + eps=1e-6, + dtype=torch.float32, + has_weights=False, + has_bias=False, + ) + + self.proj_out = Linear( + hidden_size, + out_channels * math.prod(patch_size), + dtype=dtype, + mapping=model_config.mapping, + quant_config=quant_config, + skip_create_weights_in_init=skip_create_weights, + force_dynamic_quantization=force_dynamic_quant, + ) + # Use torch.empty().normal_(std=...) 
instead of torch.randn()/scale for MetaInitMode compatibility + self.scale_shift_table = nn.Parameter( + torch.empty(1, 2, hidden_size).normal_(std=hidden_size**-0.5) + ) + + self.__post_init__() + + @property + def device(self): + return get_parameter_device(self) + + def __post_init__(self): + self.apply_quant_config_exclude_modules() + + for _, module in self.named_modules(): + if callable(getattr(module, "create_weights", None)): + module.create_weights() + + def apply_quant_config_exclude_modules(self): + quant_config = self.model_config.quant_config + if quant_config is None or quant_config.exclude_modules is None: + return + + kv_cache_quant_algo = quant_config.kv_cache_quant_algo if quant_config else None + no_quant_config = QuantConfig(kv_cache_quant_algo=kv_cache_quant_algo) + + for name, module in self.named_modules(): + if isinstance(module, Linear): + is_excluded = quant_config.is_module_excluded_from_quantization(name) + if is_excluded and getattr(module, "quant_config", None) is not None: + module.quant_config = no_quant_config + + def unpatchify(self, x, original_shape): + N, C, T, H, W = original_shape + pt, ph, pw = self.config.patch_size + gt, gh, gw = T // pt, H // ph, W // pw + # Use output channels instead of input channels for unpatchifying + out_channels = self.proj_out.out_features // (pt * ph * pw) + return ( + x.view(N, gt, gh, gw, pt, ph, pw, out_channels) + .permute(0, 7, 1, 4, 2, 5, 3, 6) + .reshape(N, out_channels, T, H, W) + ) + + def forward( + self, + hidden_states, + timestep, + encoder_hidden_states, + encoder_hidden_states_image=None, + **kwargs, + ): + """ + Forward pass with optional Ulysses sequence parallelism. + + With Ulysses enabled (ulysses_size > 1): + 1. Shard input sequence across ranks: [B, S] -> [B, S/P] + 2. Each block's attention does internal all-to-all for full sequence + 3. Gather output sequence: [B, S/P] -> [B, S] + + When TeaCache is enabled, TeaCacheHook intercepts and replaces this call. + """ + original_shape = hidden_states.shape + B, C, T, H, W = original_shape + pt, ph, pw = self.config.patch_size + + # Generate WAN RoPE frequencies + freqs_cos, freqs_sin = self.rope(hidden_states) + + # Patchify and flatten: [B, C, T, H, W] -> [B, S, hidden_size] + x = self.patch_embedding(hidden_states).flatten(2).transpose(1, 2) + + # Shard sequence for Ulysses parallelism: [B, S] -> [B, S/P] + if self.use_ulysses: + seq_len = x.shape[1] + if seq_len % self.ulysses_size != 0: + raise ValueError( + f"Sequence length ({seq_len}) is not divisible by ulysses_size ({self.ulysses_size}). " + f"Adjust video dimensions or use a different ulysses_size." 
+ ) + + chunk_size = seq_len // self.ulysses_size + x = x[:, self.ulysses_rank * chunk_size : (self.ulysses_rank + 1) * chunk_size, :] + + # Shard RoPE frequencies to match sequence sharding + # RoPE freqs shape: [B, S, ...], so shard along dim 1 (sequence dimension) + if freqs_cos is not None and freqs_sin is not None: + freqs_cos = freqs_cos[ + :, self.ulysses_rank * chunk_size : (self.ulysses_rank + 1) * chunk_size + ] + freqs_sin = freqs_sin[ + :, self.ulysses_rank * chunk_size : (self.ulysses_rank + 1) * chunk_size + ] + + # Time and text/image embeddings + temb, temb_proj, encoder_hidden_states, encoder_hidden_states_image = ( + self.condition_embedder(timestep, encoder_hidden_states, encoder_hidden_states_image) + ) + temb_proj = temb_proj.view(-1, 6, self.config.hidden_size) + + # I2V: Concatenate image and text embeddings if image embeddings are provided + if encoder_hidden_states_image is not None: + # Handle CFG: duplicate image embeddings if batch dimension is doubled + if encoder_hidden_states_image.shape[0] != encoder_hidden_states.shape[0]: + batch_multiplier = ( + encoder_hidden_states.shape[0] // encoder_hidden_states_image.shape[0] + ) + encoder_hidden_states_image = encoder_hidden_states_image.repeat( + batch_multiplier, 1, 1 + ) + encoder_hidden_states = torch.cat( + [encoder_hidden_states_image, encoder_hidden_states], dim=1 + ) + + # Transformer blocks (attention handles all-to-all internally for Ulysses) + for block in self.blocks: + x = block( + x, + encoder_hidden_states, + temb_proj, + freqs_cos, + freqs_sin, + ) + + # Gather sequence from all ranks: [B, S/P] -> [B, S] + if self.use_ulysses: + # Ensure tensor is contiguous before all_gather + x = x.contiguous() + x_list = [torch.zeros_like(x) for _ in range(self.ulysses_size)] + torch.distributed.all_gather(x_list, x, group=self.ulysses_pg) + x = torch.cat(x_list, dim=1) + + # Output projection and unpatchify + shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1) + x = self.norm_out(x) * (1 + scale) + shift + x = x.to(hidden_states.dtype) + + return self.unpatchify(self.proj_out(x), original_shape) + + def load_weights(self, weights: dict) -> None: + # Remap checkpoint keys to match model structure + remapped_weights = {} + for key, value in weights.items(): + # Remap transformer block FFN keys + if ".ffn.net.0.proj." in key: + new_key = key.replace(".ffn.net.0.proj.", ".ffn.up_proj.") + remapped_weights[new_key] = value + elif ".ffn.net.2." in key: + new_key = key.replace(".ffn.net.2.", ".ffn.down_proj.") + remapped_weights[new_key] = value + # Remap image embedder FF keys + elif ".image_embedder.ff.net.0.proj." in key: + new_key = key.replace(".image_embedder.ff.net.0.proj.", ".image_embedder.ff_in.") + remapped_weights[new_key] = value + elif ".image_embedder.ff.net.2." in key: + new_key = key.replace(".image_embedder.ff.net.2.", ".image_embedder.ff_out.") + remapped_weights[new_key] = value + # Remap I2V attention keys + elif ".attn2.add_k_proj." in key: + new_key = key.replace(".attn2.add_k_proj.", ".add_k_proj.") + remapped_weights[new_key] = value + elif ".attn2.add_v_proj." in key: + new_key = key.replace(".attn2.add_v_proj.", ".add_v_proj.") + remapped_weights[new_key] = value + elif ".attn2.norm_added_k." 
in key: + new_key = key.replace(".attn2.norm_added_k.", ".norm_added_k.") + remapped_weights[new_key] = value + else: + remapped_weights[key] = value + + weights = remapped_weights + + # Handle root-level parameters (filter_weights doesn't work for empty prefix) + for param_name, param in self._parameters.items(): + if param is not None and param_name in weights: + param.data.copy_(weights[param_name].to(self.model_config.torch_dtype)) + + params_map = { + "qkv_proj": ["to_q", "to_k", "to_v"], + } + loader = DynamicLinearWeightLoader(self.model_config, params_map=params_map) + + for name, module in tqdm(self.named_modules(), desc="Loading weights"): + if len(module._parameters) == 0: + continue + + if isinstance(module, Linear): + weight_dicts = loader.get_linear_weights(module, name, weights) + + if weight_dicts: + loader.load_linear_weights(module, name, weight_dicts) + elif "add_k_proj" in name or "add_v_proj" in name: + logger.info(f"[Weight Loading] No weights found for I2V module: {name}") + else: + module_weights = loader.filter_weights(name, weights) + for param_name, param in module._parameters.items(): + if param is not None and param_name in module_weights: + param.data.copy_( + module_weights[param_name].to(self.model_config.torch_dtype) + ) + + def post_load_weights(self) -> None: + """Call post_load_weights on all Linear modules and convert embedders to target dtype.""" + # Convert condition_embedder components to target dtype + target_dtype = self.model_config.torch_dtype + if hasattr(self.condition_embedder, "time_embedder"): + self.condition_embedder.time_embedder.to(target_dtype) + if hasattr(self.condition_embedder, "text_embedder"): + self.condition_embedder.text_embedder.to(target_dtype) + + # Call post_load_weights on all Linear modules + for _, module in self.named_modules(): + if isinstance(module, Linear): + module.post_load_weights() diff --git a/tensorrt_llm/_torch/visual_gen/modules/__init__.py b/tensorrt_llm/_torch/visual_gen/modules/__init__.py new file mode 100644 index 0000000000..93636ddb53 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/modules/__init__.py @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Visual Generation Modules + +This module provides modular neural network components for visual generation models. 
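+
+Example (illustrative sketch; ``model_config`` is assumed to be a populated
+``DiffusionModelConfig`` and ``hidden_states`` a ``[B, S, 1536]`` tensor):
+
+    from tensorrt_llm._torch.visual_gen.modules import Attention, QKVMode
+
+    # Self-attention with a fused QKV projection (Wan 2.1 1.3B-like sizes).
+    attn = Attention(
+        hidden_size=1536,
+        num_attention_heads=12,
+        head_dim=128,
+        qkv_mode=QKVMode.FUSE_QKV,
+        config=model_config,
+        layer_idx=0,
+    )
+    out = attn(hidden_states)  # -> [B, S, 1536]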
+""" + +from .attention import Attention, QKVMode + +__all__ = [ + "Attention", + "QKVMode", +] diff --git a/tensorrt_llm/_torch/visual_gen/modules/attention.py b/tensorrt_llm/_torch/visual_gen/modules/attention.py new file mode 100644 index 0000000000..0c83bf5e28 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/modules/attention.py @@ -0,0 +1,284 @@ +from enum import Enum +from typing import TYPE_CHECKING, Optional, Tuple + +import torch +import torch.nn as nn + +from ...modules.linear import Linear, WeightMode, WeightsLoadingConfig +from ...modules.rms_norm import RMSNorm +from ..attention_backend.interface import AttentionTensorLayout +from ..attention_backend.utils import create_attention + +if TYPE_CHECKING: + from ..config import DiffusionModelConfig + + +class QKVMode(str, Enum): + FUSE_QKV = "fuse_qkv" + FUSE_KV = "fuse_kv" + SEPARATE_QKV = "separate" + + +# TODO: torch compile +def apply_rotary_emb( + x: torch.Tensor, freqs_cos: torch.Tensor, freqs_sin: torch.Tensor +) -> torch.Tensor: + freqs_cos = freqs_cos.to(x.dtype) + freqs_sin = freqs_sin.to(x.dtype) + x1, x2 = x.unflatten(-1, (-1, 2)).unbind(-1) # [B, S, H, D/2] + + cos = freqs_cos[..., 0::2] + sin = freqs_sin[..., 1::2] + + return torch.stack([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1).flatten(-2) + + +class Attention(nn.Module): + """Attention module for visual generation models.""" + + def __init__( + self, + hidden_size: int, + num_attention_heads: int, + num_key_value_heads: Optional[int] = None, + head_dim: Optional[int] = None, + qkv_mode: QKVMode = QKVMode.FUSE_QKV, + qk_norm: bool = True, + eps: float = 1e-6, # TODO: remove this, we should add this to the config + config: Optional["DiffusionModelConfig"] = None, + layer_idx: Optional[int] = None, + ): + super().__init__() + + config = config or DiffusionModelConfig() + self.dtype = config.torch_dtype + self.quant_config = config.quant_config + self.skip_create_weights_in_init = config.skip_create_weights_in_init + self.force_dynamic_quantization = config.force_dynamic_quantization + self.mapping = getattr(config, "mapping", None) + + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads or num_attention_heads + self.head_dim = head_dim or (hidden_size // num_attention_heads) + self.qkv_mode = QKVMode(qkv_mode) if isinstance(qkv_mode, str) else qkv_mode + + # Select compute backend (orthogonal to parallelism) + ulysses_size = config.parallel.dit_ulysses_size + base_backend = config.attention.backend + + if self.qkv_mode == QKVMode.SEPARATE_QKV: + backend_name = "VANILLA" # Cross-attention requires VANILLA + else: + backend_name = base_backend + self.attn_backend = backend_name + self.qk_norm = qk_norm + self.layer_idx = layer_idx if layer_idx is not None else 0 + self.eps = eps + + self.q_dim = self.num_attention_heads * self.head_dim + self.kv_dim = self.num_key_value_heads * self.head_dim + + self._init_qkv_proj() + + if self.qk_norm: + self.norm_q = RMSNorm( + hidden_size=self.q_dim, eps=self.eps, dtype=self.dtype, has_weights=True + ) + self.norm_k = RMSNorm( + hidden_size=self.kv_dim, eps=self.eps, dtype=self.dtype, has_weights=True + ) + + # TODO: Use weight mapper to create just a Linear module + self.to_out = nn.ModuleList( + [ + Linear( + self.q_dim, + self.hidden_size, + dtype=self.dtype, + mapping=self.mapping, + quant_config=self.quant_config, + skip_create_weights_in_init=self.skip_create_weights_in_init, + force_dynamic_quantization=self.force_dynamic_quantization, + ) 
+ ] + ) + + # Compute head counts for the backend + # Ulysses shards heads across workers; inner backend sees sharded count + if ulysses_size > 1 and self.qkv_mode != QKVMode.SEPARATE_QKV: + backend_num_heads = self.num_attention_heads // ulysses_size + backend_num_kv_heads = self.num_key_value_heads // ulysses_size + else: + backend_num_heads = self.num_attention_heads + backend_num_kv_heads = self.num_key_value_heads + + # Create compute backend + self.attn = create_attention( + backend=backend_name, + layer_idx=self.layer_idx, + num_heads=backend_num_heads, + head_dim=self.head_dim, + num_kv_heads=backend_num_kv_heads, + quant_config=self.quant_config, + dtype=self.dtype, + ) + + # Wrap with parallelism strategy (orthogonal to backend choice) + if ulysses_size > 1 and self.qkv_mode != QKVMode.SEPARATE_QKV: + from ..attention_backend.parallel import UlyssesAttention + + process_group = getattr(config, "ulysses_process_group", None) + self.attn = UlyssesAttention( + inner_backend=self.attn, + process_group=process_group, + ) + + def _init_qkv_proj(self) -> None: + if self.qkv_mode == QKVMode.FUSE_QKV: + qkv_out_dim = self.q_dim + 2 * self.kv_dim + self.qkv_proj = Linear( + self.hidden_size, + qkv_out_dim, + dtype=self.dtype, + mapping=self.mapping, + quant_config=self.quant_config, + skip_create_weights_in_init=self.skip_create_weights_in_init, + force_dynamic_quantization=self.force_dynamic_quantization, + weights_loading_config=WeightsLoadingConfig( + weight_mode=WeightMode.FUSED_QKV_LINEAR + ), + fused_weight_shard_indices_mapping={ + "q": (0, self.q_dim), + "k": (self.q_dim, self.kv_dim), + "v": (self.q_dim + self.kv_dim, self.kv_dim), + }, + ) + else: + self.to_q = Linear( + self.hidden_size, + self.q_dim, + dtype=self.dtype, + mapping=self.mapping, + quant_config=self.quant_config, + skip_create_weights_in_init=self.skip_create_weights_in_init, + force_dynamic_quantization=self.force_dynamic_quantization, + ) + self.to_k = Linear( + self.hidden_size, + self.kv_dim, + dtype=self.dtype, + mapping=self.mapping, + quant_config=self.quant_config, + skip_create_weights_in_init=self.skip_create_weights_in_init, + force_dynamic_quantization=self.force_dynamic_quantization, + ) + self.to_v = Linear( + self.hidden_size, + self.kv_dim, + dtype=self.dtype, + mapping=self.mapping, + quant_config=self.quant_config, + skip_create_weights_in_init=self.skip_create_weights_in_init, + force_dynamic_quantization=self.force_dynamic_quantization, + ) + + def get_qkv( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + if self.qkv_mode == QKVMode.FUSE_QKV: + qkv = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_dim, self.kv_dim, self.kv_dim], dim=-1) + else: + kv_source = ( + encoder_hidden_states if encoder_hidden_states is not None else hidden_states + ) + q = self.to_q(hidden_states) + k = self.to_k(kv_source) + v = self.to_v(kv_source) + return q, k, v + + def apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + if self.qk_norm: + q = self.norm_q(q) + k = self.norm_k(k) + return q, k + + def _attn_impl( + self, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + batch_size: Optional[int] = None, + seq_len: Optional[int] = None, + kv_seq_len: Optional[int] = None, + ) -> torch.Tensor: + """ + Call attention backend with appropriate tensor layout. + + Two layout paths: + 1. HND backends (VANILLA): [B, S, H*D] -> [B, H, S, D] + 2. 
NHD backends (TRTLLM, UlyssesAttention): [B, S, H*D] -> [B, S, H, D] + """ + backend_layout = getattr(self.attn, "preferred_layout", AttentionTensorLayout.NHD) + + batch_size = batch_size or q.shape[0] + seq_len = seq_len or q.shape[1] + kv_seq_len = kv_seq_len or k.shape[1] + + # Reshape inputs: [B, S, H*D] -> backend's preferred 4D layout + if backend_layout == AttentionTensorLayout.HND: + q = q.view(batch_size, -1, self.num_attention_heads, self.head_dim).transpose(1, 2) + k = k.view(batch_size, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) + v = v.view(batch_size, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) + else: + q = q.view(batch_size, -1, self.num_attention_heads, self.head_dim) + k = k.view(batch_size, -1, self.num_key_value_heads, self.head_dim) + v = v.view(batch_size, -1, self.num_key_value_heads, self.head_dim) + + # Call backend + out = self.attn.forward( + q=q, + k=k, + v=v, + batch_size=batch_size, + seq_len=seq_len, + seq_len_kv=kv_seq_len if kv_seq_len != seq_len else None, + ) + + # Flatten back to [B, S, H*D] + if backend_layout == AttentionTensorLayout.HND: + return out.transpose(1, 2).flatten(2) + else: + return out.flatten(2) + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + freqs: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + assert hidden_states.ndim == 3, "hidden_states must be a 3D tensor" + batch_size, seq_len = hidden_states.shape[:2] + kv_seq_len = ( + encoder_hidden_states.shape[1] if encoder_hidden_states is not None else seq_len + ) + + q, k, v = self.get_qkv(hidden_states, encoder_hidden_states) + q, k = self.apply_qk_norm(q, k) + + # Apply RoPE if provided (model handles RoPE, not attention backend) + if freqs is not None: + freqs_cos, freqs_sin = freqs + q = q.view(batch_size, seq_len, self.num_attention_heads, self.head_dim) # [B, S, H, D] + k = k.view(batch_size, kv_seq_len, self.num_key_value_heads, self.head_dim) + q = apply_rotary_emb(q, freqs_cos, freqs_sin) + k = apply_rotary_emb(k, freqs_cos, freqs_sin) + q = q.flatten(2) + k = k.flatten(2) + + out = self._attn_impl(q, k, v, batch_size, seq_len, kv_seq_len) + out = self.to_out[0](out) + return out diff --git a/tensorrt_llm/_torch/visual_gen/output.py b/tensorrt_llm/_torch/visual_gen/output.py new file mode 100644 index 0000000000..d39c15b0c0 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/output.py @@ -0,0 +1,29 @@ +"""Output dataclass for visual generation models.""" + +from dataclasses import dataclass +from typing import Optional + +import torch + + +@dataclass +class MediaOutput: + """Unified output for all visual generation models. + + Different models populate different fields: + - FLUX2: image only + - WAN: video only + - LTX2: video + audio + + Attributes: + image: Generated image as torch tensor with shape (height, width, channels) and dtype uint8. + Populated by FLUX2 for text-to-image generation. + video: Generated video frames as torch tensor with shape (num_frames, height, width, channels) and dtype uint8. + Populated by WAN and LTX2 for text-to-video generation. + audio: Generated audio as torch tensor with dtype float32. + Populated by LTX2 for text-to-video-with-audio generation. 
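+
+    Example:
+        A minimal sketch of consuming a WAN video result (``frames`` is
+        assumed to be a uint8 tensor of shape ``[num_frames, H, W, C]``)::
+
+            out = MediaOutput(video=frames)
+            if out.video is not None:
+                num_frames, height, width, channels = out.video.shape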
+ """ + + image: Optional[torch.Tensor] = None + video: Optional[torch.Tensor] = None + audio: Optional[torch.Tensor] = None diff --git a/tensorrt_llm/_torch/visual_gen/parallelism.py b/tensorrt_llm/_torch/visual_gen/parallelism.py new file mode 100644 index 0000000000..1bda600fa0 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/parallelism.py @@ -0,0 +1,100 @@ +"""Utilities for distributed parallelism setup in diffusion models.""" + +from typing import Optional, Tuple + +import torch.distributed as dist + +from tensorrt_llm._torch.visual_gen.config import DiffusionModelConfig + + +def setup_sequence_parallelism( + model_config: DiffusionModelConfig, + num_attention_heads: int, +) -> Tuple[bool, int, Optional[dist.ProcessGroup], int]: + """ + Setup sequence parallelism (currently Ulysses only) with CFG support. + + Creates nested process groups where each CFG group has its own Ulysses group. + Example with cfg_size=2, ulysses_size=2, world_size=4: + GPU 0-1: CFG group 0, Ulysses group 0 + GPU 2-3: CFG group 1, Ulysses group 1 + + Args: + model_config: Model configuration containing parallel settings + num_attention_heads: Number of attention heads in the model + + Returns: + Tuple of (use_parallelism, parallelism_size, parallelism_pg, parallelism_rank): + - use_parallelism: Whether sequence parallelism is enabled + - parallelism_size: The sequence parallelism degree + - parallelism_pg: The process group for this rank (or None) + - parallelism_rank: This rank's position within its parallelism group + + Raises: + RuntimeError: If torch.distributed is not initialized + ValueError: If configuration is invalid (incompatible sizes, head count not divisible, etc.) + NotImplementedError: If Ring attention is requested (not yet implemented) + + Side Effects: + - Sets model_config.ulysses_process_group to the created process group + + Note: + Both num_attention_heads and sequence length must be divisible by ulysses_size. + Head count is validated here; sequence length is validated at runtime during forward pass. 
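+
+    Example:
+        With cfg_size=2 and ulysses_size=2 on world_size=4, rank 3 belongs to
+        CFG group 1 (ranks {2, 3}) and gets ulysses_rank = 3 - 1 * 2 = 1
+        within its Ulysses process group.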
+ """ + ulysses_size = model_config.parallel.dit_ulysses_size + ring_size = model_config.parallel.dit_ring_size + cfg_size = model_config.parallel.dit_cfg_size + + # Check for ring attention (not yet implemented) + if ring_size > 1: + raise NotImplementedError("Ring attention parallelism is not yet implemented") + + # Early exit if not using sequence parallelism + if ulysses_size <= 1: + model_config.ulysses_process_group = None + return False, 1, None, 0 + + # Validate distributed initialization + if not dist.is_initialized(): + raise RuntimeError( + "torch.distributed.init_process_group() must be called before " + "setting up sequence parallelism" + ) + + rank = dist.get_rank() + world_size = dist.get_world_size() + + # Validate total parallelism capacity + total_parallel = cfg_size * ulysses_size + if total_parallel > world_size: + raise ValueError( + f"cfg_size ({cfg_size}) * ulysses_size ({ulysses_size}) = " + f"{total_parallel} exceeds world_size ({world_size})" + ) + + # Validate head count divisibility + if num_attention_heads % ulysses_size != 0: + raise ValueError( + f"num_attention_heads ({num_attention_heads}) must be divisible by " + f"ulysses_size ({ulysses_size})" + ) + + # Create nested process groups + # Each CFG group has its own Ulysses group + ulysses_pg = None + ulysses_rank = 0 + + for cfg_id in range(cfg_size): + ulysses_ranks = list(range(cfg_id * ulysses_size, (cfg_id + 1) * ulysses_size)) + pg = dist.new_group(ulysses_ranks, use_local_synchronization=True) + + # Store if this rank belongs to this group + if rank in ulysses_ranks: + ulysses_pg = pg + ulysses_rank = rank - cfg_id * ulysses_size + + # Store in config for Attention modules + model_config.ulysses_process_group = ulysses_pg + + return True, ulysses_size, ulysses_pg, ulysses_rank diff --git a/tensorrt_llm/_torch/visual_gen/pipeline.py b/tensorrt_llm/_torch/visual_gen/pipeline.py new file mode 100644 index 0000000000..7876031df7 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/pipeline.py @@ -0,0 +1,544 @@ +import time +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple + +import torch +import torch.distributed as dist +import torch.nn as nn + +from tensorrt_llm.logger import logger +from tensorrt_llm.mapping import Mapping + +from .teacache import TeaCacheBackend + +if TYPE_CHECKING: + from .config import DiffusionModelConfig + + +class BasePipeline(nn.Module): + """ + Base class for diffusion pipelines. 
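+
+    Subclasses (e.g. the WAN pipelines) implement ``_init_transformer``,
+    ``infer``, ``forward``, and ``load_standard_components``; this base class
+    provides the shared denoising loop, CFG (optionally CFG-parallel)
+    handling, TeaCache setup, and VAE-decode helpers.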
+    """
+
+    def __init__(self, model_config: "DiffusionModelConfig"):
+        super().__init__()
+        self.model_config = model_config
+        self.config = model_config.pretrained_config
+        self.mapping: Mapping = getattr(model_config, "mapping", None) or Mapping()
+
+        # Components
+        self.transformer: Optional[nn.Module] = None
+        self.vae: Optional[nn.Module] = None
+        self.text_encoder: Optional[nn.Module] = None
+        self.tokenizer: Optional[Any] = None
+        self.scheduler: Optional[Any] = None
+
+        # Initialize transformer
+        self._init_transformer()
+
+    @property
+    def rank(self):
+        return dist.get_rank() if dist.is_initialized() else 0
+
+    @property
+    def world_size(self):
+        return dist.get_world_size() if dist.is_initialized() else 1
+
+    @property
+    def dtype(self):
+        # Guard against a transformer that has not been created yet (it is
+        # assigned in _init_transformer); fall back to fp32 rather than
+        # raising on None.
+        if getattr(self, "transformer", None) is not None:
+            return next(self.transformer.parameters()).dtype
+        return torch.float32
+
+    @property
+    def device(self):
+        return self.transformer.device
+
+    def infer(self, req: Any):
+        raise NotImplementedError
+
+    def _init_transformer(self) -> None:
+        raise NotImplementedError
+
+    def forward(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def load_standard_components(
+        self,
+        checkpoint_dir: str,
+        device: torch.device,
+        skip_components: Optional[list] = None,
+    ) -> None:
+        raise NotImplementedError
+
+    def load_weights(self, weights: Dict[str, torch.Tensor]) -> None:
+        if self.transformer is not None and hasattr(self.transformer, "load_weights"):
+            self.transformer.load_weights(weights)
+
+    def post_load_weights(self) -> None:
+        if self.transformer is not None and hasattr(self.transformer, "post_load_weights"):
+            self.transformer.post_load_weights()
+
+    def _setup_teacache(self, model, coefficients: Optional[Dict] = None):
+        """Setup TeaCache optimization for the transformer model.
+
+        TeaCache caches transformer block outputs when timestep embeddings change slowly,
+        reducing computation during the denoising loop.
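+
+        Example (illustrative table shape; the coefficient lists are
+        placeholders, not real polynomial values)::
+
+            coefficients = {
+                "1.3B": {"ret_steps": [...], "standard": [...]},
+                "14B": {"ret_steps": [...], "standard": [...]},
+            }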
+ + Args: + model: The transformer model to optimize + coefficients: Optional dict of model-specific polynomial coefficients for cache decisions + Format: {model_size: {"ret_steps": [...], "standard": [...]}} + """ + self.cache_backend = None + + # Get teacache config from model_config (always present now) + teacache_cfg = self.model_config.teacache + if not teacache_cfg.enable_teacache: + return + + # Apply model-specific polynomial coefficients + # Coefficients are used to rescale embedding distances for cache decisions + if coefficients: + checkpoint_path = ( + getattr(self.model_config.pretrained_config, "_name_or_path", "") or "" + ) + for model_size, coeff_data in coefficients.items(): + # Match model size in path (case-insensitive, e.g., "1.3B", "14B", "dev") + if model_size.lower() in checkpoint_path.lower(): + if isinstance(coeff_data, dict): + # Select coefficient set based on warmup mode + mode = "ret_steps" if teacache_cfg.use_ret_steps else "standard" + if mode in coeff_data: + teacache_cfg.coefficients = coeff_data[mode] + logger.info(f"TeaCache: Using {model_size} coefficients ({mode} mode)") + else: + # Single coefficient list (no mode distinction) + teacache_cfg.coefficients = coeff_data + logger.info(f"TeaCache: Using {model_size} coefficients") + break + + # Initialize and enable TeaCache backend + logger.info("TeaCache: Initializing...") + self.cache_backend = TeaCacheBackend(teacache_cfg) + self.cache_backend.enable(model) + + def decode_latents( + self, + latents: torch.Tensor, + decode_fn: Callable[[torch.Tensor], Any], + extra_latents: Optional[Dict[str, Tuple[torch.Tensor, Callable]]] = None, + ): + """Execute VAE decoding. Only rank 0 performs decoding. + + Args: + latents: Primary latents to decode (e.g., video) + decode_fn: Decoder function for primary latents + extra_latents: Optional dict of additional latents to decode. + Format: {name: (latents_tensor, decode_fn)} + Example: {"audio": (audio_latents, audio_decode_fn)} + + Returns: + Single result if no extra_latents, tuple of results if extra_latents provided. + Non-rank-0 processes return None placeholders. + """ + if self.rank == 0: + primary_result = decode_fn(latents) + + if extra_latents: + extra_results = [] + for name, (extra_latent, extra_decode_fn) in extra_latents.items(): + extra_results.append(extra_decode_fn(extra_latent)) + return (primary_result,) + tuple(extra_results) + + return primary_result + + # Return None placeholders for non-rank-0 processes + n_results = 1 + (len(extra_latents) if extra_latents else 0) + return (None,) * n_results if n_results > 1 else None + + @staticmethod + def _rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """Rescale noise to fix overexposure (https://huggingface.co/papers/2305.08891).""" + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + + def _setup_cfg_config( + self, guidance_scale, prompt_embeds, neg_prompt_embeds, extra_cfg_tensors=None + ): + """Setup CFG parallel configuration. + + Args: + guidance_scale: CFG guidance scale + prompt_embeds: Positive prompt embeddings + neg_prompt_embeds: Negative prompt embeddings (None if already concatenated) + extra_cfg_tensors: Optional dict of additional tensors to split for CFG parallel. 
+ Format: {name: (positive_tensor, negative_tensor)} + Example: {"audio_embeds": (pos_audio, neg_audio), + "attention_mask": (pos_mask, neg_mask)} + + Returns: + Dict with CFG configuration including split tensors + """ + # Access parallel config directly (always present now) + cfg_size = self.model_config.parallel.dit_cfg_size + ulysses_size = self.model_config.parallel.dit_ulysses_size + + cfg_group = self.rank // ulysses_size + is_split_embeds = neg_prompt_embeds is not None + do_cfg_parallel = cfg_size >= 2 and guidance_scale > 1.0 + + local_extras = {} + + if do_cfg_parallel: + if self.rank == 0: + logger.info(f"CFG Parallel: cfg_size={cfg_size}, ulysses_size={ulysses_size}") + + # Split main embeddings + if is_split_embeds: + pos_embeds, neg_embeds = prompt_embeds, neg_prompt_embeds + else: + neg_embeds, pos_embeds = prompt_embeds.chunk(2) + + local_embeds = pos_embeds if cfg_group == 0 else neg_embeds + + # Split extra tensors if provided + if extra_cfg_tensors: + for name, (pos_tensor, neg_tensor) in extra_cfg_tensors.items(): + if pos_tensor is not None and neg_tensor is not None: + local_extras[name] = pos_tensor if cfg_group == 0 else neg_tensor + elif pos_tensor is not None: + # Only positive provided, use it for both + local_extras[name] = pos_tensor + else: + local_embeds = None + if is_split_embeds and guidance_scale > 1.0: + prompt_embeds = torch.cat([neg_prompt_embeds, prompt_embeds]) + + # For standard CFG, concatenate extra tensors + if extra_cfg_tensors: + for name, (pos_tensor, neg_tensor) in extra_cfg_tensors.items(): + if pos_tensor is not None and neg_tensor is not None and guidance_scale > 1.0: + local_extras[name] = torch.cat([neg_tensor, pos_tensor], dim=0) + elif pos_tensor is not None: + local_extras[name] = pos_tensor + + return { + "enabled": do_cfg_parallel, + "cfg_size": cfg_size, + "ulysses_size": ulysses_size, + "cfg_group": cfg_group, + "local_embeds": local_embeds, + "prompt_embeds": prompt_embeds, + "local_extras": local_extras, + } + + def _denoise_step_cfg_parallel( + self, + latents, + extra_stream_latents, + timestep, + local_embeds, + forward_fn, + guidance_scale, + guidance_rescale, + ulysses_size, + local_extras, + ): + """Execute single denoising step with CFG parallel.""" + t_start = time.time() + result = forward_fn(latents, extra_stream_latents, timestep, local_embeds, local_extras) + + # Handle return format: (primary_noise, extra_noises_dict) or just primary_noise + if isinstance(result, tuple) and len(result) == 2 and isinstance(result[1], dict): + noise_pred_local, extra_noise_locals = result + else: + noise_pred_local = result + extra_noise_locals = {} + + t_transformer = time.time() - t_start + + c_start = time.time() + + # All-gather primary noise + gather_list = [torch.empty_like(noise_pred_local) for _ in range(self.world_size)] + dist.all_gather(gather_list, noise_pred_local) + noise_cond = gather_list[0] + noise_uncond = gather_list[ulysses_size] + noise_pred = noise_uncond + guidance_scale * (noise_cond - noise_uncond) + + # All-gather extra stream noises + extra_noise_preds = {} + for name, noise_local in extra_noise_locals.items(): + gather_list_extra = [torch.empty_like(noise_local) for _ in range(self.world_size)] + dist.all_gather(gather_list_extra, noise_local) + noise_cond_extra = gather_list_extra[0] + noise_uncond_extra = gather_list_extra[ulysses_size] + extra_noise_preds[name] = noise_uncond_extra + guidance_scale * ( + noise_cond_extra - noise_uncond_extra + ) + + if guidance_rescale > 0.0: + extra_noise_preds[name] = 
self._rescale_noise_cfg( + extra_noise_preds[name], noise_cond_extra, guidance_rescale + ) + + if guidance_rescale > 0.0: + noise_pred = self._rescale_noise_cfg(noise_pred, noise_cond, guidance_rescale) + + t_cfg = time.time() - c_start + return noise_pred, extra_noise_preds, t_transformer, t_cfg + + def _denoise_step_standard( + self, + latents, + extra_stream_latents, + timestep, + prompt_embeds, + forward_fn, + guidance_scale, + guidance_rescale, + local_extras, + ): + """Execute single denoising step without CFG parallel.""" + if guidance_scale > 1.0: + latent_input = torch.cat([latents] * 2) + # Duplicate extra stream latents for CFG + extra_stream_input = { + name: torch.cat([stream_latents] * 2) + for name, stream_latents in extra_stream_latents.items() + } + else: + latent_input = latents + extra_stream_input = extra_stream_latents + + timestep_expanded = timestep.expand(latent_input.shape[0]) + + t_start = time.time() + result = forward_fn( + latent_input, extra_stream_input, timestep_expanded, prompt_embeds, local_extras + ) + + # Handle return format: (primary_noise, extra_noises_dict) or just primary_noise + if isinstance(result, tuple) and len(result) == 2 and isinstance(result[1], dict): + noise_pred, extra_noise_preds = result + else: + noise_pred = result + extra_noise_preds = {} + + t_transformer = time.time() - t_start + + c_start = time.time() + if guidance_scale > 1.0: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # Apply CFG to extra streams + for name, noise_extra in extra_noise_preds.items(): + noise_uncond_extra, noise_text_extra = noise_extra.chunk(2) + extra_noise_preds[name] = noise_uncond_extra + guidance_scale * ( + noise_text_extra - noise_uncond_extra + ) + + if guidance_rescale > 0.0: + extra_noise_preds[name] = self._rescale_noise_cfg( + extra_noise_preds[name], noise_text_extra, guidance_rescale + ) + + if guidance_rescale > 0.0: + noise_pred = self._rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale) + + t_cfg = time.time() - c_start + else: + t_cfg = 0.0 + + return noise_pred, extra_noise_preds, t_transformer, t_cfg + + def _scheduler_step( + self, + latents, + extra_stream_latents, + noise_pred, + extra_noise_preds, + timestep, + scheduler, + extra_stream_schedulers, + ): + """Execute scheduler step for all streams.""" + t_start = time.time() + latents = scheduler.step(noise_pred, timestep, latents, return_dict=False)[0] + + # Step schedulers for extra streams + for name, noise_extra in extra_noise_preds.items(): + if name in extra_stream_schedulers: + extra_stream_latents[name] = extra_stream_schedulers[name].step( + noise_extra, timestep, extra_stream_latents[name], return_dict=False + )[0] + + t_sched = time.time() - t_start + return latents, extra_stream_latents, t_sched + + def denoise( + self, + latents: torch.Tensor, + scheduler: Any, + prompt_embeds: torch.Tensor, + guidance_scale: float, + forward_fn: Callable, + timesteps: Optional[torch.Tensor] = None, + neg_prompt_embeds: Optional[torch.Tensor] = None, + guidance_rescale: float = 0.0, + extra_cfg_tensors: Optional[Dict[str, Tuple[torch.Tensor, Optional[torch.Tensor]]]] = None, + extra_streams: Optional[Dict[str, Tuple[torch.Tensor, Any]]] = None, + guidance_scale_2: Optional[float] = None, + boundary_timestep: Optional[float] = None, + ): + """Execute denoising loop with optional CFG parallel and TeaCache support. 
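+
+        Each step calls ``forward_fn`` to get the noise prediction, applies
+        classifier-free guidance as
+        ``noise = uncond + guidance_scale * (cond - uncond)``, then advances
+        every stream through its scheduler.
+
+        Example (minimal sketch for a single video stream; ``transformer_fwd``
+        is a hypothetical wrapper matching the ``forward_fn`` signature
+        documented below)::
+
+            latents = self.denoise(
+                latents=init_latents,
+                scheduler=self.scheduler,
+                prompt_embeds=pos_embeds,
+                neg_prompt_embeds=neg_embeds,
+                guidance_scale=5.0,
+                forward_fn=transformer_fwd,
+            )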
+ + Args: + latents: Initial noise latents (primary stream, e.g., video) + scheduler: Diffusion scheduler for primary stream + prompt_embeds: Text embeddings (positive) + guidance_scale: CFG strength (1.0 = no guidance) + forward_fn: Transformer forward function + Signature: forward_fn(latents, extra_stream_latents, timestep, + encoder_hidden_states, extra_tensors_dict) + Returns: (primary_noise, extra_stream_noises_dict) or just primary_noise + timesteps: Optional custom timesteps (defaults to scheduler.timesteps) + neg_prompt_embeds: Optional negative text embeddings for CFG + guidance_rescale: CFG rescale factor to prevent overexposure + extra_cfg_tensors: Optional dict of additional tensors to split for CFG parallel + Format: {name: (positive_tensor, negative_tensor)} + Example: {"audio_embeds": (pos_audio, neg_audio)} + extra_streams: Optional dict of additional streams to denoise in parallel + Format: {name: (stream_latents, stream_scheduler)} + Example: {"audio": (audio_latents, audio_scheduler)} + guidance_scale_2: Optional guidance scale for two-stage denoising. + When provided with boundary_timestep, switches from guidance_scale + to guidance_scale_2 when timestep < boundary_timestep. + boundary_timestep: Optional timestep boundary for two-stage denoising. + Switches guidance scale when crossing this threshold. + + Returns: + Single latents if no extra_streams + Tuple (primary_latents, extra_streams_dict) if extra_streams provided + """ + if timesteps is None: + timesteps = scheduler.timesteps + + total_steps = len(timesteps) + has_extra_streams = extra_streams is not None and len(extra_streams) > 0 + + # Reset TeaCache state for new generation + # Sets warmup/cutoff steps based on total_steps + if ( + hasattr(self, "cache_backend") + and self.cache_backend + and self.cache_backend.is_enabled() + ): + self.cache_backend.refresh(total_steps) + + if self.rank == 0: + if has_extra_streams: + stream_names = ", ".join(["primary"] + list(extra_streams.keys())) + logger.info( + f"Denoising [{stream_names}]: {total_steps} steps, guidance={guidance_scale}" + ) + else: + logger.info(f"Denoising: {total_steps} steps, guidance={guidance_scale}") + + cfg_config = self._setup_cfg_config( + guidance_scale, prompt_embeds, neg_prompt_embeds, extra_cfg_tensors + ) + do_cfg_parallel = cfg_config["enabled"] + prompt_embeds = cfg_config["prompt_embeds"] + local_extras = cfg_config["local_extras"] + + # Extract extra stream latents and schedulers + extra_stream_latents = {} + extra_stream_schedulers = {} + if extra_streams: + for name, (stream_latents, stream_scheduler) in extra_streams.items(): + extra_stream_latents[name] = stream_latents + extra_stream_schedulers[name] = stream_scheduler + + start_time = time.time() + + for i, t in enumerate(timesteps): + step_start = time.time() + + # Two-stage denoising: switch guidance scale at boundary + current_guidance_scale = guidance_scale + if guidance_scale_2 is not None and boundary_timestep is not None: + t_scalar = t.item() if t.dim() == 0 else t[0].item() + if t_scalar < boundary_timestep: + current_guidance_scale = guidance_scale_2 + + # Denoise + if do_cfg_parallel: + timestep = t.expand(latents.shape[0]) + noise_pred, extra_noise_preds, t_trans, t_cfg = self._denoise_step_cfg_parallel( + latents, + extra_stream_latents, + timestep, + cfg_config["local_embeds"], + forward_fn, + current_guidance_scale, + guidance_rescale, + cfg_config["ulysses_size"], + local_extras, + ) + else: + noise_pred, extra_noise_preds, t_trans, t_cfg = 
self._denoise_step_standard( + latents, + extra_stream_latents, + t, + prompt_embeds, + forward_fn, + current_guidance_scale, + guidance_rescale, + local_extras, + ) + + # Scheduler step for all streams + latents, extra_stream_latents, t_sched = self._scheduler_step( + latents, + extra_stream_latents, + noise_pred, + extra_noise_preds, + t, + scheduler, + extra_stream_schedulers, + ) + + # Logging + if self.rank == 0: + step_time = time.time() - step_start + avg_time = (time.time() - start_time) / (i + 1) + eta = avg_time * (total_steps - i - 1) + logger.info( + f"Step {i + 1}/{total_steps} | {step_time:.2f}s " + f"(trans={t_trans:.2f}s cfg={t_cfg:.3f}s sched={t_sched:.3f}s) | " + f"Avg={avg_time:.2f}s/step ETA={eta:.1f}s" + ) + + if self.rank == 0: + total_time = time.time() - start_time + logger.info("=" * 80) + logger.info(f"Denoising done: {total_time:.2f}s ({total_time / total_steps:.2f}s/step)") + + # Log TeaCache performance statistics + # Shows how many transformer steps were skipped (cache hits) vs computed + if ( + hasattr(self, "cache_backend") + and self.cache_backend + and self.cache_backend.is_enabled() + ): + stats = self.cache_backend.get_stats() + if stats: + logger.info( + f"TeaCache: {stats['hit_rate']:.1%} hit rate ({stats['cached']}/{stats['total']} steps)" + ) + + return (latents, extra_stream_latents) if has_extra_streams else latents diff --git a/tensorrt_llm/_torch/visual_gen/pipeline_loader.py b/tensorrt_llm/_torch/visual_gen/pipeline_loader.py new file mode 100644 index 0000000000..4cbb05a8e7 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/pipeline_loader.py @@ -0,0 +1,228 @@ +""" +Model loader for diffusion pipelines. + +Flow: +1. Load config via DiffusionModelConfig.from_pretrained() +2. Create pipeline via AutoPipeline.from_config() with MetaInit +3. Load weights with on-the-fly quantization if dynamic_weight_quant=True +4. Call pipeline.post_load_weights() + +Dynamic Quantization: +- If quant_config specifies FP8/NVFP4 and dynamic_weight_quant=True: + - Model Linear layers are created with FP8/NVFP4 buffers + - BF16 checkpoint weights are quantized on-the-fly during loading + - Quantized weights are copied into model buffers +""" + +import os +from typing import TYPE_CHECKING, Optional + +import torch + +from tensorrt_llm._torch.models.modeling_utils import MetaInitMode +from tensorrt_llm.llmapi.utils import download_hf_model +from tensorrt_llm.logger import logger +from tensorrt_llm.mapping import Mapping + +from .checkpoints import WeightLoader +from .config import DiffusionArgs, DiffusionModelConfig, PipelineComponent +from .models import AutoPipeline + +if TYPE_CHECKING: + from .models import BasePipeline + + +class PipelineLoader: + """ + Loader for diffusion pipelines. + + Supports dynamic quantization: when quant_config specifies FP8/NVFP4, + model is built with quantized buffers and BF16 weights are quantized + on-the-fly during loading. + + Example: + args = DiffusionArgs( + checkpoint_path="/path/to/model", + linear=LinearConfig(type="trtllm-fp8-blockwise"), + parallel=ParallelConfig(dit_tp_size=2), + ) + pipeline = PipelineLoader(args).load() + """ + + def __init__( + self, + args: Optional[DiffusionArgs] = None, + *, + mapping: Optional[Mapping] = None, + device: str = "cuda", + ): + """ + Initialize model loader. 
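+
+        When ``args`` is provided, ``args.to_mapping()`` and ``args.device``
+        take precedence; the ``mapping`` and ``device`` keyword arguments are
+        only used as fallbacks when ``args`` is None.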
+ + Args: + args: DiffusionArgs containing all configuration (preferred) + mapping: Tensor parallel mapping (fallback if args is None) + device: Device to load model on (fallback if args is None) + """ + self.args = args + if args is not None: + self.mapping = args.to_mapping() + self.device = torch.device(args.device) + else: + self.mapping = mapping or Mapping() + self.device = torch.device(device) + + def _resolve_checkpoint_dir(self, checkpoint_dir: str) -> str: + """Resolve checkpoint_dir to a local directory path. + + If checkpoint_dir is an existing local path, returns it unchanged. + Otherwise, attempts to download from HuggingFace Hub using the + file-lock-protected ``download_hf_model`` utility (safe for + concurrent multi-process access). + + Args: + checkpoint_dir: Local path or HuggingFace Hub model ID. + + Returns: + Path to local directory containing the model. + + Raises: + ValueError: If the path cannot be resolved (invalid repo ID, + authentication failure, offline with no cache, etc.) + """ + if os.path.exists(checkpoint_dir): + return checkpoint_dir + + revision = self.args.revision if self.args else None + logger.info( + f"'{checkpoint_dir}' not found locally; " + f"attempting HuggingFace Hub download (revision={revision})" + ) + try: + local_dir = download_hf_model(checkpoint_dir, revision=revision) + except Exception as e: + raise ValueError( + f"Could not resolve '{checkpoint_dir}' as a local path or " + f"HuggingFace Hub model ID: {e}" + ) from e + return str(local_dir) + + def load( + self, + checkpoint_dir: Optional[str] = None, + ) -> "BasePipeline": + """ + Load a diffusion pipeline with optional dynamic quantization. + + Flow: + 1. Resolve checkpoint_dir (local path or HuggingFace Hub model ID) + 2. Load config via DiffusionModelConfig.from_pretrained() + 3. Create pipeline via AutoPipeline.from_config() with MetaInit + 4. Load transformer weights via pipeline.load_weights() + 5. Load auxiliary components (VAE, text_encoder) via diffusers + 6. Call pipeline.post_load_weights() + + Args: + checkpoint_dir: Local path or HF Hub model ID (uses args.checkpoint_path if not provided) + + Returns: + Loaded pipeline (WanPipeline, FluxPipeline, etc.) 
- type auto-detected + """ + # Resolve checkpoint_dir + checkpoint_dir = checkpoint_dir or (self.args.checkpoint_path if self.args else None) + if not checkpoint_dir: + raise ValueError("checkpoint_dir must be provided or set in DiffusionArgs") + checkpoint_dir = self._resolve_checkpoint_dir(str(checkpoint_dir)) + + # Get loading options from args + skip_components = self.args.skip_components if self.args else [] + + # ===================================================================== + # STEP 1: Load Config (includes quant config parsing) + # Merge pretrained checkpoint config with user-provided DiffusionArgs + # ===================================================================== + logger.info(f"Loading config from {checkpoint_dir}") + config = DiffusionModelConfig.from_pretrained( + checkpoint_dir, + args=self.args, + mapping=self.mapping, + ) + + # Log quantization settings + if config.quant_config and config.quant_config.quant_algo: + logger.info(f"Quantization: {config.quant_config.quant_algo.name}") + logger.info(f"Dynamic weight quant: {config.dynamic_weight_quant}") + + # ===================================================================== + # STEP 2: Create Pipeline with MetaInit + # Pipeline type is auto-detected from model_index.json + # - Meta tensors (no GPU memory until materialization) + # - If quant_config specifies FP8, Linear layers have FP8 weight buffers + # ===================================================================== + logger.info("Creating pipeline with MetaInitMode") + with MetaInitMode(): + pipeline = AutoPipeline.from_config(config, checkpoint_dir) + + # Convert meta tensors to CUDA tensors + self._materialize_meta_tensors(pipeline) + pipeline.to(self.device) + + # ===================================================================== + # STEP 3: Load Transformer Weights + # If dynamic_weight_quant=True: + # - BF16 checkpoint weights are loaded + # - Quantized on-the-fly to FP8/NVFP4 by DynamicLinearWeightLoader + # - Copied into model's quantized buffers + # ===================================================================== + if pipeline.transformer is None: + raise ValueError("Pipeline has no transformer component") + + transformer_components = getattr(pipeline, "transformer_components", ["transformer"]) + logger.info(f"Transformer components: {transformer_components}") + + transformer_path = os.path.join(checkpoint_dir, PipelineComponent.TRANSFORMER) + if not os.path.exists(transformer_path): + raise FileNotFoundError( + f"Transformer path does not exist: {transformer_path}. " + f"Checkpoint directory must contain a 'transformer' subdirectory." + ) + + weight_loader = WeightLoader(components=transformer_components) + # TODO: accelerate the cpu loading w/ multiprocessing + weights = weight_loader.load_weights(checkpoint_dir, self.mapping) + + # Load weights into pipeline + pipeline.load_weights(weights) + + # ===================================================================== + # STEP 4: Load Standard Components (VAE, TextEncoder via diffusers) + # These are NOT quantized - loaded as-is from checkpoint + # ===================================================================== + pipeline.load_standard_components(checkpoint_dir, self.device, skip_components) + + # ===================================================================== + # STEP 5: Post-load Hooks (TeaCache setup, etc.) 
+ # ===================================================================== + if hasattr(pipeline, "post_load_weights"): + pipeline.post_load_weights() + + logger.info(f"Pipeline loaded: {pipeline.__class__.__name__}") + return pipeline + + def _materialize_meta_tensors(self, module: torch.nn.Module) -> None: + """ + Convert meta tensors to CUDA tensors. + + Meta tensors are placeholders that don't allocate GPU memory. + After model structure is defined, we materialize them to real tensors. + """ + memo = {} + + def init_meta_tensor(t: torch.Tensor) -> torch.Tensor: + if t.device != torch.device("meta"): + return t + if t not in memo: + memo[t] = torch.empty_like(t, device="cuda") + return memo[t] + + module._apply(init_meta_tensor) diff --git a/tensorrt_llm/_torch/visual_gen/pipeline_registry.py b/tensorrt_llm/_torch/visual_gen/pipeline_registry.py new file mode 100644 index 0000000000..f4c7fc37da --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/pipeline_registry.py @@ -0,0 +1,94 @@ +"""Pipeline registry for unified config flow. + +Follows: DiffusionArgs → PipelineLoader → DiffusionModelConfig → AutoPipeline → BasePipeline + +All pipelines (Wan, Flux2, LTX2) register via @register_pipeline decorator. +""" + +import json +import os +from typing import TYPE_CHECKING, Dict, Type + +from tensorrt_llm.logger import logger + +if TYPE_CHECKING: + from .config import DiffusionModelConfig + from .pipeline import BasePipeline + +# Global registry: pipeline_name -> pipeline_class +PIPELINE_REGISTRY: Dict[str, Type["BasePipeline"]] = {} + + +def register_pipeline(name: str): + """Register a pipeline class for AutoPipeline. + + Usage: + @register_pipeline("WanPipeline") + class WanPipeline(BasePipeline): + ... + """ + + def decorator(cls: Type["BasePipeline"]) -> Type["BasePipeline"]: + PIPELINE_REGISTRY[name] = cls + logger.debug(f"Registered pipeline: {name} -> {cls.__name__}") + return cls + + return decorator + + +class AutoPipeline: + """Factory for creating pipelines from config.""" + + @staticmethod + def from_config( + config: "DiffusionModelConfig", + checkpoint_dir: str, + ) -> "BasePipeline": + """ + Create pipeline instance from DiffusionModelConfig. + """ + # Detect pipeline type from model_index.json + pipeline_type = AutoPipeline._detect_from_checkpoint(checkpoint_dir) + + if pipeline_type not in PIPELINE_REGISTRY: + raise ValueError( + f"Unknown pipeline: '{pipeline_type}'. 
" + f"Available: {list(PIPELINE_REGISTRY.keys())}\n" + f"Checkpoint: {checkpoint_dir}" + ) + + pipeline_class = PIPELINE_REGISTRY[pipeline_type] + logger.info(f"AutoPipeline: Creating {pipeline_class.__name__} from {checkpoint_dir}") + + # Instantiate pipeline with DiffusionModelConfig + return pipeline_class(config) + + @staticmethod + def _detect_from_checkpoint(checkpoint_dir: str) -> str: + """Detect pipeline type.""" + index_path = os.path.join(checkpoint_dir, "model_index.json") + + if os.path.exists(index_path): + with open(index_path) as f: + index = json.load(f) + + class_name = index.get("_class_name", "") + + if class_name in PIPELINE_REGISTRY: + return class_name + + if "ImageToVideo" in class_name or "I2V" in class_name: + if "Wan" in class_name: + return "WanImageToVideoPipeline" + # Generic Wan (T2V) + if "Wan" in class_name: + return "WanPipeline" + if "Flux" in class_name: + return "FluxPipeline" + if "LTX" in class_name or "Ltx" in class_name: + return "LTX2Pipeline" + + raise ValueError( + f"Cannot detect pipeline type for {checkpoint_dir}\n" + f"Expected model_index.json with '_class_name' field at: {index_path}" + ) diff --git a/tensorrt_llm/_torch/visual_gen/quantization/__init__.py b/tensorrt_llm/_torch/visual_gen/quantization/__init__.py new file mode 100644 index 0000000000..909629b1b3 --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/quantization/__init__.py @@ -0,0 +1,15 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Quantization support for diffusion models. +""" + +from .loader import DynamicLinearWeightLoader +from .ops import quantize_fp8_blockwise, quantize_fp8_per_tensor + +__all__ = [ + "DynamicLinearWeightLoader", + "quantize_fp8_per_tensor", + "quantize_fp8_blockwise", +] diff --git a/tensorrt_llm/_torch/visual_gen/quantization/loader.py b/tensorrt_llm/_torch/visual_gen/quantization/loader.py new file mode 100644 index 0000000000..a4a2a3a11c --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/quantization/loader.py @@ -0,0 +1,197 @@ +""" +Dynamic weight quantization loader for Linear modules. + +Wraps Linear.load_weights() to perform dynamic quantization before loading to device. +""" + +from typing import Dict, List, Optional + +import torch + +from tensorrt_llm._torch.modules.linear import Linear, WeightMode +from tensorrt_llm._torch.visual_gen.config import DiffusionModelConfig +from tensorrt_llm._torch.visual_gen.quantization.ops import ( + quantize_fp8_blockwise, + quantize_fp8_per_tensor, +) +from tensorrt_llm.quantization.mode import QuantAlgo + + +class DynamicLinearWeightLoader: + """ + Dynamic weight quantization loader for Linear modules. + + Wraps Linear.load_weights() to perform dynamic (load-time) quantization + from BF16/FP16 to FP8 before loading weights to device. 
+ + Example: + params_map = {'qkv_proj': ['to_q', 'to_k', 'to_v']} + loader = DynamicLinearWeightLoader(model_config, params_map=params_map) + + for name, module in model.named_modules(): + if isinstance(module, Linear): + weight_dicts = loader.get_linear_weights(module, name, weights) + loader.load_linear_weights(module, name, weight_dicts) + """ + + def __init__( + self, + model_config: DiffusionModelConfig, + params_map: Optional[Dict[str, List[str]]] = None, + ): + self.model_config = model_config + self.quant_config = model_config.quant_config + self.quant_config_dict = model_config.quant_config_dict + self.dynamic_weight_quant = model_config.dynamic_weight_quant + self.params_map = params_map or {} + + # ========================================================================= + # Weight gathering methods + # ========================================================================= + + def get_linear_weights( + self, + module: Linear, + full_name: str, + weights: Dict[str, torch.Tensor], + ) -> List[Dict[str, torch.Tensor]]: + """Get weights for a Linear module, auto-detecting fused weights.""" + weights_config = getattr(module, "weights_loading_config", None) + if weights_config is not None: + weight_mode = getattr(weights_config, "weight_mode", None) + if weight_mode == WeightMode.FUSED_QKV_LINEAR: + fused_names = self._get_fused_names(full_name) + return self._get_fused_weights(full_name, weights, fused_names) + + return self._get_vanilla_weights(full_name, weights) + + def filter_weights( + self, prefix: str, weights: Dict[str, torch.Tensor] + ) -> Dict[str, torch.Tensor]: + """ + Filter weights by prefix and strip the prefix. + + Example: + prefix = 'blocks.0.attn1.to_q' + weights = {'blocks.0.attn1.to_q.weight': ..., 'blocks.0.attn1.to_q.bias': ...} + Returns: {'weight': ..., 'bias': ...} + """ + result = {} + prefix_dot = prefix + "." + for k, v in weights.items(): + if k.startswith(prefix_dot): + result[k[len(prefix_dot) :]] = v + return result + + def _get_fused_names(self, full_name: str) -> List[str]: + """Get checkpoint names for a fused module from params_map.""" + for suffix, names in self.params_map.items(): + if full_name.endswith(suffix): + return names + raise ValueError( + f"No params_map entry for fused module '{full_name}'. " + f"Add mapping like {{'qkv_proj': ['to_q', 'to_k', 'to_v']}} to params_map." 
+ ) + + def _get_fused_weights( + self, + full_name: str, + weights: Dict[str, torch.Tensor], + fused_names: List[str], + ) -> List[Dict[str, torch.Tensor]]: + """Get weights for a fused module from checkpoint.""" + parent_path = ".".join(full_name.split(".")[:-1]) + module_weights = [] + for ckpt_name in fused_names: + ckpt_path = f"{parent_path}.{ckpt_name}" if parent_path else ckpt_name + filtered = self.filter_weights(ckpt_path, weights) + module_weights.append(filtered) + return module_weights + + def _get_vanilla_weights( + self, + full_name: str, + weights: Dict[str, torch.Tensor], + ) -> List[Dict[str, torch.Tensor]]: + """Get weights for a standard (non-fused) Linear module.""" + fw = self.filter_weights(full_name, weights) + return [fw] if fw else [] + + # ========================================================================= + # Quantization methods + # ========================================================================= + + def _get_quant_algo_for_layer(self, name: str) -> Optional[QuantAlgo]: + """Get quantization algorithm for a specific layer.""" + if self.quant_config_dict is not None: + layer_config = self.quant_config_dict.get(name) + if layer_config is not None: + return layer_config.quant_algo + + if self.quant_config is not None: + return self.quant_config.quant_algo + + return None + + def _should_dynamic_quantize( + self, weight_dict: Dict[str, torch.Tensor], quant_algo: Optional[QuantAlgo], name: str + ) -> bool: + """Decide if weight should be dynamically quantized at load time.""" + if not self.dynamic_weight_quant or quant_algo is None: + return False + + # Check if module is excluded + if self.quant_config is not None: + if self.quant_config.is_module_excluded_from_quantization(name): + return False + + weight = weight_dict.get("weight") + if weight is None: + return False + + # For FP8 algorithms: quantize if weight is high precision + if quant_algo in (QuantAlgo.FP8, QuantAlgo.FP8_BLOCK_SCALES): + if weight.dtype == torch.float8_e4m3fn and "weight_scale" in weight_dict: + return False # Already quantized + return weight.dtype in (torch.bfloat16, torch.float16, torch.float32) + + return False + + def _maybe_dynamic_quantize( + self, weight_dict: Dict[str, torch.Tensor], quant_algo: Optional[QuantAlgo], name: str + ) -> Dict[str, torch.Tensor]: + """Conditionally quantize weight at load time on GPU.""" + if not self._should_dynamic_quantize(weight_dict, quant_algo, name): + return weight_dict + + weight = weight_dict["weight"] + + # Move to GPU only if needed + if weight.device.type != "cuda": + weight = weight.cuda() + + if quant_algo == QuantAlgo.FP8: + qweight, scale = quantize_fp8_per_tensor(weight) + elif quant_algo == QuantAlgo.FP8_BLOCK_SCALES: + block_size = self.quant_config.group_size if self.quant_config else 128 + qweight, scale = quantize_fp8_blockwise(weight, block_size=block_size) + else: + return weight_dict + + return {**weight_dict, "weight": qweight, "weight_scale": scale} + + def load_linear_weights( + self, module: Linear, name: str, weight_dicts: List[Dict[str, torch.Tensor]] + ) -> None: + """Load weights into Linear module with optional quantization.""" + module_quant_config = getattr(module, "quant_config", None) + if module_quant_config is not None: + quant_algo = module_quant_config.quant_algo + else: + quant_algo = self._get_quant_algo_for_layer(name) + + quantized_weight_dicts = [ + self._maybe_dynamic_quantize(wd, quant_algo, name) for wd in weight_dicts + ] + + module.load_weights(quantized_weight_dicts) diff --git 
a/tensorrt_llm/_torch/visual_gen/quantization/ops.py b/tensorrt_llm/_torch/visual_gen/quantization/ops.py new file mode 100644 index 0000000000..db550ced8a --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/quantization/ops.py @@ -0,0 +1,98 @@ +""" +Quantization operations for diffusion models. + +Provides on-the-fly quantization functions for dynamic (load-time) quantization. +""" + +from typing import Tuple + +import torch + +# FP8 E4M3 max value +FP8_E4M3_MAX = torch.finfo(torch.float8_e4m3fn).max + + +def quantize_fp8_per_tensor(weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Quantize weight to FP8 E4M3 with per-tensor scale. + + Uses torch.ops.tensorrt_llm.quantize_e4m3_per_tensor CUDA kernel. + + Args: + weight: Input weight tensor (BF16/FP16/FP32), shape (out_features, in_features) + + Returns: + Tuple of: + - qweight: Quantized weight (FP8 E4M3), same shape as input + - weight_scale: Dequantization scale (FP32), shape (1, 1) + """ + qweight, scale = torch.ops.tensorrt_llm.quantize_e4m3_per_tensor(weight) + # Ensure scale is float32 and has shape (1, 1) for consistency + return qweight, scale.to(torch.float32) + + +def quantize_fp8_blockwise( + weight: torch.Tensor, block_size: int = 128 +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Quantize weight to FP8 E4M3 with 128x128 blockwise scales. + + This function converts BF16/FP16/FP32 weights to FP8 E4M3 format using + per-block scale factors. The weight is divided into blocks of size + (block_size, block_size) and each block has its own scale. + + Args: + weight: Input weight tensor (BF16/FP16/FP32), shape (out_features, in_features) + block_size: Block size for blockwise quantization (default: 128) + + Returns: + Tuple of: + - qweight: Quantized weight (FP8 E4M3), shape (out_features, in_features) + - block_scales: Block-wise dequantization scales (FP32), + shape (num_blocks_out, num_blocks_in) + + Note: + - If dimensions are not divisible by block_size, the last block may be smaller + - block_scales are dequantization scales (multiply to get back original scale) + - This uses 128x128 block scaling compatible with Linear module's FP8_BLOCK_SCALES + """ + out_features, in_features = weight.shape + weight_fp32 = weight.float() + + # Calculate number of blocks + num_blocks_out = (out_features + block_size - 1) // block_size + num_blocks_in = (in_features + block_size - 1) // block_size + + # Initialize outputs + qweight = torch.empty_like(weight, dtype=torch.float8_e4m3fn) + block_scales = torch.empty( + (num_blocks_out, num_blocks_in), dtype=torch.float32, device=weight.device + ) + + # Quantize each block + for i in range(num_blocks_out): + row_start = i * block_size + row_end = min((i + 1) * block_size, out_features) + + for j in range(num_blocks_in): + col_start = j * block_size + col_end = min((j + 1) * block_size, in_features) + + # Extract block + block = weight_fp32[row_start:row_end, col_start:col_end] + + # Compute block scale + max_val = block.abs().max() + scale = ( + max_val / FP8_E4M3_MAX if max_val > 0 else torch.tensor(1.0, device=weight.device) + ) + + # Quantize block + inv_scale = scale.reciprocal() if scale > 0 else torch.tensor(1.0, device=weight.device) + qblock = (block * inv_scale).clamp(-FP8_E4M3_MAX, FP8_E4M3_MAX).to(torch.float8_e4m3fn) + + # Store results + qweight[row_start:row_end, col_start:col_end] = qblock + block_scales[i, j] = scale.to(torch.float32) + + return qweight, block_scales diff --git a/tensorrt_llm/_torch/visual_gen/teacache.py 
b/tensorrt_llm/_torch/visual_gen/teacache.py new file mode 100644 index 0000000000..aa99c1655e --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/teacache.py @@ -0,0 +1,409 @@ +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional + +import numpy as np +import torch +from diffusers.models.modeling_outputs import Transformer2DModelOutput + +from tensorrt_llm.logger import logger + +# ============================================================================= +# Core Data Structures +# ============================================================================= + + +@dataclass +class CacheContext: + """Context returned by model extractors for TeaCache. + + Attributes: + modulated_input: Timestep embedding used for cache distance calculation + hidden_states: Input hidden states for the transformer + encoder_hidden_states: Text/prompt embeddings + run_transformer_blocks: Callable that executes the transformer forward pass + postprocess: Callable that formats the output to the expected return type + """ + + modulated_input: torch.Tensor + hidden_states: torch.Tensor + encoder_hidden_states: Any = None + run_transformer_blocks: Callable = None + postprocess: Callable = None + + +# ============================================================================= +# Extractor Registry +# ============================================================================= + +_EXTRACTORS = {} + + +def register_extractor(model_name, extractor_fn): + """Register an extractor function for a model class.""" + _EXTRACTORS[model_name] = extractor_fn + + +def get_extractor(model_type): + """Get the registered extractor for a model type.""" + if model_type not in _EXTRACTORS: + raise ValueError( + f"TeaCache: Unknown model '{model_type}'. Available: {list(_EXTRACTORS.keys())}" + ) + return _EXTRACTORS[model_type] + + +# ============================================================================= +# Config-Based Extractor System +# ============================================================================= + + +@dataclass +class ExtractorConfig: + """Configuration for model-specific TeaCache extractors. + + Only the timestep embedding logic is model-specific; all other logic is handled generically. + + Attributes: + model_class_name: Model class name (e.g., "LTX2VideoTransformer3DModel") + timestep_embed_fn: Callable(module, timestep, guidance=None) -> Tensor + timestep_param_name: Parameter name for timestep in forward() (default: "timestep") + guidance_param_name: Parameter name for guidance if used (default: None) + forward_params: List of parameter names (None = auto-introspect from forward signature) + return_dict_default: Default value for return_dict parameter (default: True) + output_model_class: Output class name for return type (default: "Transformer2DModelOutput") + """ + + model_class_name: str + timestep_embed_fn: Callable + timestep_param_name: str = "timestep" + guidance_param_name: Optional[str] = None + forward_params: Optional[List[str]] = None + return_dict_default: bool = True + output_model_class: str = "Transformer2DModelOutput" + + +class GenericExtractor: + """Handles common TeaCache logic for all diffusion models. + + Extracts forward() arguments, creates run_blocks and postprocess callbacks, + and delegates only timestep embedding computation to model-specific logic. 
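+
+    A timestep_embed_fn only has to produce the embedding used for the
+    cache-distance test, e.g. (sketch; `time_embedder` is a hypothetical
+    model attribute):
+
+        def embed_fn(module, timestep, guidance=None):
+            return module.time_embedder(timestep)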
+ """ + + def __init__(self, config: ExtractorConfig): + self.config = config + + def _extract_forward_args(self, module: torch.nn.Module, *args, **kwargs) -> Dict: + """Extract and normalize forward() arguments from *args and **kwargs.""" + # Get parameter names (auto-introspect or use config) + if self.config.forward_params is not None: + param_names = self.config.forward_params + else: + # Auto-introspect forward signature + try: + sig = inspect.signature(module._original_forward) + param_names = [p for p in sig.parameters if p not in ("self", "args", "kwargs")] + except Exception as e: + logger.warning(f"Could not introspect forward signature: {e}") + param_names = [] + + # Map positional args to parameter names + extracted = {param_names[i]: arg for i, arg in enumerate(args) if i < len(param_names)} + + # Merge kwargs (kwargs take precedence) + extracted.update(kwargs) + return extracted + + def _compute_timestep_embedding(self, module: torch.nn.Module, params: Dict) -> torch.Tensor: + """Compute timestep embedding using configured callable.""" + timestep = params.get(self.config.timestep_param_name) + if timestep is None: + raise ValueError(f"Missing required parameter: {self.config.timestep_param_name}") + + # Flatten timestep if needed (common pattern) + timestep_flat = timestep.flatten() if timestep.ndim == 2 else timestep + guidance = ( + params.get(self.config.guidance_param_name) if self.config.guidance_param_name else None + ) + + # Call configured timestep embedding function + try: + return self.config.timestep_embed_fn(module, timestep_flat, guidance) + except Exception as e: + logger.error(f"Timestep embedder failed: {e}") + # Last resort: use timestep as-is + logger.warning("Using timestep fallback") + return timestep_flat.unsqueeze(-1) if timestep_flat.ndim == 1 else timestep_flat + + def __call__(self, module: torch.nn.Module, *args, **kwargs) -> CacheContext: + """Main extractor logic - called by TeaCacheHook. + + Extracts forward arguments, computes timestep embedding, and creates callbacks + for running the transformer and post-processing the output. 
+ """ + # Extract forward arguments from positional and keyword args + params = self._extract_forward_args(module, *args, **kwargs) + + # Compute timestep embedding (used for cache distance calculation) + t_emb = self._compute_timestep_embedding(module, params) + return_dict = params.get("return_dict", self.config.return_dict_default) + + def run_blocks(): + """Execute the full transformer forward pass with original parameters.""" + ret = module._original_forward(**params) + # Normalize output to tuple format + if return_dict and not isinstance(ret, tuple): + sample = ret.sample if hasattr(ret, "sample") else ret + return (sample,) if not isinstance(sample, tuple) else sample + return ret if isinstance(ret, tuple) else (ret,) + + def postprocess(output): + """Convert cached/computed output back to expected return format.""" + if return_dict: + if isinstance(output, tuple): + return output + return Transformer2DModelOutput(sample=output) + # For return_dict=False, unwrap single-element tuple to raw tensor + if isinstance(output, tuple) and len(output) == 1: + return output[0] + # Return raw tensor as-is (TeaCacheHook always passes tensors to postprocess) + return output + + return CacheContext( + modulated_input=t_emb, + hidden_states=params.get("hidden_states"), + encoder_hidden_states=params.get("encoder_hidden_states"), + run_transformer_blocks=run_blocks, + postprocess=postprocess, + ) + + +def register_extractor_from_config(config: ExtractorConfig): + """Register a TeaCache extractor for a model. Call this in pipeline's load() method. + + Example: + register_extractor_from_config(ExtractorConfig( + model_class_name="LTX2VideoTransformer3DModel", + timestep_embed_fn=self._compute_ltx2_timestep_embedding, + )) + """ + extractor = GenericExtractor(config) + register_extractor(config.model_class_name, extractor) + logger.debug(f"Registered TeaCache extractor for {config.model_class_name}") + + +# ============================================================================= +# TeaCache Runtime (caching hook and lifecycle management) +# ============================================================================= + + +class TeaCacheHook: + """Caches transformer blocks when timestep embeddings change slowly. + + The hook monitors the relative change in timestep embeddings between steps. + When the change is small (below threshold), it reuses the cached residual + from the previous step instead of running the full transformer. + + Separate cache states are maintained for conditional and unconditional branches + when using Classifier-Free Guidance (CFG). 
+ """ + + def __init__(self, config): + self.config = config + # Polynomial function to rescale embedding distances + self.rescale_func = np.poly1d(config.coefficients) + self.extractor_fn = None + + # Separate cache state for conditional (pos) and unconditional (neg) branches + self.state_pos = self._new_state() + self.state_neg = self._new_state() + self.stats = {"total": 0, "cached": 0} + + def _new_state(self): + return {"cnt": 0, "acc_dist": 0.0, "prev_input": None, "prev_residual": None} + + def initialize(self, module): + self.extractor_fn = get_extractor(module.__class__.__name__) + + def reset_state(self): + self.state_pos = self._new_state() + self.state_neg = self._new_state() + self.stats = {"total": 0, "cached": 0} + + def get_stats(self): + total = max(self.stats["total"], 1) + cached = self.stats["cached"] + return { + "hit_rate": cached / total, + "total": total, + "cached": cached, + # Backward compatibility + "total_steps": total, + "cached_steps": cached, + "compute_steps": total - cached, + } + + def __call__(self, module, *args, **kwargs): + """Main hook called during transformer forward pass. + + Decides whether to run the full transformer or reuse cached residual + based on timestep embedding distance. + """ + # Extract context (timestep embedding, hidden states, callbacks) + ctx = self.extractor_fn(module, *args, **kwargs) + + # Select cache state (for CFG: separate tracking for conditional/unconditional) + cache_branch = getattr(module, "_cache_branch", None) + state = self.state_neg if cache_branch == "uncond" else self.state_pos + + # Decide: compute transformer or use cache? + should_compute = self._should_compute(state, ctx.modulated_input) + self.stats["total"] += 1 + + if not should_compute and state["prev_residual"] is not None: + # Cache hit: Add cached residual to skip transformer computation + logger.debug(f"TeaCache: SKIP step {state['cnt']}") + # For I2V: output might have fewer channels than input + # Apply residual only to the latent channels + if ctx.hidden_states.shape[1] != state["prev_residual"].shape[1]: + # Extract latent channels (match output channels) + num_output_channels = state["prev_residual"].shape[1] + latent_channels = ctx.hidden_states[:, :num_output_channels] + output = latent_channels + state["prev_residual"] + else: + output = ctx.hidden_states + state["prev_residual"] + self.stats["cached"] += 1 + else: + # Cache miss: Run full transformer and cache the residual + outputs = ctx.run_transformer_blocks() + output = outputs[0] if isinstance(outputs, tuple) else outputs + + # Store residual (output - input) for next potential cache hit + # For I2V: output may have fewer channels than input + # Compute residual only on the latent channels + if ctx.hidden_states.shape[1] != output.shape[1]: + # Extract latent channels (match output channels) + num_output_channels = output.shape[1] + latent_channels = ctx.hidden_states[:, :num_output_channels] + state["prev_residual"] = (output - latent_channels).detach() + else: + original = ctx.hidden_states.clone() + state["prev_residual"] = (output - original).detach() + + # Update state for next iteration + state["prev_input"] = ctx.modulated_input.detach() + state["cnt"] += 1 + + return ctx.postprocess(output) + + def _should_compute(self, state, modulated_inp): + """Decide whether to compute transformer or use cached result. + + Returns True to compute, False to use cache. 
+ """ + # Warmup: Always compute first few steps to build stable cache + if self.config.ret_steps and state["cnt"] < self.config.ret_steps: + state["acc_dist"] = 0.0 + return True + + # Cooldown: Always compute last few steps for quality + if self.config.cutoff_steps and state["cnt"] >= self.config.cutoff_steps: + return True + + # First step: no previous input to compare + if state["prev_input"] is None: + return True + + # Compute relative change in timestep embedding + curr, prev = modulated_inp, state["prev_input"] + + # For CFG (batch_size > 1), only compare conditional branch + # Both branches move similarly, so one comparison is sufficient + if modulated_inp.shape[0] > 1: + curr, prev = modulated_inp.chunk(2)[1], prev.chunk(2)[1] + + # Calculate relative L1 distance (normalized by magnitude) + rel_dist = ((curr - prev).abs().mean() / (prev.abs().mean() + 1e-8)).cpu().item() + + # Apply polynomial rescaling to adjust sensitivity + # Accumulate distance (capped at 2x threshold to prevent overflow) + rescaled = float(self.rescale_func(rel_dist)) + state["acc_dist"] = min( + state["acc_dist"] + abs(rescaled), self.config.teacache_thresh * 2.0 + ) + + logger.debug( + f"TeaCache: step {state['cnt']} | dist {rel_dist:.2e} | acc {state['acc_dist']:.4f}" + ) + + # Cache decision based on accumulated distance + if state["acc_dist"] < self.config.teacache_thresh: + # Below threshold: use cache, apply decay to distance + state["acc_dist"] *= 0.95 + return False + else: + # Above threshold: compute, reset accumulated distance + state["acc_dist"] = 0.0 + return True + + +class TeaCacheBackend: + """Manages TeaCache lifecycle.""" + + def __init__(self, config): + self.config = config + self.hook = None + + def enable(self, module): + if self.hook is None: + logger.info(f"TeaCache: Enabling for {module.__class__.__name__}") + self.hook = TeaCacheHook(self.config) + self.hook.initialize(module) + module._original_forward = module.forward + module.forward = lambda *args, **kwargs: self.hook(module, *args, **kwargs) + + def disable(self, module): + if self.hook and hasattr(module, "_original_forward"): + module.forward = module._original_forward + self.hook = None + + def refresh(self, num_inference_steps): + """Reset TeaCache state for a new generation. 
+ + Sets warmup/cutoff steps based on total inference steps: + - Warmup steps: Always compute to build stable cache + - Cutoff steps: Always compute for quality at the end + - Middle steps: Use caching based on distance threshold + + Args: + num_inference_steps: Total number of denoising steps + """ + if not self.hook: + return + + # Reset cache state (clears previous residuals and counters) + self.hook.reset_state() + + # Configure warmup and cutoff based on mode + if self.config.use_ret_steps: + # Aggressive warmup: 5 steps to stabilize cache + self.config.ret_steps = 5 + self.config.cutoff_steps = num_inference_steps # No cutoff (cache until end) + else: + # Minimal warmup: 1 step + self.config.ret_steps = 1 + self.config.cutoff_steps = num_inference_steps - 2 # Compute last 2 steps + + self.config.num_steps = num_inference_steps + + logger.info( + f"TeaCache: {num_inference_steps} steps | " + f"warmup: {self.config.ret_steps}, cutoff: {self.config.cutoff_steps}, " + f"thresh: {self.config.teacache_thresh}" + ) + + def is_enabled(self): + return self.hook is not None + + def get_stats(self): + return self.hook.get_stats() if self.hook else {} diff --git a/tensorrt_llm/_torch/visual_gen/utils.py b/tensorrt_llm/_torch/visual_gen/utils.py new file mode 100644 index 0000000000..99f8837ceb --- /dev/null +++ b/tensorrt_llm/_torch/visual_gen/utils.py @@ -0,0 +1,39 @@ +"""Utility functions for visual generation pipelines.""" + +import torch + + +@torch.compile +def postprocess_video_tensor(video: torch.Tensor, remove_batch_dim: bool = True) -> torch.Tensor: + """Post-process video tensor from VAE decoder output to final format. + + This is a more efficient implementation than using VideoProcessor for single-batch cases, + as it avoids loop overhead and processes the entire batch with vectorized operations. + + Args: + video: Video tensor in (B, C, T, H, W) format from VAE decoder + remove_batch_dim: Whether to remove batch dimension. Default True for typical + single-batch video generation. + + Returns: + Post-processed video tensor: + - If remove_batch_dim=True: (T, H, W, C) uint8 tensor + - If remove_batch_dim=False: (B, T, H, W, C) uint8 tensor + + Note: + Assumes video values are in [-1, 1] range (standard VAE decoder output). 
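+
+    Example (sketch; `decoded` stands for a hypothetical VAE output):
+
+        decoded = vae.decode(latents)               # (1, 3, T, H, W), in [-1, 1]
+        frames = postprocess_video_tensor(decoded)  # (T, H, W, 3) uint8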
+ """ + # Convert to (B, T, H, W, C) format + video = video.permute(0, 2, 3, 4, 1) # (B, C, T, H, W) -> (B, T, H, W, C) + + # Normalize to [0, 1] range + video = (video / 2 + 0.5).clamp(0, 1) + + # Convert to uint8 + video = (video * 255).round().to(torch.uint8) + + # Remove batch dimension if requested + if remove_batch_dim: + video = video[0] # (B, T, H, W, C) -> (T, H, W, C) + + return video diff --git a/tensorrt_llm/commands/serve.py b/tensorrt_llm/commands/serve.py index 76cbde9646..8eb31bd5f1 100644 --- a/tensorrt_llm/commands/serve.py +++ b/tensorrt_llm/commands/serve.py @@ -19,11 +19,13 @@ from tensorrt_llm import LLM as PyTorchLLM from tensorrt_llm import MultimodalEncoder from tensorrt_llm._tensorrt_engine import LLM from tensorrt_llm._utils import mpi_rank +from tensorrt_llm.commands.utils import (get_is_diffusion_model, + get_visual_gen_model_type) from tensorrt_llm.executor.utils import LlmLauncherEnvs from tensorrt_llm.inputs.multimodal import MultimodalServerConfig from tensorrt_llm.llmapi import (BuildConfig, CapacitySchedulerPolicy, DynamicBatchConfig, KvCacheConfig, - SchedulerConfig) + SchedulerConfig, VisualGen) from tensorrt_llm.llmapi.disagg_utils import (DisaggClusterConfig, MetadataServerConfig, ServerRole, extract_disagg_cluster_config, @@ -217,7 +219,7 @@ def launch_server( f"{backend} is not a known backend, check help for available options.", param_hint="backend") - server = OpenAIServer(llm=llm, + server = OpenAIServer(generator=llm, model=model, tool_parser=tool_parser, server_role=server_role, @@ -361,7 +363,7 @@ def launch_mm_encoder_server( encoder_args.pop("build_config") mm_encoder = MultimodalEncoder(**encoder_args) - server = OpenAIServer(llm=mm_encoder, + server = OpenAIServer(generator=mm_encoder, model=model, server_role=ServerRole.MM_ENCODER, metadata_server_cfg=metadata_server_cfg, @@ -369,6 +371,45 @@ def launch_mm_encoder_server( asyncio.run(server(host, port)) +def launch_visual_gen_server( + host: str, + port: int, + visual_gen_config: dict, + metadata_server_cfg: Optional[MetadataServerConfig] = None, +): + """Launch a VISUAL_GEN model server for image/video generation. + + Args: + host: Server hostname. + port: Server port. + visual_gen_config: Arguments for VISUAL_GEN model initialization. + metadata_server_cfg: Optional metadata server configuration. + """ + model = visual_gen_config["model"] + logger.info(f"Initializing VisualGen ({model})") + + n_workers = 1 + parallel_config = visual_gen_config.get("parallel", {}) + if parallel_config: + n_workers = parallel_config.get( + "dit_cfg_size", 1) * parallel_config.get("dit_ulysses_size", 1) + logger.info(f"World size: {n_workers}") + logger.info(f"CFG size: {parallel_config.get('dit_cfg_size', 1)}") + logger.info( + f"Ulysses size: {parallel_config.get('dit_ulysses_size', 1)}") + + visual_gen_model = VisualGen(model_path=model, + n_workers=n_workers, + diffusion_config=visual_gen_config) + + server = OpenAIServer(generator=visual_gen_model, + model=model, + server_role=ServerRole.VISUAL_GEN, + metadata_server_cfg=metadata_server_cfg, + tool_parser=None) + asyncio.run(server(host, port)) + + class ChoiceWithAlias(click.Choice): def __init__(self, @@ -600,6 +641,12 @@ class ChoiceWithAlias(click.Choice): default=False, help="Run gRPC server instead of OpenAI HTTP server. 
" "gRPC server accepts pre-tokenized requests and returns raw token IDs.") +@click.option("--extra_visual_gen_options", + type=str, + default=None, + help=help_info_with_stability_tag( + "Path to a YAML file with extra VISUAL_GEN model options.", + "prototype")) def serve( model: str, tokenizer: Optional[str], custom_tokenizer: Optional[str], host: str, port: int, log_level: str, backend: str, max_beam_width: int, @@ -616,8 +663,8 @@ def serve( otlp_traces_endpoint: Optional[str], enable_chunked_prefill: bool, disagg_cluster_uri: Optional[str], media_io_kwargs: Optional[str], custom_module_dirs: list[Path], chat_template: Optional[str], - grpc: bool): - """Running an OpenAI API compatible server (or gRPC server with --grpc flag) + grpc: bool, extra_visual_gen_options: Optional[str]): + """Running an OpenAI API compatible server MODEL: model name | HF checkpoint path | TensorRT engine path """ @@ -630,93 +677,120 @@ def serve( logger.error( f"Failed to import custom module from {custom_module_dir}: {e}") raise e - llm_args, _ = get_llm_args( - model=model, - tokenizer=tokenizer, - custom_tokenizer=custom_tokenizer, - backend=backend, - max_beam_width=max_beam_width, - max_batch_size=max_batch_size, - max_num_tokens=max_num_tokens, - max_seq_len=max_seq_len, - tensor_parallel_size=tensor_parallel_size, - pipeline_parallel_size=pipeline_parallel_size, - context_parallel_size=context_parallel_size, - moe_expert_parallel_size=moe_expert_parallel_size, - moe_cluster_parallel_size=moe_cluster_parallel_size, - gpus_per_node=gpus_per_node, - free_gpu_memory_fraction=free_gpu_memory_fraction, - num_postprocess_workers=num_postprocess_workers, - trust_remote_code=trust_remote_code, - revision=revision, - reasoning_parser=reasoning_parser, - fail_fast_on_attention_window_too_large= - fail_fast_on_attention_window_too_large, - otlp_traces_endpoint=otlp_traces_endpoint, - enable_chunked_prefill=enable_chunked_prefill) - llm_args_extra_dict = {} - if extra_llm_api_options is not None: - with open(extra_llm_api_options, 'r') as f: - llm_args_extra_dict = yaml.safe_load(f) - llm_args = update_llm_args_with_extra_dict(llm_args, llm_args_extra_dict) + def _serve_llm(): + nonlocal server_role + llm_args, _ = get_llm_args( + model=model, + tokenizer=tokenizer, + custom_tokenizer=custom_tokenizer, + backend=backend, + max_beam_width=max_beam_width, + max_batch_size=max_batch_size, + max_num_tokens=max_num_tokens, + max_seq_len=max_seq_len, + tensor_parallel_size=tensor_parallel_size, + pipeline_parallel_size=pipeline_parallel_size, + context_parallel_size=context_parallel_size, + moe_expert_parallel_size=moe_expert_parallel_size, + moe_cluster_parallel_size=moe_cluster_parallel_size, + gpus_per_node=gpus_per_node, + free_gpu_memory_fraction=free_gpu_memory_fraction, + num_postprocess_workers=num_postprocess_workers, + trust_remote_code=trust_remote_code, + revision=revision, + reasoning_parser=reasoning_parser, + fail_fast_on_attention_window_too_large= + fail_fast_on_attention_window_too_large, + otlp_traces_endpoint=otlp_traces_endpoint, + enable_chunked_prefill=enable_chunked_prefill) - metadata_server_cfg = parse_metadata_server_config_file( - metadata_server_config_file) + llm_args_extra_dict = {} + if extra_llm_api_options is not None: + with open(extra_llm_api_options, 'r') as f: + llm_args_extra_dict = yaml.safe_load(f) + llm_args = update_llm_args_with_extra_dict(llm_args, + llm_args_extra_dict) - # Specify disagg_cluster_config in config file or through command line "--disagg_cluster_uri", - # but 
disagg_cluster_uri takes precedence over cluster uri in config file - disagg_cluster_config = llm_args.pop("disagg_cluster", None) - if disagg_cluster_config: - disagg_cluster_config = extract_disagg_cluster_config( - disagg_cluster_config, disagg_cluster_uri) - elif disagg_cluster_uri: - disagg_cluster_config = DisaggClusterConfig( - cluster_uri=disagg_cluster_uri) + metadata_server_cfg = parse_metadata_server_config_file( + metadata_server_config_file) - if metadata_server_cfg is not None or disagg_cluster_config is not None: - assert ( - server_role is not None - ), "server_role is required when metadata_server_cfg or disagg_cluster_config is provided" - try: - server_role = ServerRole[server_role.upper()] - except ValueError: - raise ValueError(f"Invalid server role: {server_role}. " \ - f"Must be one of: {', '.join([role.name for role in ServerRole])}") + # Specify disagg_cluster_config in config file or through command line "--disagg_cluster_uri", + # but disagg_cluster_uri takes precedence over cluster uri in config file + disagg_cluster_config = llm_args.pop("disagg_cluster", None) + if disagg_cluster_config: + disagg_cluster_config = extract_disagg_cluster_config( + disagg_cluster_config, disagg_cluster_uri) + elif disagg_cluster_uri: + disagg_cluster_config = DisaggClusterConfig( + cluster_uri=disagg_cluster_uri) - # Parse media_io_kwargs from JSON string to dict if provided - parsed_media_io_kwargs = None - if media_io_kwargs is not None: - try: - parsed_media_io_kwargs = json.loads(media_io_kwargs) - except json.JSONDecodeError as e: - raise ValueError(f"Invalid JSON for media_io_kwargs: {e}") + if metadata_server_cfg is not None or disagg_cluster_config is not None: + assert ( + server_role is not None + ), "server_role is required when metadata_server_cfg or disagg_cluster_config is provided" + try: + server_role = ServerRole[server_role.upper()] + except ValueError: + raise ValueError(f"Invalid server role: {server_role}. " \ + f"Must be one of: {', '.join([role.name for role in ServerRole])}") + # Parse media_io_kwargs from JSON string to dict if provided + parsed_media_io_kwargs = None + if media_io_kwargs is not None: + try: + parsed_media_io_kwargs = json.loads(media_io_kwargs) + except json.JSONDecodeError as e: + raise ValueError(f"Invalid JSON for media_io_kwargs: {e}") - multimodal_server_config = MultimodalServerConfig( - media_io_kwargs=parsed_media_io_kwargs) + multimodal_server_config = MultimodalServerConfig( + media_io_kwargs=parsed_media_io_kwargs) - if grpc: - # gRPC mode: launch gRPC server instead of OpenAI HTTP server - # Check for unsupported arguments that are silently ignored in gRPC mode - unsupported_args = { - "tool_parser": tool_parser, - "chat_template": chat_template, - "metadata_server_config_file": metadata_server_config_file, - "server_role": server_role, - "disagg_cluster_config": disagg_cluster_config, + if grpc: + # gRPC mode: launch gRPC server instead of OpenAI HTTP server + # Check for unsupported arguments that are silently ignored in gRPC mode + unsupported_args = { + "tool_parser": tool_parser, + "chat_template": chat_template, + "metadata_server_config_file": metadata_server_config_file, + "server_role": server_role, + "disagg_cluster_config": disagg_cluster_config, + } + for name, value in unsupported_args.items(): + if value is not None: + raise ValueError( + f"Argument '{name}' is not supported when running in gRPC mode. 
" + f"The gRPC server is designed for use with external routers that handle " + f"these features (e.g., tool parsing, chat templates).") + launch_grpc_server(host, port, llm_args) + else: + # Default: launch OpenAI HTTP server + launch_server(host, port, llm_args, tool_parser, chat_template, + metadata_server_cfg, server_role, + disagg_cluster_config, multimodal_server_config) + + def _serve_visual_gen(): + visual_gen_config = { + "model": model, + "model_type": get_visual_gen_model_type(model), } - for name, value in unsupported_args.items(): - if value is not None: - raise ValueError( - f"Argument '{name}' is not supported when running in gRPC mode. " - f"The gRPC server is designed for use with external routers that handle " - f"these features (e.g., tool parsing, chat templates).") - launch_grpc_server(host, port, llm_args) + + visual_gen_extra_args = {} + if extra_visual_gen_options is not None: + with open(extra_visual_gen_options, 'r') as f: + visual_gen_extra_args = yaml.safe_load(f) + + visual_gen_config.update(visual_gen_extra_args) + + metadata_server_cfg = parse_metadata_server_config_file( + metadata_server_config_file) + + launch_visual_gen_server(host, port, visual_gen_config, + metadata_server_cfg) + + if get_is_diffusion_model(model): + _serve_visual_gen() else: - # Default: launch OpenAI HTTP server - launch_server(host, port, llm_args, tool_parser, chat_template, - metadata_server_cfg, server_role, disagg_cluster_config, - multimodal_server_config) + _serve_llm() @click.command("mm_embedding_serve") diff --git a/tensorrt_llm/commands/utils.py b/tensorrt_llm/commands/utils.py new file mode 100644 index 0000000000..df1442c6e7 --- /dev/null +++ b/tensorrt_llm/commands/utils.py @@ -0,0 +1,132 @@ +# Adapted from https://github.com/sgl-project/sglang/blob/030496eb06472f76fcb11de53d93f10cefb4604f/python/sglang/cli/utils.py#L27 +import json +import logging +import os + +from tensorrt_llm.llmapi.utils import download_hf_partial + +logger = logging.getLogger(__name__) + + +def _maybe_download_model( + model_name_or_path: str, local_dir: str | None = None, download: bool = True +) -> str: + """Resolve a model path. If it's a local directory, return it. + + If it's a Hugging Face Hub ID, download only the config file + (`model_index.json` or `config.json`) and return its directory. + + Args: + model_name_or_path: Local path or Hugging Face Hub model ID + local_dir: Local directory to save the downloaded file (if any) + download: Whether to download from Hugging Face Hub when needed + + Returns: + Local directory path that contains the downloaded config file, or the original local directory. + """ + if os.path.exists(model_name_or_path): + logger.info("Model already exists locally") + return model_name_or_path + + if not download: + return model_name_or_path + + try: + logger.info( + "Downloading model_index.json from HF Hub for %s...", + model_name_or_path, + ) + file_path = download_hf_partial( + model=model_name_or_path, + allow_patterns=["model_index.json", "config.json"], + ) + logger.info("Downloaded to %s", file_path) + return str(file_path) + except Exception as e: + raise ValueError( + ( + "Could not find model locally at %s and failed to download " + "model_index.json/config.json from HF Hub: %s" + ) + % (model_name_or_path, e) + ) from e + + +# Copied and adapted from hf_diffusers_utils.py +def is_diffusers_model_path(model_path: str) -> bool: + """Verify if the model directory contains a valid diffusers configuration. 
+    Args:
+        model_path: Path to the model directory
+
+    Returns:
+        True if the directory contains a valid diffusers pipeline configuration
+        (a model_index.json with a `_diffusers_version` field), False otherwise.
+    """
+    # Prefer model_index.json which indicates a diffusers pipeline
+    config_path = os.path.join(model_path, "model_index.json")
+    if not os.path.exists(config_path):
+        return False
+
+    # Load the config
+    with open(config_path) as f:
+        config = json.load(f)
+
+    # Verify diffusers version exists
+    if "_diffusers_version" not in config:
+        return False
+    return True
+
+
+def get_is_diffusion_model(model_path: str) -> bool:
+    model_path = _maybe_download_model(model_path)
+    is_diffusion_model = is_diffusers_model_path(model_path)
+    if is_diffusion_model:
+        logger.info("Diffusion model detected")
+    return is_diffusion_model
+
+
+def get_model_path(extra_argv):
+    # Find the model_path argument
+    model_path = None
+    for i, arg in enumerate(extra_argv):
+        if arg == "--model-path":
+            if i + 1 < len(extra_argv):
+                model_path = extra_argv[i + 1]
+            break
+        elif arg.startswith("--model-path="):
+            model_path = arg.split("=", 1)[1]
+            break
+
+    if model_path is None:
+        # Fallback for --help or other cases where model-path is not provided
+        if any(h in extra_argv for h in ["-h", "--help"]):
+            raise Exception(
+                "Usage: trtllm-serve --model-path [additional-arguments]\n\n"
+                "This command can launch either a standard language model server or a diffusion model server.\n"
+                "The server type is determined by the model path.\n"
+                "For specific arguments, please provide a model_path."
+            )
+        else:
+            raise Exception(
+                "Error: --model-path is required. Please provide the path to the model."
+            )
+    return model_path
+
+
+VISUAL_GEN_PARTIAL_MODEL_NAME_TO_MODEL_TYPE = {
+    "FLUX.2": "flux2",
+    "LTX-2": "ltx2",
+    "Wan2": "wan2",
+}
+
+
+def get_visual_gen_model_type(model_path: str) -> str:
+    for partial_model_name, model_type in VISUAL_GEN_PARTIAL_MODEL_NAME_TO_MODEL_TYPE.items():
+        if partial_model_name.lower() in model_path.lower():
+            return model_type
+
+    raise ValueError(
+        f"Unknown VISUAL_GEN model type for model path: {model_path}, "
+        f"available models: {list(VISUAL_GEN_PARTIAL_MODEL_NAME_TO_MODEL_TYPE.keys())}"
+    )
diff --git a/tensorrt_llm/executor/ipc.py b/tensorrt_llm/executor/ipc.py
index f09dd31dc4..4fafc8307a 100644
--- a/tensorrt_llm/executor/ipc.py
+++ b/tensorrt_llm/executor/ipc.py
@@ -83,8 +83,7 @@ class ZeroMqQueue:
                 "Server and client should not receive HMAC key when encryption is disabled"
             )
 
-        if (socket_type == zmq.PAIR and self.is_server
-            ) or socket_type == zmq.PULL or socket_type == zmq.ROUTER:
+        if self.should_bind_socket():
             self.socket.bind(
                 self.address_endpoint
             )  # Binds to the address and occupy a port immediately
@@ -101,6 +100,31 @@
 
         self.address = (self.address_endpoint, self.hmac_key)
 
+    def should_bind_socket(self) -> bool:
+        """
+        Determine if socket should bind vs connect based on type and role.
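+
+        Example (illustrative): VisualGen's request queue is created as a
+        server-side PUSH socket, so it binds; the worker-side PULL peer
+        connects.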
+ + ZMQ binding conventions: + - PAIR: server binds, client connects (1-to-1 bidirectional) + - PULL: server binds to receive from multiple PUSH sockets + - PUSH: server binds when acting as message source + - ROUTER: always binds to handle multiple clients + + Returns: + True if socket should bind, False if it should connect + """ + # Server binds for PAIR, PULL, PUSH patterns + if self.is_server and self.socket_type in (zmq.PAIR, zmq.PULL, + zmq.PUSH): + return True + + # ROUTER always binds (multi-client pattern) + if self.socket_type == zmq.ROUTER: + return True + + # Client connects for all other cases + return False + def setup_lazily(self): # Early return if setup is already done if self._setup_done: diff --git a/tensorrt_llm/inputs/data.py b/tensorrt_llm/inputs/data.py index 615043fe48..48e3441df6 100644 --- a/tensorrt_llm/inputs/data.py +++ b/tensorrt_llm/inputs/data.py @@ -1,6 +1,6 @@ # Adapt from # https://github.com/vllm-project/vllm/blob/2e33fe419186c65a18da6668972d61d7bbc31564/vllm/inputs/data.py -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Sequence, Union from typing_extensions import NotRequired, TypedDict @@ -85,3 +85,80 @@ def prompt_inputs(inputs: PromptInputs, ) -> Union[TextPrompt, TokensPrompt]: f"Invalid type of inputs for llm.generate: {type(inputs)}") return prompt_inputs + + +class VisualGenTextPrompt(TypedDict): + prompt: str + negative_prompt: NotRequired[str] + + +class VisualGenTokensPrompt(TypedDict): + prompt_token_ids: List[int] + negative_prompt_token_ids: NotRequired[List[int]] + + +VisualGenPromptInputs = Union[ + str, + List[int], + VisualGenTextPrompt, + VisualGenTokensPrompt, +] + +VisualGenInputs = Union[ + VisualGenPromptInputs, + Sequence[VisualGenPromptInputs], +] + + +def visual_gen_inputs( + inputs: "VisualGenPromptInputs", +) -> Union["VisualGenTextPrompt", "VisualGenTokensPrompt"]: + # str -> text prompt + if isinstance(inputs, str): + return VisualGenTextPrompt(prompt=inputs) + + # list[int] -> token prompt + if isinstance(inputs, list): + if len(inputs) == 0: + raise ValueError("`inputs` token list cannot be empty.") + if not all(isinstance(t, int) for t in inputs): + raise TypeError( + "`inputs` list must contain only ints when used as token IDs.") + return VisualGenTokensPrompt(prompt_token_ids=inputs) + + # dict form + if isinstance(inputs, dict): + has_prompt = "prompt" in inputs + has_prompt_token_ids = "prompt_token_ids" in inputs + + if has_prompt == has_prompt_token_ids: + raise ValueError( + "VisualGen prompt dict must contain exactly one of " + "`prompt` or `prompt_token_ids`.") + + if has_prompt: + prompt = inputs.get("prompt") + if not isinstance(prompt, str) or prompt == "": + raise TypeError("`prompt` must be a non-empty string.") + if "negative_prompt" in inputs and not isinstance( + inputs["negative_prompt"], str): + raise TypeError("`negative_prompt` must be a string.") + return inputs # VisualGenTextPrompt + + token_ids = inputs.get("prompt_token_ids") + if not isinstance(token_ids, list) or len(token_ids) == 0: + raise TypeError("`prompt_token_ids` must be a non-empty list[int].") + if not all(isinstance(t, int) for t in token_ids): + raise TypeError("`prompt_token_ids` must contain only ints.") + if "negative_prompt_token_ids" in inputs: + neg_ids = inputs["negative_prompt_token_ids"] + if not isinstance(neg_ids, list) or not all( + isinstance(t, int) for t in neg_ids): + raise TypeError( + "`negative_prompt_token_ids` must be a list[int].") + return inputs # VisualGenTokensPrompt + + 
raise TypeError(
+        "Invalid `inputs` for VisualGen.generate. "
+        "Expected one of: str, list[int], VisualGenTextPrompt, VisualGenTokensPrompt."
+    )
diff --git a/tensorrt_llm/llmapi/__init__.py b/tensorrt_llm/llmapi/__init__.py
index b87b21f9f5..430426786f 100644
--- a/tensorrt_llm/llmapi/__init__.py
+++ b/tensorrt_llm/llmapi/__init__.py
@@ -22,10 +22,13 @@ from .llm_utils import (BuildConfig,
                         KvCacheRetentionConfig, QuantAlgo, QuantConfig)
 from .mm_encoder import MultimodalEncoder
 from .mpi_session import MpiCommSession
+from .visual_gen import VisualGen, VisualGenParams
 
 __all__ = [
     'LLM',
     'AsyncLLM',
+    'VisualGen',
+    'VisualGenParams',
     'MultimodalEncoder',
     'CompletionOutput',
     'RequestOutput',
diff --git a/tensorrt_llm/llmapi/disagg_utils.py b/tensorrt_llm/llmapi/disagg_utils.py
index 8512771e1f..86edbefea7 100644
--- a/tensorrt_llm/llmapi/disagg_utils.py
+++ b/tensorrt_llm/llmapi/disagg_utils.py
@@ -24,6 +24,7 @@ class ServerRole(IntEnum):
     CONTEXT = 0
     GENERATION = 1
     MM_ENCODER = 2
+    VISUAL_GEN = 3
 
 
 @dataclass
diff --git a/tensorrt_llm/llmapi/utils.py b/tensorrt_llm/llmapi/utils.py
index f79823d844..78fe3d6298 100644
--- a/tensorrt_llm/llmapi/utils.py
+++ b/tensorrt_llm/llmapi/utils.py
@@ -236,18 +236,34 @@ def download_hf_model(model: str, revision: Optional[str] = None) -> Path:
     return Path(hf_folder)
 
 
-def download_hf_pretrained_config(model: str,
-                                  revision: Optional[str] = None) -> Path:
+def download_hf_partial(model: str,
+                        allow_patterns: List[str],
+                        revision: Optional[str] = None) -> Path:
+    """Download a partial model from HuggingFace.
+
+    Args:
+        model: The model name or path.
+        allow_patterns: Glob patterns of the files to download.
+        revision: The revision to use for the model.
+
+    Returns:
+        The local path of the downloaded (partial) snapshot.
+    """
     with get_file_lock(model):
         hf_folder = snapshot_download(
             model,
             local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
             revision=revision,
-            allow_patterns=["config.json"],
+            allow_patterns=allow_patterns,
             tqdm_class=DisabledTqdm)
     return Path(hf_folder)
 
 
+def download_hf_pretrained_config(model: str,
+                                  revision: Optional[str] = None) -> Path:
+    return download_hf_partial(model, ["config.json"], revision)
+
+
 def append_docstring(docstring: str):
     ''' A decorator to append a docstring to a function.
''' diff --git a/tensorrt_llm/llmapi/visual_gen.py b/tensorrt_llm/llmapi/visual_gen.py new file mode 100644 index 0000000000..2113b43548 --- /dev/null +++ b/tensorrt_llm/llmapi/visual_gen.py @@ -0,0 +1,544 @@ +import asyncio +import queue +import socket +import threading +import time +import traceback +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +import torch.multiprocessing as mp +import zmq + +from tensorrt_llm._torch.visual_gen import DiffusionRequest, DiffusionResponse +from tensorrt_llm._torch.visual_gen.executor import run_diffusion_worker +from tensorrt_llm._torch.visual_gen.output import MediaOutput + +__all__ = ["VisualGen", "VisualGenParams", "MediaOutput"] +from tensorrt_llm.executor.ipc import ZeroMqQueue +from tensorrt_llm.inputs.data import VisualGenInputs +from tensorrt_llm.logger import logger + +# Timeouts (seconds) +POLL_TIMEOUT = 0.01 +AWAIT_TIMEOUT = 0.05 +THREAD_TIMEOUT = 5.0 +WORKER_TIMEOUT = 2.0 +READY_TIMEOUT = 1200 # 20 minutes for large models (Wan 2.2 with transformer_2) + + +def find_free_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) + return s.getsockname()[1] + + +def get_ip_address() -> str: + """Get local IP address.""" + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + s.connect(("10.255.255.255", 1)) + return s.getsockname()[0] + except Exception: + return "127.0.0.1" + finally: + s.close() + + +class DiffusionRemoteClient: + """Client proxy for remote DiffusionExecutor in worker processes.""" + + def __init__( + self, + model_path: Union[str, Path], + n_workers: int = 1, + diffusion_config: Optional[dict] = None, + ): + self.model_path = str(model_path) + self.n_workers = n_workers + self.diffusion_config = diffusion_config + + # Setup distributed env + self.master_addr = "127.0.0.1" + self.master_port = find_free_port() + + # Setup IPC addresses + self.host_ip = get_ip_address() + req_port, resp_port = find_free_port(), find_free_port() + + self.request_queue_addr = f"tcp://0.0.0.0:{req_port}" + self.response_queue_addr = f"tcp://0.0.0.0:{resp_port}" + self.req_addr_connect = f"tcp://{self.host_ip}:{req_port}" + self.resp_addr_connect = f"tcp://{self.host_ip}:{resp_port}" + + # IPC setup + self.requests_ipc = None + self.responses_ipc = None + self.pending_requests = queue.Queue() + self.completed_responses: Dict[int, DiffusionResponse] = {} + + # We'll create asyncio primitives in the background thread's event loop + self._event_loop = None + self.response_event = None + self.lock = None + self.shutdown_event = threading.Event() + self.event_loop_ready = threading.Event() + + # Start background thread (it will create its own event loop) + self.background_thread = threading.Thread(target=self._serve_forever_thread, daemon=True) + self.background_thread.start() + + # Wait for the background thread to initialize the event loop + self.event_loop_ready.wait() + + # Launch workers + logger.info(f"DiffusionClient: Launching {n_workers} workers") + ctx = mp.get_context("spawn") + self.worker_processes = [] + for rank in range(n_workers): + p = ctx.Process( + target=run_diffusion_worker, + kwargs={ + "rank": rank, + "world_size": n_workers, + "master_addr": self.master_addr, + "master_port": self.master_port, + "model_path": self.model_path, + "request_queue_addr": self.req_addr_connect, + "response_queue_addr": self.resp_addr_connect, + "diffusion_config": self.diffusion_config, + }, + ) + p.start() + 
self.worker_processes.append(p) + + self._wait_ready() + + @staticmethod + def _close_socket(ipc_queue): + if ipc_queue and ipc_queue.socket: + ipc_queue.socket.setsockopt(zmq.LINGER, 0) + ipc_queue.close() + + def enqueue_requests(self, requests: List[DiffusionRequest]) -> List[int]: + """Enqueue requests and return their IDs.""" + req_ids = [] + for req in requests: + self.pending_requests.put(req) + req_ids.append(req.request_id) + return req_ids + + async def await_responses( + self, request_ids: Union[int, List[int]], timeout: Optional[float] = None + ) -> Union[DiffusionResponse, List[DiffusionResponse]]: + """Wait for responses by request IDs. + + Args: + request_ids: Single request ID or list of request IDs to wait for + timeout: Maximum total wait time in seconds (None = wait indefinitely) + + Returns: + Single response or list of responses (None if request timed out) + """ + is_single = isinstance(request_ids, int) + ids = [request_ids] if is_single else request_ids + + start_time = time.time() + results = {} + + while len(results) < len(ids): + async with self.lock: + for req_id in ids: + if req_id in self.completed_responses: + results[req_id] = self.completed_responses.pop(req_id) + + # All responses collected + if len(results) == len(ids): + break + + # Check if overall timeout exceeded + if timeout is not None: + elapsed = time.time() - start_time + if elapsed >= timeout: + break + # Wait for remaining time or AWAIT_TIMEOUT, whichever is shorter + wait_time = min(timeout - elapsed, AWAIT_TIMEOUT) + else: + wait_time = AWAIT_TIMEOUT + + try: + await asyncio.wait_for(self.response_event.wait(), timeout=wait_time) + except asyncio.TimeoutError: + pass + self.response_event.clear() + + out = [results.get(rid) for rid in ids] + return out[0] if is_single else out + + def await_responses_sync( + self, request_ids: Union[int, List[int]], timeout: Optional[float] = None + ) -> Union[DiffusionResponse, List[DiffusionResponse]]: + """Sync wrapper to await responses from the main thread.""" + future = asyncio.run_coroutine_threadsafe( + self.await_responses(request_ids, timeout), self._event_loop + ) + return future.result(timeout=timeout if timeout else None) + + def _init_ipc(self) -> bool: + """Initialize IPC queues.""" + try: + logger.info("DiffusionClient: Initializing IPC") + self.requests_ipc = ZeroMqQueue( + (self.request_queue_addr, None), + is_server=True, + socket_type=zmq.PUSH, + use_hmac_encryption=False, + ) + self.responses_ipc = ZeroMqQueue( + (self.response_queue_addr, None), + is_server=True, + socket_type=zmq.PULL, + use_hmac_encryption=False, + ) + logger.info("DiffusionClient: IPC ready") + return True + except Exception as e: + logger.error(f"DiffusionClient: IPC init failed: {e}") + return False + + def _send_shutdown(self): + """Send shutdown signal.""" + logger.info("DiffusionClient: Sending shutdown signal") + if self.requests_ipc: + self.requests_ipc.put(None) + self._close_socket(self.requests_ipc) + + def _process_requests(self): + """Process pending requests.""" + try: + req = self.pending_requests.get(timeout=POLL_TIMEOUT) + if req is None: + self._send_shutdown() + self.shutdown_event.set() + return + + logger.info(f"DiffusionClient: Sending request {req.request_id}") + self.requests_ipc.put(req) + except queue.Empty: + pass + except Exception as e: + logger.error(f"DiffusionClient: Error sending request: {e}") + logger.error(traceback.format_exc()) + + def _process_responses(self): + """Poll and process responses.""" + try: + if 
self.responses_ipc.poll(timeout=POLL_TIMEOUT): + response = self.responses_ipc.get() + if isinstance(response, DiffusionResponse): + if response.request_id == -1: + logger.info("DiffusionClient: Received READY signal") + + # Schedule the lock acquisition and event setting in the event loop + asyncio.run_coroutine_threadsafe( + self._store_response(response), self._event_loop + ) + except Exception as e: + logger.error(f"DiffusionClient: Error processing response: {e}") + + async def _store_response(self, response: DiffusionResponse): + """Store response in the completed_responses dict (async helper).""" + async with self.lock: + self.completed_responses[response.request_id] = response + self.response_event.set() + + def _cleanup_ipc(self): + """Cleanup IPC.""" + logger.info("DiffusionClient: Cleaning up IPC") + self._close_socket(self.requests_ipc) + self._close_socket(self.responses_ipc) + + def _serve_forever_thread(self): + """Background thread wrapper that creates and runs an event loop.""" + logger.info("DiffusionClient: Background thread started") + + # Create a new event loop for this thread + self._event_loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._event_loop) + + # Create async primitives in this thread's event loop + self.response_event = asyncio.Event() + self.lock = asyncio.Lock() + + # Signal that the event loop is ready + self.event_loop_ready.set() + + # Run the async serve_forever + try: + self._event_loop.run_until_complete(self._serve_forever()) + finally: + self._event_loop.close() + logger.info("DiffusionClient: Background thread stopped") + + async def _serve_forever(self): + """Background thread main loop (async version).""" + if not self._init_ipc(): + return + + while not self.shutdown_event.is_set(): + self._process_requests() + self._process_responses() + await asyncio.sleep(0.001) # Yield control to allow other coroutines to run + + self._cleanup_ipc() + + def shutdown(self): + """Shutdown client and workers.""" + logger.info("DiffusionClient: Shutting down") + self.pending_requests.put(None) + + self.background_thread.join(timeout=THREAD_TIMEOUT) + if self.background_thread.is_alive(): + logger.warning("DiffusionClient: Force stopping background thread") + self.shutdown_event.set() + self.background_thread.join(timeout=1.0) + + # Shutdown workers + logger.info("DiffusionClient: Stopping workers") + for p in self.worker_processes: + p.join(timeout=WORKER_TIMEOUT) + if p.is_alive(): + logger.warning(f"DiffusionClient: Terminating worker {p.pid} with SIGTERM") + p.terminate() + p.join(timeout=WORKER_TIMEOUT) + if p.is_alive(): + logger.warning(f"DiffusionClient: Force killing worker {p.pid} with SIGKILL") + p.kill() + p.join(timeout=WORKER_TIMEOUT) + + def _wait_ready(self, timeout: float = READY_TIMEOUT): + """Wait for workers to be ready (sync wrapper for async operation).""" + logger.info("DiffusionClient: Waiting for workers") + + # Run the async wait in the background thread's event loop + future = asyncio.run_coroutine_threadsafe(self._wait_ready_async(timeout), self._event_loop) + return future.result(timeout=timeout) + + async def _wait_ready_async(self, timeout: float = READY_TIMEOUT): + """Wait for workers to be ready (async version).""" + start_time = time.time() + + while True: + async with self.lock: + if -1 in self.completed_responses: + self.completed_responses.pop(-1) + logger.info("DiffusionClient: Workers ready") + return + + if time.time() - start_time > timeout: + raise RuntimeError("DiffusionClient: Timeout waiting for 
workers") + + try: + await asyncio.wait_for(self.response_event.wait(), timeout=AWAIT_TIMEOUT) + except asyncio.TimeoutError: + pass + self.response_event.clear() + + +class DiffusionGenerationResult: + """Future-like object for async generation.""" + + def __init__(self, request_id: int, executor: DiffusionRemoteClient): + self.request_id = request_id + self.executor = executor + self._result = None + self._finished = False + self._error = None + + async def result(self, timeout: Optional[float] = None) -> Any: + """Wait for and return result (async version). + + Can be awaited from any async context (e.g., FastAPI background tasks). + """ + if self._finished: + if self._error: + raise RuntimeError(self._error) + return self._result + + # Use run_coroutine_threadsafe to execute in the background thread's event loop + future = asyncio.run_coroutine_threadsafe( + self.executor.await_responses(self.request_id, timeout=timeout), + self.executor._event_loop, + ) + + # Await the future in the current event loop + response = await asyncio.wrap_future(future) + + if response.error_msg: + self._error = response.error_msg + self._finished = True + raise RuntimeError(f"Generation failed: {response.error_msg}") + + self._result = response.output + self._finished = True + return self._result + + def cancel(self): + raise NotImplementedError("Cancel request (not yet implemented).") + + +@dataclass +class VisualGenParams: + """Parameters for visual generation. + + Attributes: + height: Output height in pixels + width: Output width in pixels + num_inference_steps: Number of denoising steps + guidance_scale: Classifier-free guidance scale + max_sequence_length: Maximum sequence length for text encoding + seed: Random seed for reproducibility + + # Video-specific parameters + num_frames: Number of video frames to generate + frame_rate: Frame rate for video output in fps + + # Image-specific parameters + num_images_per_prompt: Number of images to generate per prompt (for image models) + + # Advanced parameters + guidance_rescale: Guidance rescale factor (for some models) + output_type: Output type ("pt" for PyTorch tensors, "pil" for PIL images) + """ + + height: int = 720 + width: int = 1280 + num_inference_steps: int = 50 + guidance_scale: float = 5.0 + max_sequence_length: int = 512 + seed: int = 42 + + # Video-specific parameters + num_frames: int = 81 + frame_rate: float = 24.0 + input_reference: Optional[str] = None + + # Image-specific parameters + num_images_per_prompt: int = 1 + + ## Image edit parameters + image: Optional[List[str]] = None + mask: Optional[str] = None + + # Advanced parameters + guidance_rescale: float = 0.0 + output_type: str = "pt" + + # Wan-specific parameters + guidance_scale_2: Optional[float] = None + boundary_ratio: Optional[float] = None + last_image: Optional[str] = None + + +class VisualGen: + """High-level API for visual generation.""" + + def __init__( + self, + model_path: Union[str, Path], + n_workers: int = 1, + diffusion_config: Optional[dict] = None, + ): + self.model_path = str(model_path) + self.n_workers = n_workers + self.diffusion_config = diffusion_config + + self.executor = DiffusionRemoteClient( + model_path=self.model_path, + n_workers=self.n_workers, + diffusion_config=self.diffusion_config, + ) + self.req_counter = 0 + + def generate( + self, + inputs: VisualGenInputs, + params: VisualGenParams, + ) -> MediaOutput: + """Synchronous generation. Blocks until complete. + + Args: + params: Generation parameters. 
+ + Returns: + MediaOutput: Generated media with model-specific fields populated: + - FLUX2: MediaOutput(image=torch.Tensor) + - WAN: MediaOutput(video=torch.Tensor) + - LTX2: MediaOutput(video=torch.Tensor, audio=torch.Tensor) + """ + future = self.generate_async( + inputs=inputs, + params=params, + ) + + # Use the sync wrapper to get result + response = self.executor.await_responses_sync(future.request_id, timeout=None) + if response.error_msg: + raise RuntimeError(f"Generation failed: {response.error_msg}") + return response.output + + def generate_async( + self, + inputs: VisualGenInputs, + params: VisualGenParams, + ) -> DiffusionGenerationResult: + """Async generation. Returns immediately with future-like object. + + Args: + params: Generation parameters. + + Returns: + DiffusionGenerationResult: Call result() to get output dict. + """ + req_id = self.req_counter + self.req_counter += 1 + + if isinstance(inputs, dict): + prompt = inputs.get("prompt") + negative_prompt = inputs.get("negative_prompt", None) + elif isinstance(inputs, str): + prompt = inputs + negative_prompt = None + else: + # TODO: Support batch generation + raise ValueError(f"Invalid inputs type: {type(inputs)}") + + request = DiffusionRequest( + request_id=req_id, + prompt=prompt, + negative_prompt=negative_prompt, + height=params.height, + width=params.width, + num_inference_steps=params.num_inference_steps, + guidance_scale=params.guidance_scale, + max_sequence_length=params.max_sequence_length, + seed=params.seed, + num_frames=params.num_frames, + frame_rate=params.frame_rate, + num_images_per_prompt=params.num_images_per_prompt, + guidance_rescale=params.guidance_rescale, + output_type=params.output_type, + image=params.input_reference, + guidance_scale_2=params.guidance_scale_2, + boundary_ratio=params.boundary_ratio, + last_image=params.last_image, + ) + + self.executor.enqueue_requests([request]) + return DiffusionGenerationResult(req_id, self.executor) + + def shutdown(self): + """Shutdown executor and cleanup.""" + logger.info("VisualGen: Shutting down") + self.executor.shutdown() diff --git a/tensorrt_llm/ray_stub.py b/tensorrt_llm/ray_stub.py index 9bd699d929..34d3b4e97c 100644 --- a/tensorrt_llm/ray_stub.py +++ b/tensorrt_llm/ray_stub.py @@ -16,10 +16,8 @@ from functools import wraps as _wraps from tensorrt_llm._utils import mpi_disabled as _mpi_disabled -if _mpi_disabled(): - raise RuntimeError( - "Ray requested (TLLM_DISABLE_MPI=1), but not installed. Please install Ray." - ) +# Don't raise error on import - only when Ray functionality is actually used +_RAY_NOT_INSTALLED_MSG = "Ray requested (TLLM_DISABLE_MPI=1), but not installed. Please install Ray." def remote(*args, **kwargs): @@ -42,6 +40,7 @@ def remote(*args, **kwargs): def __getattr__(name): - raise RuntimeError( - f'Ray not installed, so "ray.{name}" is unavailable. Please install Ray.' - ) + msg = f'Ray not installed, so "ray.{name}" is unavailable.' + if _mpi_disabled(): + msg = _RAY_NOT_INSTALLED_MSG + raise RuntimeError(msg) diff --git a/tensorrt_llm/serve/media_storage.py b/tensorrt_llm/serve/media_storage.py new file mode 100644 index 0000000000..acbdedae65 --- /dev/null +++ b/tensorrt_llm/serve/media_storage.py @@ -0,0 +1,426 @@ +#!/usr/bin/env python +"""Media Storage for generated images and videos. + +This module provides storage handlers for persisting generated media assets +(videos, images) and their associated metadata. 
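+
+Example (an illustrative sketch; any uint8 RGB tensor of shape (T, H, W, C)
+works as input, and the output path below is arbitrary):
+
+ import torch
+
+ frames = torch.zeros(16, 240, 320, 3, dtype=torch.uint8)  # 16 black frames
+ MediaStorage.save_video(frames, "/tmp/demo.mp4", frame_rate=24.0)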
+""" + +import os +from io import BytesIO +from pathlib import Path +from typing import Any, Optional + +import torch +from PIL import Image + +from tensorrt_llm.logger import logger + + +class MediaStorage: + """Handler for storing images and videos in various formats.""" + + @staticmethod + def save_image( + image: Any, output_path: str, format: Optional[str] = None, quality: int = 95 + ) -> str: + """Save image to file. + + Args: + image: torch.Tensor (H, W, C) uint8 + output_path: Path to save the image + format: Image format (png, jpg, webp). If None, infer from extension + quality: Quality for lossy formats (1-100, higher is better) + + Returns: + Path where the image was saved + """ + # Ensure output directory exists + output_dir = os.path.dirname(output_path) + if output_dir: + os.makedirs(output_dir, exist_ok=True) + + # Convert to PIL Image if needed + pil_image = MediaStorage._to_pil_image(image) + + # Determine format + if format is None: + ext = os.path.splitext(output_path)[1].lower() + if ext in [".png"]: + format = "PNG" + elif ext in [".jpg", ".jpeg"]: + format = "JPEG" + elif ext in [".webp"]: + format = "WEBP" + else: + logger.warning(f"Unknown image extension {ext}, defaulting to PNG") + format = "PNG" + output_path = output_path.rsplit(".", 1)[0] + ".png" + + # Save image with format-specific handling + MediaStorage._save_pil_image(pil_image, output_path, format, quality) + + logger.info(f"Saved image to {output_path} (format={format})") + return output_path + + @staticmethod + def convert_image_to_bytes(image: Any, format: str = "PNG", quality: int = 95) -> bytes: + """Convert image to bytes buffer. + + Args: + image: torch.Tensor (H, W, C) uint8 + format: Image format (PNG, JPEG, WEBP) + quality: Quality for lossy formats (1-100) + + Returns: + Image bytes + """ + pil_image = MediaStorage._to_pil_image(image) + + # Save to bytes buffer + buffer = BytesIO() + MediaStorage._save_pil_image(pil_image, buffer, format, quality) + + return buffer.getvalue() + + @staticmethod + def _to_pil_image(image: torch.Tensor) -> Image.Image: + """Convert torch.Tensor to PIL Image. + + Args: + image: torch.Tensor (H, W, C) uint8 + + Returns: + PIL Image + """ + if not isinstance(image, torch.Tensor): + raise ValueError(f"Expected torch.Tensor, got {type(image)}") + + # Convert to numpy for PIL + image_np = image.cpu().numpy() + return Image.fromarray(image_np) + + @staticmethod + def _save_pil_image( + pil_image: Image.Image, + output: Any, # Can be path string or BytesIO + format: str, + quality: int, + ): + """Save PIL Image to file or buffer. 
+ + Args: + pil_image: PIL Image to save + output: Output path (str) or BytesIO buffer + format: Image format (PNG, JPEG, WEBP) + quality: Quality for lossy formats (1-100) + """ + format_upper = format.upper() + + if format_upper in ["JPEG", "JPG"]: + # Convert RGBA to RGB for JPEG + if pil_image.mode in ("RGBA", "LA", "P"): + background = Image.new("RGB", pil_image.size, (255, 255, 255)) + if pil_image.mode == "P": + pil_image = pil_image.convert("RGBA") + background.paste( + pil_image, mask=pil_image.split()[-1] if pil_image.mode == "RGBA" else None + ) + pil_image = background + pil_image.save(output, format="JPEG", quality=quality, optimize=True) + elif format_upper == "WEBP": + pil_image.save(output, format="WEBP", quality=quality) + else: # PNG or default + pil_image.save(output, format="PNG", optimize=True) + + @staticmethod + def save_video( + video: Any, + output_path: str, + audio: Optional[Any] = None, + frame_rate: float = 24.0, + format: Optional[str] = None, + ) -> str: + """Save video to file with optional audio. + + Args: + video: Video frames as torch.Tensor (T, H, W, C) uint8 + output_path: Path to save the video + audio: Optional audio as torch.Tensor + frame_rate: Frames per second (default: 24.0) + format: Video format (mp4, gif, png). If None, infer from extension + + Returns: + Path where the video was saved + """ + # Ensure output directory exists + if isinstance(output_path, Path): + output_path = str(output_path) + + output_dir = os.path.dirname(output_path) + if output_dir: + os.makedirs(output_dir, exist_ok=True) + + # Determine format + if format is None: + ext = os.path.splitext(output_path)[1].lower() + format = ext[1:] if ext else "mp4" + + format = format.lower() + + # Save based on format + if format == "mp4": + MediaStorage._save_mp4(video, audio, output_path, frame_rate) + elif format == "gif": + MediaStorage._save_gif(video, output_path, frame_rate) + elif format == "png": + MediaStorage._save_middle_frame(video, output_path) + else: + logger.warning(f"Unsupported video format: {format}, defaulting to mp4") + output_path = output_path.rsplit(".", 1)[0] + ".mp4" + MediaStorage._save_mp4(video, audio, output_path, frame_rate) + + return output_path + + @staticmethod + def convert_video_to_bytes( + video: Any, audio: Optional[Any] = None, frame_rate: float = 24.0, format: str = "mp4" + ) -> bytes: + """Convert video to bytes buffer. + + Args: + video: Video frames as torch.Tensor (T, H, W, C) uint8 + audio: Optional audio as torch.Tensor + frame_rate: Frames per second + format: Video format (mp4, gif) + + Returns: + Video bytes + """ + import tempfile + + # Create temporary file + with tempfile.NamedTemporaryFile(suffix=f".{format}", delete=False) as tmp_file: + tmp_path = tmp_file.name + + try: + # Save to temporary file + MediaStorage.save_video(video, tmp_path, audio, frame_rate, format) + + # Read bytes + with open(tmp_path, "rb") as f: + video_bytes = f.read() + + return video_bytes + finally: + # Clean up temporary file + if os.path.exists(tmp_path): + os.unlink(tmp_path) + + @staticmethod + def _save_mp4( + video: torch.Tensor, audio: Optional[torch.Tensor], output_path: str, frame_rate: float + ) -> str: + """Save video with optional audio as MP4. 
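+
+ Encodes with PyAV (libx264 video, AAC audio). If PyAV is not installed,
+ falls back to saving the middle frame as a PNG.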
+ + Args: + video: Video frames as torch.Tensor (T, H, W, C) uint8 + audio: Optional audio as torch.Tensor + output_path: Output path for MP4 + frame_rate: Frames per second + + Returns: + Path where the video was saved + """ + try: + from fractions import Fraction + + import av + + if not isinstance(video, torch.Tensor): + raise ValueError(f"Expected torch.Tensor for video, got {type(video)}") + + # Convert video tensor to numpy: (T, H, W, C) uint8 + video_np = video.cpu().numpy() + num_frames, height, width, channels = video_np.shape + + # Ensure RGB format (3 channels) + if channels != 3: + raise ValueError(f"Expected 3-channel RGB video, got {channels} channels") + + # Open output container + container = av.open(output_path, mode="w") + + # Add video stream (H.264 codec) + video_stream = container.add_stream("libx264", rate=int(frame_rate)) + video_stream.width = width + video_stream.height = height + video_stream.pix_fmt = "yuv420p" + video_stream.options = {"preset": "medium", "crf": "23"} + + # Pre-process audio and add audio stream BEFORE any muxing. + # All streams must be registered before the first mux() call + # (which triggers container header writing). + audio_stream = None + audio_tensor = None + audio_sample_rate = 24000 # Default sample rate + if audio is not None: + if not isinstance(audio, torch.Tensor): + raise ValueError(f"Expected torch.Tensor for audio, got {type(audio)}") + + # Prepare audio tensor: convert to (samples, channels) format + audio_tensor = audio + + # Handle different audio tensor dimensions + if audio_tensor.ndim == 1: + # Mono audio: (samples,) -> (samples, 1) + audio_tensor = audio_tensor[:, None] + elif audio_tensor.ndim == 2: + # If shape[1] != 2 and shape[0] == 2, transpose to (samples, channels) + if audio_tensor.shape[1] != 2 and audio_tensor.shape[0] == 2: + audio_tensor = audio_tensor.T + if audio_tensor.shape[1] > 2: + audio_tensor = audio_tensor[:, :2] + elif audio_tensor.ndim == 3: + if audio_tensor.shape[0] == 1: + audio_tensor = audio_tensor.squeeze(0) + else: + audio_tensor = audio_tensor[0] + if audio_tensor.shape[1] != 2 and audio_tensor.shape[0] == 2: + audio_tensor = audio_tensor.T + if audio_tensor.shape[1] > 2: + audio_tensor = audio_tensor[:, :2] + else: + raise ValueError( + f"Unsupported audio tensor shape: {audio_tensor.shape}. " + f"Expected 1D, 2D, or 3D tensor." 
+ ) + + if audio_tensor.shape[1] > 2: + audio_tensor = audio_tensor[:, :2] + + # Convert to int16 if needed + if audio_tensor.dtype != torch.int16: + audio_tensor = torch.clip(audio_tensor, -1.0, 1.0) + audio_tensor = (audio_tensor * 32767.0).to(torch.int16) + + # Add audio stream now (before any muxing) + audio_stream = container.add_stream("aac", rate=audio_sample_rate) + audio_stream.codec_context.sample_rate = audio_sample_rate + audio_stream.codec_context.layout = "stereo" + audio_stream.codec_context.time_base = Fraction(1, audio_sample_rate) + + # --- Encode video frames --- + for frame_array in video_np: + frame = av.VideoFrame.from_ndarray(frame_array, format="rgb24") + for packet in video_stream.encode(frame): + container.mux(packet) + + # Flush video encoder + for packet in video_stream.encode(): + container.mux(packet) + + # --- Encode audio (after video is done) --- + if audio_stream is not None and audio_tensor is not None: + # Build packed int16 frame: (1, samples*channels) + audio_np = audio_tensor.contiguous().reshape(1, -1).cpu().numpy() + + frame_in = av.AudioFrame.from_ndarray(audio_np, format="s16", layout="stereo") + frame_in.sample_rate = audio_sample_rate + + # Use AudioResampler to convert s16→fltp (AAC's native format) + cc = audio_stream.codec_context + audio_resampler = av.audio.resampler.AudioResampler( + format=cc.format or "fltp", + layout=cc.layout or "stereo", + rate=cc.sample_rate or audio_sample_rate, + ) + + audio_next_pts = 0 + for rframe in audio_resampler.resample(frame_in): + if rframe.pts is None: + rframe.pts = audio_next_pts + audio_next_pts += rframe.samples + rframe.sample_rate = audio_sample_rate + container.mux(audio_stream.encode(rframe)) + + # Flush audio encoder + for packet in audio_stream.encode(): + container.mux(packet) + + # Close container + container.close() + + logger.info(f"Saved video{' with audio' if audio is not None else ''} to {output_path}") + return output_path + + except ImportError: + logger.warning( + "PyAV (av) library not available. " + "Falling back to saving middle frame as PNG. " + "Install with: pip install av" + ) + png_path = output_path.replace(".mp4", ".png") + return MediaStorage._save_middle_frame(video, png_path) + except Exception as e: + logger.error(f"Error encoding video with PyAV: {e}") + import traceback + + logger.error(traceback.format_exc()) + logger.warning("Falling back to saving middle frame as PNG.") + png_path = output_path.replace(".mp4", ".png") + return MediaStorage._save_middle_frame(video, png_path) + + @staticmethod + def _save_gif(video: torch.Tensor, output_path: str, frame_rate: float) -> str: + """Save video as animated GIF. 
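+
+ Each frame is displayed for int(1000 / frame_rate) milliseconds and the
+ GIF loops indefinitely (loop=0).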
+ + Args: + video: Video frames as torch.Tensor (T, H, W, C) uint8 + output_path: Output path for GIF + frame_rate: Frames per second + + Returns: + Path where the GIF was saved + """ + if not isinstance(video, torch.Tensor): + raise ValueError(f"Expected torch.Tensor for video, got {type(video)}") + + # Convert to numpy and then to list of PIL Images + video_np = video.cpu().numpy() + frames = [Image.fromarray(video_np[i]) for i in range(video_np.shape[0])] + + # Save as GIF + duration_ms = int(1000 / frame_rate) + frames[0].save( + output_path, + save_all=True, + append_images=frames[1:], + optimize=False, + duration=duration_ms, + loop=0, + ) + logger.info(f"Saved video as GIF to {output_path} ({len(frames)} frames)") + return output_path + + @staticmethod + def _save_middle_frame(video: torch.Tensor, output_path: str) -> str: + """Save middle frame of video as PNG. + + Args: + video: Video frames as torch.Tensor (T, H, W, C) uint8 + output_path: Output path for PNG + + Returns: + Path where the frame was saved + """ + if not isinstance(video, torch.Tensor): + raise ValueError(f"Expected torch.Tensor for video, got {type(video)}") + + # Extract middle frame + video_np = video.cpu().numpy() + frame_idx = video_np.shape[0] // 2 + image = Image.fromarray(video_np[frame_idx]) + + image.save(output_path) + logger.info(f"Saved frame {frame_idx} to {output_path}") + return output_path diff --git a/tensorrt_llm/serve/openai_protocol.py b/tensorrt_llm/serve/openai_protocol.py index 3afcb989d4..21f411cc33 100644 --- a/tensorrt_llm/serve/openai_protocol.py +++ b/tensorrt_llm/serve/openai_protocol.py @@ -1,12 +1,14 @@ # Adapted from # https://github.com/vllm-project/vllm/blob/4db5176d9758b720b05460c50ace3c01026eb158/vllm/entrypoints/openai/protocol.py import base64 +import re import time import uuid from typing import Any, Dict, List, Literal, Optional, Union import torch import xgrammar +from fastapi import UploadFile from openai.types.chat import ChatCompletionAssistantMessageParam from openai.types.chat import \ ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam @@ -1120,5 +1122,218 @@ def to_llm_disaggregated_params( ) +# ============================================================================ +# Diffusion API Protocol Classes +# ============================================================================ + + +class ImageGenerationRequest(OpenAIBaseModel): + """OpenAI-compatible image generation request. + + Follows the OpenAI Images API specification: + https://platform.openai.com/docs/api-reference/images/create + """ + prompt: str + model: Optional[str] = None + n: int = Field(default=1, ge=1, le=10) + output_format: Literal["png", "webp", "jpeg"] = "png" + size: Optional[str] = Field( + default="auto", + description=( + "The size of the generated images. Must be in 'WxH' format like " + "1024x1024, 1536x1024 (landscape), 1024x1536 (portrait), etc. " + "Use 'auto' for model default size.")) + quality: Literal["standard", "hd"] = "standard" + response_format: Literal["url", "b64_json"] = "url" + style: Optional[Literal["vivid", "natural"]] = "vivid" + user: Optional[str] = None + + # Extended parameters for diffusion control + num_inference_steps: Optional[int] = Field( + default=None, + description= + "Number of denoising steps. More steps = higher quality but slower.") + guidance_scale: Optional[float] = Field( + default=None, + description= + "Classifier-free guidance scale. Higher values follow prompt more closely." 
+ ) + guidance_rescale: Optional[float] = Field( + default=None, description="Classifier-free guidance rescale.") + negative_prompt: Optional[str] = Field( + default=None, + description="Text describing what to avoid in the generated image.") + seed: Optional[int] = Field(default=None, + description="Random seed for reproducibility.") + + @field_validator("size") + @classmethod + def validate_size(cls, v): + """Validate size format is 'WxH' or 'auto'.""" + if v is None or v == "auto": + return v + if not isinstance(v, str): + raise ValueError("size must be a string in 'WxH' format or 'auto'") + # Check format: should be like "1024x1024" + import re + if not re.match(r'^\d+x\d+$', v): + raise ValueError( + f"Invalid size format '{v}'. Must be in 'WxH' format " + "(e.g., '1024x1024', '1536x1024') or 'auto'.") + return v + + +class ImageObject(OpenAIBaseModel): + """Generated image object in the response.""" + b64_json: Optional[str] = None + url: Optional[str] = None + revised_prompt: Optional[str] = None + + +class ImageGenerationResponse(OpenAIBaseModel): + """Response from image generation endpoint.""" + created: int = Field(default_factory=lambda: int(time.time())) + data: List[ImageObject] + output_format: Literal["png", "webp", "jpeg"] = "png" + quality: Literal["low", "medium", "high"] = "medium" + size: Optional[str] = None + + +class ImageEditRequest(OpenAIBaseModel): + """Request for image editing endpoint. + + Follows the OpenAI Images API specification: + https://platform.openai.com/docs/api-reference/images/createEdit + """ + image: Union[List[str], str] = Field( + description="Base64-encoded source image(s) to edit") + prompt: str = Field(description="Text description of desired edits") + model: Optional[str] = None + mask: Optional[str] = Field( + default=None, + description= + "Base64-encoded mask image (optional, black areas will be edited)") + n: int = Field(default=1, ge=1, le=10) + size: Optional[str] = Field( + default="auto", + description=( + "The size of the edited images. Must be in 'WxH' format like " + "1024x1024, 1536x1024 (landscape), 1024x1536 (portrait), etc. " + "Use 'auto' to match source image size.")) + response_format: Literal["url", "b64_json"] = "url" + user: Optional[str] = None + + # Extended parameters for diffusion control + num_inference_steps: Optional[int] = Field( + default=None, description="Number of denoising steps.") + guidance_scale: Optional[float] = Field( + default=None, description="Classifier-free guidance scale.") + guidance_rescale: Optional[float] = Field( + default=None, description="Classifier-free guidance rescale.") + negative_prompt: Optional[str] = Field( + default=None, + description="Text describing what to avoid in the edited image.") + seed: Optional[int] = Field(default=None, + description="Random seed for reproducibility.") + + @field_validator("size") + @classmethod + def validate_size(cls, v): + """Validate size format is 'WxH' or 'auto'.""" + if v != "auto" and not re.match(r"^\d+x\d+$", v): + raise ValueError( + "Size must be 'auto' or in 'WxH' format (e.g., '1024x1024')") + return v + + +class VideoGenerationRequest(OpenAIBaseModel): + """Video generation request (extended API). + + This is an extension to the OpenAI API for video generation support. 
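+
+ Example JSON body (illustrative values; every field besides "prompt" is
+ optional):
+
+ {"prompt": "a red fox running through snow", "size": "1280x720",
+ "seconds": 4.0, "fps": 24, "num_inference_steps": 50, "seed": 42}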
+ """ + prompt: str + input_reference: Optional[Union[str, UploadFile]] = Field( + default=None, + description="Optional image reference that guides generation.") + model: Optional[str] = None + size: Optional[str] = Field( + default="auto", + description= + ("The size of the generated video frames. Must be in 'WxH' format like " + "512x512, 1024x576 (landscape), 576x1024 (portrait), etc. " + "Use 'auto' for model default size.")) + seconds: float = Field(default=2.0, + ge=1.0, + le=16.0, + description="Video duration in seconds.") + + # Extended parameters for diffusion control + n: int = Field(default=1, ge=1, le=4) + fps: int = Field(default=24, ge=8, le=60, description="Frames per second.") + num_inference_steps: Optional[int] = Field( + default=None, description="Number of denoising steps.") + guidance_scale: Optional[float] = Field( + default=None, description="Classifier-free guidance scale.") + guidance_rescale: Optional[float] = Field( + default=None, description="Classifier-free guidance rescale.") + negative_prompt: Optional[str] = Field( + default=None, + description="Text describing what to avoid in the generated video.") + seed: Optional[int] = Field(default=None, + description="Random seed for reproducibility.") + + @field_validator("size") + @classmethod + def validate_size(cls, v): + """Validate size format is 'WxH' or 'auto'.""" + if v is None or v == "auto": + return v + if not isinstance(v, str): + raise ValueError("size must be a string in 'WxH' format or 'auto'") + import re + if not re.match(r'^\d+x\d+$', v): + raise ValueError( + f"Invalid size format '{v}'. Must be in 'WxH' format " + "(e.g., '512x512', '1024x576') or 'auto'.") + return v + + +class VideoJob(OpenAIBaseModel): + """Metadata for an asynchronous video generation job. 
+ + Follows the OpenAI Videos API specification: + https://platform.openai.com/docs/api-reference/videos + """ + completed_at: Optional[int] = Field( + default=None, description="Unix timestamp of completion") + created_at: int = Field(description="Unix timestamp of creation") + error: Optional[str] = Field(default=None, + description="Error message if failed") + expires_at: Optional[int] = Field( + default=None, description="Unix timestamp of expiration") + id: str = Field(description="Unique identifier for the video") + model: str = Field(description="The model used for generation") + object: str = Field(default="video", description="Object type") + progress: Optional[int] = Field( + default=None, + description="Progress of the video generation job (0-100)") + prompt: str = Field(description="The prompt used to generate the video") + status: Literal["queued", "in_progress", "completed", "failed"] = Field( + description="Current status of the video generation job") + + # Video properties + duration: Optional[float] = Field(default=None, + description="Video duration in seconds") + fps: Optional[int] = Field(default=None, description="Frames per second") + size: Optional[str] = Field(default=None, + description="Video dimensions in 'WxH' format") + + +class VideoJobList(OpenAIBaseModel): + """Response from listing video jobs endpoint.""" + data: List[VideoJob] = Field(description="List of video jobs") + object: str = Field(default="list", description="Object type") + + UCompletionRequest = Union[CompletionRequest, ChatCompletionRequest] UCompletionResponse = Union[CompletionResponse, ChatCompletionResponse] diff --git a/tensorrt_llm/serve/openai_server.py b/tensorrt_llm/serve/openai_server.py index 9cb9d59918..c19dd93149 100644 --- a/tensorrt_llm/serve/openai_server.py +++ b/tensorrt_llm/serve/openai_server.py @@ -1,10 +1,13 @@ #!/usr/bin/env python import asyncio +import base64 import os import re import signal import socket +import time import traceback +import uuid from collections import deque from contextlib import asynccontextmanager from datetime import datetime @@ -16,7 +19,8 @@ from typing import (Annotated, Any, AsyncGenerator, AsyncIterator, List, import uvicorn from fastapi import Body, FastAPI, Request from fastapi.exceptions import RequestValidationError -from fastapi.responses import JSONResponse, Response, StreamingResponse +from fastapi.responses import (FileResponse, JSONResponse, Response, + StreamingResponse) from starlette.routing import Mount from transformers import AutoProcessor @@ -26,11 +30,12 @@ from tensorrt_llm._torch.async_llm import AsyncLLM from tensorrt_llm.executor import CppExecutorError from tensorrt_llm.executor.postproc_worker import PostprocParams from tensorrt_llm.inputs import prompt_inputs -from tensorrt_llm.inputs.data import TokensPrompt +from tensorrt_llm.inputs.data import TokensPrompt, visual_gen_inputs from tensorrt_llm.inputs.multimodal import MultimodalServerConfig from tensorrt_llm.inputs.utils import ConversationMessage, apply_chat_template from tensorrt_llm.llmapi import DisaggregatedParams as LlmDisaggregatedParams -from tensorrt_llm.llmapi import MultimodalEncoder, tracing +from tensorrt_llm.llmapi import (MultimodalEncoder, VisualGen, VisualGenParams, + tracing) from tensorrt_llm.llmapi.disagg_utils import (DisaggClusterConfig, MetadataServerConfig, ServerRole) from tensorrt_llm.llmapi.llm import RequestOutput @@ -40,6 +45,7 @@ from tensorrt_llm.serve.chat_utils import (load_chat_template, parse_chat_messages_coroutines) from 
tensorrt_llm.serve.cluster_storage import create_cluster_storage_client from tensorrt_llm.serve.disagg_auto_scaling import DisaggClusterWorker +from tensorrt_llm.serve.media_storage import MediaStorage from tensorrt_llm.serve.metadata_server import create_metadata_server from tensorrt_llm.serve.openai_protocol import (ChatCompletionRequest, ChatCompletionResponse, @@ -47,12 +53,17 @@ from tensorrt_llm.serve.openai_protocol import (ChatCompletionRequest, ChatMessage, CompletionRequest, CompletionResponse, CompletionResponseChoice, - ErrorResponse, + ErrorResponse, ImageEditRequest, + ImageGenerationRequest, + ImageGenerationResponse, + ImageObject, MemoryUpdateRequest, ModelCard, ModelList, PromptTokensDetails, ResponsesRequest, ResponsesResponse, UpdateWeightsRequest, UsageInfo, + VideoGenerationRequest, + VideoJob, VideoJobList, to_llm_disaggregated_params) from tensorrt_llm.serve.postprocess_handlers import ( ChatCompletionPostprocArgs, ChatPostprocArgs, CompletionPostprocArgs, @@ -69,6 +80,8 @@ from tensorrt_llm.serve.responses_utils import \ from tensorrt_llm.serve.responses_utils import get_steady_clock_now_in_seconds from tensorrt_llm.serve.responses_utils import \ request_preprocess as responses_api_request_preprocess +from tensorrt_llm.serve.visual_gen_utils import (VIDEO_STORE, + parse_visual_gen_params) from tensorrt_llm.version import __version__ as VERSION from .._utils import nvtx_mark, set_prometheus_multiproc_dir @@ -82,7 +95,7 @@ TIMEOUT_KEEP_ALIVE = 5 # seconds. class OpenAIServer: def __init__(self, - llm: Union[LLM, MultimodalEncoder], + generator: Union[LLM, MultimodalEncoder, VisualGen], model: str, tool_parser: Optional[str], server_role: Optional[ServerRole], @@ -90,40 +103,17 @@ class OpenAIServer: disagg_cluster_config: Optional[DisaggClusterConfig] = None, multimodal_server_config: Optional[MultimodalServerConfig] = None, chat_template: Optional[str] = None): - self.llm = llm - self.tokenizer = llm.tokenizer + self.generator = generator + self._is_visual_gen = isinstance(generator, VisualGen) self.tool_parser = tool_parser self.metadata_server = create_metadata_server(metadata_server_cfg) self.disagg_cluster_config = disagg_cluster_config self.multimodal_server_config = multimodal_server_config - self.chat_template = load_chat_template(chat_template) self.server_role = server_role # Will be set in __call__ self.binding_addr = None self.host = None self.port = None - hf_tokenizer_path = llm._hf_model_dir or self.tokenizer.tokenizer.name_or_path - trust_remote_code = llm.args.trust_remote_code - try: - self.processor = AutoProcessor.from_pretrained(hf_tokenizer_path, trust_remote_code=trust_remote_code) - except Exception: - logger.debug("Failed to load AutoProcessor or AutoConfig for %s", hf_tokenizer_path) - self.processor = None - # load model config - try: - from tensorrt_llm._torch.pyexecutor.config_utils import \ - load_pretrained_config - self.model_config = load_pretrained_config(hf_tokenizer_path, - trust_remote_code=trust_remote_code, - checkpoint_format=getattr(self.llm.args, "checkpoint_format", None)) - except Exception: - logger.debug("Failed to load AutoConfig for %s", hf_tokenizer_path) - self.model_config = None - - # Enable response storage for Responses API - self.enable_store = (len(os.getenv("TRTLLM_RESPONSES_API_DISABLE_STORE", "")) < 1) and not self.postproc_worker_enabled - - self.conversation_store = ConversationHistoryStore() model_dir = Path(model) if model_dir.exists() and model_dir.is_dir(): @@ -135,35 +125,19 @@ class OpenAIServer: 
self.perf_metrics_lock = None # The steady clock offset (in seconds) between this server and the disagg server self.disagg_server_steady_clock_offset = 0 - if self.llm.args.return_perf_metrics: - set_prometheus_multiproc_dir() - self.metrics_collector = MetricsCollector({ - "model_name": "undefined", - "engine_type": "undefined" - }) - max_perf_metrics = self.llm.args.perf_metrics_max_requests - if max_perf_metrics > 0: - self.perf_metrics = deque(maxlen=max_perf_metrics) - self.perf_metrics_lock = asyncio.Lock() - - # gpt-oss - self.harmony_adapter: HarmonyAdapter | None = None - disable_harmony = os.getenv("DISABLE_HARMONY_ADAPTER", "0") == "1" - if disable_harmony: - self.use_harmony = False - else: - self.use_harmony = (self.model_config.model_type == "gpt_oss") - - self.tool_call_id_type = "random" # default tool call id type is random - if self.model_config.model_type == "kimi_k2": - self.tool_call_id_type = "kimi_k2" - elif self.model_config.model_type == "deepseek_v32": - self.tool_call_id_type = "deepseek_v32" # as disagg-worker self.disagg_cluster_storage = None self.disagg_cluster_worker = None + # Skip loading AutoProcessor and model_config for VISUAL_GEN models + # These are LLM-specific and can cause unnecessary memory usage + if self._is_visual_gen: + self._init_visual_gen() + else: + self._init_llm(chat_template) + + @asynccontextmanager async def lifespan(app: FastAPI): if self.metadata_server is not None: @@ -176,8 +150,8 @@ class OpenAIServer: } # TODO: add more metadata # Register with ETCD using the existing key format - self.metadata_server.put(f"trtllm/{self.llm.llm_id}", metadata) - logger.info(f"trtllm/{self.llm.llm_id} is registered") + self.metadata_server.put(f"trtllm/{self.generator.llm_id}", metadata) + logger.info(f"trtllm/{self.generator.llm_id} is registered") if self.disagg_cluster_config: self.disagg_cluster_storage = create_cluster_storage_client(self.disagg_cluster_config.cluster_uri, self.disagg_cluster_config.cluster_name) @@ -188,11 +162,11 @@ class OpenAIServer: yield if self.metadata_server is not None: - self.metadata_server.remove(f"trtllm/{self.llm.llm_id}") - logger.info(f"trtllm/{self.llm.llm_id} is unregistered") + self.metadata_server.remove(f"trtllm/{self.generator.llm_id}") + logger.info(f"trtllm/{self.generator.llm_id} is unregistered") if self.disagg_cluster_worker: await self.disagg_cluster_worker.deregister_worker() - self.llm.shutdown() + self.generator.shutdown() self.app = FastAPI(lifespan=lifespan) @@ -200,15 +174,81 @@ class OpenAIServer: async def validation_exception_handler(_, exc): return JSONResponse(status_code=400, content={"error": str(exc)}) - if self.server_role is not ServerRole.MM_ENCODER: - self.register_routes() - else: - assert isinstance(self.llm, MultimodalEncoder), "llm must be a MultimodalEncoder for multimodal encoder" + if self.server_role is ServerRole.VISUAL_GEN: + assert isinstance(self.generator, VisualGen), "generator must be a VisualGen for VISUAL_GEN server" + self.register_visual_gen_routes() + elif self.server_role is ServerRole.MM_ENCODER: + assert isinstance(self.generator, MultimodalEncoder), "generator must be a MultimodalEncoder for multimodal encoder" self.register_mm_encoder_routes() + else: + self.register_routes() self.app.add_middleware(ServerArrivalTimeMiddleware) + def _init_visual_gen(self): + self.processor = None + self.model_config = None + self.media_storage_path = Path(os.getenv("TRTLLM_MEDIA_STORAGE_PATH", "/tmp/trtllm_generated")) # nosec B108 + 
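# Generated media is written under this directory; override the default
+ # location with the TRTLLM_MEDIA_STORAGE_PATH environment variable.
+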
self.media_storage_path.mkdir(exist_ok=True, parents= True) + self.video_gen_tasks = {} + + + def _init_llm(self, chat_template: Optional[str] = None): + self.tokenizer = self.generator.tokenizer + hf_tokenizer_path = self.generator._hf_model_dir or self.tokenizer.tokenizer.name_or_path + trust_remote_code = self.generator.args.trust_remote_code + try: + self.processor = AutoProcessor.from_pretrained(hf_tokenizer_path, trust_remote_code=trust_remote_code) + except Exception: + logger.debug("Failed to load AutoProcessor or AutoConfig for %s", hf_tokenizer_path) + self.processor = None + + # load model config + try: + from tensorrt_llm._torch.pyexecutor.config_utils import \ + load_pretrained_config + self.model_config = load_pretrained_config(hf_tokenizer_path, + trust_remote_code=trust_remote_code, + checkpoint_format=getattr(self.generator.args, "checkpoint_format", None)) + except Exception: + logger.debug("Failed to load AutoConfig for %s", hf_tokenizer_path) + self.model_config = None + + self.chat_template = load_chat_template(chat_template) + + # Enable response storage for Responses API + self.enable_store = (len(os.getenv("TRTLLM_RESPONSES_API_DISABLE_STORE", "")) < 1) and not self.postproc_worker_enabled + + self.conversation_store = ConversationHistoryStore() + + # gpt-oss + self.harmony_adapter: HarmonyAdapter | None = None + disable_harmony = os.getenv("DISABLE_HARMONY_ADAPTER", "0") == "1" + if disable_harmony or self.model_config is None: + self.use_harmony = False + else: + self.use_harmony = (self.model_config.model_type == "gpt_oss") + + self.tool_call_id_type = "random" # default tool call id type is random + if self.model_config is not None: + if self.model_config.model_type == "kimi_k2": + self.tool_call_id_type = "kimi_k2" + elif self.model_config.model_type == "deepseek_v32": + self.tool_call_id_type = "deepseek_v32" + + if self.generator.args.return_perf_metrics: + set_prometheus_multiproc_dir() + self.metrics_collector = MetricsCollector({ + "model_name": "undefined", + "engine_type": "undefined" + }) + max_perf_metrics = self.generator.args.perf_metrics_max_requests + if max_perf_metrics > 0: + self.perf_metrics = deque(maxlen=max_perf_metrics) + self.perf_metrics_lock = asyncio.Lock() + + async def await_disconnected(self, raw_request: Request, promise): if raw_request is None: return @@ -221,7 +261,7 @@ class OpenAIServer: @property def postproc_worker_enabled(self) -> bool: - return True if self.llm.args.num_postprocess_workers > 0 else False + return True if self.generator.args.num_postprocess_workers > 0 else False @staticmethod def create_error_response( @@ -248,8 +288,20 @@ class OpenAIServer: status_code=HTTPStatus.NOT_FOUND, ) + def _create_not_supported_error(self, message: str) -> Response: + return self.create_error_response( + err_type="NotImplementedError", + message=message, + status_code=HTTPStatus.NOT_IMPLEMENTED, + ) + def _check_health(self) -> bool: - return self.llm._check_health() + if isinstance(self.generator, LLM): + return self.generator._check_health() + # llmapi.LLM (e.g. 
PyTorch backend) is not isinstance(_tensorrt_engine.LLM) + if hasattr(self.generator, '_check_health'): + return self.generator._check_health() + return True def register_routes(self): self.app.add_api_route("/health", self.health, methods=["GET"]) @@ -293,7 +345,7 @@ class OpenAIServer: self.app.add_api_route("/server_info", self.get_server_info, methods=["GET"]) - if self.llm.args.return_perf_metrics: + if self.generator.args.return_perf_metrics: # register /prometheus/metrics self.mount_metrics() @@ -340,6 +392,45 @@ class OpenAIServer: self.update_weights, methods=["POST"]) + def register_visual_gen_routes(self): + """Register routes for diffusion model serving.""" + # Health and info endpoints + self.app.add_api_route("/health", self.health, methods=["GET"]) + self.app.add_api_route("/version", self.version, methods=["GET"]) + self.app.add_api_route("/v1/models", self.get_model, methods=["GET"]) + self.app.add_api_route("/metrics", self.get_iteration_stats, methods=["GET"]) + + # Image generation endpoints (OpenAI compatible) + self.app.add_api_route("/v1/images/generations", + self.openai_image_generation, + methods=["POST"]) + self.app.add_api_route("/v1/images/edits", + self.openai_image_edit, + methods=["POST"]) + + # Video generation endpoints (Extended OpenAI API) + # Asynchronous video generation (returns immediately with job metadata, OpenAI API) + self.app.add_api_route("/v1/videos", + self.openai_video_generation_async, + methods=["POST"]) + # Synchronous video generation (waits for completion, extended API) + self.app.add_api_route("/v1/videos/generations", + self.openai_video_generation_sync, + methods=["POST"]) + # Video management endpoints + self.app.add_api_route("/v1/videos", + self.list_videos, + methods=["GET"]) + self.app.add_api_route("/v1/videos/{video_id}", + self.get_video_metadata, + methods=["GET"]) + self.app.add_api_route("/v1/videos/{video_id}/content", + self.get_video_content, + methods=["GET"]) + self.app.add_api_route("/v1/videos/{video_id}", + self.delete_video, + methods=["DELETE"]) + async def health(self) -> Response: if self._check_health(): return Response(status_code=200) @@ -349,10 +440,10 @@ class OpenAIServer: async def health_generate(self, raw_request: Request) -> Response: """Health check that performs a minimal generation.""" extra_args = {} - if self.llm.args.max_beam_width > 1: + if self.generator.args.max_beam_width > 1: extra_args = dict( use_beam_search=True, - best_of=self.llm.args.max_beam_width, + best_of=self.generator.args.max_beam_width, n=1, ) try: @@ -396,7 +487,7 @@ class OpenAIServer: async def get_iteration_stats(self) -> JSONResponse: stats = [] - async for stat in self.llm.get_stats_async(2): + async for stat in self.generator.get_stats_async(2): stats.append(stat) return JSONResponse(content=stats) @@ -416,7 +507,7 @@ class OpenAIServer: return JSONResponse(content=[]) async with self.perf_metrics_lock: perf_metrics = self.perf_metrics - self.perf_metrics = deque(maxlen=self.llm.args.perf_metrics_max_requests) + self.perf_metrics = deque(maxlen=self.generator.args.perf_metrics_max_requests) for metrics_dict in perf_metrics: metrics = metrics_dict["perf_metrics"] timing_metrics = metrics.timing_metrics @@ -466,7 +557,7 @@ class OpenAIServer: async def get_kv_cache_events(self) -> JSONResponse: events = [] try: - async for event in self.llm.get_kv_cache_events_async(2): + async for event in self.generator.get_kv_cache_events_async(2): events.append(event) except IndexError: # queue is empty, no more events @@ -478,7 
+569,7 @@ class OpenAIServer: return if self.metrics_collector: self.metrics_collector.log_metrics_dict(res.metrics_dict) - if self.llm.args.return_perf_metrics: + if self.generator.args.return_perf_metrics: output = res.outputs[0] item = { "request_id": res.request_id, @@ -549,9 +640,9 @@ class OpenAIServer: # expanded into an embedding bias tensor in the sampler. sampling_params = request.to_sampling_params( vocab_size=self.tokenizer.tokenizer.vocab_size, - gather_generation_logits=self.llm.args.gather_generation_logits, - reasoning_parser=self.llm.args.reasoning_parser, - backend=self.llm.args.backend) + gather_generation_logits=self.generator.args.gather_generation_logits, + reasoning_parser=self.generator.args.reasoning_parser, + backend=self.generator.args.backend) postproc_args = ChatPostprocArgs.from_request(request) disaggregated_params = to_llm_disaggregated_params(request.disaggregated_params) @@ -582,7 +673,7 @@ class OpenAIServer: if mm_data and mm_embeddings: raise ValueError("Passing 'multi_modal_data' and 'multi_modal_embeddings' at the same time is not supported.") - postproc_args.reasoning_parser = self.llm.args.reasoning_parser + postproc_args.reasoning_parser = self.generator.args.reasoning_parser postproc_args.tool_parser = self.tool_parser postproc_args.tool_call_id_type = self.tool_call_id_type if conversation and conversation[-1].get( @@ -596,7 +687,7 @@ class OpenAIServer: trace_headers = (None if raw_request is None else tracing.extract_trace_headers(raw_request.headers)) - promise = self.llm.generate_async( + promise = self.generator.generate_async( inputs=prompt, sampling_params=sampling_params, _postproc_params=postproc_params if self.postproc_worker_enabled else None, @@ -701,7 +792,7 @@ class OpenAIServer: if mm_data is not None: prompt["multi_modal_data"] = mm_data - promise = self.llm.generate_async( + promise = self.generator.generate_async( inputs=prompt, ) asyncio.create_task(self.await_disconnected(raw_request, promise)) @@ -819,8 +910,8 @@ class OpenAIServer: # expanded into an embedding bias tensor in the sampler. 
sampling_params = request.to_sampling_params( vocab_size=self.tokenizer.tokenizer.vocab_size, - gather_generation_logits=self.llm.args.gather_generation_logits, - backend=self.llm.args.backend) + gather_generation_logits=self.generator.args.gather_generation_logits, + backend=self.generator.args.backend) # TODO: better way to enable metrics if len(os.getenv("TRTLLM_KVCACHE_TIME_OUTPUT_PATH", "")) > 0: sampling_params.return_perf_metrics = True @@ -839,12 +930,12 @@ class OpenAIServer: prompt = prompt_inputs(prompt) if prompt.get("prompt") is not None: - prompt_token_ids, extra_processed_inputs = await asyncio.to_thread(self.llm.input_processor, prompt, sampling_params) + prompt_token_ids, extra_processed_inputs = await asyncio.to_thread(self.generator.input_processor, prompt, sampling_params) tokens_prompt = TokensPrompt(prompt_token_ids=prompt_token_ids, query_token_ids=extra_processed_inputs.get("query_token_ids") if extra_processed_inputs is not None else None) else: tokens_prompt = prompt - promise = self.llm.generate_async( + promise = self.generator.generate_async( inputs=tokens_prompt, sampling_params=sampling_params, _postproc_params=postproc_params, @@ -947,7 +1038,7 @@ class OpenAIServer: ) # Generate - promise = self.llm.generate_async( + promise = self.generator.generate_async( inputs=harmony_tokens, sampling_params=sampling_params, _postproc_params=postproc_params if self.postproc_worker_enabled else None, @@ -1040,7 +1131,7 @@ class OpenAIServer: tokenizer=self.tokenizer if not self.use_harmony else None, model_config=self.model_config if not self.use_harmony else None, processor=self.processor if not self.use_harmony else None, - reasoning_parser=self.llm.args.reasoning_parser if not self.use_harmony else "gpt_oss", + reasoning_parser=self.generator.args.reasoning_parser if not self.use_harmony else "gpt_oss", ) streaming_processor = None @@ -1053,7 +1144,7 @@ class OpenAIServer: conversation_store=self.conversation_store, enable_store=self.enable_store and request.store, use_harmony=self.use_harmony, - reasoning_parser=self.llm.args.reasoning_parser, + reasoning_parser=self.generator.args.reasoning_parser, tool_parser=self.tool_parser, ) @@ -1062,7 +1153,7 @@ class OpenAIServer: request=request, sampling_params=sampling_params, use_harmony=self.use_harmony, - reasoning_parser=self.llm.args.reasoning_parser, + reasoning_parser=self.generator.args.reasoning_parser, tool_parser=self.tool_parser, streaming_processor=streaming_processor, ) @@ -1071,7 +1162,7 @@ class OpenAIServer: if request.stream else responses_api_post_processor, postproc_args=postproc_args, ) - promise = self.llm.generate_async( + promise = self.generator.generate_async( inputs=input_tokens, sampling_params=sampling_params, streaming=request.stream, @@ -1134,22 +1225,500 @@ class OpenAIServer: }) async def release_memory(self, request: MemoryUpdateRequest) -> JSONResponse: - assert isinstance(self.llm, AsyncLLM), "/release_memory endpoint is only supported with AsyncLLM()" - await self.llm.collective_rpc('sleep', args=(request.tags,)) + assert isinstance(self.generator, AsyncLLM), "/release_memory endpoint is only supported with AsyncLLM()" + await self.generator.collective_rpc('sleep', args=(request.tags,)) return JSONResponse(content={"status": "success"}) async def resume_memory(self, request: MemoryUpdateRequest) -> JSONResponse: - assert isinstance(self.llm, AsyncLLM), "/resume_memory endpoint is only supported with AsyncLLM()" - await self.llm.collective_rpc('wakeup', args=(request.tags,)) + assert 
isinstance(self.generator, AsyncLLM), "/resume_memory endpoint is only supported with AsyncLLM()" + await self.generator.collective_rpc('wakeup', args=(request.tags,)) return JSONResponse(content={"status": "success"}) async def update_weights(self, request: UpdateWeightsRequest) -> JSONResponse: - assert isinstance(self.llm, AsyncLLM), "/update_weights endpoint is only supported with AsyncLLM()" - await self.llm.collective_rpc('update_weights', args=(request.weights,)) + assert isinstance(self.generator, AsyncLLM), "/update_weights endpoint is only supported with AsyncLLM()" + await self.generator.collective_rpc('update_weights', args=(request.weights,)) return JSONResponse(content={"status": "success"}) async def get_server_info(self) -> JSONResponse: - return JSONResponse(content={"disaggregated_params": self.llm.disaggregated_params}) + return JSONResponse(content={"disaggregated_params": self.generator.disaggregated_params}) + + async def openai_image_generation( + self, + request: ImageGenerationRequest, + raw_request: Request + ) -> Response: + """OpenAI-compatible image generation endpoint. + + Follows the OpenAI Images API specification for image generation. + """ + try: + image_id = f"image_{uuid.uuid4().hex}" + params = parse_visual_gen_params(request, image_id) + logger.info(f"Generating image: {image_id} with params: {params} and prompt: {request.prompt}") + + if request.negative_prompt is not None: + inputs = visual_gen_inputs({"prompt": request.prompt, "negative_prompt": request.negative_prompt}) + else: + inputs = visual_gen_inputs(request.prompt) + output = self.generator.generate(inputs=inputs, params=params) + if output.image is None: + return self.create_error_response( + message="Image generation failed", + err_type="InternalServerError", + status_code=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + # Build response + output_images = output.image + MediaStorage.save_image( + output_images, + self.media_storage_path / f"{image_id}.png", + ) + + if not isinstance(output_images, list): + output_images = [output_images] + + if request.response_format == "b64_json": + data = [ + ImageObject( + b64_json=base64.b64encode(MediaStorage.convert_image_to_bytes(image)).decode('utf-8'), + revised_prompt=request.prompt + ) for image in output_images + ] + + response = ImageGenerationResponse( + created=int(time.time()), + data=data, + size=f"{params.width}x{params.height}", + ) + + elif request.response_format == "url": + # TODO: Support URL mode + return self._create_not_supported_error("URL mode is not supported for image generation") + + return JSONResponse(content=response.model_dump()) + + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(str(e)) + + + async def openai_image_edit( + self, + request: ImageEditRequest, + raw_request: Request + ) -> Response: + """OpenAI-compatible image editing endpoint. + + Follows the OpenAI Images API specification for image editing. + Creates an edited or extended image given an original image and a prompt. 
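+
+ Example JSON body (illustrative; "image" carries base64-encoded source
+ image data):
+
+ {"image": "<base64 PNG>", "prompt": "add a red hat to the subject",
+ "response_format": "b64_json"}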
+ """ + try: + image_id = f"image_{uuid.uuid4().hex}" + params = parse_visual_gen_params(request, image_id) + logger.info(f"Editing image: {image_id} with params: {params} and prompt: {request.prompt}") + + if request.negative_prompt is not None: + inputs = visual_gen_inputs({"prompt": request.prompt, "negative_prompt": request.negative_prompt}) + else: + inputs = visual_gen_inputs(request.prompt) + output = self.generator.generate(inputs=inputs, params=params) + if output.image is None: + return self.create_error_response( + message="Image editing failed", + err_type="InternalServerError", + status_code=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + # Build response + output_images = output.image + MediaStorage.save_image( + output_images, + self.media_storage_path / f"{image_id}.png", + ) + + if not isinstance(output_images, list): + output_images = [output_images] + + response = ImageGenerationResponse( + created=int(time.time()), + data=[ + ImageObject( + b64_json=base64.b64encode(MediaStorage.convert_image_to_bytes(image)).decode('utf-8'), + revised_prompt=request.prompt + ) for image in output_images + ], + size=f"{params.width}x{params.height}", + ) + + return JSONResponse(content=response.model_dump()) + + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(message=str(e), err_type="InternalServerError", status_code=HTTPStatus.INTERNAL_SERVER_ERROR) + + async def openai_video_generation_sync( + self, + raw_request: Request + ) -> Response: + """Synchronous video generation endpoint. + + Waits for video generation to complete before returning. + Compatible with simple use cases where waiting is acceptable. + + Supports both JSON and multipart/form-data requests: + - JSON: Send VideoGenerationRequest as application/json + - Multipart: Send form fields + optional input_reference file + """ + try: + # Parse request based on content-type + request = await self._parse_video_generation_request(raw_request) + + video_id = f"video_{uuid.uuid4().hex}" + params = parse_visual_gen_params(request, video_id, media_storage_path=str(self.media_storage_path)) + logger.info(f"Generating video: {video_id} with params: {params} and prompt: {request.prompt}") + + if request.negative_prompt is not None: + inputs = visual_gen_inputs({"prompt": request.prompt, "negative_prompt": request.negative_prompt}) + else: + inputs = visual_gen_inputs(request.prompt) + output = self.generator.generate(inputs=inputs, params=params) + if output.video is None: + return self.create_error_response( + message="Video generation failed", + err_type="InternalServerError", + status_code=HTTPStatus.INTERNAL_SERVER_ERROR, + ) + + MediaStorage.save_video( + video=output.video, + output_path=self.media_storage_path / f"{video_id}.mp4", + audio=output.audio, + frame_rate=request.fps or params.frame_rate, + ) + + return FileResponse( + self.media_storage_path / f"{video_id}.mp4", + media_type="video/mp4", + filename=f"{video_id}.mp4", + ) + + except ValueError as e: + logger.error(f"Request parsing error: {e}") + return self.create_error_response(str(e)) + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(str(e)) + + async def _parse_video_generation_request( + self, + raw_request: Request, + ) -> VideoGenerationRequest: + """Parse video generation request from either JSON or multipart/form-data. 
+ + Supports both: + - application/json: Standard JSON request with VideoGenerationRequest model + - multipart/form-data: Form fields + file upload for input_reference + """ + content_type = raw_request.headers.get("content-type", "") + + if "application/json" in content_type: + # Parse as JSON using Pydantic model + body = await raw_request.json() + return VideoGenerationRequest(**body) + + if "multipart/form-data" in content_type: + # Parse multipart/form-data manually + form = await raw_request.form() + + # Extract all fields and convert to proper types + data = {} + + # Required field + if "prompt" in form: + data["prompt"] = form["prompt"] + else: + raise ValueError("'prompt' is required") + + # Optional string fields + for field in ["model", "size", "negative_prompt"]: + if field in form and form[field]: + data[field] = form[field] + + # Optional numeric fields + if "seconds" in form and form["seconds"]: + data["seconds"] = float(form["seconds"]) + if "fps" in form and form["fps"]: + data["fps"] = int(form["fps"]) + if "n" in form and form["n"]: + data["n"] = int(form["n"]) + if "num_inference_steps" in form and form["num_inference_steps"]: + data["num_inference_steps"] = int(form["num_inference_steps"]) + if "guidance_scale" in form and form["guidance_scale"]: + data["guidance_scale"] = float(form["guidance_scale"]) + if "guidance_rescale" in form and form["guidance_rescale"]: + data["guidance_rescale"] = float(form["guidance_rescale"]) + if "seed" in form and form["seed"]: + data["seed"] = int(form["seed"]) + + # Handle file upload for input_reference + if "input_reference" in form: + input_ref = form["input_reference"] + if hasattr(input_ref, "file"): # It's an UploadFile + data["input_reference"] = input_ref + + return VideoGenerationRequest(**data) + + else: + raise ValueError(f"Unsupported content-type: {content_type}. Use 'application/json' or 'multipart/form-data'") + + async def openai_video_generation_async( + self, + raw_request: Request, + ) -> Response: + """Asynchronous video generation endpoint (OpenAI Videos API compatible). + + Creates a video generation job and returns immediately with job metadata. + The video is generated in the background and stored in media storage. + Client can poll GET /v1/videos/{video_id} to check status and retrieve the video. 
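+ + Example flow (illustrative): POST {"prompt": "a cat", "seconds": 4, "fps": 16} returns 202 with {"id": "video_<hex>", "status": "queued"}; poll GET /v1/videos/{id} until status is "completed", then download via GET /v1/videos/{id}/content.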
+ + Supports both JSON and multipart/form-data requests: + - JSON: Send VideoGenerationRequest as application/json + - Multipart: Send form fields + optional input_reference file + """ + try: + # Parse request based on content-type + request = await self._parse_video_generation_request(raw_request) + + video_id = f"video_{uuid.uuid4().hex}" + params = parse_visual_gen_params(request, video_id, media_storage_path=str(self.media_storage_path)) + logger.info(f"Generating video: {video_id} with params: {params} and prompt: {request.prompt}") + + # Register the job before launching the background task so the + # task never races against a missing VIDEO_STORE entry. + video_job = VideoJob( + created_at=int(time.time()), + id=video_id, + model=request.model or self.model, + prompt=request.prompt, + status="queued", + duration=request.seconds, + fps=request.fps, + size=f"{params.width}x{params.height}", + ) + await VIDEO_STORE.upsert(video_id, video_job) + + # Start background generation task + self.video_gen_tasks[video_id] = asyncio.create_task( + self._generate_video_background( + video_id=video_id, + request=request, + params=params, + ) + ) + + # Return job metadata immediately + return JSONResponse(content=video_job.model_dump(), status_code=202) + + except ValueError as e: + logger.error(f"Request parsing error: {e}") + return self.create_error_response(str(e)) + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(str(e)) + + async def _generate_video_background( + self, + video_id: str, + request: VideoGenerationRequest, + params: VisualGenParams, + ): + """Background task to generate video and save to storage.""" + try: + if request.negative_prompt is not None: + inputs = visual_gen_inputs({"prompt": request.prompt, "negative_prompt": request.negative_prompt}) + else: + inputs = visual_gen_inputs(request.prompt) + future = self.generator.generate_async(inputs=inputs, params=params) + output = await future.result() + + if output.video is None: + # Raise so the except block below marks the job as failed; a + # Response returned from a background task would be discarded. + raise RuntimeError("Video generation failed: generator produced no video") + + MediaStorage.save_video( + video=output.video, + output_path=self.media_storage_path / f"{video_id}.mp4", + audio=output.audio, + frame_rate=request.fps or params.frame_rate, + ) + job = await VIDEO_STORE.get(video_id) + if job: + job.status = "completed" + job.completed_at = int(time.time()) + await VIDEO_STORE.upsert(video_id, job) + + except Exception as e: + logger.error(traceback.format_exc()) + job = await VIDEO_STORE.get(video_id) + if job: + job.status = "failed" + job.completed_at = int(time.time()) + job.error = str(e) + await VIDEO_STORE.upsert(video_id, job) + + async def list_videos( + self, + raw_request: Request + ) -> Response: + """List all generated videos. + + GET /v1/videos + Returns a list of generated video metadata (job details). + """ + try: + # List videos from storage + video_jobs = await VIDEO_STORE.list_values() + + # Convert to API format + response = VideoJobList( + data=video_jobs, + ) + return JSONResponse(content=response.model_dump()) + + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(str(e)) + + async def get_video_metadata( + self, + video_id: str, + raw_request: Request + ) -> Response: + """Get video metadata by ID. + + GET /v1/videos/{video_id} + Retrieves the metadata (job status and details) for a specific generated video.
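+ The returned job's status is one of "queued", "completed", or "failed"; example response (illustrative): {"id": "video_<hex>", "object": "video", "status": "completed"}.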
+ """ + try: + logger.info(f"Getting video metadata: {video_id}") + # Get metadata from storage + job = await VIDEO_STORE.get(video_id) + if not job: + return self.create_error_response( + f"Video {video_id} not found", + err_type="NotFoundError", + status_code=HTTPStatus.NOT_FOUND + ) + + # Ensure it's a video + if job.object != "video": + return self.create_error_response( + f"Resource {video_id} is not a video", + err_type="BadRequestError", + status_code=HTTPStatus.BAD_REQUEST + ) + + return JSONResponse(content=job.model_dump()) + + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(str(e)) + + async def get_video_content( + self, + video_id: str, + raw_request: Request + ) -> Response: + """Download video file by ID. + + GET /v1/videos/{video_id}/content + Downloads the generated video file. + """ + try: + # Get metadata first to check status + job = await VIDEO_STORE.get(video_id) + if not job: + return self.create_error_response( + f"Video {video_id} not found", + err_type="NotFoundError", + status_code=HTTPStatus.NOT_FOUND + ) + + # Ensure it's a video and completed + if job.object != "video": + return self.create_error_response( + f"Resource {video_id} is not a video", + err_type="BadRequestError", + status_code=HTTPStatus.BAD_REQUEST + ) + + if job.status != "completed": + return self.create_error_response( + f"Video {video_id} is not ready (status: {job.status})", + err_type="BadRequestError", + status_code=HTTPStatus.BAD_REQUEST + ) + + video_file_name = f"{video_id}.mp4" + if os.path.exists(self.media_storage_path / video_file_name): + return FileResponse( + self.media_storage_path / video_file_name, + media_type="video/mp4", + filename=video_file_name, + ) + else: + return self.create_error_response( + f"Video {video_id} not found", + err_type="NotFoundError", + status_code=HTTPStatus.NOT_FOUND + ) + + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(str(e)) + + async def delete_video( + self, + video_id: str, + raw_request: Request + ) -> Response: + """Delete a video by ID. + + DELETE /v1/videos/{video_id} + Deletes a generated video by its ID. 
+ """ + try: + # Check if video exists + job = await VIDEO_STORE.get(video_id) + if not job: + return self.create_error_response( + f"Video {video_id} not found", + err_type="NotFoundError", + status_code=HTTPStatus.NOT_FOUND + ) + + # Ensure it's a video + if job.object != "video": + return self.create_error_response( + f"Resource {video_id} is not a video", + err_type="BadRequestError", + status_code=HTTPStatus.BAD_REQUEST + ) + + # Delete the video + success = await VIDEO_STORE.pop(video_id) + video_file_name = f"{video_id}.mp4" + + if os.path.exists(self.media_storage_path / video_file_name): + os.remove(self.media_storage_path / video_file_name) + + return JSONResponse(content={"deleted": success is not None}) + + except Exception as e: + logger.error(traceback.format_exc()) + return self.create_error_response(str(e)) async def __call__(self, host, port, sockets: list[socket.socket] | None = None): # Store the binding address for server registration diff --git a/tensorrt_llm/serve/visual_gen_utils.py b/tensorrt_llm/serve/visual_gen_utils.py new file mode 100644 index 0000000000..f0cd31f7fb --- /dev/null +++ b/tensorrt_llm/serve/visual_gen_utils.py @@ -0,0 +1,112 @@ +import asyncio +import base64 +import os +import shutil +from typing import Any, Dict, List, Optional + +from tensorrt_llm.llmapi.visual_gen import VisualGenParams +from tensorrt_llm.serve.openai_protocol import ( + ImageEditRequest, + ImageGenerationRequest, + VideoGenerationRequest, +) + + +def parse_visual_gen_params( + request: ImageGenerationRequest | VideoGenerationRequest | ImageEditRequest, + id: str, + media_storage_path: Optional[str] = None, +) -> VisualGenParams: + params = VisualGenParams() + params.prompt = request.prompt + if request.negative_prompt is not None: + params.negative_prompt = request.negative_prompt + if request.size is not None and request.size != "auto": + params.width, params.height = map(int, request.size.split("x")) + if request.guidance_scale is not None: + params.guidance_scale = request.guidance_scale + if request.guidance_rescale is not None: + params.guidance_rescale = request.guidance_rescale + + if isinstance(request, ImageGenerationRequest) or isinstance(request, ImageEditRequest): + if request.num_inference_steps is not None: + params.num_inference_steps = request.num_inference_steps + elif isinstance(request, ImageGenerationRequest) and request.quality == "hd": + params.num_inference_steps = 30 + if request.n is not None: + params.num_images_per_prompt = request.n + if isinstance(request, ImageEditRequest): + if request.image is not None: + if isinstance(request.image, list): + params.image = [base64.b64decode(image) for image in request.image] + else: + params.image = [base64.b64decode(request.image)] + if request.mask is not None: + if isinstance(request.mask, list): + params.mask = [base64.b64decode(mask) for mask in request.mask] + else: + params.mask = base64.b64decode(request.mask) + + elif isinstance(request, VideoGenerationRequest): + if request.num_inference_steps is not None: + params.num_inference_steps = request.num_inference_steps + if request.input_reference is not None: + if media_storage_path is None: + raise ValueError("media_storage_path is required when input_reference is provided") + params.input_reference = os.path.join(media_storage_path, f"{id}_reference.png") + if isinstance(request.input_reference, str): + with open(params.input_reference, "wb") as f: + f.write(base64.b64decode(request.input_reference)) + else: + with open(params.input_reference, "wb") as 
shutil.copyfileobj(request.input_reference.file, f) + + # fps/seconds may be omitted; only override the VisualGenParams + # defaults when they were actually provided. + if request.fps is not None: + params.frame_rate = request.fps + if request.seconds is not None and request.fps is not None: + params.num_frames = int(request.seconds * request.fps) + + if request.seed is not None: + params.seed = int(request.seed) + + return params + + +class AsyncDictStore: + """A small async-safe in-memory key-value store. + + This encapsulates the usual pattern of a module-level dict guarded by + an asyncio.Lock and provides simple CRUD methods that are safe to call + concurrently from FastAPI request handlers and background tasks. + """ + + def __init__(self) -> None: + self._items: Dict[str, Any] = {} + self._lock = asyncio.Lock() + + async def upsert(self, key: str, value: Any) -> None: + async with self._lock: + self._items[key] = value + + async def update_fields(self, key: str, updates: Dict[str, Any]) -> Optional[Any]: + async with self._lock: + item = self._items.get(key) + if item is None: + return None + item.update(updates) + return item + + async def get(self, key: str) -> Optional[Any]: + async with self._lock: + return self._items.get(key) + + async def pop(self, key: str) -> Optional[Any]: + async with self._lock: + return self._items.pop(key, None) + + async def list_values(self) -> List[Any]: + async with self._lock: + return list(self._items.values()) + + +# Global stores shared by OpenAI entrypoints +# Maps video_id -> VideoJob +VIDEO_STORE = AsyncDictStore() diff --git a/tests/integration/defs/examples/test_visual_gen.py b/tests/integration/defs/examples/test_visual_gen.py new file mode 100644 index 0000000000..2b48bab322 --- /dev/null +++ b/tests/integration/defs/examples/test_visual_gen.py @@ -0,0 +1,288 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Integration tests: VBench dimension scores for WAN and LTX-2 (TRT-LLM vs diffusers reference).""" + +import glob +import json +import os + +import pytest +from defs.common import venv_check_call +from defs.conftest import llm_models_root +from defs.trt_test_alternative import check_call + +WAN_T2V_MODEL_SUBPATH = "Wan2.1-T2V-1.3B-Diffusers" +VISUAL_GEN_OUTPUT_VIDEO = "trtllm_output.mp4" +DIFFUSERS_REFERENCE_VIDEO = "diffusers_reference.mp4" +WAN_T2V_PROMPT = "A cute cat playing piano" +WAN_T2V_HEIGHT = 480 +WAN_T2V_WIDTH = 832 +WAN_T2V_NUM_FRAMES = 165 + +# Dimensions to evaluate +VBENCH_DIMENSIONS = [ + "subject_consistency", + "background_consistency", + "motion_smoothness", + "dynamic_degree", + "aesthetic_quality", + "imaging_quality", +] + +# Golden VBench scores from HF reference video (WAN); TRT-LLM is compared against these.
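+# All golden values are on the normalized 0-1 scale; raw VBench scores above 1.5 (e.g. imaging_quality, reported on 0-100) are rescaled by _normalize_score below.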
+VBENCH_WAN_GOLDEN_SCORES = { + "subject_consistency": 0.9381, + "background_consistency": 0.9535, + "motion_smoothness": 0.9923, + "dynamic_degree": 1.0000, + "aesthetic_quality": 0.5033, + "imaging_quality": 0.3033, +} + +VBENCH_REPO = "https://github.com/Vchitect/VBench.git" +VBENCH_BRANCH = "master" +# Pin to a fixed commit for reproducible runs +VBENCH_COMMIT = "98b19513678e99c80d8377fda25ba53b81a491a6" + + +@pytest.fixture(scope="session") +def vbench_repo_root(llm_venv): + """Clone VBench repo into workspace and install; return repo root path.""" + workspace = llm_venv.get_working_directory() + repo_path = os.path.join(workspace, "VBench_repo") + if os.path.exists(repo_path): + return repo_path + # Clone without --depth=1 so we can checkout a specific commit + check_call( + ["git", "clone", "--single-branch", "--branch", VBENCH_BRANCH, VBENCH_REPO, repo_path], + shell=False, + ) + check_call(["git", "-C", repo_path, "checkout", VBENCH_COMMIT], shell=False) + # # Install VBench dependencies explicitly + # llm_venv.run_cmd([ + # "-m", "pip", "install", + # "tqdm>=4.60.0", + # "openai-clip>=1.0", + # "pyiqa>=0.1.0", # install this might also install transformers=4.37.2, which is incompatible + # "easydict", + # "decord>=0.6.0", + # ]) + return repo_path + + +@pytest.fixture(scope="session") +def wan_trtllm_video_path(llm_venv, llm_root): + """Generate input video via visual_gen_wan_t2v.py and return path to trtllm_output.mp4.""" + scratch_space = llm_models_root() + model_path = os.path.join(scratch_space, WAN_T2V_MODEL_SUBPATH) + if not os.path.isdir(model_path): + pytest.skip( + f"Wan T2V model not found: {model_path} " + f"(set LLM_MODELS_ROOT or place {WAN_T2V_MODEL_SUBPATH} under scratch)" + ) + out_dir = os.path.join(llm_venv.get_working_directory(), "visual_gen_output") + os.makedirs(out_dir, exist_ok=True) + output_path = os.path.join(out_dir, VISUAL_GEN_OUTPUT_VIDEO) + if os.path.isfile(output_path): + return output_path + # Install av and diffusers from main branch + llm_venv.run_cmd(["-m", "pip", "install", "av"]) + llm_venv.run_cmd( + [ + "-m", + "pip", + "install", + "git+https://github.com/huggingface/diffusers.git", + ] + ) + script_path = os.path.join(llm_root, "examples", "visual_gen", "visual_gen_wan_t2v.py") + assert os.path.isfile(script_path), f"Visual gen script not found: {script_path}" + venv_check_call( + llm_venv, + [ + script_path, + "--height", + str(WAN_T2V_HEIGHT), + "--width", + str(WAN_T2V_WIDTH), + "--num_frames", + str(WAN_T2V_NUM_FRAMES), + "--model_path", + model_path, + "--prompt", + WAN_T2V_PROMPT, + "--output_path", + output_path, + ], + ) + assert os.path.isfile(output_path), f"Visual gen did not produce {output_path}" + return output_path + + +@pytest.fixture(scope="session") +def wan_reference_video_path(llm_venv, llm_root): + """Generate reference video via diffusers (hf_wan.py) using the same model checkpoint.""" + scratch_space = llm_models_root() + model_path = os.path.join(scratch_space, WAN_T2V_MODEL_SUBPATH) + if not os.path.isdir(model_path): + pytest.skip( + f"Wan T2V model not found: {model_path} " + f"(set LLM_MODELS_ROOT or place {WAN_T2V_MODEL_SUBPATH} under scratch)" + ) + out_dir = os.path.join(llm_venv.get_working_directory(), "visual_gen_output") + os.makedirs(out_dir, exist_ok=True) + reference_path = os.path.join(out_dir, DIFFUSERS_REFERENCE_VIDEO) + if os.path.isfile(reference_path): + return reference_path + hf_script = os.path.join(llm_root, "examples", "visual_gen", "hf_wan.py") + assert os.path.isfile(hf_script), 
f"Diffusers script not found: {hf_script}" + venv_check_call( + llm_venv, + [ + hf_script, + "--model_path", + model_path, + "--prompt", + WAN_T2V_PROMPT, + "--output_path", + reference_path, + "--height", + str(WAN_T2V_HEIGHT), + "--width", + str(WAN_T2V_WIDTH), + "--num_frames", + str(WAN_T2V_NUM_FRAMES), + ], + ) + assert os.path.isfile(reference_path), f"Diffusers did not produce {reference_path}" + return reference_path + + +def _visual_gen_out_dir(llm_venv, subdir=""): + """Output directory for generated media; subdir e.g. 'ltx2' for model-specific outputs.""" + base = os.path.join(llm_venv.get_working_directory(), "visual_gen_output") + return os.path.join(base, subdir) if subdir else base + + +def _normalize_score(val): + """Normalize to 0-1 scale (e.g. imaging_quality can be 0-100).""" + if isinstance(val, bool): + return float(val) + if isinstance(val, (int, float)) and val > 1.5: + return val / 100.0 + return float(val) + + +def _get_per_video_scores(results, video_path_substr): + """From VBench results, get per-dimension score for the video whose path contains video_path_substr.""" + scores = {} + for dim in VBENCH_DIMENSIONS: + dim_result = results[dim] + assert isinstance(dim_result, list) and len(dim_result) >= 2, ( + f"Dimension '{dim}' result must be [overall_score, video_results]; got {type(dim_result)}" + ) + video_results = dim_result[1] + for entry in video_results: + if video_path_substr in entry.get("video_path", ""): + raw = entry.get("video_results") + scores[dim] = _normalize_score(raw) + break + else: + raise AssertionError( + f"No video matching '{video_path_substr}' in dimension '{dim}'; " + f"paths: {[e.get('video_path') for e in video_results]}" + ) + return scores + + +def _run_vbench_and_compare_to_golden( + vbench_repo_root, + videos_dir, + trtllm_filename, + golden_scores, + llm_venv, + title, + max_score_diff=0.1, +): + """Run VBench on videos_dir (TRT-LLM output only), compare to golden HF reference scores.""" + output_path = os.path.join( + llm_venv.get_working_directory(), "vbench_eval_output", title.replace(" ", "_").lower() + ) + os.makedirs(output_path, exist_ok=True) + evaluate_script = os.path.join(vbench_repo_root, "evaluate.py") + cmd = [ + evaluate_script, + "--videos_path", + videos_dir, + "--output_path", + output_path, + "--mode", + "custom_input", + ] + cmd.extend(["--dimension"] + VBENCH_DIMENSIONS) + venv_check_call(llm_venv, cmd) + pattern = os.path.join(output_path, "*_eval_results.json") + result_files = glob.glob(pattern) + assert result_files, ( + f"No eval results found matching {pattern}; output dir: {os.listdir(output_path)}" + ) + with open(result_files[0], "r") as f: + results = json.load(f) + for dim in VBENCH_DIMENSIONS: + assert dim in results, ( + f"Expected dimension '{dim}' in results; keys: {list(results.keys())}" + ) + scores_trtllm = _get_per_video_scores(results, trtllm_filename) + scores_ref = golden_scores + max_len = max(len(d) for d in VBENCH_DIMENSIONS) + header = f"{'Dimension':<{max_len}} | {'TRT-LLM':>10} | {'HF Ref':>10} | {'Diff':>8}" + sep = "-" * len(header) + print("\n" + "=" * len(header)) + print(f"VBench dimension scores ({title}): TRT-LLM vs golden HF reference scores") + print("=" * len(header)) + print(header) + print(sep) + max_diff_val = 0.0 + for dim in VBENCH_DIMENSIONS: + t, r = scores_trtllm[dim], scores_ref[dim] + diff = abs(t - r) + max_diff_val = max(max_diff_val, diff) + print(f"{dim:<{max_len}} | {t:>10.4f} | {r:>10.4f} | {diff:>8.4f}") + print(sep) + print( + f"{' (all 
dimensions)':<{max_len}} | (TRT-LLM) | (golden) | max_diff={max_diff_val:.4f}" + ) + print("=" * len(header) + "\n") + for dim in VBENCH_DIMENSIONS: + diff = abs(scores_trtllm[dim] - scores_ref[dim]) + assert diff < max_score_diff or scores_trtllm[dim] >= scores_ref[dim], ( + f"Dimension '{dim}' score difference {diff:.4f} >= {max_score_diff} " + f"(TRT-LLM={scores_trtllm[dim]:.4f}, golden={scores_ref[dim]:.4f})" + ) + + +def test_vbench_dimension_score_wan(vbench_repo_root, wan_trtllm_video_path, llm_venv): + """Run VBench on WAN TRT-LLM video; compare to golden HF reference scores (diff < 0.05 or TRT-LLM >= golden).""" + videos_dir = os.path.dirname(wan_trtllm_video_path) + assert os.path.isfile(wan_trtllm_video_path), "TRT-LLM video must exist" + _run_vbench_and_compare_to_golden( + vbench_repo_root, + videos_dir, + VISUAL_GEN_OUTPUT_VIDEO, + VBENCH_WAN_GOLDEN_SCORES, + llm_venv, + title="WAN", + max_score_diff=0.05, + ) diff --git a/tests/integration/test_lists/test-db/l0_b200.yml b/tests/integration/test_lists/test-db/l0_b200.yml index fc45b1eb8b..c8e88a6e1f 100644 --- a/tests/integration/test_lists/test-db/l0_b200.yml +++ b/tests/integration/test_lists/test-db/l0_b200.yml @@ -91,6 +91,17 @@ l0_b200: - unittest/tools/test_layer_wise_benchmarks.py::test_performance_alignment[1] - unittest/_torch/modeling/test_modeling_exaone4.py::TestEXAONE4::test_llm_load_1_FP8 - unittest/kv_cache_manager_v2_tests/ + # ------------- Visual Gen tests --------------- + - unittest/_torch/visual_gen/test_fused_qkv.py + - unittest/_torch/visual_gen/test_quant_ops.py + - unittest/_torch/visual_gen/test_attention_integration.py + - unittest/_torch/visual_gen/test_attention_perf.py + - unittest/_torch/visual_gen/test_trtllm_serve_endpoints.py + - unittest/_torch/visual_gen/test_trtllm_serve_e2e.py + - unittest/_torch/visual_gen/test_wan.py -k "not TestWanTwoStageTransformer" + - unittest/_torch/visual_gen/test_wan_i2v.py + - unittest/_torch/visual_gen/test_model_loader.py + # - examples/test_visual_gen.py - condition: ranges: system_gpu_count: @@ -161,6 +172,7 @@ l0_b200: - accuracy/test_llm_api_pytorch.py::TestDeepSeekV3Lite::test_nvfp4[moe_backend=CUTEDSL-mtp_nextn=0-fp8kv=True-attention_dp=True-cuda_graph=True-overlap_scheduler=True-torch_compile=False] - accuracy/test_llm_api_pytorch.py::TestSeedOss_36B::test_auto_dtype - accuracy/test_llm_api_pytorch.py::TestLlama3_1_8B_Instruct_RocketKV::test_auto_dtype + - unittest/_torch/visual_gen/test_wan.py::TestWanTwoStageTransformer # ------------- AutoDeploy Backend Stages --------------- - condition: ranges: diff --git a/tests/integration/test_lists/test-db/l0_dgx_b200.yml b/tests/integration/test_lists/test-db/l0_dgx_b200.yml index 3c8eb1b6bc..1592d1247f 100644 --- a/tests/integration/test_lists/test-db/l0_dgx_b200.yml +++ b/tests/integration/test_lists/test-db/l0_dgx_b200.yml @@ -30,6 +30,12 @@ l0_dgx_b200: - disaggregated/test_disaggregated.py::test_disaggregated_gpt_oss_120b_harmony[gpt_oss/gpt-oss-120b] - accuracy/test_llm_api_pytorch.py::TestDeepSeekR1::test_nvfp4_multi_gpus[latency_adp_lmtp_tp4] - accuracy/test_llm_api_pytorch.py::TestMiniMaxM2::test_4gpus[attention_dp=False-cuda_graph=True-overlap_scheduler=True-tp_size=4-ep_size=4] TIMEOUT (60) + # ------------- VisualGen multi-GPU tests --------------- + - unittest/_torch/visual_gen/multi_gpu + - unittest/_torch/visual_gen/test_wan.py::TestWanParallelism::test_cfg_2gpu_correctness + - unittest/_torch/visual_gen/test_wan.py::TestWanCombinedOptimizations::test_all_optimizations_combined + - 
unittest/_torch/visual_gen/test_wan_i2v.py::TestWanI2VParallelism::test_cfg_2gpu_correctness + - unittest/_torch/visual_gen/test_wan_i2v.py::TestWanI2VCombinedOptimizations::test_all_optimizations_combined - condition: ranges: system_gpu_count: diff --git a/tests/unittest/_torch/visual_gen/multi_gpu/__init__.py b/tests/unittest/_torch/visual_gen/multi_gpu/__init__.py new file mode 100644 index 0000000000..fac2aaa011 --- /dev/null +++ b/tests/unittest/_torch/visual_gen/multi_gpu/__init__.py @@ -0,0 +1 @@ +"""Multi-GPU tests for visual generation modules.""" diff --git a/tests/unittest/_torch/visual_gen/multi_gpu/test_ulysses_attention.py b/tests/unittest/_torch/visual_gen/multi_gpu/test_ulysses_attention.py new file mode 100644 index 0000000000..0d691cf9ae --- /dev/null +++ b/tests/unittest/_torch/visual_gen/multi_gpu/test_ulysses_attention.py @@ -0,0 +1,505 @@ +"""Multi-GPU tests for Ulysses Attention. + +These tests use torch.multiprocessing.spawn to launch multiple processes internally. +Run with: + pytest tests/unittest/_torch/visual_gen/multi_gpu/test_ulysses_attention.py -v +""" + +import os + +os.environ["TLLM_DISABLE_MPI"] = "1" + +import math +from typing import Callable + +import pytest +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +import torch.nn.functional as F + +# Try to import the modules - skip tests if not available +try: + from tensorrt_llm._torch.attention_backend.interface import PredefinedAttentionMask + from tensorrt_llm._torch.distributed import all_to_all_4d + from tensorrt_llm._torch.visual_gen.attention_backend import UlyssesAttention, VanillaAttention + from tensorrt_llm._utils import get_free_port + + MODULES_AVAILABLE = True +except ImportError: + MODULES_AVAILABLE = False + + +@pytest.fixture(autouse=True, scope="module") +def _cleanup_mpi_env(): + """Clean up TLLM_DISABLE_MPI env var after tests complete.""" + yield + os.environ.pop("TLLM_DISABLE_MPI", None) + + +def init_distributed_worker(rank: int, world_size: int, backend: str = "gloo", port: int = 29500): + """Initialize distributed environment for a worker process.""" + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = str(port) + os.environ["RANK"] = str(rank) + os.environ["WORLD_SIZE"] = str(world_size) + + # Use gloo backend for CPU, nccl for GPU + if backend == "nccl" and torch.cuda.is_available(): + torch.cuda.set_device(rank % torch.cuda.device_count()) + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) + else: + dist.init_process_group(backend="gloo", rank=rank, world_size=world_size) + + +def cleanup_distributed(): + """Clean up distributed environment.""" + if dist.is_initialized(): + dist.destroy_process_group() + + +def _distributed_worker(rank, world_size, backend, test_fn, port): + """Worker function that runs in each process. Module-level for pickling.""" + try: + init_distributed_worker(rank, world_size, backend, port) + test_fn(rank, world_size) + except Exception as e: + print(f"Rank {rank} failed with error: {e}") + raise + finally: + cleanup_distributed() + + +def run_test_in_distributed(world_size: int, test_fn: Callable, use_cuda: bool = True): + """Run a test function in a distributed environment with multiple processes. + + Args: + world_size: Number of processes to spawn + test_fn: Test function to run (must be module-level for pickling). + Should accept (rank, world_size) as arguments.
+ use_cuda: Whether to use CUDA (requires sufficient GPUs) + """ + if not MODULES_AVAILABLE: + pytest.skip("Required modules not available") + + if use_cuda and torch.cuda.device_count() < world_size: + pytest.skip(f"Test requires {world_size} GPUs, only {torch.cuda.device_count()} available") + + backend = "nccl" if use_cuda else "gloo" + + port = get_free_port() + + # Spawn processes + mp.spawn( + _distributed_worker, args=(world_size, backend, test_fn, port), nprocs=world_size, join=True + ) + + +# ============================================================================= +# Test logic functions (module-level so they can be pickled by mp.spawn) +# ============================================================================= + + +def _logic_a2a_seq_to_head(rank, world_size): + """all_to_all_4d: sequence sharding to head sharding.""" + batch = 2 + seq_per_rank = 4 + heads = 8 + head_dim = 64 + + if heads % world_size != 0: + heads = world_size * 2 + + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + input_tensor = ( + torch.randn(batch, seq_per_rank, heads, head_dim, device=device, dtype=torch.float32) + + rank * 100 + ) + + output = all_to_all_4d( + input_tensor, + scatter_dim=2, + gather_dim=1, + process_group=None, + ) + + expected_shape = (batch, seq_per_rank * world_size, heads // world_size, head_dim) + assert output.shape == expected_shape, ( + f"Rank {rank}: Expected shape {expected_shape}, got {output.shape}" + ) + assert output.device == device + + +def _logic_a2a_head_to_seq(rank, world_size): + """all_to_all_4d: head sharding to sequence sharding.""" + batch = 2 + seq = 16 + heads_per_rank = 2 + head_dim = 64 + + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + input_tensor = torch.randn( + batch, seq, heads_per_rank, head_dim, device=device, dtype=torch.float32 + ) + + output = all_to_all_4d( + input_tensor, + scatter_dim=1, + gather_dim=2, + process_group=None, + ) + + expected_shape = (batch, seq // world_size, heads_per_rank * world_size, head_dim) + assert output.shape == expected_shape, ( + f"Rank {rank}: Expected shape {expected_shape}, got {output.shape}" + ) + + +def _logic_a2a_roundtrip(rank, world_size): + """all_to_all_4d: forward and backward are inverses.""" + batch = 2 + seq_per_rank = 4 + heads = world_size * 4 + head_dim = 64 + + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + original = torch.randn(batch, seq_per_rank, heads, head_dim, device=device, dtype=torch.float32) + + intermediate = all_to_all_4d(original, scatter_dim=2, gather_dim=1, process_group=None) + reconstructed = all_to_all_4d(intermediate, scatter_dim=1, gather_dim=2, process_group=None) + + assert reconstructed.shape == original.shape + torch.testing.assert_close(reconstructed, original, rtol=1e-5, atol=1e-5) + + +def _logic_a2a_single_process(rank, world_size): + """all_to_all_4d: single process returns input unchanged.""" + batch, seq, heads, head_dim = 2, 8, 4, 64 + device = torch.device(f"cuda:{rank}" if torch.cuda.is_available() else "cpu") + + input_tensor = torch.randn(batch, seq, heads, head_dim, device=device) + + output = all_to_all_4d(input_tensor, scatter_dim=2, gather_dim=1, process_group=None) + + torch.testing.assert_close(output, input_tensor) + + +def _logic_ulysses_init(rank, world_size): + """UlyssesAttention initialization.""" + num_heads = world_size * 4 + head_dim = 64 + + inner = VanillaAttention(num_heads=num_heads // 
world_size, head_dim=head_dim) + attention = UlyssesAttention( + inner_backend=inner, + process_group=None, + ) + + assert attention.num_heads == num_heads + assert attention.head_dim == head_dim + assert attention.world_size == world_size + assert rank >= 0 and rank < world_size + + +def _logic_ulysses_forward(rank, world_size): + """UlyssesAttention forward pass.""" + batch = 2 + seq_per_rank = 8 + num_heads = world_size * 4 + head_dim = 64 + + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + inner = VanillaAttention(num_heads=num_heads // world_size, head_dim=head_dim) + attention = UlyssesAttention( + inner_backend=inner, + process_group=None, + ).to(device) + + q = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + k = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + v = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + + output = attention(q, k, v, batch_size=batch, seq_len=seq_per_rank * world_size) + + assert output.shape == q.shape, f"Rank {rank}: Expected shape {q.shape}, got {output.shape}" + assert output.device == device + + +def _logic_ulysses_with_mask(rank, world_size): + """UlyssesAttention with attention mask.""" + batch = 2 + seq_per_rank = 8 + seq_full = seq_per_rank * world_size + num_heads = world_size * 4 + head_dim = 64 + + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + inner = VanillaAttention(num_heads=num_heads // world_size, head_dim=head_dim) + attention = UlyssesAttention( + inner_backend=inner, + process_group=None, + ).to(device) + + q = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + k = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + v = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + + mask = PredefinedAttentionMask.CAUSAL + + output = attention(q, k, v, batch_size=batch, seq_len=seq_full, attention_mask=mask) + + assert output.shape == q.shape + + +def _logic_ulysses_vs_standard_multi_gpu(rank, world_size): + """UlyssesAttention across multiple GPUs matches standard attention on the full sequence.""" + batch = 2 + seq_per_rank = 8 + seq_full = seq_per_rank * world_size + num_heads = world_size * 4 + head_dim = 64 + + device = torch.device(f"cuda:{rank}" if torch.cuda.is_available() else "cpu") + + # Every rank generates identical full tensors using the same seed. + torch.manual_seed(42) + q_full = torch.randn(batch, seq_full, num_heads, head_dim, device=device) + k_full = torch.randn(batch, seq_full, num_heads, head_dim, device=device) + v_full = torch.randn(batch, seq_full, num_heads, head_dim, device=device) + + # Each rank takes its sequence shard. + q_shard = q_full[:, rank * seq_per_rank : (rank + 1) * seq_per_rank].contiguous() + k_shard = k_full[:, rank * seq_per_rank : (rank + 1) * seq_per_rank].contiguous() + v_shard = v_full[:, rank * seq_per_rank : (rank + 1) * seq_per_rank].contiguous() + + # Ulysses attention on shards. + inner = VanillaAttention(num_heads=num_heads // world_size, head_dim=head_dim) + attention = UlyssesAttention( + inner_backend=inner, + process_group=None, + ).to(device) + + ulysses_output = attention(q_shard, k_shard, v_shard, batch_size=batch, seq_len=seq_full) + + # Standard attention on the full tensors. 
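+ # F.scaled_dot_product_attention consumes [B, H, S, D], while the Ulysses path works in [B, S, H, D]; hence the transposes below.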
+ q_std = q_full.transpose(1, 2) # [B, H, S, D] + k_std = k_full.transpose(1, 2) + v_std = v_full.transpose(1, 2) + + std_output = F.scaled_dot_product_attention( + q_std, k_std, v_std, scale=1.0 / math.sqrt(head_dim), dropout_p=0.0 + ) + std_output = std_output.transpose(1, 2).contiguous() # [B, S, H, D] + + # Compare the shard slice. + expected_shard = std_output[:, rank * seq_per_rank : (rank + 1) * seq_per_rank] + torch.testing.assert_close( + ulysses_output, + expected_shard, + rtol=1e-4, + atol=1e-4, + msg=f"Rank {rank}: Ulysses multi-GPU output differs from standard attention", + ) + + +def _logic_ulysses_invalid_heads(rank, world_size): + """Invalid head count (not divisible by world_size) cannot be sharded.""" + assert rank >= 0 and rank < world_size + + num_heads = world_size * 4 + 1 # Not divisible + head_dim = 64 + + # With the decorator pattern, the caller is responsible for sharding heads. + # num_heads // world_size truncates, so the wrapper's computed full head + # count won't match the original. + sharded_heads = num_heads // world_size + inner = VanillaAttention(num_heads=sharded_heads, head_dim=head_dim) + attention = UlyssesAttention(inner_backend=inner, process_group=None) + assert attention.num_heads != num_heads # Truncation means mismatch + + +def _logic_different_batch_sizes(rank, world_size): + """Various batch sizes.""" + num_heads = world_size * 4 + head_dim = 64 + seq_per_rank = 8 + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + inner = VanillaAttention(num_heads=num_heads // world_size, head_dim=head_dim) + attention = UlyssesAttention( + inner_backend=inner, + process_group=None, + ).to(device) + + for batch_size in [1, 2, 4, 8]: + q = torch.randn(batch_size, seq_per_rank, num_heads, head_dim, device=device) + k = torch.randn(batch_size, seq_per_rank, num_heads, head_dim, device=device) + v = torch.randn(batch_size, seq_per_rank, num_heads, head_dim, device=device) + + output = attention(q, k, v, batch_size=batch_size, seq_len=seq_per_rank * world_size) + assert output.shape == q.shape + + +def _logic_different_head_dims(rank, world_size): + """Various head dims.""" + batch = 2 + seq_per_rank = 8 + num_heads = world_size * 4 + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + for head_dim in [32, 64, 128]: + inner = VanillaAttention(num_heads=num_heads // world_size, head_dim=head_dim) + attention = UlyssesAttention( + inner_backend=inner, + process_group=None, + ).to(device) + + q = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + k = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + v = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + + output = attention(q, k, v, batch_size=batch, seq_len=seq_per_rank * world_size) + assert output.shape == q.shape + + +def _logic_world_size_4(rank, world_size): + """4-GPU test.""" + batch = 2 + seq_per_rank = 16 + num_heads = world_size * 8 # 32 heads total + head_dim = 64 + + device = torch.device(f"cuda:{rank}") if torch.cuda.is_available() else torch.device("cpu") + + inner = VanillaAttention(num_heads=num_heads // world_size, head_dim=head_dim) + attention = UlyssesAttention( + inner_backend=inner, + process_group=None, + ).to(device) + + q = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + k = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + v = torch.randn(batch, seq_per_rank, num_heads, head_dim, device=device) + + 
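+ # q/k/v are local sequence shards; seq_len passes the full gathered length (seq_per_rank * world_size) that Ulysses needs for its all-to-all.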
output = attention(q, k, v, batch_size=batch, seq_len=seq_per_rank * world_size) + assert output.shape == q.shape + + +# ============================================================================= +# Test classes +# ============================================================================= + + +class TestAllToAll4D: + """Tests for all_to_all_4d function.""" + + def test_all_to_all_4d_sequence_to_head(self): + """Test sequence sharding to head sharding transformation.""" + run_test_in_distributed(world_size=2, test_fn=_logic_a2a_seq_to_head, use_cuda=True) + + def test_all_to_all_4d_head_to_sequence(self): + """Test head sharding to sequence sharding transformation.""" + run_test_in_distributed(world_size=2, test_fn=_logic_a2a_head_to_seq, use_cuda=True) + + def test_all_to_all_4d_roundtrip(self): + """Test that forward and backward all-to-all are inverses.""" + run_test_in_distributed(world_size=2, test_fn=_logic_a2a_roundtrip, use_cuda=True) + + def test_all_to_all_4d_single_process(self): + """Test that single process returns input unchanged.""" + run_test_in_distributed(world_size=1, test_fn=_logic_a2a_single_process, use_cuda=True) + + +class TestUlyssesAttention: + """Tests for UlyssesAttention module.""" + + def test_ulysses_attention_initialization(self): + """Test UlyssesAttention initialization.""" + run_test_in_distributed(world_size=2, test_fn=_logic_ulysses_init, use_cuda=True) + + def test_ulysses_attention_forward(self): + """Test UlyssesAttention forward pass.""" + run_test_in_distributed(world_size=2, test_fn=_logic_ulysses_forward, use_cuda=True) + + def test_ulysses_attention_with_mask(self): + """Test UlyssesAttention with attention mask.""" + run_test_in_distributed(world_size=2, test_fn=_logic_ulysses_with_mask, use_cuda=True) + + def test_ulysses_vs_standard_attention_single_gpu(self): + """Compare UlyssesAttention with standard attention on single GPU.""" + if not MODULES_AVAILABLE: + pytest.skip("Required modules not available") + + if not torch.cuda.is_available(): + pytest.skip("Test requires CUDA") + + batch = 2 + seq = 16 + num_heads = 8 + head_dim = 64 + device = torch.device("cuda:0") + + inner = VanillaAttention(num_heads=num_heads, head_dim=head_dim) + ulysses_attn = UlyssesAttention( + inner_backend=inner, + process_group=None, + ).to(device) + + torch.manual_seed(42) + q = torch.randn(batch, seq, num_heads, head_dim, device=device) + k = torch.randn(batch, seq, num_heads, head_dim, device=device) + v = torch.randn(batch, seq, num_heads, head_dim, device=device) + + ulysses_output = ulysses_attn(q, k, v, batch_size=batch, seq_len=seq) + + q_std = q.transpose(1, 2) # [B, H, S, D] + k_std = k.transpose(1, 2) + v_std = v.transpose(1, 2) + + std_output = F.scaled_dot_product_attention( + q_std, k_std, v_std, scale=1.0 / math.sqrt(head_dim), dropout_p=0.0 + ) + std_output = std_output.transpose(1, 2).contiguous() # [B, S, H, D] + + torch.testing.assert_close( + ulysses_output, + std_output, + rtol=1e-4, + atol=1e-4, + msg="Ulysses attention output differs from standard attention", + ) + + def test_ulysses_vs_standard_attention_multi_gpu(self): + """Compare UlyssesAttention across GPUs with standard attention on full sequence.""" + run_test_in_distributed( + world_size=2, test_fn=_logic_ulysses_vs_standard_multi_gpu, use_cuda=True + ) + + def test_ulysses_attention_invalid_heads(self): + """Test that invalid head count raises error.""" + run_test_in_distributed(world_size=2, test_fn=_logic_ulysses_invalid_heads, use_cuda=False) + + +class 
TestUlyssesAttentionEdgeCases: + """Edge case tests for UlyssesAttention.""" + + def test_different_batch_sizes(self): + """Test with various batch sizes.""" + run_test_in_distributed(world_size=2, test_fn=_logic_different_batch_sizes, use_cuda=True) + + def test_different_head_dims(self): + """Test with various head dims.""" + run_test_in_distributed(world_size=2, test_fn=_logic_different_head_dims, use_cuda=True) + + def test_world_size_4(self): + """Test with 4 GPUs.""" + run_test_in_distributed(world_size=4, test_fn=_logic_world_size_4, use_cuda=True) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/unittest/_torch/visual_gen/test_attention_integration.py b/tests/unittest/_torch/visual_gen/test_attention_integration.py new file mode 100644 index 0000000000..e346421d76 --- /dev/null +++ b/tests/unittest/_torch/visual_gen/test_attention_integration.py @@ -0,0 +1,540 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +"""Test WAN Attention Integration. + +Compares the new integrated attention (using TRT-LLM backend) with the original +naive implementation to ensure numerical equivalence. +""" + +from types import SimpleNamespace + +import pytest +import torch +import torch.nn as nn +import torch.nn.functional as F + +from tensorrt_llm._torch.modules.rms_norm import RMSNorm +from tensorrt_llm._torch.visual_gen.config import AttentionConfig, DiffusionModelConfig + +# Import new integrated versions +from tensorrt_llm._torch.visual_gen.modules.attention import Attention, QKVMode, apply_rotary_emb + +# ============================================================================ +# Original naive implementations for comparison +# ============================================================================ + + +class NaiveWanSelfAttention(nn.Module): + """Original naive self-attention implementation (for comparison).""" + + def __init__( + self, hidden_size: int, num_heads: int, head_dim: int, eps: float = 1e-6, dtype=None + ): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + self.hidden_size = hidden_size + + # fused QKV projection + self.to_qkv = nn.Linear(hidden_size, 3 * hidden_size, dtype=dtype) + self.norm_q = RMSNorm(hidden_size=hidden_size, eps=eps, dtype=dtype, has_weights=True) + self.norm_k = RMSNorm(hidden_size=hidden_size, eps=eps, dtype=dtype, has_weights=True) + self.to_out = nn.ModuleList([nn.Linear(hidden_size, hidden_size, dtype=dtype)]) + + def forward(self, hidden_states, freqs_cos, freqs_sin): + B, S = hidden_states.shape[:2] + + q, k, v = self.to_qkv(hidden_states).chunk(3, dim=-1) + + q = self.norm_q(q).view(B, S, self.num_heads, self.head_dim).transpose(1, 2) + k = self.norm_k(k).view(B, S, self.num_heads, self.head_dim).transpose(1, 2) + v = v.view(B, S, self.num_heads, self.head_dim).transpose(1, 2) + + if freqs_cos is not None and freqs_sin is not None: + q = apply_rotary_emb(q, freqs_cos, freqs_sin) + k = apply_rotary_emb(k, freqs_cos, freqs_sin) + + out = F.scaled_dot_product_attention(q, k, v, is_causal=False) + out = out.transpose(1, 2).flatten(2) + out = self.to_out[0](out) + return out + + +class NaiveWanCrossAttention(nn.Module): + """Original naive cross-attention implementation (for comparison).""" + + def __init__( + self, hidden_size: int, num_heads: int, head_dim: int, eps: float = 1e-6, dtype=None + ): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + self.hidden_size = 
hidden_size + + self.to_q = nn.Linear(hidden_size, hidden_size, dtype=dtype) + self.to_k = nn.Linear(hidden_size, hidden_size, dtype=dtype) + self.to_v = nn.Linear(hidden_size, hidden_size, dtype=dtype) + self.norm_q = RMSNorm(hidden_size=hidden_size, eps=eps, dtype=dtype, has_weights=True) + self.norm_k = RMSNorm(hidden_size=hidden_size, eps=eps, dtype=dtype, has_weights=True) + self.to_out = nn.ModuleList([nn.Linear(hidden_size, hidden_size, dtype=dtype)]) + + def forward(self, hidden_states, encoder_hidden_states): + B, S = hidden_states.shape[:2] + + q = self.norm_q(self.to_q(hidden_states)) + k = self.norm_k(self.to_k(encoder_hidden_states)) + v = self.to_v(encoder_hidden_states) + + q = q.view(B, S, self.num_heads, self.head_dim).transpose(1, 2) + k = k.view(B, -1, self.num_heads, self.head_dim).transpose(1, 2) + v = v.view(B, -1, self.num_heads, self.head_dim).transpose(1, 2) + + out = F.scaled_dot_product_attention(q, k, v, is_causal=False) + out = out.transpose(1, 2).flatten(2) + out = self.to_out[0](out) + return out + + +# ============================================================================ +# Test utilities +# ============================================================================ + + +def create_model_config( + hidden_size: int, + num_heads: int, + head_dim: int, + eps: float = 1e-6, + attn_backend: str = "VANILLA", +): + """Create a mock DiffusionModelConfig for testing.""" + pretrained_config = SimpleNamespace( + hidden_size=hidden_size, + num_attention_heads=num_heads, + attention_head_dim=head_dim, + eps=eps, + ) + + # Create a minimal config without quantization + config = DiffusionModelConfig( + pretrained_config=pretrained_config, + attention=AttentionConfig(backend=attn_backend), + skip_create_weights_in_init=False, + ) + return config + + +def copy_weights_self_attention(naive: NaiveWanSelfAttention, integrated: Attention): + """Copy weights from naive to integrated self-attention.""" + # QKV projection: naive has to_qkv, integrated has qkv_proj + integrated.qkv_proj.weight.data.copy_(naive.to_qkv.weight.data) + if naive.to_qkv.bias is not None and integrated.qkv_proj.bias is not None: + integrated.qkv_proj.bias.data.copy_(naive.to_qkv.bias.data) + + # QK norms + integrated.norm_q.weight.data.copy_(naive.norm_q.weight.data) + integrated.norm_k.weight.data.copy_(naive.norm_k.weight.data) + + # Output projection + integrated.to_out[0].weight.data.copy_(naive.to_out[0].weight.data) + if naive.to_out[0].bias is not None and integrated.to_out[0].bias is not None: + integrated.to_out[0].bias.data.copy_(naive.to_out[0].bias.data) + + +def copy_weights_cross_attention(naive: NaiveWanCrossAttention, integrated: Attention): + """Copy weights from naive to integrated cross-attention.""" + # Q, K, V projections + integrated.to_q.weight.data.copy_(naive.to_q.weight.data) + integrated.to_k.weight.data.copy_(naive.to_k.weight.data) + integrated.to_v.weight.data.copy_(naive.to_v.weight.data) + + if naive.to_q.bias is not None and integrated.to_q.bias is not None: + integrated.to_q.bias.data.copy_(naive.to_q.bias.data) + if naive.to_k.bias is not None and integrated.to_k.bias is not None: + integrated.to_k.bias.data.copy_(naive.to_k.bias.data) + if naive.to_v.bias is not None and integrated.to_v.bias is not None: + integrated.to_v.bias.data.copy_(naive.to_v.bias.data) + + # QK norms + integrated.norm_q.weight.data.copy_(naive.norm_q.weight.data) + integrated.norm_k.weight.data.copy_(naive.norm_k.weight.data) + + # Output projection + 
integrated.to_out[0].weight.data.copy_(naive.to_out[0].weight.data) + if naive.to_out[0].bias is not None and integrated.to_out[0].bias is not None: + integrated.to_out[0].bias.data.copy_(naive.to_out[0].bias.data) + + +def generate_rope_embeddings( + seq_len: int, head_dim: int, device: torch.device, is_HSD: bool = False +): + """Generate RoPE embeddings with full head_dim. + + apply_rotary_emb expects freqs with full head_dim, then slices with [..., 0::2] and [..., 1::2]. + + Args: + is_HSD: If True, returns [1, 1, S, D] for broadcasting with [B, H, S, D] (naive) + If False, returns [1, S, 1, D] for broadcasting with [B, S, H, D] (integrated) + """ + position = torch.arange(seq_len, device=device).unsqueeze(1) + # Use full head_dim - apply_rotary_emb will slice with 0::2 and 1::2 + div_term = torch.exp( + torch.arange(0, head_dim, device=device) * (-torch.log(torch.tensor(10000.0)) / head_dim) + ) + + if is_HSD: + freqs_cos = torch.cos(position * div_term).unsqueeze(0).unsqueeze(0) # [1, 1, S, D] + freqs_sin = torch.sin(position * div_term).unsqueeze(0).unsqueeze(0) # [1, 1, S, D] + else: + freqs_cos = torch.cos(position * div_term).unsqueeze(0).unsqueeze(2) # [1, S, 1, D] + freqs_sin = torch.sin(position * div_term).unsqueeze(0).unsqueeze(2) # [1, S, 1, D] + + return freqs_cos, freqs_sin + + +# ============================================================================ +# Test functions +# ============================================================================ +@pytest.mark.parametrize("attn_backend", ["VANILLA", "TRTLLM"]) +def test_self_attention_equivalence(attn_backend: str): + """Test that integrated self-attention produces same output as naive.""" + print("\n" + "=" * 60) + print("Testing Self-Attention Equivalence") + print("=" * 60) + + # Config + batch_size = 2 + seq_len = 16 + hidden_size = 128 + num_heads = 4 + head_dim = hidden_size // num_heads + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + dtype = torch.bfloat16 # Use bf16 since flashinfer doesn't support fp32 + + print(f"Config: B={batch_size}, S={seq_len}, H={hidden_size}, heads={num_heads}") + print(f"Device: {device}, dtype: {dtype}") + + # Create models + naive = NaiveWanSelfAttention(hidden_size, num_heads, head_dim, dtype=dtype).to(device) + + model_config = create_model_config(hidden_size, num_heads, head_dim, attn_backend=attn_backend) + integrated = Attention( + hidden_size, num_heads, qkv_mode=QKVMode.FUSE_QKV, config=model_config + ).to(device) # self attention + + # Copy weights + copy_weights_self_attention(naive, integrated) + + # Set to eval mode + naive.eval() + integrated.eval() + + # Create inputs + torch.manual_seed(42) + hidden_states = torch.randn(batch_size, seq_len, hidden_size, device=device, dtype=dtype) + # Naive uses [1, 1, S, D] (HSD format) - broadcasts with [B, H, S, D] + freqs_cos_HSD, freqs_sin_HSD = generate_rope_embeddings(seq_len, head_dim, device, is_HSD=True) + # Integrated uses [1, S, 1, D] (SHD format) - broadcasts with [B, S, H, D] + freqs_cos_SHD, freqs_sin_SHD = generate_rope_embeddings(seq_len, head_dim, device, is_HSD=False) + + # Forward pass + with torch.no_grad(): + out_naive = naive(hidden_states, freqs_cos_HSD, freqs_sin_HSD) + out_integrated = integrated(hidden_states, freqs=(freqs_cos_SHD, freqs_sin_SHD)) + + # Compare (using looser tolerance for bf16) + max_diff = (out_naive - out_integrated).abs().max().item() + mean_diff = (out_naive - out_integrated).abs().mean().item() + is_close = torch.allclose(out_naive, out_integrated, rtol=1e-2, 
atol=1e-2) + + print("\nResults:") + print(f" Output shape: naive={out_naive.shape}, integrated={out_integrated.shape}") + print(f" Max absolute difference: {max_diff:.2e}") + print(f" Mean absolute difference: {mean_diff:.2e}") + print(f" Outputs match (rtol=1e-2, atol=1e-2): {is_close}") + + if is_close: + print(" āœ… PASS: Self-attention outputs match!") + else: + print(" āŒ FAIL: Self-attention outputs differ!") + + assert is_close, ( + f"Self-attention outputs differ: max_diff={max_diff:.2e}, mean_diff={mean_diff:.2e}" + ) + return is_close + + +@pytest.mark.parametrize("attn_backend", ["VANILLA"]) +def test_cross_attention_equivalence(attn_backend: str): + """Test that integrated cross-attention produces same output as naive.""" + print("\n" + "=" * 60) + print("Testing Cross-Attention Equivalence") + print("=" * 60) + + # Config + batch_size = 2 + seq_len = 16 + encoder_seq_len = 24 # Different from query seq_len + hidden_size = 128 + num_heads = 4 + head_dim = hidden_size // num_heads + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + dtype = torch.bfloat16 # Use bf16 since flashinfer doesn't support fp32 + + print( + f"Config: B={batch_size}, S_q={seq_len}, S_kv={encoder_seq_len}, H={hidden_size}, heads={num_heads}" + ) + print(f"Device: {device}, dtype: {dtype}") + + # Create models + naive = NaiveWanCrossAttention(hidden_size, num_heads, head_dim, dtype=dtype).to(device) + + model_config = create_model_config(hidden_size, num_heads, head_dim, attn_backend=attn_backend) + integrated = Attention( + hidden_size, num_heads, qkv_mode=QKVMode.SEPARATE_QKV, config=model_config + ).to(device) # cross attention + + # Copy weights + copy_weights_cross_attention(naive, integrated) + + # Set to eval mode + naive.eval() + integrated.eval() + + # Create inputs + torch.manual_seed(42) + hidden_states = torch.randn(batch_size, seq_len, hidden_size, device=device, dtype=dtype) + encoder_hidden_states = torch.randn( + batch_size, encoder_seq_len, hidden_size, device=device, dtype=dtype + ) + + # Forward pass + with torch.no_grad(): + out_naive = naive(hidden_states, encoder_hidden_states) + out_integrated = integrated(hidden_states, encoder_hidden_states) + + # Compare (using looser tolerance for bf16) + max_diff = (out_naive - out_integrated).abs().max().item() + mean_diff = (out_naive - out_integrated).abs().mean().item() + is_close = torch.allclose(out_naive, out_integrated, rtol=1e-2, atol=1e-2) + + print("\nResults:") + print(f" Output shape: naive={out_naive.shape}, integrated={out_integrated.shape}") + print(f" Max absolute difference: {max_diff:.2e}") + print(f" Mean absolute difference: {mean_diff:.2e}") + print(f" Outputs match (rtol=1e-2, atol=1e-2): {is_close}") + + if is_close: + print(" āœ… PASS: Cross-attention outputs match!") + else: + print(" āŒ FAIL: Cross-attention outputs differ!") + + assert is_close, ( + f"Cross-attention outputs differ: max_diff={max_diff:.2e}, mean_diff={mean_diff:.2e}" + ) + return is_close + + +def test_trtllm_cached_prepare(): + """Test that TRTLLM attention cached prepare works correctly. + + This test verifies that when running multiple forward passes with same B/S + but different q/k/v values, the cached prepare phase doesn't cause incorrect + results (i.e., outputs should differ when inputs differ). 
+ """ + print("\n" + "=" * 60) + print("Testing TRTLLM Cached Prepare Phase") + print("=" * 60) + + # Config - same B, S for all iterations + batch_size = 2 + seq_len = 16 + hidden_size = 128 + num_heads = 4 + head_dim = hidden_size // num_heads + num_iterations = 5 + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + dtype = torch.bfloat16 + + print(f"Config: B={batch_size}, S={seq_len}, H={hidden_size}, heads={num_heads}") + print(f"Running {num_iterations} iterations with same B/S but different inputs") + + # Create models - single instance to test caching + naive = NaiveWanSelfAttention(hidden_size, num_heads, head_dim, dtype=dtype).to(device) + model_config = create_model_config(hidden_size, num_heads, head_dim, attn_backend="TRTLLM") + integrated = Attention( + hidden_size, num_heads, qkv_mode=QKVMode.FUSE_QKV, config=model_config + ).to(device) # self attention + + # Copy weights + copy_weights_self_attention(naive, integrated) + + naive.eval() + integrated.eval() + + # Generate freqs (same for all iterations since S is same) + freqs_cos_HSD, freqs_sin_HSD = generate_rope_embeddings(seq_len, head_dim, device, is_HSD=True) + freqs_cos_SHD, freqs_sin_SHD = generate_rope_embeddings(seq_len, head_dim, device, is_HSD=False) + + all_passed = True + outputs_integrated = [] + + with torch.no_grad(): + for i in range(num_iterations): + # Different random inputs for each iteration + torch.manual_seed(42 + i) # Different seed each time + hidden_states = torch.randn( + batch_size, seq_len, hidden_size, device=device, dtype=dtype + ) + + out_naive = naive(hidden_states, freqs_cos_HSD, freqs_sin_HSD) + out_integrated = integrated(hidden_states, freqs=(freqs_cos_SHD, freqs_sin_SHD)) + + # Check this iteration matches naive + max_diff = (out_naive - out_integrated).abs().max().item() + is_close = torch.allclose(out_naive, out_integrated, rtol=1e-2, atol=1e-2) + + status = "āœ…" if is_close else "āŒ" + print(f" Iteration {i + 1}: max_diff={max_diff:.2e} {status}") + + if not is_close: + all_passed = False + + outputs_integrated.append(out_integrated.clone()) + + # Additional check: outputs should be DIFFERENT across iterations + # (since inputs were different) + print("\n Checking outputs differ across iterations (inputs were different):") + outputs_differ = True + for i in range(1, num_iterations): + diff = (outputs_integrated[i] - outputs_integrated[0]).abs().max().item() + if diff < 1e-6: + print( + f" āš ļø Iteration {i + 1} output same as iteration 1 (diff={diff:.2e}) - possible caching bug!" + ) + outputs_differ = False + else: + print(f" Iteration {i + 1} vs 1: diff={diff:.2e} āœ…") + + if all_passed and outputs_differ: + print("\n āœ… PASS: Cached prepare works correctly!") + else: + print("\n āŒ FAIL: Cached prepare may have issues!") + all_passed = False + + assert all_passed, "Cached prepare: outputs did not match naive reference" + assert outputs_differ, ( + "Cached prepare: outputs should differ across iterations with different inputs" + ) + return all_passed + + +def test_trtllm_varying_seq_len(): + """Test TRTLLM attention with varying sequence lengths. + + This tests that the prepare phase correctly handles different seq_lens + and doesn't incorrectly reuse cached metadata. 
+ """ + print("\n" + "=" * 60) + print("Testing TRTLLM with Varying Sequence Lengths") + print("=" * 60) + + batch_size = 2 + hidden_size = 128 + num_heads = 4 + head_dim = hidden_size // num_heads + seq_lens = [8, 16, 32, 16, 8] # Vary seq_len, including repeats + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + dtype = torch.bfloat16 + + print(f"Config: B={batch_size}, H={hidden_size}, heads={num_heads}") + print(f"Testing seq_lens: {seq_lens}") + + # Create models - single instance to test caching across different seq_lens + naive = NaiveWanSelfAttention(hidden_size, num_heads, head_dim, dtype=dtype).to(device) + model_config = create_model_config(hidden_size, num_heads, head_dim, attn_backend="TRTLLM") + integrated = Attention( + hidden_size, num_heads, qkv_mode=QKVMode.FUSE_QKV, config=model_config + ).to(device) # self attention + + copy_weights_self_attention(naive, integrated) + + naive.eval() + integrated.eval() + + all_passed = True + + with torch.no_grad(): + for i, seq_len in enumerate(seq_lens): + torch.manual_seed(42 + i) + hidden_states = torch.randn( + batch_size, seq_len, hidden_size, device=device, dtype=dtype + ) + + freqs_cos_HSD, freqs_sin_HSD = generate_rope_embeddings( + seq_len, head_dim, device, is_HSD=True + ) + freqs_cos_SHD, freqs_sin_SHD = generate_rope_embeddings( + seq_len, head_dim, device, is_HSD=False + ) + + out_naive = naive(hidden_states, freqs_cos_HSD, freqs_sin_HSD) + out_integrated = integrated(hidden_states, freqs=(freqs_cos_SHD, freqs_sin_SHD)) + + max_diff = (out_naive - out_integrated).abs().max().item() + is_close = torch.allclose(out_naive, out_integrated, rtol=1e-2, atol=1e-2) + + status = "āœ…" if is_close else "āŒ" + print(f" seq_len={seq_len:3d}: max_diff={max_diff:.2e} {status}") + + if not is_close: + all_passed = False + + if all_passed: + print("\n āœ… PASS: Varying seq_len handled correctly!") + else: + print("\n āŒ FAIL: Issues with varying seq_len!") + + assert all_passed, "Varying seq_len: outputs did not match naive reference" + return all_passed + + +def run_all_tests(): + """Run all tests and report results.""" + print("\n" + "=" * 60) + print("WAN Attention Integration Tests") + print("=" * 60) + + results = {} + + # Run self-attention tests with different backends + for backend in ["VANILLA", "TRTLLM"]: + results[f"self_attention_{backend}"] = test_self_attention_equivalence(backend) + + # Run cross-attention test (VANILLA only) + results["cross_attention_VANILLA"] = test_cross_attention_equivalence("VANILLA") + + # Run TRTLLM-specific caching tests + results["trtllm_cached_prepare"] = test_trtllm_cached_prepare() + results["trtllm_varying_seq_len"] = test_trtllm_varying_seq_len() + + print("\n" + "=" * 60) + print("Summary") + print("=" * 60) + + all_passed = all(results.values()) + for name, passed in results.items(): + status = "āœ… PASS" if passed else "āŒ FAIL" + print(f" {name}: {status}") + + print() + if all_passed: + print("All tests passed! āœ…") + else: + print("Some tests failed! āŒ") + + return all_passed + + +if __name__ == "__main__": + run_all_tests() diff --git a/tests/unittest/_torch/visual_gen/test_attention_perf.py b/tests/unittest/_torch/visual_gen/test_attention_perf.py new file mode 100644 index 0000000000..d2662105dc --- /dev/null +++ b/tests/unittest/_torch/visual_gen/test_attention_perf.py @@ -0,0 +1,622 @@ +# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""WAN Attention Performance Benchmark. + +Compares VANILLA vs TRTLLM attention backends for visual generation models. +Uses CUDA events for precise GPU timing and supports NVTX profiling. + +Usage: + # Run all tests + python test_attention_perf.py + + # With Nsight Systems profiling + nsys profile -t cuda,nvtx --nvtx-capture=range -o wan_attn_perf python test_attention_perf.py + + # Run specific tests with pytest + pytest test_attention_perf.py -v -k "test_self_attention_perf" +""" + +import time +from contextlib import contextmanager +from types import SimpleNamespace +from typing import Dict, Optional, Tuple + +import pytest +import torch + +from tensorrt_llm._torch.visual_gen.config import AttentionConfig, DiffusionModelConfig +from tensorrt_llm._torch.visual_gen.modules.attention import Attention, QKVMode + +# NVTX support for profiling +try: + import nvtx + + NVTX_AVAILABLE = True + if hasattr(nvtx, "annotate"): + NVTX_METHOD = "annotate" + elif hasattr(nvtx, "range_start") and hasattr(nvtx, "range_end"): + NVTX_METHOD = "range" + else: + NVTX_METHOD = None + NVTX_AVAILABLE = False +except ImportError: + NVTX_AVAILABLE = False + NVTX_METHOD = None + +# Torch profiler support +try: + from torch.profiler import record_function + + TORCH_PROFILER_AVAILABLE = True +except ImportError: + TORCH_PROFILER_AVAILABLE = False + + +# ============================================================================ +# Timing utilities +# ============================================================================ + + +@contextmanager +def cuda_timer(device: torch.device): + """Context manager for precise GPU timing using CUDA events.""" + if device.type == "cuda": + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + start_event.record() + + def get_elapsed_time(): + end_event.record() + torch.cuda.synchronize() + return start_event.elapsed_time(end_event) + + yield get_elapsed_time + else: + start_time = time.perf_counter() + + def get_elapsed_time(): + return (time.perf_counter() - start_time) * 1000 + + yield get_elapsed_time + + +@contextmanager +def nvtx_range(name: str): + """Context manager for NVTX range profiling.""" + if NVTX_AVAILABLE and NVTX_METHOD: + if NVTX_METHOD == "annotate": + with nvtx.annotate(name): + yield + elif NVTX_METHOD == "range": + range_id = nvtx.range_start(name) + try: + yield + finally: + nvtx.range_end(range_id) + else: + yield + else: + yield + + +@contextmanager +def torch_profiler_range(name: str): + """Context manager for torch profiler range.""" + if TORCH_PROFILER_AVAILABLE: + with record_function(name): + yield + else: + yield + + +# ============================================================================ +# Test utilities +# ============================================================================ + + +def create_model_config( + hidden_size: int, + num_heads: int, + head_dim: int, + eps: float = 1e-6, + attn_backend: 
str = "VANILLA", +) -> DiffusionModelConfig: + """Create a mock DiffusionModelConfig for testing.""" + pretrained_config = SimpleNamespace( + hidden_size=hidden_size, + num_attention_heads=num_heads, + attention_head_dim=head_dim, + eps=eps, + ) + + config = DiffusionModelConfig( + pretrained_config=pretrained_config, + attention=AttentionConfig(backend=attn_backend), + skip_create_weights_in_init=False, + ) + return config + + +def generate_rope_embeddings( + seq_len: int, head_dim: int, device: torch.device, is_HSD: bool = False +) -> Tuple[torch.Tensor, torch.Tensor]: + """Generate RoPE embeddings. + + Args: + seq_len: Sequence length + head_dim: Head dimension + device: Target device + is_HSD: If True, returns [1, 1, S, D] for HSD format, else [1, S, 1, D] for SHD + + Returns: + Tuple of (freqs_cos, freqs_sin) + """ + position = torch.arange(seq_len, device=device).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, head_dim, device=device) * (-torch.log(torch.tensor(10000.0)) / head_dim) + ) + + if is_HSD: + freqs_cos = torch.cos(position * div_term).unsqueeze(0).unsqueeze(0) + freqs_sin = torch.sin(position * div_term).unsqueeze(0).unsqueeze(0) + else: + freqs_cos = torch.cos(position * div_term).unsqueeze(0).unsqueeze(2) + freqs_sin = torch.sin(position * div_term).unsqueeze(0).unsqueeze(2) + + return freqs_cos, freqs_sin + + +# ============================================================================ +# Performance benchmark class +# ============================================================================ + + +class WanAttentionPerformanceBenchmark: + """Performance benchmark for WAN attention backends.""" + + # WAN model configurations: (batch_size, num_heads, seq_len, head_dim, description) + TEST_SIZES = [ + # Wan2.1-T2V-1.3B configurations + (1, 24, 14040, 64, "Wan-1.3B 480p 2s"), + (1, 24, 3510, 64, "Wan-1.3B 480p 2s ring4"), + (1, 24, 7020, 64, "Wan-1.3B 480p 2s ring2"), + # Wan2.1-T2V-14B configurations + (1, 40, 75600, 128, "Wan-14B 720p 5s"), + (1, 40, 37800, 128, "Wan-14B 720p 5s ring2"), + (1, 40, 18900, 128, "Wan-14B 720p 5s ring4"), + (1, 40, 9450, 128, "Wan-14B 720p 5s ring8"), + # Ulysses parallelism configurations + (1, 20, 75600, 128, "Wan-14B 720p ulysses2"), + (1, 10, 75600, 128, "Wan-14B 720p ulysses4"), + (1, 5, 75600, 128, "Wan-14B 720p ulysses8"), + # Smaller test cases for quick validation + (2, 24, 1024, 64, "Small batch2"), + (1, 24, 4096, 64, "Medium 4k"), + (1, 40, 8192, 128, "Large 8k"), + ] + + # Quick test sizes for CI/pytest + QUICK_TEST_SIZES = [ + (1, 24, 1024, 64, "Quick 1k"), + (1, 24, 2048, 64, "Quick 2k"), + (2, 24, 1024, 64, "Quick batch2"), + ] + + def __init__( + self, + device: Optional[torch.device] = None, + dtype: torch.dtype = torch.bfloat16, + warmup_iterations: int = 10, + benchmark_iterations: int = 50, + ): + self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.dtype = dtype + self.warmup_iterations = warmup_iterations + self.benchmark_iterations = benchmark_iterations + self.backends = ["VANILLA", "TRTLLM"] + + def create_attention_model( + self, hidden_size: int, num_heads: int, head_dim: int, backend: str + ) -> Attention: + """Create a WAN self-attention model with specified backend.""" + config = create_model_config(hidden_size, num_heads, head_dim, attn_backend=backend) + model = Attention(hidden_size, num_heads, qkv_mode=QKVMode.FUSE_QKV, config=config).to( + self.device + ) + model.eval() + return model + + def create_test_data( + self, batch_size: int, seq_len: int, 
hidden_size: int, head_dim: int + ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """Create test input data and RoPE embeddings.""" + hidden_states = torch.randn( + batch_size, seq_len, hidden_size, device=self.device, dtype=self.dtype + ) + freqs = generate_rope_embeddings(seq_len, head_dim, self.device, is_HSD=False) + return hidden_states, freqs + + def estimate_memory_gb( + self, batch_size: int, num_heads: int, seq_len: int, head_dim: int + ) -> float: + """Estimate tensor memory usage in GB.""" + hidden_size = num_heads * head_dim + # Input: [B, S, H] + Q, K, V: [B, S, num_heads, head_dim] each + bytes_per_element = 2 # bf16 + input_bytes = batch_size * seq_len * hidden_size * bytes_per_element + qkv_bytes = 3 * batch_size * seq_len * num_heads * head_dim * bytes_per_element + output_bytes = batch_size * seq_len * hidden_size * bytes_per_element + # Attention matrix can be O(S^2) but flash attention avoids materializing it + return (input_bytes + qkv_bytes + output_bytes) / (1024**3) + + def benchmark_single( + self, + batch_size: int, + num_heads: int, + seq_len: int, + head_dim: int, + backend: str, + verbose: bool = True, + ) -> Optional[Dict]: + """Benchmark a single configuration. + + Returns: + Dict with timing statistics or None if test failed/skipped + """ + hidden_size = num_heads * head_dim + + # Memory check + est_memory = self.estimate_memory_gb(batch_size, num_heads, seq_len, head_dim) + if est_memory > 8.0: + if verbose: + print(f" Skipping - estimated memory {est_memory:.2f}GB > 8GB limit") + return None + + try: + # Create model and data + model = self.create_attention_model(hidden_size, num_heads, head_dim, backend) + hidden_states, freqs = self.create_test_data(batch_size, seq_len, hidden_size, head_dim) + + # Warmup + with nvtx_range(f"warmup_{backend}"): + with torch_profiler_range(f"warmup_{backend}"): + with torch.no_grad(): + for _ in range(self.warmup_iterations): + _ = model(hidden_states, freqs=freqs) + + if self.device.type == "cuda": + torch.cuda.synchronize() + + # Benchmark + times = [] + with nvtx_range(f"benchmark_{backend}"): + with torch_profiler_range(f"benchmark_{backend}"): + with torch.no_grad(): + for i in range(self.benchmark_iterations): + with nvtx_range(f"iter_{backend}_{i}"): + with cuda_timer(self.device) as get_time: + _ = model(hidden_states, freqs=freqs) + times.append(get_time()) + + # Statistics + times_tensor = torch.tensor(times) + stats = { + "avg_ms": times_tensor.mean().item(), + "min_ms": times_tensor.min().item(), + "max_ms": times_tensor.max().item(), + "std_ms": times_tensor.std().item(), + "median_ms": times_tensor.median().item(), + "p95_ms": torch.quantile(times_tensor, 0.95).item(), + "p99_ms": torch.quantile(times_tensor, 0.99).item(), + } + + # Calculate throughput (approximate TOPS) + total_ops = batch_size * num_heads * seq_len * seq_len * head_dim + stats["throughput_tops"] = (total_ops / 1e12) / (stats["avg_ms"] / 1000) + + if verbose: + print( + f" {backend}: avg={stats['avg_ms']:.3f}ms, " + f"median={stats['median_ms']:.3f}ms, " + f"throughput={stats['throughput_tops']:.2f} TOPS" + ) + + return stats + + except Exception as e: + if verbose: + print(f" {backend}: ERROR - {e}") + return None + + def benchmark_comparison( + self, + batch_size: int, + num_heads: int, + seq_len: int, + head_dim: int, + description: str = "", + verbose: bool = True, + ) -> Dict[str, Optional[Dict]]: + """Benchmark and compare all backends for a given configuration.""" + if verbose: + print( + f"\nBenchmarking: 
({batch_size}, {num_heads}, {seq_len}, {head_dim}) {description}" + ) + print(f" Device: {self.device}, dtype: {self.dtype}") + print(f" Warmup: {self.warmup_iterations}, Iterations: {self.benchmark_iterations}") + + results = {} + for backend in self.backends: + results[backend] = self.benchmark_single( + batch_size, num_heads, seq_len, head_dim, backend, verbose + ) + + # Print comparison + if verbose and results.get("VANILLA") and results.get("TRTLLM"): + vanilla_avg = results["VANILLA"]["avg_ms"] + trtllm_avg = results["TRTLLM"]["avg_ms"] + speedup = vanilla_avg / trtllm_avg + print(f" TRTLLM vs VANILLA: {speedup:.2f}x {'faster' if speedup > 1 else 'slower'}") + + return results + + def run_full_benchmark(self, use_quick_sizes: bool = False) -> Dict: + """Run benchmark on all configured sizes.""" + test_sizes = self.QUICK_TEST_SIZES if use_quick_sizes else self.TEST_SIZES + + print("\n" + "=" * 70) + print("WAN ATTENTION PERFORMANCE BENCHMARK") + print("=" * 70) + print(f"Device: {self.device}") + print(f"dtype: {self.dtype}") + print(f"Backends: {self.backends}") + print(f"NVTX: {'Enabled' if NVTX_AVAILABLE else 'Disabled'}") + print(f"Torch Profiler: {'Enabled' if TORCH_PROFILER_AVAILABLE else 'Disabled'}") + + all_results = {} + + with nvtx_range("wan_attention_benchmark"): + with torch_profiler_range("wan_attention_benchmark"): + for batch_size, num_heads, seq_len, head_dim, desc in test_sizes: + key = f"{desc}_{batch_size}x{num_heads}x{seq_len}x{head_dim}" + results = self.benchmark_comparison( + batch_size, num_heads, seq_len, head_dim, desc + ) + all_results[key] = { + "config": { + "batch_size": batch_size, + "num_heads": num_heads, + "seq_len": seq_len, + "head_dim": head_dim, + "description": desc, + }, + "results": results, + } + + # Print summary + self._print_summary(all_results) + return all_results + + def _print_summary(self, all_results: Dict) -> None: + """Print benchmark summary table.""" + print("\n" + "=" * 70) + print("BENCHMARK SUMMARY") + print("=" * 70) + print(f"{'Configuration':<40} {'VANILLA (ms)':<15} {'TRTLLM (ms)':<15} {'Speedup':<10}") + print("-" * 70) + + for key, data in all_results.items(): + desc = data["config"]["description"] + results = data["results"] + + vanilla = results.get("VANILLA") + trtllm = results.get("TRTLLM") + + vanilla_str = f"{vanilla['avg_ms']:.2f}" if vanilla else "N/A" + trtllm_str = f"{trtllm['avg_ms']:.2f}" if trtllm else "N/A" + + if vanilla and trtllm: + speedup = vanilla["avg_ms"] / trtllm["avg_ms"] + speedup_str = f"{speedup:.2f}x" + else: + speedup_str = "N/A" + + print(f"{desc:<40} {vanilla_str:<15} {trtllm_str:<15} {speedup_str:<10}") + + def test_memory_usage( + self, + batch_size: int = 1, + num_heads: int = 24, + seq_len: int = 4096, + head_dim: int = 64, + ) -> Dict[str, Dict]: + """Test memory usage of different backends.""" + if self.device.type != "cuda": + print("Memory test requires CUDA device") + return {} + + print("\n" + "=" * 70) + print("MEMORY USAGE TEST") + print("=" * 70) + print(f"Config: ({batch_size}, {num_heads}, {seq_len}, {head_dim})") + + hidden_size = num_heads * head_dim + memory_results = {} + + for backend in self.backends: + print(f"\nTesting {backend}...") + + try: + # Clear cache + torch.cuda.empty_cache() + torch.cuda.reset_peak_memory_stats() + + # Create model and data + model = self.create_attention_model(hidden_size, num_heads, head_dim, backend) + hidden_states, freqs = self.create_test_data( + batch_size, seq_len, hidden_size, head_dim + ) + + # Warmup + with torch.no_grad(): + _ = 
model(hidden_states, freqs=freqs) + + torch.cuda.synchronize() + torch.cuda.reset_peak_memory_stats() + + # Forward pass + with nvtx_range(f"memory_test_{backend}"): + with torch.no_grad(): + _ = model(hidden_states, freqs=freqs) + + torch.cuda.synchronize() + + peak_memory_gb = torch.cuda.max_memory_allocated() / (1024**3) + current_memory_gb = torch.cuda.memory_allocated() / (1024**3) + + memory_results[backend] = { + "peak_memory_gb": peak_memory_gb, + "current_memory_gb": current_memory_gb, + } + + print(f" Peak memory: {peak_memory_gb:.3f} GB") + print(f" Current memory: {current_memory_gb:.3f} GB") + + except Exception as e: + print(f" ERROR: {e}") + memory_results[backend] = None + + return memory_results + + +# ============================================================================ +# Pytest test functions +# ============================================================================ + + +class TestWanAttentionPerformance: + """Pytest test class for WAN attention performance.""" + + @pytest.fixture(autouse=True) + def setup(self): + """Setup test environment.""" + self.benchmark = WanAttentionPerformanceBenchmark( + warmup_iterations=5, + benchmark_iterations=20, + ) + + @pytest.mark.parametrize("backend", ["VANILLA", "TRTLLM"]) + def test_self_attention_perf(self, backend: str): + """Test that attention backend runs without errors.""" + batch_size, num_heads, seq_len, head_dim = 1, 24, 1024, 64 + + result = self.benchmark.benchmark_single( + batch_size, num_heads, seq_len, head_dim, backend, verbose=True + ) + + if result is not None: + assert result["avg_ms"] > 0, "Average time should be positive" + assert result["min_ms"] <= result["avg_ms"], "Min should be <= avg" + assert result["max_ms"] >= result["avg_ms"], "Max should be >= avg" + print(f" {backend}: avg={result['avg_ms']:.3f}ms OK") + + @pytest.mark.parametrize( + "batch_size,num_heads,seq_len,head_dim", + [ + (1, 24, 1024, 64), + (1, 24, 2048, 64), + (2, 24, 1024, 64), + ], + ) + def test_backend_comparison(self, batch_size: int, num_heads: int, seq_len: int, head_dim: int): + """Test VANILLA vs TRTLLM comparison.""" + results = self.benchmark.benchmark_comparison( + batch_size, num_heads, seq_len, head_dim, verbose=True + ) + + # At least one backend should work + assert any(r is not None for r in results.values()), "All backends failed" + + @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA required") + def test_memory_usage(self): + """Test memory usage tracking.""" + memory_results = self.benchmark.test_memory_usage( + batch_size=1, num_heads=24, seq_len=2048, head_dim=64 + ) + + for backend, result in memory_results.items(): + if result is not None: + assert result["peak_memory_gb"] > 0, f"{backend} peak memory should be positive" + + def test_quick_benchmark(self): + """Run quick benchmark for CI validation.""" + results = self.benchmark.run_full_benchmark(use_quick_sizes=True) + assert len(results) > 0, "Should have benchmark results" + + +# ============================================================================ +# Main entry point +# ============================================================================ + + +def main(): + """Run full benchmark suite.""" + print("\n" + "=" * 70) + print("WAN ATTENTION PERFORMANCE BENCHMARK SUITE") + print("=" * 70) + + if not torch.cuda.is_available(): + print("WARNING: CUDA not available, results will not be meaningful") + + # Print profiling instructions + if torch.cuda.is_available(): + print("\nPROFILING INSTRUCTIONS:") + print("-" * 50) + if 
NVTX_AVAILABLE: + print("NVTX Profiling (Nsight Systems):") + print(" nsys profile -t cuda,nvtx --nvtx-capture=range \\") + print(" -o wan_attn_perf python test_attention_perf.py") + else: + print("NVTX not available. Install with: pip install nvtx") + + print("\nPyTorch Profiler:") + print(" The benchmark includes record_function() calls for profiling") + print("-" * 50) + + # Create benchmark instance + benchmark = WanAttentionPerformanceBenchmark( + warmup_iterations=10, + benchmark_iterations=50, + ) + + # Run full benchmark + print("\n" + "=" * 70) + print("FULL BENCHMARK") + print("=" * 70) + all_results = benchmark.run_full_benchmark(use_quick_sizes=False) + + # Memory test + if torch.cuda.is_available(): + benchmark.test_memory_usage(batch_size=1, num_heads=24, seq_len=4096, head_dim=64) + + print("\n" + "=" * 70) + print("BENCHMARK COMPLETE") + print("=" * 70) + + return all_results + + +if __name__ == "__main__": + main() diff --git a/tests/unittest/_torch/visual_gen/test_fused_qkv.py b/tests/unittest/_torch/visual_gen/test_fused_qkv.py new file mode 100644 index 0000000000..c918b44d06 --- /dev/null +++ b/tests/unittest/_torch/visual_gen/test_fused_qkv.py @@ -0,0 +1,126 @@ +"""Tests for fused QKV support in diffusion models. + +Tests: +1. Model structure with fuse_qkv=True (default) vs fuse_qkv=False +2. Weight loading works for fused QKV layers +""" + +import unittest +from types import SimpleNamespace +from typing import Dict + +import torch + +from tensorrt_llm._torch.visual_gen.config import DiffusionModelConfig + + +def _create_test_config(hidden_size: int = 64) -> DiffusionModelConfig: + """Create a test DiffusionModelConfig.""" + num_heads = hidden_size // 8 # e.g., 64 // 8 = 8 heads + head_dim = 8 + return DiffusionModelConfig( + pretrained_config=SimpleNamespace( + hidden_size=hidden_size, + num_attention_heads=num_heads, + attention_head_dim=head_dim, + num_layers=2, + ffn_dim=256, + out_channels=16, + patch_size=[1, 2, 2], + in_channels=16, + text_dim=64, + freq_dim=32, + ), + ) + + +class TestFusedQKVWeightLoading(unittest.TestCase): + """Test weight loading for fused QKV layers.""" + + def setUp(self): + """Set up test fixtures.""" + torch.manual_seed(42) + self.hidden_size = 64 + + def _create_mock_checkpoint_weights(self) -> Dict[str, torch.Tensor]: + """Create mock checkpoint weights with separate to_q, to_k, to_v.""" + dtype = torch.bfloat16 # Match model dtype + weights = {} + for block_idx in range(2): + for attn_name in ["attn1", "attn2"]: + prefix = f"blocks.{block_idx}.{attn_name}" + # Separate QKV weights (as in checkpoint) + weights[f"{prefix}.to_q.weight"] = torch.randn( + self.hidden_size, self.hidden_size, dtype=dtype + ) + weights[f"{prefix}.to_q.bias"] = torch.randn(self.hidden_size, dtype=dtype) + weights[f"{prefix}.to_k.weight"] = torch.randn( + self.hidden_size, self.hidden_size, dtype=dtype + ) + weights[f"{prefix}.to_k.bias"] = torch.randn(self.hidden_size, dtype=dtype) + weights[f"{prefix}.to_v.weight"] = torch.randn( + self.hidden_size, self.hidden_size, dtype=dtype + ) + weights[f"{prefix}.to_v.bias"] = torch.randn(self.hidden_size, dtype=dtype) + # Output projection + weights[f"{prefix}.to_out.0.weight"] = torch.randn( + self.hidden_size, self.hidden_size, dtype=dtype + ) + weights[f"{prefix}.to_out.0.bias"] = torch.randn(self.hidden_size, dtype=dtype) + + # FFN weights + ffn_dim = 256 + prefix = f"blocks.{block_idx}.ffn" + weights[f"{prefix}.net.0.proj.weight"] = torch.randn( + ffn_dim, self.hidden_size, dtype=dtype + ) + 
weights[f"{prefix}.net.0.proj.bias"] = torch.randn(ffn_dim, dtype=dtype)
+            weights[f"{prefix}.net.2.weight"] = torch.randn(self.hidden_size, ffn_dim, dtype=dtype)
+            weights[f"{prefix}.net.2.bias"] = torch.randn(self.hidden_size, dtype=dtype)
+
+        # proj_out
+        weights["proj_out.weight"] = torch.randn(64, self.hidden_size, dtype=dtype)
+        weights["proj_out.bias"] = torch.randn(64, dtype=dtype)
+
+        return weights
+
+    def test_load_weights_fused(self):
+        """Test loading weights with fused QKV (default for self-attention)."""
+        from tensorrt_llm._torch.visual_gen.models.wan.transformer_wan import WanTransformer3DModel
+
+        config = _create_test_config(self.hidden_size)
+
+        # Create model - self-attention (attn1) uses fused QKV by default
+        model = WanTransformer3DModel(model_config=config)
+        weights = self._create_mock_checkpoint_weights()
+
+        # Load weights (model handles fused QKV internally via DynamicLinearWeightLoader)
+        model.load_weights(weights)
+
+        # Verify fused weights were loaded correctly for self-attention
+        attn1 = model.blocks[0].attn1
+        qkv_weight = attn1.qkv_proj.weight.data
+
+        # Expected: concatenation of to_q, to_k, to_v weights
+        expected_weight = torch.cat(
+            [
+                weights["blocks.0.attn1.to_q.weight"],
+                weights["blocks.0.attn1.to_k.weight"],
+                weights["blocks.0.attn1.to_v.weight"],
+            ],
+            dim=0,
+        )
+
+        self.assertEqual(qkv_weight.shape, expected_weight.shape)
+        self.assertTrue(torch.allclose(qkv_weight, expected_weight))
+
+        # Also verify cross-attention (attn2) uses separate Q/K/V
+        attn2 = model.blocks[0].attn2
+        self.assertTrue(hasattr(attn2, "to_q"), "Cross-attention should have separate to_q")
+        self.assertTrue(
+            torch.allclose(attn2.to_q.weight.data, weights["blocks.0.attn2.to_q.weight"])
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/unittest/_torch/visual_gen/test_model_loader.py b/tests/unittest/_torch/visual_gen/test_model_loader.py
new file mode 100644
index 0000000000..9cc8cba70e
--- /dev/null
+++ b/tests/unittest/_torch/visual_gen/test_model_loader.py
@@ -0,0 +1,494 @@
+"""Test PipelineLoader with DiffusionArgs API."""
+
+import os
+from pathlib import Path
+
+import pytest
+import torch
+
+from tensorrt_llm._torch.visual_gen.config import PipelineComponent
+
+
+def _llm_models_root() -> str:
+    """Return the LLM models root directory.
+
+    Uses LLM_MODELS_ROOT from the environment if set, otherwise falls back to
+    known CI scratch paths; asserts if no candidate path exists.
+    """
+    root = Path("/home/scratch.trt_llm_data_ci/llm-models/")
+    if "LLM_MODELS_ROOT" in os.environ:
+        root = Path(os.environ["LLM_MODELS_ROOT"])
+    if not root.exists():
+        root = Path("/scratch.trt_llm_data/llm-models/")
+    assert root.exists(), (
+        "Set the LLM_MODELS_ROOT env var or make scratch.trt_llm_data accessible to run this test"
+    )
+    return str(root)
+
+
+# Skip if checkpoint not available
+# Set DIFFUSION_MODEL_PATH env var to run integration tests
+CHECKPOINT_PATH = os.environ.get(
+    "DIFFUSION_MODEL_PATH",
+    os.path.join(_llm_models_root(), "Wan2.1-T2V-1.3B-Diffusers"),
+)
+
+# Skip heavy components (text_encoder ~44GB, vae ~300MB) to speed up tests
+# These components are loaded via diffusers and don't need quantization testing
+SKIP_HEAVY_COMPONENTS = [
+    PipelineComponent.TEXT_ENCODER,
+    PipelineComponent.VAE,
+    PipelineComponent.TOKENIZER,
+    PipelineComponent.SCHEDULER,
+]
+
+
+@pytest.fixture
+def checkpoint_exists():
+    return CHECKPOINT_PATH and os.path.exists(CHECKPOINT_PATH)
+
+
+def test_meta_init_mode_creates_meta_tensors(checkpoint_exists):
+    """Test that MetaInitMode creates tensors on meta device (no GPU memory)."""
+    if
not checkpoint_exists: + pytest.skip("Checkpoint not available") + + from tensorrt_llm._torch.models.modeling_utils import MetaInitMode + from tensorrt_llm._torch.visual_gen import DiffusionArgs + from tensorrt_llm._torch.visual_gen.config import DiffusionModelConfig + from tensorrt_llm._torch.visual_gen.models import AutoPipeline + + # Load config directly + args = DiffusionArgs(checkpoint_path=CHECKPOINT_PATH) + config = DiffusionModelConfig.from_pretrained( + CHECKPOINT_PATH, + args=args, + ) + + # Create pipeline WITH MetaInitMode + with MetaInitMode(): + pipeline = AutoPipeline.from_config(config, CHECKPOINT_PATH) + + # Verify tensors are on meta device (no GPU memory allocated) + param = next(pipeline.transformer.parameters()) + assert param.device.type == "meta", f"Expected meta device, got {param.device}" + + +def test_load_wan_pipeline_basic(checkpoint_exists): + """Test basic loading without quantization using DiffusionArgs.""" + if not checkpoint_exists: + pytest.skip("Checkpoint not available") + + from tensorrt_llm._torch.visual_gen import DiffusionArgs, PipelineLoader + + # Simple one-liner with DiffusionArgs + # Skip text_encoder/vae to speed up test (focus on transformer) + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + skip_components=SKIP_HEAVY_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + # Verify pipeline type + assert pipeline.__class__.__name__ == "WanPipeline" + assert pipeline.transformer is not None + + # Verify text_encoder/vae were skipped + assert pipeline.text_encoder is None, "text_encoder should be skipped" + assert pipeline.vae is None, "vae should be skipped" + + # Verify weights are loaded (not meta tensors) + param = next(pipeline.transformer.parameters()) + assert param.device.type == "cuda" + assert param.dtype in [torch.float32, torch.bfloat16, torch.float16] + + +def test_load_wan_pipeline_with_fp8_dynamic_quant(checkpoint_exists): + """Test loading with FP8 dynamic quantization using DiffusionArgs. + + Verifies the dynamic quantization flow: + 1. Config has dynamic_weight_quant=True when linear.type="trtllm-fp8-per-tensor" + 2. Model Linear layers have FP8 weight buffers + 3. BF16 checkpoint weights are quantized on-the-fly + 4. 
Quantized weights are in FP8 format + """ + if not checkpoint_exists: + pytest.skip("Checkpoint not available") + + from tensorrt_llm._torch.modules.linear import Linear + from tensorrt_llm._torch.visual_gen import DiffusionArgs, PipelineLoader + + # Use DiffusionArgs with FP8 quantization + # Skip text_encoder/vae to speed up test (focus on transformer quantization) + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + quant_config={"quant_algo": "FP8", "dynamic": True}, + skip_components=SKIP_HEAVY_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + # Verify model config has dynamic_weight_quant enabled + assert pipeline.model_config.dynamic_weight_quant is True, ( + "dynamic_weight_quant should be True when linear.type specifies FP8" + ) + + # Verify FP8 weights in transformer Linear layers + found_fp8_linear = False + for name, module in pipeline.transformer.named_modules(): + if isinstance(module, Linear): + if hasattr(module, "weight") and module.weight is not None: + assert module.weight.dtype == torch.float8_e4m3fn, ( + f"Linear {name} weight dtype is {module.weight.dtype}, expected float8_e4m3fn" + ) + assert hasattr(module, "weight_scale") and module.weight_scale is not None, ( + f"Linear {name} missing weight_scale buffer" + ) + found_fp8_linear = True + break + + assert found_fp8_linear, "No FP8 Linear modules found in transformer" + + +def test_load_wan_pipeline_with_fp8_blockwise(checkpoint_exists): + """Test loading with FP8 blockwise quantization using DiffusionArgs.""" + if not checkpoint_exists: + pytest.skip("Checkpoint not available") + + from tensorrt_llm._torch.modules.linear import Linear + from tensorrt_llm._torch.visual_gen import DiffusionArgs, PipelineLoader + + # Skip text_encoder/vae to speed up test (focus on transformer quantization) + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + skip_components=SKIP_HEAVY_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + # Verify FP8 weights + for name, module in pipeline.transformer.named_modules(): + if isinstance(module, Linear): + if hasattr(module, "weight") and module.weight is not None: + assert module.weight.dtype == torch.float8_e4m3fn, ( + f"Linear {name} should have FP8 weight" + ) + break + + +def test_diffusion_args_to_quant_config(): + """Test that DiffusionArgs correctly parses quant_config dict to QuantConfig.""" + from tensorrt_llm._torch.visual_gen import DiffusionArgs + from tensorrt_llm.quantization.mode import QuantAlgo + + # Default - no quantization + args = DiffusionArgs(checkpoint_path="/fake/path") + assert args.quant_config.quant_algo is None + + # FP8 per-tensor (dict is coerced to QuantConfig by model_validator) + args = DiffusionArgs( + checkpoint_path="/fake/path", + quant_config={"quant_algo": "FP8", "dynamic": True}, + ) + qc = args.quant_config + assert qc is not None + assert qc.quant_algo == QuantAlgo.FP8 + assert args.dynamic_weight_quant is True + + # FP8 blockwise + args = DiffusionArgs( + checkpoint_path="/fake/path", + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + ) + qc = args.quant_config + assert qc.quant_algo == QuantAlgo.FP8_BLOCK_SCALES + + # NVFP4 + args = DiffusionArgs( + checkpoint_path="/fake/path", + quant_config={"quant_algo": "NVFP4", "dynamic": True}, + ) + qc = args.quant_config + assert qc.quant_algo == QuantAlgo.NVFP4 + + # With ignore patterns (exclude_modules) + args = DiffusionArgs( + checkpoint_path="/fake/path", + quant_config={ + 
"quant_algo": "FP8", + "ignore": ["blocks.0.attn1.*", "proj_out"], + "config_groups": { + "group_0": { + "weights": {"dynamic": True, "num_bits": 8, "type": "float"}, + "targets": ["Linear"], + } + }, + }, + ) + qc = args.quant_config + assert qc is not None + assert qc.quant_algo == QuantAlgo.FP8 + assert qc.exclude_modules == ["blocks.0.attn1.*", "proj_out"] + assert args.dynamic_weight_quant is True + + +def test_diffusion_args_to_mapping(): + """Test that DiffusionArgs correctly generates Mapping from ParallelConfig.""" + from tensorrt_llm._torch.visual_gen import DiffusionArgs, ParallelConfig + + # ParallelConfig validator requires WORLD_SIZE >= total parallel (tp*cp = 4) + old_world = os.environ.get("WORLD_SIZE") + try: + os.environ["WORLD_SIZE"] = "4" + args = DiffusionArgs( + checkpoint_path="/fake/path", + parallel=ParallelConfig(dit_tp_size=2, dit_cp_size=2), + ) + mapping = args.to_mapping() + assert mapping.tp_size == 2 + assert mapping.cp_size == 2 + # world_size = tp_size * pp_size * cp_size (DP is handled separately) + assert mapping.world_size == 4 + finally: + if old_world is not None: + os.environ["WORLD_SIZE"] = old_world + elif "WORLD_SIZE" in os.environ: + del os.environ["WORLD_SIZE"] + + +def test_load_without_quant_config_no_fp8(checkpoint_exists): + """Test that loading without quant_config does NOT produce FP8 weights.""" + if not checkpoint_exists: + pytest.skip("Checkpoint not available") + + from tensorrt_llm._torch.modules.linear import Linear + from tensorrt_llm._torch.visual_gen import DiffusionArgs, PipelineLoader + + # No quantization specified + # Skip text_encoder/vae to speed up test (focus on transformer) + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + skip_components=SKIP_HEAVY_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + # Verify dynamic_weight_quant is False + assert pipeline.model_config.dynamic_weight_quant is False, ( + "dynamic_weight_quant should be False when no quant_config" + ) + + # Verify NO FP8 weights + for name, module in pipeline.transformer.named_modules(): + if isinstance(module, Linear): + if hasattr(module, "weight") and module.weight is not None: + assert module.weight.dtype != torch.float8_e4m3fn, ( + f"Linear {name} should NOT be FP8 without quant_config" + ) + break + + +def test_diffusion_args_from_dict(): + """Test DiffusionArgs can be created from a dictionary.""" + from tensorrt_llm._torch.visual_gen import DiffusionArgs + from tensorrt_llm.quantization.mode import QuantAlgo + + config_dict = { + "checkpoint_path": "/path/to/model", + "quant_config": {"quant_algo": "FP8", "dynamic": True}, + "parallel": {"dit_tp_size": 2}, + "pipeline": {"fuse_qkv": True}, + } + # ParallelConfig validator requires WORLD_SIZE >= total parallel (dit_tp_size=2) + old_world = os.environ.get("WORLD_SIZE") + try: + os.environ["WORLD_SIZE"] = "2" + args = DiffusionArgs.from_dict(config_dict) + assert args.checkpoint_path == "/path/to/model" + assert args.quant_config.quant_algo == QuantAlgo.FP8 + assert args.dynamic_weight_quant is True + assert args.parallel.dit_tp_size == 2 + assert args.pipeline.fuse_qkv is True + finally: + if old_world is not None: + os.environ["WORLD_SIZE"] = old_world + elif "WORLD_SIZE" in os.environ: + del os.environ["WORLD_SIZE"] + + +# ============================================================================= +# Memory and Performance Tests +# ============================================================================= + + +def _get_module_memory_gb(module): + """Get GPU memory usage of a 
module in GB."""
+    return sum(p.numel() * p.element_size() for p in module.parameters()) / 1024**3
+
+
+def _get_cuda_memory_gb():
+    """Get current CUDA memory allocated in GB."""
+    return torch.cuda.memory_allocated() / 1024**3
+
+
+def _get_cuda_peak_memory_gb():
+    """Get peak CUDA memory allocated in GB."""
+    return torch.cuda.max_memory_allocated() / 1024**3
+
+
+def test_fp8_vs_bf16_memory_comparison(checkpoint_exists):
+    """Test that FP8 dynamic quant uses roughly half the memory of BF16, including peak memory.
+
+    This test verifies that dynamic quantization doesn't create unnecessary
+    intermediate buffers that would negate the memory savings.
+
+    Expected for Wan 1.3B transformer:
+    - BF16: ~2.6 GB model memory, similar peak during loading
+    - FP8: ~1.3 GB model memory, peak should stay well below the BF16 peak
+    """
+    if not checkpoint_exists:
+        pytest.skip("Checkpoint not available")
+
+    from tensorrt_llm._torch.visual_gen import DiffusionArgs, PipelineLoader
+
+    # =========================================================================
+    # Test 1: Load BF16 (no quantization)
+    # =========================================================================
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
+
+    args_bf16 = DiffusionArgs(
+        checkpoint_path=CHECKPOINT_PATH,
+        skip_components=SKIP_HEAVY_COMPONENTS,
+    )
+    pipeline_bf16 = PipelineLoader(args_bf16).load()
+
+    bf16_model_mem = _get_module_memory_gb(pipeline_bf16.transformer)
+    bf16_total_mem = _get_cuda_memory_gb()
+    bf16_peak_mem = _get_cuda_peak_memory_gb()
+
+    print(f"\n[BF16] Transformer model memory: {bf16_model_mem:.2f} GB")
+    print(f"[BF16] Total CUDA memory: {bf16_total_mem:.2f} GB")
+    print(f"[BF16] Peak CUDA memory: {bf16_peak_mem:.2f} GB")
+
+    # Cleanup BF16
+    del pipeline_bf16
+    torch.cuda.empty_cache()
+
+    # =========================================================================
+    # Test 2: Load FP8 (dynamic quantization)
+    # =========================================================================
+    torch.cuda.reset_peak_memory_stats()
+
+    args_fp8 = DiffusionArgs(
+        checkpoint_path=CHECKPOINT_PATH,
+        quant_config={"quant_algo": "FP8", "dynamic": True},
+        skip_components=SKIP_HEAVY_COMPONENTS,
+    )
+    pipeline_fp8 = PipelineLoader(args_fp8).load()
+
+    fp8_model_mem = _get_module_memory_gb(pipeline_fp8.transformer)
+    fp8_total_mem = _get_cuda_memory_gb()
+    fp8_peak_mem = _get_cuda_peak_memory_gb()
+
+    print(f"\n[FP8] Transformer model memory: {fp8_model_mem:.2f} GB")
+    print(f"[FP8] Total CUDA memory: {fp8_total_mem:.2f} GB")
+    print(f"[FP8] Peak CUDA memory: {fp8_peak_mem:.2f} GB")
+
+    # =========================================================================
+    # Verify memory savings
+    # =========================================================================
+    model_mem_ratio = bf16_model_mem / fp8_model_mem
+    peak_mem_ratio = bf16_peak_mem / fp8_peak_mem
+
+    print(f"\n[Comparison] Model memory ratio (BF16/FP8): {model_mem_ratio:.2f}x")
+    print(f"[Comparison] Peak memory ratio (BF16/FP8): {peak_mem_ratio:.2f}x")
+
+    # FP8 model memory should be roughly half of BF16 (ratio ~2x)
+    assert model_mem_ratio > 1.8, (
+        f"FP8 model memory should be roughly half of BF16 (~2x ratio), got {model_mem_ratio:.2f}x"
+    )
+
+    # Peak memory during loading should also show savings
+    # Allow some overhead for dynamic quant, but should still be significantly better
+    assert peak_mem_ratio > 1.5, (
+        f"FP8 peak memory should be significantly smaller than BF16, got {peak_mem_ratio:.2f}x. "
+        f"This may indicate unnecessary intermediate buffers during dynamic quantization."
+    )
+
+    # FP8 peak should not be much higher than FP8 final (no large temp buffers)
+    fp8_peak_overhead = fp8_peak_mem / fp8_total_mem
+    print(f"[FP8 Per-Tensor] Peak/Final memory ratio: {fp8_peak_overhead:.2f}x")
+
+    # Peak should stay close to the final allocation (< 2x overhead during loading)
+    assert fp8_peak_overhead < 2.0, (
+        f"FP8 peak memory ({fp8_peak_mem:.2f} GB) is too high compared to final "
+        f"({fp8_total_mem:.2f} GB). Ratio: {fp8_peak_overhead:.2f}x. "
+        f"This suggests unnecessary buffer allocation during dynamic quantization."
+    )
+
+    # Cleanup
+    del pipeline_fp8
+    torch.cuda.empty_cache()
+
+    # =========================================================================
+    # Test 3: Load FP8 Blockwise (dynamic quantization with block scales)
+    # =========================================================================
+    torch.cuda.reset_peak_memory_stats()
+
+    args_fp8_block = DiffusionArgs(
+        checkpoint_path=CHECKPOINT_PATH,
+        quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True},
+        skip_components=SKIP_HEAVY_COMPONENTS,
+    )
+    pipeline_fp8_block = PipelineLoader(args_fp8_block).load()
+
+    fp8_block_model_mem = _get_module_memory_gb(pipeline_fp8_block.transformer)
+    fp8_block_total_mem = _get_cuda_memory_gb()
+    fp8_block_peak_mem = _get_cuda_peak_memory_gb()
+
+    print(f"\n[FP8 Blockwise] Transformer model memory: {fp8_block_model_mem:.2f} GB")
+    print(f"[FP8 Blockwise] Total CUDA memory: {fp8_block_total_mem:.2f} GB")
+    print(f"[FP8 Blockwise] Peak CUDA memory: {fp8_block_peak_mem:.2f} GB")
+
+    # =========================================================================
+    # Verify FP8 Blockwise memory savings
+    # =========================================================================
+    block_model_mem_ratio = bf16_model_mem / fp8_block_model_mem
+    block_peak_mem_ratio = bf16_peak_mem / fp8_block_peak_mem
+
+    print(f"\n[Comparison] Model memory ratio (BF16/FP8-Block): {block_model_mem_ratio:.2f}x")
+    print(f"[Comparison] Peak memory ratio (BF16/FP8-Block): {block_peak_mem_ratio:.2f}x")
+
+    # FP8 Blockwise has additional scale tensors, so slightly less than 2x savings
+    # But should still be significantly better than BF16
+    assert block_model_mem_ratio > 1.5, (
+        f"FP8 Blockwise model memory should be significantly smaller than BF16, got {block_model_mem_ratio:.2f}x"
+    )
+
+    # Peak memory check
+    assert block_peak_mem_ratio > 1.3, (
+        f"FP8 Blockwise peak memory should be smaller than BF16, got {block_peak_mem_ratio:.2f}x"
+    )
+
+    fp8_block_peak_overhead = fp8_block_peak_mem / fp8_block_total_mem
+    print(f"[FP8 Blockwise] Peak/Final memory ratio: {fp8_block_peak_overhead:.2f}x")
+
+    assert fp8_block_peak_overhead < 2.0, (
+        f"FP8 Blockwise peak memory ({fp8_block_peak_mem:.2f} GB) is too high compared to final "
+        f"({fp8_block_total_mem:.2f} GB). Ratio: {fp8_block_peak_overhead:.2f}x."
+ ) + + # Cleanup + del pipeline_fp8_block + torch.cuda.empty_cache() + + # ========================================================================= + # Summary + # ========================================================================= + print("\n" + "=" * 60) + print("SUMMARY") + print("=" * 60) + print(f"BF16: {bf16_model_mem:.2f} GB model, {bf16_peak_mem:.2f} GB peak") + print( + f"FP8 Per-Tensor: {fp8_model_mem:.2f} GB model, {fp8_peak_mem:.2f} GB peak " + f"({model_mem_ratio:.2f}x savings)" + ) + print( + f"FP8 Blockwise: {fp8_block_model_mem:.2f} GB model, {fp8_block_peak_mem:.2f} GB peak " + f"({block_model_mem_ratio:.2f}x savings)" + ) diff --git a/tests/unittest/_torch/visual_gen/test_quant_ops.py b/tests/unittest/_torch/visual_gen/test_quant_ops.py new file mode 100644 index 0000000000..5b141ee74d --- /dev/null +++ b/tests/unittest/_torch/visual_gen/test_quant_ops.py @@ -0,0 +1,120 @@ +"""Unit tests for diffusion quantization operations.""" + +import unittest + +import torch + +from tensorrt_llm._torch.visual_gen.quantization.ops import ( + quantize_fp8_blockwise, + quantize_fp8_per_tensor, +) + + +def _dequant_fp8_per_tensor(qweight, scale): + """Dequantize per-tensor FP8 weight.""" + return qweight.to(torch.float32) * scale + + +class TestQuantOps(unittest.TestCase): + """Test quantization operations.""" + + def setUp(self): + """Set random seed for reproducibility.""" + torch.manual_seed(42) + if not torch.cuda.is_available(): + self.skipTest("CUDA not available") + + def test_fp8_per_tensor(self): + """Test FP8 per-tensor quantization using CUDA kernel.""" + weight = torch.randn(256, 512, dtype=torch.bfloat16, device="cuda") + qweight, scale = quantize_fp8_per_tensor(weight) + + # Check output types + self.assertEqual(qweight.dtype, torch.float8_e4m3fn) + self.assertEqual(qweight.shape, weight.shape) + self.assertEqual(scale.dtype, torch.float32) + self.assertEqual(scale.shape, (1, 1)) + + # Verify dequantization (approximate) + dequant = _dequant_fp8_per_tensor(qweight, scale) + error = (dequant - weight.to(torch.float32)).abs().mean() + self.assertLess(error, 0.15) # Reasonable quantization error + + def test_fp8_per_tensor_different_shapes(self): + """Test FP8 per-tensor quantization with various shapes.""" + shapes = [(128, 256), (256, 512), (512, 1024), (1024, 2048)] + for shape in shapes: + with self.subTest(shape=shape): + weight = torch.randn(shape, dtype=torch.bfloat16, device="cuda") + qweight, scale = quantize_fp8_per_tensor(weight) + + self.assertEqual(qweight.dtype, torch.float8_e4m3fn) + self.assertEqual(qweight.shape, weight.shape) + self.assertEqual(scale.dtype, torch.float32) + + def test_fp8_blockwise(self): + """Test FP8 128x128 blockwise quantization.""" + weight = torch.randn(512, 512, dtype=torch.bfloat16, device="cuda") + block_size = 128 + qweight, scales = quantize_fp8_blockwise(weight, block_size=block_size) + + # Check output types + self.assertEqual(qweight.dtype, torch.float8_e4m3fn) + self.assertEqual(qweight.shape, weight.shape) + self.assertEqual(scales.dtype, torch.float32) + + # Check scales shape: (num_blocks_out, num_blocks_in) for 128x128 blocks + num_blocks_out = (512 + block_size - 1) // block_size # 4 + num_blocks_in = (512 + block_size - 1) // block_size # 4 + self.assertEqual(scales.shape, (num_blocks_out, num_blocks_in)) + + def test_fp8_blockwise_non_divisible(self): + """Test FP8 blockwise quantization with non-divisible dimensions.""" + weight = torch.randn(300, 500, dtype=torch.bfloat16, device="cuda") + block_size = 128 + 
qweight, scales = quantize_fp8_blockwise(weight, block_size=block_size) + + # Check output types + self.assertEqual(qweight.dtype, torch.float8_e4m3fn) + self.assertEqual(qweight.shape, weight.shape) + + # Check scales shape (should handle non-divisible dimensions) + num_blocks_out = (300 + block_size - 1) // block_size # 3 + num_blocks_in = (500 + block_size - 1) // block_size # 4 + self.assertEqual(scales.shape, (num_blocks_out, num_blocks_in)) + + def test_fp8_blockwise_different_block_sizes(self): + """Test FP8 blockwise quantization with different block sizes.""" + weight = torch.randn(256, 256, dtype=torch.bfloat16, device="cuda") + + for block_size in [64, 128, 256]: + with self.subTest(block_size=block_size): + qweight, scales = quantize_fp8_blockwise(weight, block_size=block_size) + + self.assertEqual(qweight.dtype, torch.float8_e4m3fn) + self.assertEqual(qweight.shape, weight.shape) + + num_blocks = (256 + block_size - 1) // block_size + self.assertEqual(scales.shape, (num_blocks, num_blocks)) + + def test_fp8_per_tensor_zero_weight(self): + """Test FP8 per-tensor quantization with zero weight.""" + weight = torch.zeros(128, 256, dtype=torch.bfloat16, device="cuda") + qweight, scale = quantize_fp8_per_tensor(weight) + + # Should handle zero weights gracefully + self.assertEqual(qweight.dtype, torch.float8_e4m3fn) + self.assertTrue(torch.all(qweight.to(torch.float32) == 0)) + + def test_fp8_blockwise_zero_weight(self): + """Test FP8 blockwise quantization with zero weight.""" + weight = torch.zeros(256, 256, dtype=torch.bfloat16, device="cuda") + qweight, scales = quantize_fp8_blockwise(weight, block_size=128) + + # Should handle zero weights gracefully + self.assertEqual(qweight.dtype, torch.float8_e4m3fn) + self.assertTrue(torch.all(qweight.to(torch.float32) == 0)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/unittest/_torch/visual_gen/test_trtllm_serve_e2e.py b/tests/unittest/_torch/visual_gen/test_trtllm_serve_e2e.py new file mode 100644 index 0000000000..c785fe8bae --- /dev/null +++ b/tests/unittest/_torch/visual_gen/test_trtllm_serve_e2e.py @@ -0,0 +1,398 @@ +"""End-to-end tests for trtllm-serve visual_gen with real models. + +Tests text-to-video (t2v) and text+image-to-video (ti2v) generation through +the full ``trtllm-serve`` stack backed by real VisualGen models. + +The server is launched as a subprocess (same pattern as +``tests/unittest/llmapi/apps/openai_server.py``), so each test class gets an +isolated ``trtllm-serve`` process. 
+
+Usage::
+
+    # Run all real-model tests (requires GPU + models under LLM_MODELS_ROOT)
+    pytest tests/unittest/_torch/visual_gen/test_trtllm_serve_e2e.py -v
+
+    # Run only t2v tests
+    pytest tests/unittest/_torch/visual_gen/test_trtllm_serve_e2e.py -v -k TestWanTextToVideo
+
+    # Run only ti2v tests
+    pytest tests/unittest/_torch/visual_gen/test_trtllm_serve_e2e.py -v -k TestWanImageToVideo
+"""
+
+import os
+import subprocess
+import sys
+import tempfile
+import time
+from pathlib import Path
+from typing import List, Optional
+
+import pytest
+import requests
+import yaml
+
+from tensorrt_llm._utils import get_free_port
+
+# ---------------------------------------------------------------------------
+# Model paths
+# ---------------------------------------------------------------------------
+
+
+def _llm_models_root() -> str:
+    """Return the LLM models root directory.
+
+    Uses LLM_MODELS_ROOT from the environment if set, otherwise falls back to
+    known CI scratch paths; asserts if no candidate path exists.
+    """
+    root = Path("/home/scratch.trt_llm_data_ci/llm-models/")
+    if "LLM_MODELS_ROOT" in os.environ:
+        root = Path(os.environ["LLM_MODELS_ROOT"])
+    if not root.exists():
+        root = Path("/scratch.trt_llm_data/llm-models/")
+    assert root.exists(), (
+        "Set the LLM_MODELS_ROOT env var or make scratch.trt_llm_data accessible to run this test"
+    )
+    return str(root)
+
+
+_WAN_T2V_PATH = Path(_llm_models_root()) / "Wan2.1-T2V-1.3B-Diffusers"
+_WAN_I2V_PATH = Path(_llm_models_root()) / "Wan2.2-I2V-A14B-Diffusers"
+
+# Reference image used for image-to-video (ti2v) tests
+_PROJECT_ROOT = Path(__file__).resolve().parents[4]  # repo root
+_REF_IMAGE_PATH = _PROJECT_ROOT / "examples" / "visual_gen" / "cat_piano.png"
+
+
+# ---------------------------------------------------------------------------
+# Remote server helper (follows RemoteOpenAIServer pattern)
+# ---------------------------------------------------------------------------
+
+
+class RemoteVisualGenServer:
+    """Launch ``trtllm-serve`` for a visual-gen model as a subprocess.
+
+    Mirrors the interface of ``tests.unittest.llmapi.apps.openai_server.RemoteOpenAIServer``,
+    adapted for diffusion / visual-gen models.
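+
+    A minimal usage sketch (the model path and options below are illustrative,
+    not defaults):
+
+        with RemoteVisualGenServer(
+            model="/models/Wan2.1-T2V-1.3B-Diffusers",
+            extra_visual_gen_options={"linear": {"type": "default"}},
+        ) as server:
+            requests.get(server.url_for("health"))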
+ """ + + MAX_SERVER_START_WAIT_S = 1200 # 20 min – large models need time to load + + def __init__( + self, + model: str, + extra_visual_gen_options: Optional[dict] = None, + cli_args: Optional[List[str]] = None, + host: str = "localhost", + port: Optional[int] = None, + env: Optional[dict] = None, + ) -> None: + self.host = host + self.port = port if port is not None else get_free_port() + self._config_file: Optional[str] = None + self.proc: Optional[subprocess.Popen] = None + + args = ["--host", self.host, "--port", str(self.port)] + if cli_args: + args += cli_args + + # Write the visual-gen YAML config to a temp file + if extra_visual_gen_options: + fd, self._config_file = tempfile.mkstemp(suffix=".yml", prefix="vg_cfg_") + with os.fdopen(fd, "w") as f: + yaml.dump(extra_visual_gen_options, f) + args += ["--extra_visual_gen_options", self._config_file] + + launch_cmd = ["trtllm-serve", model] + args + + if env is None: + env = os.environ.copy() + + self.proc = subprocess.Popen( + launch_cmd, + env=env, + stdout=sys.stdout, + stderr=sys.stderr, + ) + self._wait_for_server(timeout=self.MAX_SERVER_START_WAIT_S) + + # -- lifecycle --------------------------------------------------------- + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.terminate() + + def terminate(self): + if self.proc is None: + return + self.proc.terminate() + try: + self.proc.wait(timeout=30) + except subprocess.TimeoutExpired: + self.proc.kill() + self.proc.wait(timeout=30) + self.proc = None + + if self._config_file: + try: + os.remove(self._config_file) + except OSError: + pass + self._config_file = None + + # -- readiness --------------------------------------------------------- + + def _wait_for_server(self, timeout: float): + url = self.url_for("health") + start = time.time() + while True: + try: + if requests.get(url).status_code == 200: + return + except Exception as err: + result = self.proc.poll() + if result is not None and result != 0: + raise RuntimeError("Visual-gen server exited unexpectedly.") from err + time.sleep(1) + if time.time() - start > timeout: + self.terminate() + raise RuntimeError(f"Visual-gen server failed to start within {timeout}s.") + + # -- URL helpers ------------------------------------------------------- + + @property + def url_root(self) -> str: + return f"http://{self.host}:{self.port}" + + def url_for(self, *parts: str) -> str: + return self.url_root + "/" + "/".join(parts) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _model_available(path: Path) -> bool: + return path.is_dir() + + +def _av_available() -> bool: + """Check if PyAV is installed (required for video encoding in E2E tests).""" + try: + import av # noqa: F401 + + return True + except ImportError: + return False + + +def _make_visual_gen_options(**extra) -> dict: + """Build the YAML dict passed via ``--extra_visual_gen_options``.""" + config = { + "linear": {"type": "default"}, + "parallel": {"dit_cfg_size": 1, "dit_ulysses_size": 1}, + } + config.update(extra) + return config + + +# ========================================================================= +# WAN 2.1 – Text-to-Video (t2v) +# ========================================================================= + + +@pytest.mark.skipif( + not _model_available(_WAN_T2V_PATH), reason=f"Wan2.1-T2V model not found at {_WAN_T2V_PATH}" +) +@pytest.mark.skipif( + not _av_available(), 
reason="PyAV (av) not installed — required for video encoding in E2E tests" +) +class TestWanTextToVideo: + """Test Wan2.1-T2V-1.3B-Diffusers text-to-video generation via serve API.""" + + @pytest.fixture(scope="class") + def server(self): + with RemoteVisualGenServer( + model=str(_WAN_T2V_PATH), + extra_visual_gen_options=_make_visual_gen_options(), + ) as srv: + yield srv + + # ------------------------------------------------------------------ + + def test_health(self, server): + resp = requests.get(server.url_for("health")) + assert resp.status_code == 200 + + def test_t2v_sync(self, server): + """Synchronous text-to-video via POST /v1/videos/generations.""" + resp = requests.post( + server.url_for("v1", "videos", "generations"), + json={ + "prompt": "A cute cat playing piano", + "size": "480x320", + "seconds": 1.0, + "fps": 8, + "num_inference_steps": 4, + "seed": 42, + }, + ) + assert resp.status_code == 200, resp.text + assert resp.headers["content-type"] == "video/mp4" + assert len(resp.content) > 1000, "Video file too small" + + def test_t2v_async_lifecycle(self, server): + """Async video generation: create job → poll → download → delete.""" + base = server.url_for("v1", "videos") + + # 1. Create job + create_resp = requests.post( + base, + json={ + "prompt": "A rocket launching into a starry sky", + "size": "480x320", + "seconds": 1.0, + "fps": 8, + "num_inference_steps": 4, + "seed": 42, + }, + ) + assert create_resp.status_code == 202, create_resp.text + job = create_resp.json() + video_id = job["id"] + assert video_id.startswith("video_") + assert job["status"] == "queued" + + # 2. Poll until completed (or timeout) + deadline = time.time() + 600 # 10 min + status = "queued" + while status not in ("completed", "failed") and time.time() < deadline: + time.sleep(2) + meta_resp = requests.get(f"{base}/{video_id}") + assert meta_resp.status_code == 200 + status = meta_resp.json()["status"] + + assert status == "completed", f"Video generation did not complete: {status}" + + # 3. Download video content + content_resp = requests.get(f"{base}/{video_id}/content") + assert content_resp.status_code == 200 + assert "video/mp4" in content_resp.headers.get("content-type", "") + assert len(content_resp.content) > 1000 + + # 4. Verify it appears in list + list_resp = requests.get(base) + assert list_resp.status_code == 200 + ids = [v["id"] for v in list_resp.json()["data"]] + assert video_id in ids + + # 5. Delete + del_resp = requests.delete(f"{base}/{video_id}") + assert del_resp.status_code == 200 + assert del_resp.json()["deleted"] is True + + # 6. 
Confirm gone + gone_resp = requests.get(f"{base}/{video_id}") + assert gone_resp.status_code == 404 + + +# ========================================================================= +# WAN 2.2 – Image-to-Video (ti2v) +# ========================================================================= + + +@pytest.mark.skipif( + not _model_available(_WAN_I2V_PATH), reason=f"Wan2.2-I2V model not found at {_WAN_I2V_PATH}" +) +@pytest.mark.skipif( + not _REF_IMAGE_PATH.is_file(), reason=f"Reference image not found at {_REF_IMAGE_PATH}" +) +@pytest.mark.skipif( + not _av_available(), reason="PyAV (av) not installed — required for video encoding in E2E tests" +) +class TestWanImageToVideo: + """Test Wan2.2-I2V-A14B-Diffusers image-to-video generation via serve API.""" + + @pytest.fixture(scope="class") + def server(self): + with RemoteVisualGenServer( + model=str(_WAN_I2V_PATH), + extra_visual_gen_options=_make_visual_gen_options(), + ) as srv: + yield srv + + # ------------------------------------------------------------------ + + def test_health(self, server): + resp = requests.get(server.url_for("health")) + assert resp.status_code == 200 + + def test_ti2v_sync(self, server): + """Synchronous image-to-video via multipart POST /v1/videos/generations.""" + with open(_REF_IMAGE_PATH, "rb") as f: + resp = requests.post( + server.url_for("v1", "videos", "generations"), + data={ + "prompt": "The cat starts playing piano, keys moving", + "size": "480x320", + "seconds": "1.0", + "fps": "8", + "num_inference_steps": "4", + "seed": "42", + }, + files={ + "input_reference": ("cat_piano.png", f, "image/png"), + }, + ) + assert resp.status_code == 200, resp.text + assert resp.headers["content-type"] == "video/mp4" + assert len(resp.content) > 1000, "Video file too small" + + def test_ti2v_async_lifecycle(self, server): + """Async i2v: create job with image → poll → download → delete.""" + base = server.url_for("v1", "videos") + + # 1. Create job via multipart + with open(_REF_IMAGE_PATH, "rb") as f: + create_resp = requests.post( + base, + data={ + "prompt": "Snow falls on the piano and the cat", + "size": "480x320", + "seconds": "1.0", + "fps": "8", + "num_inference_steps": "4", + "seed": "42", + }, + files={ + "input_reference": ("cat_piano.png", f, "image/png"), + }, + ) + assert create_resp.status_code == 202, create_resp.text + job = create_resp.json() + video_id = job["id"] + assert job["status"] == "queued" + + # 2. Poll until completed + deadline = time.time() + 600 + status = "queued" + while status not in ("completed", "failed") and time.time() < deadline: + time.sleep(2) + meta_resp = requests.get(f"{base}/{video_id}") + assert meta_resp.status_code == 200 + status = meta_resp.json()["status"] + + assert status == "completed", f"Video generation did not complete: {status}" + + # 3. Download + content_resp = requests.get(f"{base}/{video_id}/content") + assert content_resp.status_code == 200 + assert "video/mp4" in content_resp.headers.get("content-type", "") + assert len(content_resp.content) > 1000 + + # 4. Delete + del_resp = requests.delete(f"{base}/{video_id}") + assert del_resp.status_code == 200 + assert del_resp.json()["deleted"] is True + + # 5. 
Confirm gone + gone_resp = requests.get(f"{base}/{video_id}") + assert gone_resp.status_code == 404 diff --git a/tests/unittest/_torch/visual_gen/test_trtllm_serve_endpoints.py b/tests/unittest/_torch/visual_gen/test_trtllm_serve_endpoints.py new file mode 100644 index 0000000000..a66e742447 --- /dev/null +++ b/tests/unittest/_torch/visual_gen/test_trtllm_serve_endpoints.py @@ -0,0 +1,876 @@ +"""trtllm-serve visual_gen endpoints tests. + +Tests all endpoints registered for the VISUAL_GEN server role +in OpenAIServer.register_visual_gen_routes(): + + POST /v1/images/generations + POST /v1/images/edits + POST /v1/videos/generations (sync) + POST /v1/videos (async) + GET /v1/videos (list) + GET /v1/videos/{video_id} (metadata) + GET /v1/videos/{video_id}/content (download) + DELETE /v1/videos/{video_id} (delete) +""" + +import asyncio +import base64 +import os +from io import BytesIO +from typing import Optional +from unittest.mock import patch + +import pytest +import torch +from fastapi.testclient import TestClient +from PIL import Image + +from tensorrt_llm._torch.visual_gen.output import MediaOutput +from tensorrt_llm.serve.media_storage import MediaStorage +from tensorrt_llm.serve.openai_protocol import VideoJob +from tensorrt_llm.serve.visual_gen_utils import VIDEO_STORE + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_dummy_image_tensor(height: int = 64, width: int = 64) -> torch.Tensor: + """Create a small dummy uint8 image tensor (H, W, C).""" + return torch.randint(0, 256, (height, width, 3), dtype=torch.uint8) + + +def _make_dummy_video_tensor( + num_frames: int = 4, height: int = 64, width: int = 64 +) -> torch.Tensor: + """Create a small dummy uint8 video tensor (T, H, W, C).""" + return torch.randint(0, 256, (num_frames, height, width, 3), dtype=torch.uint8) + + +def _make_dummy_audio_tensor(length: int = 16000) -> torch.Tensor: + """Create a small dummy float32 audio tensor.""" + return torch.randn(1, length, dtype=torch.float32) + + +def _b64_white_png_1x1() -> str: + """Return a base64-encoded 1x1 white PNG for image edit tests.""" + buf = BytesIO() + Image.new("RGB", (1, 1), (255, 255, 255)).save(buf, format="PNG") + return base64.b64encode(buf.getvalue()).decode("utf-8") + + +def _run_async(coro): + """Run an async coroutine in a new event loop (for test helpers).""" + loop = asyncio.new_event_loop() + try: + return loop.run_until_complete(coro) + finally: + loop.close() + + +# --------------------------------------------------------------------------- +# Mock VisualGen +# --------------------------------------------------------------------------- + + +class MockVisualGen: + """Lightweight stand-in for VisualGen that avoids GPU / model loading.""" + + def __init__( + self, + image_output: Optional[torch.Tensor] = None, + video_output: Optional[torch.Tensor] = None, + audio_output: Optional[torch.Tensor] = None, + should_fail: bool = False, + ): + self._image = image_output + self._video = video_output + self._audio = audio_output + self._should_fail = should_fail + self._healthy = True + self.req_counter = 0 + + # --- VisualGen interface --- + + def generate(self, inputs=None, params=None) -> MediaOutput: + if self._should_fail: + raise RuntimeError("Generation intentionally failed") + return MediaOutput( + image=self._image, + video=self._video, + audio=self._audio, + ) + + def generate_async(self, inputs=None, params=None) -> 
"MockDiffusionGenerationResult": + return MockDiffusionGenerationResult( + image=self._image, + video=self._video, + audio=self._audio, + should_fail=self._should_fail, + ) + + def _check_health(self) -> bool: + return self._healthy + + async def get_stats_async(self, timeout: int): + return + + def shutdown(self): + pass + + +class MockDiffusionGenerationResult: + """Mock future-like result for generate_async.""" + + def __init__( + self, + image: Optional[torch.Tensor] = None, + video: Optional[torch.Tensor] = None, + audio: Optional[torch.Tensor] = None, + should_fail: bool = False, + ): + self._image = image + self._video = video + self._audio = audio + self._should_fail = should_fail + + async def result(self, timeout=None): + if self._should_fail: + raise RuntimeError("Async generation intentionally failed") + return MediaOutput( + image=self._image, + video=self._video, + audio=self._audio, + ) + + +# --------------------------------------------------------------------------- +# Server factory +# --------------------------------------------------------------------------- + + +def _create_server(generator: MockVisualGen, model_name: str = "test-model") -> TestClient: + """Instantiate an OpenAIServer for VISUAL_GEN with a mocked generator. + + We patch the ``VisualGen`` name inside the ``openai_server`` module so that + ``isinstance(generator, VisualGen)`` returns True for our mock. + """ + from tensorrt_llm.llmapi.disagg_utils import ServerRole + from tensorrt_llm.serve.openai_server import OpenAIServer + + with patch("tensorrt_llm.serve.openai_server.VisualGen", MockVisualGen): + server = OpenAIServer( + generator=generator, + model=model_name, + tool_parser=None, + server_role=ServerRole.VISUAL_GEN, + metadata_server_cfg=None, + ) + return TestClient(server.app) + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture() +def image_client(tmp_path): + """TestClient backed by a MockVisualGen that produces images.""" + gen = MockVisualGen(image_output=_make_dummy_image_tensor()) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + yield client + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + +@pytest.fixture() +def video_client(tmp_path): + """TestClient backed by a MockVisualGen that produces videos.""" + gen = MockVisualGen(video_output=_make_dummy_video_tensor()) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + yield client + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + +@pytest.fixture() +def video_audio_client(tmp_path): + """TestClient backed by a MockVisualGen that produces videos with audio.""" + gen = MockVisualGen( + video_output=_make_dummy_video_tensor(), + audio_output=_make_dummy_audio_tensor(), + ) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + yield client + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + +@pytest.fixture() +def failing_client(tmp_path): + """TestClient backed by a MockVisualGen that always fails.""" + gen = MockVisualGen(should_fail=True) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + yield client + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + +@pytest.fixture(autouse=True) +def _clear_video_store(): + """Reset the global VIDEO_STORE before each test.""" + VIDEO_STORE._items.clear() + yield + VIDEO_STORE._items.clear() + 
+ +@pytest.fixture(autouse=True) +def _mock_video_encoding(): + """Mock MP4 encoding to avoid PyAV dependency in unit tests. + + Replaces MediaStorage._save_mp4 with a stub that writes a small + dummy file so FileResponse can serve it. + """ + + def _dummy_save_mp4(video, audio, output_path, frame_rate): + os.makedirs(os.path.dirname(str(output_path)) or ".", exist_ok=True) + with open(str(output_path), "wb") as f: + f.write(b"\x00\x00\x00\x1cftypisom" + b"\x00" * 32) + return str(output_path) + + with patch.object(MediaStorage, "_save_mp4", staticmethod(_dummy_save_mp4)): + yield + + +# ========================================================================= +# POST /v1/images/generations +# ========================================================================= + + +class TestImageGeneration: + def test_basic_image_generation_b64(self, image_client): + resp = image_client.post( + "/v1/images/generations", + json={ + "prompt": "A cat sitting on a mat", + "response_format": "b64_json", + "size": "64x64", + }, + ) + assert resp.status_code == 200 + data = resp.json() + assert "data" in data + assert len(data["data"]) >= 1 + img_obj = data["data"][0] + assert img_obj["b64_json"] is not None + # Verify it decodes to valid bytes + decoded = base64.b64decode(img_obj["b64_json"]) + assert len(decoded) > 0 + assert img_obj["revised_prompt"] == "A cat sitting on a mat" + + def test_image_generation_with_optional_params(self, image_client): + resp = image_client.post( + "/v1/images/generations", + json={ + "prompt": "Sunset over ocean", + "response_format": "b64_json", + "size": "128x64", + "num_inference_steps": 20, + "guidance_scale": 7.5, + "seed": 123, + "negative_prompt": "blurry", + }, + ) + assert resp.status_code == 200 + data = resp.json() + assert data["size"] == "128x64" + + def test_image_generation_url_format_not_supported(self, image_client): + resp = image_client.post( + "/v1/images/generations", + json={ + "prompt": "A dog", + "response_format": "url", + }, + ) + assert resp.status_code == 501 + + def test_image_generation_auto_size(self, image_client): + resp = image_client.post( + "/v1/images/generations", + json={ + "prompt": "A tree", + "response_format": "b64_json", + "size": "auto", + }, + ) + assert resp.status_code == 200 + + def test_image_generation_failure(self, failing_client): + resp = failing_client.post( + "/v1/images/generations", + json={ + "prompt": "A bird", + "response_format": "b64_json", + }, + ) + assert resp.status_code == 400 + + def test_image_generation_invalid_size(self, image_client): + """Invalid size triggers RequestValidationError → custom handler → 400.""" + resp = image_client.post( + "/v1/images/generations", + json={ + "prompt": "A mountain", + "response_format": "b64_json", + "size": "invalid", + }, + ) + assert resp.status_code == 400 + + def test_image_generation_null_output(self, tmp_path): + """Generator returns MediaOutput with image=None.""" + gen = MockVisualGen(image_output=None) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + resp = client.post( + "/v1/images/generations", + json={ + "prompt": "null image", + "response_format": "b64_json", + }, + ) + assert resp.status_code == 500 + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + def test_image_generation_multiple_n(self, image_client): + """Request n=2 images in one call.""" + resp = image_client.post( + "/v1/images/generations", + json={ + "prompt": "Flowers", + "response_format": "b64_json", + "size": "64x64", + "n": 2, + }, + ) + 
assert resp.status_code == 200 + + def test_image_generation_hd_quality(self, image_client): + resp = image_client.post( + "/v1/images/generations", + json={ + "prompt": "HD landscape", + "response_format": "b64_json", + "quality": "hd", + }, + ) + assert resp.status_code == 200 + + def test_missing_prompt_image_generation(self, image_client): + """Missing required field → RequestValidationError → custom handler → 400.""" + resp = image_client.post( + "/v1/images/generations", + json={}, + ) + assert resp.status_code == 400 + + +# ========================================================================= +# POST /v1/images/edits +# ========================================================================= + + +class TestImageEdit: + def test_basic_image_edit(self, image_client): + b64_img = _b64_white_png_1x1() + resp = image_client.post( + "/v1/images/edits", + json={ + "image": b64_img, + "prompt": "Make it blue", + "num_inference_steps": 10, + }, + ) + assert resp.status_code == 200 + data = resp.json() + assert "data" in data + assert len(data["data"]) >= 1 + assert data["data"][0]["b64_json"] is not None + + def test_image_edit_with_list_images(self, image_client): + b64_img = _b64_white_png_1x1() + resp = image_client.post( + "/v1/images/edits", + json={ + "image": [b64_img, b64_img], + "prompt": "Merge them", + "num_inference_steps": 10, + }, + ) + assert resp.status_code == 200 + + def test_image_edit_with_mask(self, image_client): + b64_img = _b64_white_png_1x1() + b64_mask = _b64_white_png_1x1() + resp = image_client.post( + "/v1/images/edits", + json={ + "image": b64_img, + "prompt": "Remove object", + "mask": b64_mask, + "num_inference_steps": 10, + }, + ) + assert resp.status_code == 200 + + def test_image_edit_with_optional_params(self, image_client): + b64_img = _b64_white_png_1x1() + resp = image_client.post( + "/v1/images/edits", + json={ + "image": b64_img, + "prompt": "Enhance colors", + "size": "128x128", + "guidance_scale": 8.0, + "num_inference_steps": 15, + "seed": 42, + "negative_prompt": "dark", + }, + ) + assert resp.status_code == 200 + data = resp.json() + assert data["size"] == "128x128" + + def test_image_edit_failure(self, failing_client): + b64_img = _b64_white_png_1x1() + resp = failing_client.post( + "/v1/images/edits", + json={ + "image": b64_img, + "prompt": "Edit this", + "num_inference_steps": 10, + }, + ) + assert resp.status_code == 500 + + def test_missing_image_for_edit(self, image_client): + """Missing required field → RequestValidationError → custom handler → 400.""" + resp = image_client.post( + "/v1/images/edits", + json={ + "prompt": "Edit without image", + }, + ) + assert resp.status_code == 400 + + +# ========================================================================= +# POST /v1/videos/generations (synchronous) +# ========================================================================= + + +@pytest.mark.threadleak(enabled=False) # FileResponse spawns AnyIO worker threads +class TestVideoGenerationSync: + def test_basic_sync_video_generation(self, video_client): + resp = video_client.post( + "/v1/videos/generations", + json={ + "prompt": "A rocket launching", + "size": "64x64", + "seconds": 1.0, + "fps": 8, + }, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 200 + assert resp.headers["content-type"] == "video/mp4" + assert len(resp.content) > 0 + + def test_sync_video_generation_with_params(self, video_client): + resp = video_client.post( + "/v1/videos/generations", + json={ + "prompt": "Ocean waves", + 
"size": "64x64", + "seconds": 2.0, + "fps": 8, + "num_inference_steps": 10, + "guidance_scale": 5.0, + "seed": 42, + "negative_prompt": "blurry", + }, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 200 + assert len(resp.content) > 0 + + def test_sync_video_generation_multipart(self, video_client): + # Use files={} with a dummy file to ensure multipart/form-data + dummy_file = BytesIO(b"") + resp = video_client.post( + "/v1/videos/generations", + data={ + "prompt": "Mountain sunrise", + "size": "64x64", + "seconds": "1.0", + "fps": "8", + }, + files={"_dummy": ("dummy", dummy_file, "application/octet-stream")}, + ) + # The server will parse fields; _dummy is ignored since it's not "input_reference" + assert resp.status_code == 200 + assert len(resp.content) > 0 + + def test_sync_video_generation_multipart_with_reference(self, video_client, tmp_path): + # Create a dummy reference image file + ref_path = tmp_path / "ref.png" + Image.new("RGB", (4, 4), (128, 128, 128)).save(str(ref_path)) + + with open(ref_path, "rb") as f: + resp = video_client.post( + "/v1/videos/generations", + data={ + "prompt": "Animate this image", + "size": "64x64", + "seconds": "1.0", + "fps": "8", + }, + files={"input_reference": ("ref.png", f, "image/png")}, + ) + assert resp.status_code == 200 + assert len(resp.content) > 0 + + def test_sync_video_failure(self, failing_client): + resp = failing_client.post( + "/v1/videos/generations", + json={ + "prompt": "Should fail", + "size": "64x64", + "seconds": 1.0, + "fps": 8, + }, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 400 + + def test_sync_video_null_output(self, tmp_path): + """Generator returns MediaOutput with video=None.""" + gen = MockVisualGen(video_output=None) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + resp = client.post( + "/v1/videos/generations", + json={"prompt": "null video", "size": "64x64", "seconds": 1.0, "fps": 8}, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 500 + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + def test_sync_video_unsupported_content_type(self, video_client): + resp = video_client.post( + "/v1/videos/generations", + content=b"some raw bytes", + headers={"content-type": "text/plain"}, + ) + assert resp.status_code == 400 + + def test_sync_video_missing_prompt_json(self, video_client): + """Missing required prompt → Pydantic ValidationError → 400.""" + resp = video_client.post( + "/v1/videos/generations", + json={"size": "64x64"}, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 400 + + def test_sync_video_missing_prompt_multipart(self, video_client): + """Missing prompt in multipart form → ValueError → 400.""" + dummy_file = BytesIO(b"") + resp = video_client.post( + "/v1/videos/generations", + data={"size": "64x64"}, + files={"_dummy": ("dummy", dummy_file, "application/octet-stream")}, + ) + assert resp.status_code == 400 + + +# ========================================================================= +# POST /v1/videos (asynchronous) +# ========================================================================= + + +class TestVideoGenerationAsync: + def test_async_video_returns_202(self, video_client): + resp = video_client.post( + "/v1/videos", + json={ + "prompt": "A dancing robot", + "size": "64x64", + "seconds": 1.0, + "fps": 8, + }, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 202 + data = resp.json() + 
assert data["status"] == "queued" + assert data["object"] == "video" + assert data["prompt"] == "A dancing robot" + assert data["id"].startswith("video_") + + def test_async_video_job_metadata_fields(self, video_client): + resp = video_client.post( + "/v1/videos", + json={ + "prompt": "Starry night", + "size": "64x64", + "seconds": 2.0, + "fps": 12, + }, + headers={"content-type": "application/json"}, + ) + data = resp.json() + assert "created_at" in data + assert data["duration"] == 2.0 + assert data["fps"] == 12 + assert data["size"] == "64x64" + + def test_async_video_multipart(self, video_client): + """Multipart encoding requires a file field to trigger the correct content-type.""" + dummy_file = BytesIO(b"") + resp = video_client.post( + "/v1/videos", + data={ + "prompt": "A sunset", + "size": "64x64", + "seconds": "1.0", + "fps": "8", + }, + files={"_dummy": ("dummy", dummy_file, "application/octet-stream")}, + ) + assert resp.status_code == 202 + + def test_async_video_invalid_seconds(self, video_client): + """Seconds must be between 1.0 and 16.0. Validation error → 400.""" + resp = video_client.post( + "/v1/videos", + json={ + "prompt": "Too short", + "seconds": 0.1, + "size": "64x64", + "fps": 8, + }, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 400 + + def test_async_video_invalid_fps(self, video_client): + """Fps must be between 8 and 60. Validation error → 400.""" + resp = video_client.post( + "/v1/videos", + json={ + "prompt": "Bad fps", + "seconds": 1.0, + "fps": 2, + "size": "64x64", + }, + headers={"content-type": "application/json"}, + ) + assert resp.status_code == 400 + + +# ========================================================================= +# GET /v1/videos (list) +# ========================================================================= + + +class TestListVideos: + def test_list_videos_empty(self, video_client): + resp = video_client.get("/v1/videos") + assert resp.status_code == 200 + data = resp.json() + assert data["object"] == "list" + assert data["data"] == [] + + def test_list_videos_after_creation(self, video_client): + # Create two video jobs + video_client.post( + "/v1/videos", + json={"prompt": "First video", "size": "64x64", "seconds": 1.0, "fps": 8}, + headers={"content-type": "application/json"}, + ) + video_client.post( + "/v1/videos", + json={"prompt": "Second video", "size": "64x64", "seconds": 1.0, "fps": 8}, + headers={"content-type": "application/json"}, + ) + + resp = video_client.get("/v1/videos") + assert resp.status_code == 200 + data = resp.json() + assert len(data["data"]) == 2 + + +# ========================================================================= +# GET /v1/videos/{video_id} (metadata) +# ========================================================================= + + +class TestGetVideoMetadata: + def test_get_video_metadata_success(self, video_client): + create_resp = video_client.post( + "/v1/videos", + json={"prompt": "Space walk", "size": "64x64", "seconds": 1.0, "fps": 8}, + headers={"content-type": "application/json"}, + ) + video_id = create_resp.json()["id"] + + resp = video_client.get(f"/v1/videos/{video_id}") + assert resp.status_code == 200 + data = resp.json() + assert data["id"] == video_id + assert data["object"] == "video" + assert data["prompt"] == "Space walk" + + def test_get_video_metadata_not_found(self, video_client): + resp = video_client.get("/v1/videos/video_nonexistent") + assert resp.status_code == 404 + + +# 
========================================================================= +# GET /v1/videos/{video_id}/content (download) +# ========================================================================= + + +@pytest.mark.threadleak(enabled=False) # FileResponse spawns AnyIO worker threads +class TestGetVideoContent: + def _insert_video_job(self, video_id: str, status: str = "queued"): + import time as _time + + job = VideoJob( + created_at=int(_time.time()), + id=video_id, + model="test-model", + prompt="test prompt", + status=status, + ) + _run_async(VIDEO_STORE.upsert(video_id, job)) + + def test_get_video_content_success(self, tmp_path): + gen = MockVisualGen(video_output=_make_dummy_video_tensor()) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + + video_id = "video_testcontent" + self._insert_video_job(video_id, status="completed") + + # Write a dummy mp4 file so FileResponse can serve it + video_path = tmp_path / f"{video_id}.mp4" + video_path.write_bytes(b"\x00\x00\x00\x1cftyp" + b"\x00" * 16) + + resp = client.get(f"/v1/videos/{video_id}/content") + assert resp.status_code == 200 + assert "video/mp4" in resp.headers.get("content-type", "") + assert len(resp.content) > 0 + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + def test_get_video_content_not_found(self, video_client): + resp = video_client.get("/v1/videos/video_nonexistent/content") + assert resp.status_code == 404 + + def test_get_video_content_not_ready(self, tmp_path): + """A queued video should return 400 when its content is requested.""" + gen = MockVisualGen(video_output=_make_dummy_video_tensor()) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + + video_id = "video_notready" + self._insert_video_job(video_id, status="queued") + + resp = client.get(f"/v1/videos/{video_id}/content") + assert resp.status_code == 400 + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + def test_get_video_content_completed_but_file_missing(self, tmp_path): + """Video marked completed but file deleted from disk → 404.""" + gen = MockVisualGen(video_output=_make_dummy_video_tensor()) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + + video_id = "video_nofile" + self._insert_video_job(video_id, status="completed") + # Do NOT write a file + + resp = client.get(f"/v1/videos/{video_id}/content") + assert resp.status_code == 404 + os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None) + + +# ========================================================================= +# DELETE /v1/videos/{video_id} +# ========================================================================= + + +class TestDeleteVideo: + def test_delete_video_success(self, tmp_path): + gen = MockVisualGen(video_output=_make_dummy_video_tensor()) + os.environ["TRTLLM_MEDIA_STORAGE_PATH"] = str(tmp_path) + client = _create_server(gen) + + create_resp = client.post( + "/v1/videos", + json={"prompt": "Delete me", "size": "64x64", "seconds": 1.0, "fps": 8}, + headers={"content-type": "application/json"}, + ) + video_id = create_resp.json()["id"] + + # Write a dummy video file + (tmp_path / f"{video_id}.mp4").write_bytes(b"\x00" * 32) + + resp = client.delete(f"/v1/videos/{video_id}") + assert resp.status_code == 200 + data = resp.json() + assert data["deleted"] is True + + # Verify it's gone from the store + resp = client.get(f"/v1/videos/{video_id}") + assert resp.status_code == 404 + + # Verify file is deleted + assert not (tmp_path / f"{video_id}.mp4").exists() 
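+        # This test manages TRTLLM_MEDIA_STORAGE_PATH by hand rather than via
+        # the fixtures above, so restore the environment explicitly.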
+        os.environ.pop("TRTLLM_MEDIA_STORAGE_PATH", None)
+
+    def test_delete_video_not_found(self, video_client):
+        resp = video_client.delete("/v1/videos/video_nonexistent")
+        assert resp.status_code == 404
+
+    def test_delete_video_without_file_on_disk(self, video_client):
+        """Delete a video job that exists in the store but has no file on disk."""
+        create_resp = video_client.post(
+            "/v1/videos",
+            json={"prompt": "No file", "size": "64x64", "seconds": 1.0, "fps": 8},
+            headers={"content-type": "application/json"},
+        )
+        video_id = create_resp.json()["id"]
+
+        resp = video_client.delete(f"/v1/videos/{video_id}")
+        assert resp.status_code == 200
+        data = resp.json()
+        assert data["deleted"] is True
+
+    def test_delete_video_then_list_empty(self, video_client):
+        """After deleting the only video, the list should be empty."""
+        create_resp = video_client.post(
+            "/v1/videos",
+            json={"prompt": "Ephemeral", "size": "64x64", "seconds": 1.0, "fps": 8},
+            headers={"content-type": "application/json"},
+        )
+        video_id = create_resp.json()["id"]
+
+        video_client.delete(f"/v1/videos/{video_id}")
+
+        resp = video_client.get("/v1/videos")
+        assert resp.status_code == 200
+        assert resp.json()["data"] == []
diff --git a/tests/unittest/_torch/visual_gen/test_wan.py b/tests/unittest/_torch/visual_gen/test_wan.py
new file mode 100644
index 0000000000..998452cea2
--- /dev/null
+++ b/tests/unittest/_torch/visual_gen/test_wan.py
@@ -0,0 +1,3094 @@
+"""Comprehensive unit tests for the Wan model and pipeline."""
+
+import os
+
+os.environ["TLLM_DISABLE_MPI"] = "1"
+
+import unittest
+from copy import deepcopy
+from pathlib import Path
+from types import SimpleNamespace
+
+import pytest
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+import torch.nn.functional as F
+from diffusers import WanTransformer3DModel as HFWanTransformer3DModel
+from parameterized import parameterized
+
+from tensorrt_llm._torch.modules.linear import Linear
+from tensorrt_llm._torch.visual_gen.config import (
+    AttentionConfig,
+    DiffusionArgs,
+    DiffusionModelConfig,
+    ParallelConfig,
+    PipelineComponent,
+    TeaCacheConfig,
+)
+from tensorrt_llm._torch.visual_gen.models.wan.transformer_wan import WanTransformer3DModel
+from tensorrt_llm._torch.visual_gen.pipeline_loader import PipelineLoader
+from tensorrt_llm.models.modeling_utils import QuantConfig
+from tensorrt_llm.quantization.mode import QuantAlgo
+
+
+@pytest.fixture(autouse=True, scope="module")
+def _cleanup_mpi_env():
+    """Clean up TLLM_DISABLE_MPI env var after tests complete."""
+    yield
+    os.environ.pop("TLLM_DISABLE_MPI", None)
+
+
+def _llm_models_root() -> str:
+    """Return LLM_MODELS_ROOT from the env if set, else a CI scratch path; assert if no candidate exists."""
+    root = Path("/home/scratch.trt_llm_data_ci/llm-models/")
+    if "LLM_MODELS_ROOT" in os.environ:
+        root = Path(os.environ["LLM_MODELS_ROOT"])
+    if not root.exists():
+        root = Path("/scratch.trt_llm_data/llm-models/")
+    assert root.exists(), (
+        "Set LLM_MODELS_ROOT or ensure scratch.trt_llm_data is accessible to run this test"
+    )
+    return str(root)
+
+
+# Checkpoint paths for integration tests
+CHECKPOINT_PATH = os.environ.get(
+    "DIFFUSION_MODEL_PATH",
+    os.path.join(_llm_models_root(), "Wan2.1-T2V-1.3B-Diffusers"),
+)
+# Wan 2.2 TI2V-5B: BF16 base, FP8 pre-quantized, NVFP4 pre-quantized
+CHECKPOINT_PATH_WAN22_BF16 = os.environ.get(
+    "DIFFUSION_MODEL_PATH_WAN22_BF16",
+    os.path.join(_llm_models_root(), "Wan2.2-TI2V-5B-Diffusers"),
+)
+CHECKPOINT_PATH_WAN22_FP8 = os.environ.get(
+    "DIFFUSION_MODEL_PATH_WAN22_FP8",
+    os.path.join(_llm_models_root(), "Wan2.2-TI2V-5B-Diffusers-FP8"),
+)
+CHECKPOINT_PATH_WAN22_NVFP4 = os.environ.get(
+    "DIFFUSION_MODEL_PATH_WAN22_NVFP4",
+    os.path.join(_llm_models_root(), "Wan2.2-TI2V-5B-Diffusers-NVFP4"),
+)
+# Wan 2.2 T2V (two-stage transformer)
+CHECKPOINT_PATH_WAN22_T2V = os.environ.get(
+    "DIFFUSION_MODEL_PATH_WAN22_T2V",
+    os.path.join(_llm_models_root(), "Wan2.2-T2V-A14B-Diffusers"),
+)
+SKIP_COMPONENTS = [
+    PipelineComponent.TEXT_ENCODER,
+    PipelineComponent.VAE,
+    PipelineComponent.TOKENIZER,
+    PipelineComponent.SCHEDULER,
+]
+
+
+def is_wan21_checkpoint() -> bool:
+    """Check if DIFFUSION_MODEL_PATH is Wan 2.1 (contains '2.1' in path)."""
+    return "2.1" in CHECKPOINT_PATH
+
+
+def is_wan22_checkpoint() -> bool:
+    """Check if DIFFUSION_MODEL_PATH_WAN22_T2V is Wan 2.2 (contains '2.2' in path)."""
+    return "2.2" in CHECKPOINT_PATH_WAN22_T2V
+
+
+WAN_1_3B_CONFIG = {
+    "attention_head_dim": 128,
+    "eps": 1e-06,
+    "ffn_dim": 8960,
+    "freq_dim": 256,
+    "in_channels": 16,
+    "num_attention_heads": 12,
+    "num_layers": 30,
+    "out_channels": 16,
+    "patch_size": [1, 2, 2],
+    "qk_norm": "rms_norm_across_heads",
+    "rope_max_seq_len": 1024,
+    "text_dim": 4096,
+    "torch_dtype": "bfloat16",
+    "cross_attn_norm": True,
+}
+
+
+def reduce_wan_config(mem_for_full_model: int, config_dict: dict):
+    """Reduce model size if insufficient GPU memory."""
+    _, total_mem = torch.cuda.mem_get_info()
+    if total_mem < mem_for_full_model:
+        model_fraction = total_mem / mem_for_full_model
+        num_layers = max(1, int(config_dict["num_layers"] * model_fraction))
+        config_dict["num_layers"] = min(num_layers, 4)
+
+
+def setup_distributed(rank, world_size, backend="nccl"):
+    """Initialize distributed process group for multi-GPU tests."""
+    os.environ["MASTER_ADDR"] = "localhost"
+    os.environ["MASTER_PORT"] = "12355"
+    os.environ["RANK"] = str(rank)
+    os.environ["WORLD_SIZE"] = str(world_size)
+
+    dist.init_process_group(backend=backend, rank=rank, world_size=world_size)
+    torch.cuda.set_device(rank)
+
+
+def cleanup_distributed():
+    """Clean up distributed process group."""
+    if dist.is_initialized():
+        dist.destroy_process_group()
+
+
+def _run_cfg_worker(rank, world_size, checkpoint_path, inputs_list, return_dict):
+    """Worker function for CFG Parallelism multi-GPU test.
+
+    Must be module-level for multiprocessing.spawn() pickling.
+ """ + try: + setup_distributed(rank, world_size) + + from tensorrt_llm._torch.visual_gen.config import DiffusionArgs, ParallelConfig + from tensorrt_llm._torch.visual_gen.pipeline_loader import PipelineLoader + + # Load pipeline with CFG parallel + args = DiffusionArgs( + checkpoint_path=checkpoint_path, + device=f"cuda:{rank}", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + parallel=ParallelConfig(dit_cfg_size=world_size), + ) + pipeline = PipelineLoader(args).load() + + # Verify CFG parallel configuration + assert pipeline.model_config.parallel.dit_cfg_size == world_size, ( + f"Expected cfg_size={world_size}, got {pipeline.model_config.parallel.dit_cfg_size}" + ) + + # Load inputs on this GPU + prompt_embeds = inputs_list[0].to(f"cuda:{rank}") + neg_prompt_embeds = inputs_list[1].to(f"cuda:{rank}") + latents = inputs_list[2].to(f"cuda:{rank}") + timestep = inputs_list[3].to(f"cuda:{rank}") + + # Setup CFG config + cfg_config = pipeline._setup_cfg_config( + guidance_scale=5.0, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + ) + + # Verify CFG parallel is enabled + assert cfg_config["enabled"], f"Rank {rank}: CFG parallel not enabled" + assert cfg_config["cfg_size"] == world_size, f"Rank {rank}: Wrong cfg_size" + + expected_cfg_group = rank // cfg_config["ulysses_size"] + assert cfg_config["cfg_group"] == expected_cfg_group, ( + f"Rank {rank}: Wrong cfg_group. Expected {expected_cfg_group}, got {cfg_config['cfg_group']}" + ) + + if rank == 0: + print(f"[CFG Rank {rank}] Loaded with cfg_size={world_size}") + print(f" cfg_group: {cfg_config['cfg_group']}") + print(f" local_embeds shape: {cfg_config['local_embeds'].shape}") + print(f" Using {'positive' if cfg_config['cfg_group'] == 0 else 'negative'} prompts") + + # Verify prompt splitting - rank 0 gets positive, rank 1 gets negative + expected_embeds = prompt_embeds if cfg_config["cfg_group"] == 0 else neg_prompt_embeds + assert torch.allclose(cfg_config["local_embeds"], expected_embeds), ( + f"Rank {rank}: local_embeds doesn't match expected" + f"{'positive' if cfg_config['cfg_group'] == 0 else 'negative'} embeds" + ) + + # Run single denoising step with CFG parallel + def forward_fn( + latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors + ): + return pipeline.transformer( # noqa: F821 + hidden_states=latents, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + ) + + with torch.no_grad(): + noise_pred, _, _, _ = pipeline._denoise_step_cfg_parallel( + latents=latents, + extra_stream_latents={}, + timestep=timestep, + local_embeds=cfg_config["local_embeds"], + forward_fn=forward_fn, + guidance_scale=5.0, + guidance_rescale=0.0, + ulysses_size=cfg_config["ulysses_size"], + local_extras={}, + ) + + # Validate output + assert not torch.isnan(noise_pred).any(), f"Rank {rank}: Output contains NaN" + assert not torch.isinf(noise_pred).any(), f"Rank {rank}: Output contains Inf" + + # Return output from rank 0 + if rank == 0: + return_dict["output"] = noise_pred.cpu() + print(f"[CFG Rank {rank}] āœ“ Output shape: {noise_pred.shape}") + print( + f"[CFG Rank {rank}] āœ“ Output range: [{noise_pred.min():.4f}, {noise_pred.max():.4f}]" + ) + + del pipeline + torch.cuda.empty_cache() + + finally: + cleanup_distributed() + + +def _run_all_optimizations_worker(rank, world_size, checkpoint_path, inputs_list, return_dict): + """Worker function for all optimizations combined test (FP8 + TeaCache + TRTLLM + CFG). + + Must be module-level for multiprocessing.spawn() pickling. 
+ """ + try: + setup_distributed(rank, world_size) + + # Load pipeline with ALL optimizations + args_full = DiffusionArgs( + checkpoint_path=checkpoint_path, + device=f"cuda:{rank}", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + quant_config={"quant_algo": "FP8", "dynamic": True}, + teacache=TeaCacheConfig( + enable_teacache=True, + teacache_thresh=0.2, + use_ret_steps=True, + ), + attention=AttentionConfig(backend="TRTLLM"), + parallel=ParallelConfig(dit_cfg_size=world_size), + ) + pipeline = PipelineLoader(args_full).load() + transformer = pipeline.transformer.eval() + + # Verify all optimizations are enabled + assert pipeline.model_config.parallel.dit_cfg_size == world_size, "CFG parallel not enabled" + assert transformer.model_config.quant_config.quant_algo == QuantAlgo.FP8, "FP8 not enabled" + assert hasattr(pipeline, "cache_backend"), "TeaCache not enabled" + assert transformer.blocks[0].attn1.attn_backend == "TRTLLM", ( + "TRTLLM not enabled for self-attn" + ) + + if rank == 0: + print(f" āœ“ All optimizations verified on rank {rank}:") + print(f" - FP8 quantization: {transformer.model_config.quant_config.quant_algo}") + print(" - TeaCache: enabled") + print(f" - TRTLLM attention: {transformer.blocks[0].attn1.attn_backend}") + print(f" - CFG Parallelism: cfg_size={world_size}") + + # Initialize TeaCache for single-step inference + if hasattr(pipeline, "cache_backend"): + pipeline.cache_backend.refresh(num_inference_steps=1) + + # Load inputs on this GPU + prompt_embeds = inputs_list[0].to(f"cuda:{rank}") + neg_prompt_embeds = inputs_list[1].to(f"cuda:{rank}") + latents = inputs_list[2].to(f"cuda:{rank}") + timestep = inputs_list[3].to(f"cuda:{rank}") + + # Setup CFG config + cfg_config = pipeline._setup_cfg_config( + guidance_scale=5.0, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + ) + + assert cfg_config["enabled"], "CFG parallel not enabled" + + # Run single denoising step with all optimizations + def forward_fn( + latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors + ): + return transformer( # noqa: F821 + hidden_states=latents, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + ) + + with torch.no_grad(): + noise_pred, _, _, _ = pipeline._denoise_step_cfg_parallel( + latents=latents, + extra_stream_latents={}, + timestep=timestep, + local_embeds=cfg_config["local_embeds"], + forward_fn=forward_fn, + guidance_scale=5.0, + guidance_rescale=0.0, + ulysses_size=cfg_config["ulysses_size"], + local_extras={}, + ) + + # Validate output + assert not torch.isnan(noise_pred).any(), f"Rank {rank}: Output contains NaN" + assert not torch.isinf(noise_pred).any(), f"Rank {rank}: Output contains Inf" + + # Return output from rank 0 + if rank == 0: + return_dict["output"] = noise_pred.cpu() + print(f" āœ“ Combined optimization output shape: {noise_pred.shape}") + print( + f" āœ“ Combined optimization range: [{noise_pred.min():.4f}, {noise_pred.max():.4f}]" + ) + + del pipeline, transformer + torch.cuda.empty_cache() + + finally: + cleanup_distributed() + + +# ============================================================================= +# Basic Unit Tests +# ============================================================================= + + +class TestWan(unittest.TestCase): + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def _create_model_config(self, config_dict): + """Helper to create DiffusionModelConfig from test config dict.""" + # Create pretrained_config as SimpleNamespace + 
pretrained_config = SimpleNamespace(**config_dict) + + # Use default quantization (no quantization for unit tests) + quant_config = QuantConfig() + dynamic_weight_quant = False + dynamic_activation_quant = False + + # Create DiffusionModelConfig + model_config = DiffusionModelConfig( + pretrained_config=pretrained_config, + quant_config=quant_config, + quant_config_dict=None, + dynamic_weight_quant=dynamic_weight_quant, + force_dynamic_quantization=dynamic_activation_quant, + skip_create_weights_in_init=False, # Create weights immediately for testing + ) + return model_config + + def test_wan_model_structure(self): + """Test that model structure matches HuggingFace naming.""" + config = deepcopy(WAN_1_3B_CONFIG) + config["num_layers"] = 1 + hidden_size = config["num_attention_heads"] * config["attention_head_dim"] + config["hidden_size"] = hidden_size + + model_config = self._create_model_config(config) + + model = WanTransformer3DModel(model_config=model_config) + + # Check FFN structure + param_names = [n for n in model.state_dict().keys() if "ffn" in n] + print("\n[DEBUG] FFN parameter names in TRT-LLM model:") + for pn in param_names[:5]: + print(f" - {pn}") + + # Verify expected structure exists (MLP uses up_proj/down_proj) + assert any("ffn.up_proj" in n for n in param_names), "Missing ffn.up_proj structure" + assert any("ffn.down_proj" in n for n in param_names), "Missing ffn.down_proj structure" + + def test_wan_sanity(self): + """Basic sanity test that the model can run forward pass.""" + config = deepcopy(WAN_1_3B_CONFIG) + dtype = getattr(torch, config["torch_dtype"]) + # Use fewer layers for sanity test + config["num_layers"] = 2 + + hidden_size = config["num_attention_heads"] * config["attention_head_dim"] + config["hidden_size"] = hidden_size + + # Create model config + model_config = self._create_model_config(config) + + # Create model with model_config + model = WanTransformer3DModel(model_config=model_config).to(self.DEVICE, dtype=dtype).eval() + + batch_size = 1 + num_frames = 1 + height, width = 64, 64 + seq_len = 128 + generator = torch.Generator(device=self.DEVICE).manual_seed(42) + + hidden_states = torch.randn( + batch_size, + config["in_channels"], + num_frames, + height, + width, + generator=generator, + device=self.DEVICE, + dtype=dtype, + ) + timestep = torch.tensor([50], device=self.DEVICE, dtype=torch.long) + encoder_hidden_states = torch.randn( + batch_size, + seq_len, + config["text_dim"], + generator=generator, + device=self.DEVICE, + dtype=dtype, + ) + + with torch.inference_mode(): + output = model( + hidden_states=hidden_states, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + ) + + self.assertEqual(output.shape, hidden_states.shape) + + @parameterized.expand( + [ + ("1_3b", WAN_1_3B_CONFIG), + ] + ) + @torch.no_grad() + def test_wan_allclose_to_hf(self, name, config_template): + """Test TRT-LLM transformer matches HuggingFace output (BF16).""" + torch.random.manual_seed(42) + config = deepcopy(config_template) + dtype = getattr(torch, config["torch_dtype"]) + + mem_for_full_model = (2 + 1) * 1.3 * 2**30 + reduce_wan_config(mem_for_full_model, config) + + if config["num_layers"] <= 0: + self.skipTest("Insufficient memory for a single Wan layer") + + hidden_size = config["num_attention_heads"] * config["attention_head_dim"] + + # Create HuggingFace model (random weights) + hf_model = ( + HFWanTransformer3DModel( + patch_size=config["patch_size"], + num_attention_heads=config["num_attention_heads"], + 
attention_head_dim=config["attention_head_dim"], + in_channels=config["in_channels"], + out_channels=config["out_channels"], + text_dim=config["text_dim"], + freq_dim=config["freq_dim"], + ffn_dim=config["ffn_dim"], + num_layers=config["num_layers"], + cross_attn_norm=config["cross_attn_norm"], + qk_norm=config["qk_norm"], + eps=config["eps"], + ) + .to(self.DEVICE, dtype=dtype) + .eval() + ) + + # Create TRT-LLM model with model_config + config["hidden_size"] = hidden_size + model_config = self._create_model_config(config) + + trtllm_model = ( + WanTransformer3DModel(model_config=model_config).to(self.DEVICE, dtype=dtype).eval() + ) + + # Copy weights from HF to TRT-LLM + loaded_count = self._load_weights_from_hf(trtllm_model, hf_model.state_dict()) + print(f"[DEBUG] Loaded {loaded_count} weight tensors from HF to TRT-LLM") + + # Create test inputs + batch_size = 1 + num_frames = 1 + height, width = 64, 64 + seq_len = 128 + generator = torch.Generator(device=self.DEVICE).manual_seed(42) + + hidden_states = torch.randn( + batch_size, + config["in_channels"], + num_frames, + height, + width, + generator=generator, + device=self.DEVICE, + dtype=dtype, + ) + timestep = torch.tensor([50], device=self.DEVICE, dtype=torch.long) + encoder_hidden_states = torch.randn( + batch_size, + seq_len, + config["text_dim"], + generator=generator, + device=self.DEVICE, + dtype=dtype, + ) + + # Run both models + with ( + torch.inference_mode(), + torch.backends.cuda.sdp_kernel( + enable_flash=False, enable_math=True, enable_mem_efficient=False + ), + ): + hf_output = hf_model( + hidden_states=hidden_states, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + return_dict=False, + )[0] + + trtllm_output = trtllm_model( + hidden_states=hidden_states, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + ) + + # Compare outputs + hf_output = hf_output.float() + trtllm_output = trtllm_output.float() + + # Debug: Check for NaN/Inf + hf_has_nan = torch.isnan(hf_output).any().item() + trtllm_has_nan = torch.isnan(trtllm_output).any().item() + hf_has_inf = torch.isinf(hf_output).any().item() + trtllm_has_inf = torch.isinf(trtllm_output).any().item() + + print("\n[DEBUG] Output validation:") + print(f" HF has NaN: {hf_has_nan}, Inf: {hf_has_inf}") + print(f" TRT-LLM has NaN: {trtllm_has_nan}, Inf: {trtllm_has_inf}") + + if not (hf_has_nan or trtllm_has_nan or hf_has_inf or trtllm_has_inf): + # Compute detailed comparison metrics + diff = (trtllm_output - hf_output).abs() + max_diff = diff.max().item() + mean_diff = diff.mean().item() + + cos_sim = torch.nn.functional.cosine_similarity( + trtllm_output.flatten(), hf_output.flatten(), dim=0 + ).item() + + print("\n[DEBUG] Comparison metrics:") + print(f" Max absolute diff: {max_diff:.6f}") + print(f" Mean absolute diff: {mean_diff:.6f}") + print(f" Cosine similarity: {cos_sim:.6f}") + print(f" HF output range: [{hf_output.min():.4f}, {hf_output.max():.4f}]") + print(f" TRT-LLM output range: [{trtllm_output.min():.4f}, {trtllm_output.max():.4f}]") + + torch.testing.assert_close( + trtllm_output, hf_output, atol=0.4, rtol=0.4, msg=f"Output mismatch for {name} config" + ) + + def _load_weights_from_hf(self, trtllm_model, hf_state_dict): + """Load weights from HuggingFace model to TRT-LLM model. 
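+
+        FFN mapping (mirrors the replace rules in the loop below):
+        - TRT-LLM blocks.0.ffn.up_proj   <- HF blocks.0.ffn.net.0.proj
+        - TRT-LLM blocks.0.ffn.down_proj <- HF blocks.0.ffn.net.2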
+ + TRT-LLM structure: + - blocks.0.attn1.qkv_proj (fused QKV for self-attention) + - blocks.0.attn2.to_q/to_k/to_v (separate for cross-attention) + - blocks.0.attn1.to_out.0 and blocks.0.attn2.to_out.0 + + HuggingFace structure: + - blocks.0.attn1.to_q/to_k/to_v (separate Q/K/V) + - blocks.0.attn2.to_q/to_k/to_v (separate Q/K/V) + - blocks.0.attn1.to_out.0 and blocks.0.attn2.to_out.0 + """ + loaded_count = 0 + missing_weights = [] + + def load_linear(module, trtllm_key, hf_key, sd): + """Load weights from HF key into TRT-LLM module.""" + if f"{hf_key}.weight" in sd: + weight_dict = {"weight": sd[f"{hf_key}.weight"]} + if f"{hf_key}.bias" in sd: + weight_dict["bias"] = sd[f"{hf_key}.bias"] + module.load_weights([weight_dict]) + return 1 + else: + missing_weights.append(hf_key) + return 0 + + for name, module in trtllm_model.named_modules(): + if isinstance(module, Linear): + # Self-attention fused QKV: blocks.0.attn1.qkv_proj + # Load from HF separate Q/K/V: blocks.0.attn1.to_q/to_k/to_v + if "attn1.qkv_proj" in name: + base = name.replace(".qkv_proj", "") + q_key, k_key, v_key = f"{base}.to_q", f"{base}.to_k", f"{base}.to_v" + if f"{q_key}.weight" in hf_state_dict: + q_dict = {"weight": hf_state_dict[f"{q_key}.weight"]} + k_dict = {"weight": hf_state_dict[f"{k_key}.weight"]} + v_dict = {"weight": hf_state_dict[f"{v_key}.weight"]} + if f"{q_key}.bias" in hf_state_dict: + q_dict["bias"] = hf_state_dict[f"{q_key}.bias"] + k_dict["bias"] = hf_state_dict[f"{k_key}.bias"] + v_dict["bias"] = hf_state_dict[f"{v_key}.bias"] + module.load_weights([q_dict, k_dict, v_dict]) + loaded_count += 1 + + # Cross-attention separate Q/K/V: blocks.0.attn2.to_q (same path as HF) + elif "attn2.to_q" in name or "attn2.to_k" in name or "attn2.to_v" in name: + # Direct mapping - TRT-LLM and HF use same paths for cross-attention + loaded_count += load_linear(module, name, name, hf_state_dict) + + # Output projections: blocks.0.attn1.to_out.0 (same path as HF) + elif ".to_out" in name: + # Direct mapping - TRT-LLM and HF use same paths for output projections + loaded_count += load_linear(module, name, name, hf_state_dict) + + # FFN layers: TRT-LLM uses up_proj/down_proj, HF uses net.0.proj/net.2 + elif "ffn.up_proj" in name: + hf_key = name.replace(".ffn.up_proj", ".ffn.net.0.proj") + loaded_count += load_linear(module, name, hf_key, hf_state_dict) + elif "ffn.down_proj" in name: + hf_key = name.replace(".ffn.down_proj", ".ffn.net.2") + loaded_count += load_linear(module, name, hf_key, hf_state_dict) + + # Other layers: direct mapping + elif "condition_embedder" in name or "proj_out" in name: + loaded_count += load_linear(module, name, name, hf_state_dict) + + else: + # Direct mapping for any other Linear modules + loaded_count += load_linear(module, name, name, hf_state_dict) + + elif hasattr(module, "weight") and f"{name}.weight" in hf_state_dict: + # Norms & embeddings + with torch.no_grad(): + module.weight.copy_(hf_state_dict[f"{name}.weight"]) + if ( + getattr(module, "bias", None) is not None + and f"{name}.bias" in hf_state_dict + ): + module.bias.copy_(hf_state_dict[f"{name}.bias"]) + loaded_count += 1 + + # Load scale_shift_table parameters + for name, param in trtllm_model.named_parameters(): + if "scale_shift_table" in name and name in hf_state_dict: + with torch.no_grad(): + param.copy_(hf_state_dict[name].view(param.shape)) + loaded_count += 1 + + if missing_weights: + print(f"[DEBUG] Missing {len(missing_weights)} weights:") + for mw in missing_weights[:10]: # Show first 10 + print(f" - {mw}") + + 
return loaded_count + + def _load_weights_from_state_dict(self, model, state_dict): + """Load weights from state_dict into model (same structure).""" + for name, module in model.named_modules(): + if isinstance(module, Linear): + weight_key = f"{name}.weight" + if weight_key in state_dict: + weight_dict = {"weight": state_dict[weight_key]} + bias_key = f"{name}.bias" + if bias_key in state_dict: + weight_dict["bias"] = state_dict[bias_key] + module.load_weights([weight_dict]) + + elif hasattr(module, "weight") and f"{name}.weight" in state_dict: + with torch.no_grad(): + module.weight.copy_(state_dict[f"{name}.weight"]) + if getattr(module, "bias", None) is not None and f"{name}.bias" in state_dict: + module.bias.copy_(state_dict[f"{name}.bias"]) + + # Load parameters + for name, param in model.named_parameters(): + if name in state_dict: + with torch.no_grad(): + param.copy_(state_dict[name].view(param.shape)) + + +# ============================================================================= +# Pipeline Test - Require Real Checkpoint +# ============================================================================= + + +@pytest.fixture +def checkpoint_exists(): + """Check if checkpoint path is set and exists.""" + return CHECKPOINT_PATH and os.path.exists(CHECKPOINT_PATH) + + +@pytest.fixture(autouse=True) +def cleanup_gpu_memory(): + """Automatically cleanup GPU memory after each test to prevent OOM errors. + + This fixture runs automatically after every test in this file. + It performs garbage collection and clears CUDA cache to free up GPU memory. + """ + yield # Test runs here + # Cleanup after test completes + import gc + + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + +def _is_fp32_layernorm_param(param_name: str) -> bool: + """True if param is a LayerNorm weight/bias we keep in float32. Only LayerNorm (norm1/norm2/norm3/norm_out).""" + if not param_name.endswith((".weight", ".bias")): + return False + # blocks..norm1, norm2, norm3 (LayerNorm only; attn norm_q/norm_k are RMSNorm) + if ".norm" in param_name and "blocks." in param_name: + parts = param_name.split(".") + for p in parts: + if p in ("norm1", "norm2", "norm3"): + return True + return False + # top-level norm_out (LayerNorm) + if param_name == "norm_out.weight" or param_name == "norm_out.bias": + return True + # condition_embedder.norm1, norm2 (LayerNorm) + if param_name.startswith("condition_embedder.") and ".norm" in param_name: + return True + return False + + +class TestWanPipeline: + """Pipeline tests for Wan pipeline loading with PipelineLoader. + + These tests require a real checkpoint (set DIFFUSION_MODEL_PATH env var). + They test the full loading flow: config → model → weight loading → inference. + """ + + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def test_load_wan_pipeline_basic(self, checkpoint_exists): + """Test loading Wan pipeline without quantization via PipelineLoader.""" + if not checkpoint_exists: + pytest.skip("Checkpoint not available. Set DIFFUSION_MODEL_PATH.") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint (single-stage). Use DIFFUSION_MODEL_PATH with '2.1' in the path." 
+ ) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + # Verify pipeline loaded correctly + assert pipeline.transformer is not None + assert len(pipeline.transformer.blocks) > 0 + + # Verify weights are loaded + # Check that non-scale parameters are bfloat16 + bf16_count = 0 + f32_scale_count = 0 + for name, param in pipeline.transformer.named_parameters(): + assert param.device.type == "cuda", f"Parameter {name} not on CUDA" + if "scale" in name.lower(): + # Scale parameters can stay float32 for FP8 kernels + assert param.dtype in [torch.float32, torch.bfloat16], ( + f"Scale param {name} has unexpected dtype {param.dtype}" + ) + if param.dtype == torch.float32: + f32_scale_count += 1 + elif _is_fp32_layernorm_param(name): + # LayerNorm (norm1/norm2/norm3/norm_out) use float32; RMSNorm (norm_q, norm_k, etc.) stay bf16 + assert param.dtype == torch.float32, ( + f"LayerNorm param {name} expected float32 but got {param.dtype}" + ) + else: + # Non-scale parameters should be bfloat16 + assert param.dtype == torch.bfloat16, ( + f"Parameter {name} expected bfloat16 but got {param.dtype}" + ) + bf16_count += 1 + + assert bf16_count > 0, "Should have at least some bfloat16 parameters" + print( + f"\n[Pipeline] BF16 pipeline loaded: {bf16_count} bf16 params" + f"\n{f32_scale_count} f32 scale params, {len(pipeline.transformer.blocks)} blocks" + ) + + @pytest.mark.parametrize("quant_algo", ["FP8", "FP8_BLOCK_SCALES"]) + def test_load_wan_pipeline_with_quantization(self, checkpoint_exists, quant_algo): + """Test loading Wan with FP8 quantization (per-tensor or blockwise).""" + if not checkpoint_exists: + pytest.skip("Checkpoint not available. Set DIFFUSION_MODEL_PATH.") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path." + ) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + quant_config={"quant_algo": quant_algo, "dynamic": True}, + ) + pipeline = PipelineLoader(args).load() + + # Verify FP8 weights in transformer blocks + found_fp8 = False + for name, module in pipeline.transformer.named_modules(): + if isinstance(module, Linear): + if "blocks." in name and hasattr(module, "weight") and module.weight is not None: + assert module.weight.dtype == torch.float8_e4m3fn, ( + f"Linear {name} should have FP8 weight, got {module.weight.dtype}" + ) + assert hasattr(module, "weight_scale"), f"Linear {name} missing weight_scale" + found_fp8 = True + print(f"[{quant_algo}] FP8 layer {name}: weight {module.weight.shape}") + break + + assert found_fp8, f"No FP8 Linear modules found for {quant_algo}" + + @pytest.mark.parametrize("quant_algo", ["FP8", "FP8_BLOCK_SCALES"]) + def test_fp8_vs_bf16_numerical_correctness(self, checkpoint_exists, quant_algo): + """Test FP8 vs BF16 numerical accuracy on real checkpoint weights. + + Pattern (similar to that in test_pipeline_dynamic_quant.py): + 1. Use F.linear() with BF16 weights as ground truth reference + 2. Verify BF16 layer matches F.linear exactly + 3. Compare FP8 layer output against reference + 4. Check max_diff, cosine_similarity, mse_loss + """ + if not checkpoint_exists: + pytest.skip("Checkpoint not available. 
Set DIFFUSION_MODEL_PATH.")
+        if not is_wan21_checkpoint():
+            pytest.skip(
+                "This test requires Wan 2.1 checkpoint (loads 2 full models and "
+                "needs a single-stage transformer). Use DIFFUSION_MODEL_PATH with '2.1' in the path."
+            )
+
+        # =====================================================================
+        # Load BF16 Pipeline (Reference)
+        # =====================================================================
+        print(f"\n[Compare {quant_algo}] Loading BF16 pipeline...")
+
+        args_bf16 = DiffusionArgs(
+            checkpoint_path=CHECKPOINT_PATH,
+            device="cuda",
+            dtype="bfloat16",
+        )
+        pipeline_bf16 = PipelineLoader(args_bf16).load()
+
+        # =====================================================================
+        # Load FP8 Pipeline
+        # =====================================================================
+        print(f"[Compare {quant_algo}] Loading {quant_algo} pipeline...")
+
+        args_fp8 = DiffusionArgs(
+            checkpoint_path=CHECKPOINT_PATH,
+            device="cuda",
+            dtype="bfloat16",
+            quant_config={"quant_algo": quant_algo, "dynamic": True},
+        )
+        pipeline_fp8 = PipelineLoader(args_fp8).load()
+
+        # =====================================================================
+        # Get Linear Layers from Both Pipelines
+        # =====================================================================
+        attn_bf16 = pipeline_bf16.transformer.blocks[0].attn1
+        attn_fp8 = pipeline_fp8.transformer.blocks[0].attn1
+
+        # Get the linear layer - try fused qkv_proj first, fall back to qkv_proj on the attention module
+        if hasattr(attn_bf16, "qkv_proj"):
+            linear_bf16 = attn_bf16.qkv_proj
+            linear_fp8 = attn_fp8.qkv_proj
+            layer_name = "blocks.0.attn1.qkv_proj"
+        elif hasattr(attn_bf16, "attn") and hasattr(attn_bf16.attn, "qkv_proj"):
+            linear_bf16 = attn_bf16.attn.qkv_proj
+            linear_fp8 = attn_fp8.attn.qkv_proj
+            layer_name = "blocks.0.attn1.attn.qkv_proj"
+        else:
+            # Use FFN linear instead (always available)
+            linear_bf16 = pipeline_bf16.transformer.blocks[0].ffn.net[0]["proj"]
+            linear_fp8 = pipeline_fp8.transformer.blocks[0].ffn.net[0]["proj"]
+            layer_name = "blocks.0.ffn.net.0.proj"
+
+        # =====================================================================
+        # Get BF16 weights and bias for F.linear reference
+        # =====================================================================
+        weight_bf16 = linear_bf16.weight.data.clone()
+        bias_bf16 = linear_bf16.bias.data.clone() if linear_bf16.bias is not None else None
+
+        # =====================================================================
+        # Create Test Input
+        # =====================================================================
+        torch.manual_seed(42)
+        hidden_size = linear_bf16.in_features
+        batch_size = 1
+        seq_len = 14040
+
+        # 2D input for FP8 kernel compatibility
+        input_tensor = torch.randn(
+            batch_size * seq_len, hidden_size, dtype=torch.bfloat16, device="cuda"
+        )
+        print(f"[Compare] Input shape: {input_tensor.shape}")
+
+        # =====================================================================
+        # Compute Reference Output: F.linear (ground truth)
+        # =====================================================================
+        with torch.no_grad():
+            expected = F.linear(input_tensor, weight_bf16, bias_bf16)
+
+        # =====================================================================
+        # Compute FP8 Output
+        # =====================================================================
+        with torch.no_grad():
+            result_fp8 = linear_fp8(input_tensor)
+
+        # =====================================================================
+        # Compute BF16 Layer Output
+        # 
===================================================================== + with torch.no_grad(): + result_bf16 = linear_bf16(input_tensor) + + # Verify BF16 layer matches F.linear reference + assert torch.allclose(result_bf16, expected, rtol=1e-5, atol=1e-6), ( + "BF16 layer should match F.linear reference exactly" + ) + + # Compare FP8 vs Reference + max_diff = torch.max(torch.abs(result_fp8 - expected)).item() + cos_sim = F.cosine_similarity( + result_fp8.flatten().float(), expected.flatten().float(), dim=0 + ) + mse = F.mse_loss(result_fp8.flatten().float(), expected.flatten().float()) + + print( + f"\n[{layer_name}] max_diff={max_diff:.6f}, cos_sim={cos_sim.item():.6f}, mse={mse.item():.6f}" + ) + + assert cos_sim > 0.99, f"Cosine similarity too low: {cos_sim.item()}" + assert mse < 1.0, f"MSE too high: {mse.item()}" + + # Cleanup + del pipeline_bf16, pipeline_fp8 + torch.cuda.empty_cache() + + def test_fp8_vs_bf16_memory_comparison(self, checkpoint_exists): + """Test FP8 uses ~2x less memory than BF16.""" + if not checkpoint_exists: + pytest.skip("Checkpoint not available. Set DIFFUSION_MODEL_PATH.") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path." + ) + + def get_module_memory_gb(module): + return sum(p.numel() * p.element_size() for p in module.parameters()) / 1024**3 + + # Load BF16 + torch.cuda.empty_cache() + torch.cuda.reset_peak_memory_stats() + + args_bf16 = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + ) + pipeline_bf16 = PipelineLoader(args_bf16).load() + + bf16_model_mem = get_module_memory_gb(pipeline_bf16.transformer) + bf16_peak_mem = torch.cuda.max_memory_allocated() / 1024**3 + + print(f"\n[BF16] Transformer memory: {bf16_model_mem:.2f} GB") + print(f"[BF16] Peak memory: {bf16_peak_mem:.2f} GB") + + del pipeline_bf16 + torch.cuda.empty_cache() + + # Load FP8 + torch.cuda.reset_peak_memory_stats() + + args_fp8 = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + quant_config={"quant_algo": "FP8", "dynamic": True}, + ) + pipeline_fp8 = PipelineLoader(args_fp8).load() + + fp8_model_mem = get_module_memory_gb(pipeline_fp8.transformer) + fp8_peak_mem = torch.cuda.max_memory_allocated() / 1024**3 + + print(f"\n[FP8] Transformer memory: {fp8_model_mem:.2f} GB") + print(f"[FP8] Peak memory: {fp8_peak_mem:.2f} GB") + + # Verify memory savings + model_mem_ratio = bf16_model_mem / fp8_model_mem + peak_mem_ratio = bf16_peak_mem / fp8_peak_mem + + print(f"\n[Comparison] Model memory ratio (BF16/FP8): {model_mem_ratio:.2f}x") + print(f"[Comparison] Peak memory ratio (BF16/FP8): {peak_mem_ratio:.2f}x") + + # FP8 should use ~2x less memory + assert model_mem_ratio > 1.8, f"FP8 should use ~2x less memory, got {model_mem_ratio:.2f}x" + + del pipeline_fp8 + torch.cuda.empty_cache() + + @pytest.mark.parametrize("quant_algo", ["FP8", "FP8_BLOCK_SCALES"]) + def test_fp8_vs_bf16_full_transformer_e2e(self, checkpoint_exists, quant_algo): + """End-to-end test: Compare full Wan transformer FP8 vs BF16 output. + + Unlike test_fp8_vs_bf16_numerical_correctness which tests a single Linear layer, + this test runs the ENTIRE transformer (all 30 blocks) and compares outputs. 
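+
+        Metrics (as computed below, on float32 copies of the outputs):
+            cos_sim   = F.cosine_similarity(fp8.flatten(), bf16.flatten(), dim=0)
+            rel_error = mean(|fp8 - bf16|) / (mean(|bf16|) + 1e-8)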
+ + Expectations: + - Errors accumulate across 30 layers, so use relaxed tolerances + - Cosine similarity should be high (>0.95) but lower than single-layer test (>0.99) + - This validates that FP8 quantization doesn't degrade quality too much end-to-end + """ + if not checkpoint_exists: + pytest.skip("Checkpoint not available. Set DIFFUSION_MODEL_PATH.") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path." + ) + + # ===================================================================== + # Load BF16 Transformer (Reference) + # ===================================================================== + print("\n[E2E] Loading BF16 transformer...") + + args_bf16 = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + ) + pipeline_bf16 = PipelineLoader(args_bf16).load() + transformer_bf16 = pipeline_bf16.transformer + + # ===================================================================== + # Load FP8 Transformer + # ===================================================================== + print(f"[E2E] Loading {quant_algo} transformer...") + + args_fp8 = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + quant_config={"quant_algo": quant_algo, "dynamic": True}, + ) + pipeline_fp8 = PipelineLoader(args_fp8).load() + transformer_fp8 = pipeline_fp8.transformer + + # ===================================================================== + # Create Realistic Inputs + # ===================================================================== + torch.manual_seed(42) + + # Use smaller size for faster testing (still realistic) + batch_size = 1 + num_frames = 1 + height, width = 64, 64 # Smaller than full 720x1280 + in_channels = 16 + text_seq_len = 128 + text_dim = 4096 + + # Create inputs + hidden_states = torch.randn( + batch_size, in_channels, num_frames, height, width, dtype=torch.bfloat16, device="cuda" + ) + timestep = torch.tensor([500], dtype=torch.long, device="cuda") + encoder_hidden_states = torch.randn( + batch_size, text_seq_len, text_dim, dtype=torch.bfloat16, device="cuda" + ) + + print("[E2E] Input shapes:") + print(f" hidden_states: {hidden_states.shape}") + print(f" timestep: {timestep.shape}") + print(f" encoder_hidden_states: {encoder_hidden_states.shape}") + + # ===================================================================== + # Run Full Transformer Forward Pass + # ===================================================================== + print("[E2E] Running BF16 transformer forward...") + with torch.no_grad(): + output_bf16 = transformer_bf16( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + print(f"[E2E] Running {quant_algo} transformer forward...") + with torch.no_grad(): + output_fp8 = transformer_fp8( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + # ===================================================================== + # Verify Outputs + # ===================================================================== + assert output_bf16.shape == output_fp8.shape, ( + f"Output shape mismatch: BF16={output_bf16.shape}, FP8={output_fp8.shape}" + ) + print(f"[E2E] Output shape: {output_bf16.shape}") + + # Check for NaN/Inf + bf16_has_nan = torch.isnan(output_bf16).any().item() + fp8_has_nan = torch.isnan(output_fp8).any().item() + bf16_has_inf = torch.isinf(output_bf16).any().item() + 
fp8_has_inf = torch.isinf(output_fp8).any().item()
+
+        assert not bf16_has_nan, "BF16 output contains NaN"
+        assert not bf16_has_inf, "BF16 output contains Inf"
+        assert not fp8_has_nan, f"{quant_algo} output contains NaN"
+        assert not fp8_has_inf, f"{quant_algo} output contains Inf"
+
+        # =====================================================================
+        # Compare Numerical Accuracy
+        # =====================================================================
+        output_bf16_float = output_bf16.float()
+        output_fp8_float = output_fp8.float()
+
+        max_diff = torch.max(torch.abs(output_fp8_float - output_bf16_float)).item()
+        mean_diff = torch.mean(torch.abs(output_fp8_float - output_bf16_float)).item()
+
+        cos_sim = F.cosine_similarity(
+            output_fp8_float.flatten(), output_bf16_float.flatten(), dim=0
+        ).item()
+
+        mse = F.mse_loss(output_fp8_float, output_bf16_float).item()
+
+        # Relative error
+        rel_error = mean_diff / (output_bf16_float.abs().mean().item() + 1e-8)
+
+        print(f"\n{'=' * 60}")
+        print(f"END-TO-END TRANSFORMER COMPARISON ({quant_algo} vs BF16)")
+        print(f"{'=' * 60}")
+        print(f"Number of layers: {len(transformer_bf16.blocks)}")
+        print(f"Output shape: {output_bf16.shape}")
+        print("")
+        print(f"Max absolute difference: {max_diff:.6f}")
+        print(f"Mean absolute difference: {mean_diff:.6f}")
+        print(f"Relative error: {rel_error:.6f}")
+        print(f"Cosine similarity: {cos_sim:.6f}")
+        print(f"MSE loss: {mse:.6f}")
+        print("")
+        print(f"BF16 output range: [{output_bf16_float.min():.4f}, {output_bf16_float.max():.4f}]")
+        print(
+            f"{quant_algo} output range: [{output_fp8_float.min():.4f}, {output_fp8_float.max():.4f}]"
+        )
+        print(f"{'=' * 60}")
+
+        # =====================================================================
+        # Assert Numerical Correctness (Relaxed Tolerances)
+        # =====================================================================
+        # Cosine similarity should be high, but lower than single-layer test
+        # due to error accumulation across 30 layers
+        assert cos_sim > 0.95, (
+            f"Cosine similarity too low for full transformer: {cos_sim:.6f} (expected >0.95)"
+        )
+
+        # Relative error should be reasonable
+        # Note: Error accumulates across 30 layers, so we use a relaxed tolerance
+        assert rel_error < 0.15, f"Relative error too high: {rel_error:.6f} (expected <0.15)"
+
+        print(f"\n[PASS] {quant_algo} full transformer output matches BF16 within tolerance!")
+        print(f"  ✓ Cosine similarity: {cos_sim:.4f} (>0.95)")
+        print(f"  ✓ Relative error: {rel_error:.4f} (<0.15)")
+
+        # Cleanup
+        del pipeline_bf16, pipeline_fp8, transformer_bf16, transformer_fp8
+        torch.cuda.empty_cache()
+
+    def test_attention_backend_comparison(self, checkpoint_exists):
+        """Test accuracy of full Wan forward pass with attention backend comparison.
+
+        Wan uses both self-attention (attn1) and cross-attention (attn2). TRTLLM backend
+        doesn't support cross-attention (seq_len != kv_seq_len), but WanAttention
+        automatically falls back to VANILLA for cross-attention when TRTLLM is configured.
+
+        This test verifies:
+        1. VANILLA backend works correctly
+        2. TRTLLM backend with automatic VANILLA fallback for cross-attention produces
+           numerically similar results to pure VANILLA
+        """
+        if not checkpoint_exists:
+            pytest.skip("Checkpoint not available. Set DIFFUSION_MODEL_PATH.")
+        if not is_wan21_checkpoint():
+            pytest.skip(
+                "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path."
+ ) + + # ===================================================================== + # Load Baseline Transformer (Default VANILLA) + # ===================================================================== + print("\n[Attention Backend Test] Loading baseline transformer (default VANILLA)...") + + from tensorrt_llm._torch.visual_gen.config import AttentionConfig + + args_baseline = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + ) + # Default attention backend is VANILLA + pipeline_baseline = PipelineLoader(args_baseline).load() + transformer_baseline = pipeline_baseline.transformer + + # ===================================================================== + # Load VANILLA Transformer + # ===================================================================== + print("[Attention Backend Test] Loading VANILLA transformer (explicit)...") + + args_vanilla = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + ) + args_vanilla.attention = AttentionConfig(backend="VANILLA") + pipeline_vanilla = PipelineLoader(args_vanilla).load() + transformer_vanilla = pipeline_vanilla.transformer + + # ===================================================================== + # Create Fixed Test Inputs + # ===================================================================== + torch.manual_seed(42) + + # Smaller size for faster testing + batch_size = 1 + num_frames = 1 + height, width = 64, 64 + in_channels = 16 + text_seq_len = 128 + text_dim = 4096 + + # Create inputs + hidden_states = torch.randn( + batch_size, in_channels, num_frames, height, width, dtype=torch.bfloat16, device="cuda" + ) + timestep = torch.tensor([500], dtype=torch.long, device="cuda") + encoder_hidden_states = torch.randn( + batch_size, text_seq_len, text_dim, dtype=torch.bfloat16, device="cuda" + ) + + print("[Attention Backend Test] Input shapes:") + print(f" hidden_states: {hidden_states.shape}") + print(f" timestep: {timestep.shape}") + print(f" encoder_hidden_states: {encoder_hidden_states.shape}") + + # ===================================================================== + # Run Full Transformer Forward Pass + # ===================================================================== + print("[Attention Backend Test] Running baseline transformer forward...") + with torch.no_grad(): + output_baseline = transformer_baseline( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + print("[Attention Backend Test] Running VANILLA transformer forward...") + with torch.no_grad(): + output_vanilla = transformer_vanilla( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + # ===================================================================== + # Verify Output Shapes + # ===================================================================== + assert output_baseline.shape == output_vanilla.shape, ( + f"Output shape mismatch: baseline={output_baseline.shape}, " + f"VANILLA={output_vanilla.shape}" + ) + print(f"[Attention Backend Test] Output shape: {output_baseline.shape}") + + # ===================================================================== + # Check for NaN/Inf in All Outputs + # ===================================================================== + for name, output in [("baseline", output_baseline), ("VANILLA", output_vanilla)]: + has_nan = torch.isnan(output).any().item() + has_inf = torch.isinf(output).any().item() + assert not 
has_nan, f"{name} output contains NaN" + assert not has_inf, f"{name} output contains Inf" + print(f"[Attention Backend Test] {name} output: NaN={has_nan}, Inf={has_inf}") + + # ===================================================================== + # Compare VANILLA (Explicit) vs Baseline + # ===================================================================== + output_baseline_float = output_baseline.float() + output_vanilla_float = output_vanilla.float() + + # VANILLA explicit vs baseline (should be identical) + max_diff_vanilla = torch.max(torch.abs(output_vanilla_float - output_baseline_float)).item() + mean_diff_vanilla = torch.mean( + torch.abs(output_vanilla_float - output_baseline_float) + ).item() + cos_sim_vanilla = F.cosine_similarity( + output_vanilla_float.flatten(), output_baseline_float.flatten(), dim=0 + ).item() + mse_vanilla = F.mse_loss(output_vanilla_float, output_baseline_float).item() + + print(f"\n{'=' * 60}") + print("VANILLA (Explicit) vs Baseline Comparison") + print(f"{'=' * 60}") + print(f"Max absolute difference: {max_diff_vanilla:.6f}") + print(f"Mean absolute difference: {mean_diff_vanilla:.6f}") + print(f"Cosine similarity: {cos_sim_vanilla:.6f}") + print(f"MSE loss: {mse_vanilla:.6f}") + print(f"{'=' * 60}") + + # VANILLA explicit should match baseline closely (same backend) + # Note: Not exactly identical + assert cos_sim_vanilla > 0.995, ( + f"VANILLA explicit should match baseline closely: cos_sim={cos_sim_vanilla:.6f}" + ) + + print("\n[PASS] VANILLA backend produces consistent outputs!") + print(f" āœ“ VANILLA (explicit) matches baseline: cos_sim={cos_sim_vanilla:.6f} (>0.995)") + + # ===================================================================== + # Load TRTLLM Transformer (with automatic VANILLA fallback for cross-attention) + # ===================================================================== + print("\n[Attention Backend Test] Loading TRTLLM transformer...") + + args_trtllm = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + ) + args_trtllm.attention = AttentionConfig(backend="TRTLLM") + pipeline_trtllm = PipelineLoader(args_trtllm).load() + transformer_trtllm = pipeline_trtllm.transformer + + # Verify automatic backend override for cross-attention + print("[Attention Backend Test] Verifying backend configuration...") + first_block = transformer_trtllm.blocks[0] + attn1_backend = first_block.attn1.attn_backend + attn2_backend = first_block.attn2.attn_backend + print(f" attn1 (self-attention) backend: {attn1_backend}") + print(f" attn2 (cross-attention) backend: {attn2_backend}") + assert attn1_backend == "TRTLLM", f"Expected attn1 to use TRTLLM, got {attn1_backend}" + assert attn2_backend == "VANILLA", f"Expected attn2 to use VANILLA, got {attn2_backend}" + print(" āœ“ Automatic backend override working correctly!") + + # ===================================================================== + # Run TRTLLM Transformer Forward Pass + # ===================================================================== + print("[Attention Backend Test] Running TRTLLM transformer forward...") + with torch.no_grad(): + output_trtllm = transformer_trtllm( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + # ===================================================================== + # Check for NaN/Inf in TRTLLM Output + # ===================================================================== + has_nan = torch.isnan(output_trtllm).any().item() + 
+        has_inf = torch.isinf(output_trtllm).any().item()
+        assert not has_nan, "TRTLLM output contains NaN"
+        assert not has_inf, "TRTLLM output contains Inf"
+        print(f"[Attention Backend Test] TRTLLM output: NaN={has_nan}, Inf={has_inf}")
+
+        # =====================================================================
+        # Compare TRTLLM vs Baseline
+        # =====================================================================
+        output_trtllm_float = output_trtllm.float()
+
+        max_diff_trtllm = torch.max(torch.abs(output_trtllm_float - output_baseline_float)).item()
+        mean_diff_trtllm = torch.mean(torch.abs(output_trtllm_float - output_baseline_float)).item()
+        cos_sim_trtllm = F.cosine_similarity(
+            output_trtllm_float.flatten(), output_baseline_float.flatten(), dim=0
+        ).item()
+        mse_trtllm = F.mse_loss(output_trtllm_float, output_baseline_float).item()
+
+        print(f"\n{'=' * 60}")
+        print("TRTLLM (with auto VANILLA fallback) vs Baseline Comparison")
+        print(f"{'=' * 60}")
+        print(f"Max absolute difference: {max_diff_trtllm:.6f}")
+        print(f"Mean absolute difference: {mean_diff_trtllm:.6f}")
+        print(f"Cosine similarity: {cos_sim_trtllm:.6f}")
+        print(f"MSE loss: {mse_trtllm:.6f}")
+        print(f"{'=' * 60}")
+
+        # TRTLLM should produce similar results (attn1 uses TRTLLM, attn2 uses VANILLA).
+        # Allow slightly more tolerance, since two different attention implementations
+        # are being compared.
+        assert cos_sim_trtllm > 0.99, (
+            f"TRTLLM should produce similar results to baseline: cos_sim={cos_sim_trtllm:.6f}"
+        )
+
+        print("\n[PASS] TRTLLM backend with automatic fallback works correctly!")
+        print(f"  ✓ TRTLLM matches baseline: cos_sim={cos_sim_trtllm:.6f} (>0.99)")
+
+        # Cleanup
+        del pipeline_baseline, pipeline_vanilla, pipeline_trtllm
+        del transformer_baseline, transformer_vanilla, transformer_trtllm
+        torch.cuda.empty_cache()
+
+    @pytest.mark.parametrize("quant_algo", ["FP8", "FP8_BLOCK_SCALES"])
+    def test_fp8_mixed_quant_numerical_correctness(self, checkpoint_exists, quant_algo):
+        """Test numerical correctness with mixed quantization (some layers excluded).
+
+        Compares outputs between:
+        1. Full BF16 model (reference)
+        2. Mixed FP8 model (some layers excluded from quantization)
+
+        Expected behavior:
+        - The mixed model should stay close to the BF16 reference
+        - Excluding sensitive layers (like first/last blocks) may improve accuracy
+        """
+        if not checkpoint_exists:
+            pytest.skip("Checkpoint not available. Set DIFFUSION_MODEL_PATH.")
+        if not is_wan21_checkpoint():
+            pytest.skip(
+                "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path."
+ ) + + # ===================================================================== + # Define Mixed Quant Config + # ===================================================================== + # Exclude first block and output projection (often sensitive layers) + mixed_ignore_patterns = [ + "proj_out", + "condition_embedder.*", + "blocks.0.*", + "blocks.29.*", # Last block (if exists) + ] + + # ===================================================================== + # Load Models + # ===================================================================== + print("\n[Mixed Quant Accuracy] Loading BF16 model (reference)...") + args_bf16 = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline_bf16 = PipelineLoader(args_bf16).load() + + print(f"[Mixed Quant Accuracy] Loading mixed {quant_algo} model...") + args_fp8_mixed = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + quant_config={ + "quant_algo": quant_algo, + "dynamic": True, + "ignore": mixed_ignore_patterns, + }, + ) + pipeline_fp8_mixed = PipelineLoader(args_fp8_mixed).load() + + # ===================================================================== + # Create Test Inputs + # ===================================================================== + torch.manual_seed(42) + + batch_size = 1 + num_frames = 1 + height, width = 64, 64 + in_channels = 16 + text_seq_len = 128 + text_dim = 4096 + + hidden_states = torch.randn( + batch_size, in_channels, num_frames, height, width, dtype=torch.bfloat16, device="cuda" + ) + timestep = torch.tensor([500], dtype=torch.long, device="cuda") + encoder_hidden_states = torch.randn( + batch_size, text_seq_len, text_dim, dtype=torch.bfloat16, device="cuda" + ) + + # ===================================================================== + # Run Forward Pass + # ===================================================================== + print("[Mixed Quant Accuracy] Running forward passes...") + + with torch.no_grad(): + output_bf16 = pipeline_bf16.transformer( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + output_fp8_mixed = pipeline_fp8_mixed.transformer( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + # ===================================================================== + # Compute Metrics + # ===================================================================== + output_bf16_float = output_bf16.float() + output_fp8_mixed_float = output_fp8_mixed.float() + + # Mixed FP8 vs BF16 + cos_sim_mixed = F.cosine_similarity( + output_fp8_mixed_float.flatten(), output_bf16_float.flatten(), dim=0 + ).item() + mse_mixed = F.mse_loss(output_fp8_mixed_float, output_bf16_float).item() + + print(f"\n{'=' * 60}") + print(f"MIXED QUANTIZATION ACCURACY TEST ({quant_algo})") + print(f"{'=' * 60}") + print(f"Ignored patterns: {mixed_ignore_patterns}") + print("") + print(f"Mixed {quant_algo} vs BF16:") + print(f" Cosine similarity: {cos_sim_mixed:.6f}") + print(f" MSE: {mse_mixed:.6f}") + print(f"{'=' * 60}") + + # ===================================================================== + # Assertions + # ===================================================================== + # Both should maintain reasonable accuracy + assert cos_sim_mixed > 0.99, ( + f"Mixed {quant_algo} cosine similarity too low: {cos_sim_mixed}" + ) + assert 
mse_mixed < 1.0, f"Mixed {quant_algo} MSE too high: {mse_mixed}" + + print("\n[PASS] Mixed quantization numerical correctness verified!") + print(f" āœ“ Mixed {quant_algo}: cos_sim={cos_sim_mixed:.4f}") + + # Cleanup + del pipeline_bf16, pipeline_fp8_mixed + torch.cuda.empty_cache() + + def test_fp8_static_vs_bf16_accuracy(self, wan22_both_checkpoints_exist): + """Test FP8 static and dynamic quantization accuracy against BF16 reference. + + Compares outputs from: + 1. TRT-LLM BF16 model (reference checkpoint) + 2. TRT-LLM FP8 static quantized model (pre-quantized checkpoint) + 3. TRT-LLM FP8 dynamic quantized model (BF16 checkpoint + on-the-fly quant) + + Uses spatially-correlated inputs that mimic real VAE latent patterns, + which achieves much higher accuracy than random noise inputs. + """ + if not wan22_both_checkpoints_exist: + pytest.skip( + f"Both checkpoints required. FP8: {CHECKPOINT_PATH_WAN22_FP8}, " + f"BF16: {CHECKPOINT_PATH_WAN22_BF16}" + ) + + # Reset dynamo cache to avoid recompile-limit errors from prior + # tests that compiled kernels with different dtypes (e.g. Float32). + torch._dynamo.reset() + + print("\n" + "=" * 70) + print("FP8 STATIC & DYNAMIC QUANT vs BF16 ACCURACY TEST") + print("=" * 70) + + # Load BF16 reference model + print(f"\n[BF16] Loading from {CHECKPOINT_PATH_WAN22_BF16}") + args_bf16 = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_BF16, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline_bf16 = PipelineLoader(args_bf16).load() + + # Load FP8 static quantized model (from pre-quantized checkpoint) + print(f"\n[FP8 Static] Loading from {CHECKPOINT_PATH_WAN22_FP8}") + args_fp8_static = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_FP8, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline_fp8_static = PipelineLoader(args_fp8_static).load() + + # Load FP8 dynamic quantized model (from BF16 checkpoint with on-the-fly quant) + print(f"\n[FP8 Dynamic] Loading from {CHECKPOINT_PATH_WAN22_BF16} with dynamic quant") + args_fp8_dynamic = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_BF16, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + quant_config={ + "quant_algo": "FP8", + "dynamic": True, + }, + ) + pipeline_fp8_dynamic = PipelineLoader(args_fp8_dynamic).load() + + # Verify FP8 static model has calibrated scales + static_quant_modules = 0 + for name, module in pipeline_fp8_static.transformer.named_modules(): + if isinstance(module, Linear): + if hasattr(module, "input_scale") and module.input_scale is not None: + static_quant_modules += 1 + print(f"[FP8 Static] Quantized Linear modules with input_scale: {static_quant_modules}") + assert static_quant_modules > 0, "FP8 static model should have calibrated scales" + + # Verify FP8 dynamic model has quantized weights + dynamic_quant_modules = 0 + for name, module in pipeline_fp8_dynamic.transformer.named_modules(): + if isinstance(module, Linear): + if hasattr(module, "weight_scale") and module.weight_scale is not None: + dynamic_quant_modules += 1 + print(f"[FP8 Dynamic] Quantized Linear modules: {dynamic_quant_modules}") + + # Create spatially-correlated test inputs (mimics real VAE latent patterns) + # Wan 2.2 TI2V-5B specs: + # - VAE compression: 16x16x4 (spatial x spatial x temporal) + # - Latent channels: 48 (z_dim=48) + # - 720P resolution: 1280x704 -> latent: 80x44 + # - Text encoder: UMT5, max_length=512, dim=4096 + torch.manual_seed(42) + + batch_size = 2 # For CFG (positive + negative) 
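+        # Note: batch=2 mimics classifier-free guidance at inference time, where the
+        # conditional and unconditional branches run as one stacked batch (the same
+        # layout the CFG tests below exercise via prompt/negative-prompt embeddings).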
+ in_channels = 48 # Wan 2.2 TI2V-5B uses 48 latent channels + time_dim = 1 # Single frame for unit test + + # 720P latent dimensions: 1280/16=80 width, 704/16=44 height + height = 44 # 720P latent height (704 / 16) + width = 80 # 720P latent width (1280 / 16) + + # Text encoder: UMT5 with 4096 dim, typical sequence length ~226 + text_seq_len = 226 # Default max_sequence_length for Wan + text_dim = 4096 + + # Create structured latent (not purely random - simulate real VAE output) + base_pattern = torch.randn( + 1, in_channels, time_dim, height // 4, width // 4, device="cuda", dtype=torch.bfloat16 + ) + hidden_states = F.interpolate( + base_pattern.view(1, in_channels, height // 4, width // 4), + size=(height, width), + mode="bilinear", + align_corners=False, + ).view(1, in_channels, time_dim, height, width) + hidden_states = hidden_states * 2.0 + hidden_states = hidden_states.expand(batch_size, -1, -1, -1, -1).contiguous() + + timestep = torch.tensor([500.0, 500.0], device="cuda", dtype=torch.bfloat16) + + text_base = ( + torch.randn(1, text_seq_len, text_dim, device="cuda", dtype=torch.bfloat16) * 0.1 + ) + encoder_hidden_states = text_base.expand(batch_size, -1, -1).contiguous() + + print( + f"\n[Input] 720P latent: {hidden_states.shape} " + f"(batch={batch_size}, ch={in_channels}, t={time_dim}, h={height}, w={width})" + ) + print(f"[Input] range: [{hidden_states.min():.2f}, {hidden_states.max():.2f}]") + print(f"[Input] encoder_hidden_states: {encoder_hidden_states.shape}") + + # Run forward passes + print("\n[Forward] Running BF16 model...") + with torch.no_grad(): + output_bf16 = pipeline_bf16.transformer( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + print("[Forward] Running FP8 static quant model...") + with torch.no_grad(): + output_fp8_static = pipeline_fp8_static.transformer( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + print("[Forward] Running FP8 dynamic quant model...") + with torch.no_grad(): + output_fp8_dynamic = pipeline_fp8_dynamic.transformer( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + # Compute metrics + output_bf16_float = output_bf16.float() + output_fp8_static_float = output_fp8_static.float() + output_fp8_dynamic_float = output_fp8_dynamic.float() + + # FP8 Static vs BF16 + cos_sim_static = F.cosine_similarity( + output_fp8_static_float.flatten(), output_bf16_float.flatten(), dim=0 + ).item() + mse_static = F.mse_loss(output_fp8_static_float, output_bf16_float).item() + + # FP8 Dynamic vs BF16 + cos_sim_dynamic = F.cosine_similarity( + output_fp8_dynamic_float.flatten(), output_bf16_float.flatten(), dim=0 + ).item() + mse_dynamic = F.mse_loss(output_fp8_dynamic_float, output_bf16_float).item() + + # Output statistics + bf16_range = (output_bf16_float.min().item(), output_bf16_float.max().item()) + fp8_static_range = ( + output_fp8_static_float.min().item(), + output_fp8_static_float.max().item(), + ) + fp8_dynamic_range = ( + output_fp8_dynamic_float.min().item(), + output_fp8_dynamic_float.max().item(), + ) + + print("\n" + "=" * 70) + print("RESULTS: FP8 QUANT vs BF16") + print("=" * 70) + print(f"{'Method':<20} {'Cosine Sim':>12} {'MSE':>12}") + print("-" * 70) + print(f"{'FP8 Static':<20} {cos_sim_static:>12.6f} {mse_static:>12.6f}") + print(f"{'FP8 Dynamic':<20} {cos_sim_dynamic:>12.6f} {mse_dynamic:>12.6f}") + print("-" * 70) + 
print(f"BF16 Output Range: [{bf16_range[0]:.4f}, {bf16_range[1]:.4f}]") + print(f"FP8 Static Output Range: [{fp8_static_range[0]:.4f}, {fp8_static_range[1]:.4f}]") + print(f"FP8 Dynamic Output Range:[{fp8_dynamic_range[0]:.4f}, {fp8_dynamic_range[1]:.4f}]") + print("=" * 70) + + # Assertions + # Static should have high accuracy (calibrated scales) + assert cos_sim_static > 0.99, ( + f"FP8 Static cosine similarity too low: {cos_sim_static:.6f}. Expected >0.99." + ) + # Dynamic may have slightly lower accuracy (no calibration) + assert cos_sim_dynamic > 0.95, ( + f"FP8 Dynamic cosine similarity too low: {cos_sim_dynamic:.6f}. Expected >0.95." + ) + assert not torch.isnan(output_fp8_static).any(), "FP8 static output contains NaN" + assert not torch.isnan(output_fp8_dynamic).any(), "FP8 dynamic output contains NaN" + + print("\n[PASS] FP8 quantization accuracy test passed!") + print(f" - FP8 Static: cos_sim={cos_sim_static:.4f} (>0.99), MSE={mse_static:.6f}") + print(f" - FP8 Dynamic: cos_sim={cos_sim_dynamic:.4f} (>0.95), MSE={mse_dynamic:.6f}") + + # Cleanup + del pipeline_bf16, pipeline_fp8_static, pipeline_fp8_dynamic + torch.cuda.empty_cache() + + def test_nvfp4_static_vs_bf16_accuracy(self, wan22_nvfp4_bf16_checkpoints_exist): + """Test NVFP4 static quantization accuracy against BF16 reference. + + Compares outputs from: + 1. TRT-LLM BF16 model (reference checkpoint) + 2. TRT-LLM NVFP4 static quantized model (pre-quantized checkpoint) + + Uses spatially-correlated inputs that mimic real VAE latent patterns. + NVFP4 (4-bit) has lower precision than FP8 (8-bit), so we use relaxed thresholds. + """ + if not wan22_nvfp4_bf16_checkpoints_exist: + pytest.skip( + f"Both checkpoints required. NVFP4: {CHECKPOINT_PATH_WAN22_NVFP4}, " + f"BF16: {CHECKPOINT_PATH_WAN22_BF16}" + ) + + # Reset dynamo cache to avoid recompile-limit errors from prior + # tests that compiled kernels with different dtypes (e.g. Float32). 
+ torch._dynamo.reset() + + print("\n" + "=" * 70) + print("NVFP4 STATIC QUANT vs BF16 ACCURACY TEST") + print("=" * 70) + + # Load BF16 reference model + print(f"\n[BF16] Loading from {CHECKPOINT_PATH_WAN22_BF16}") + args_bf16 = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_BF16, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline_bf16 = PipelineLoader(args_bf16).load() + + # Load NVFP4 static quantized model (from pre-quantized checkpoint) + print(f"\n[NVFP4 Static] Loading from {CHECKPOINT_PATH_WAN22_NVFP4}") + args_nvfp4_static = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_NVFP4, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline_nvfp4_static = PipelineLoader(args_nvfp4_static).load() + + # Verify NVFP4 static model has quantized weights + static_quant_modules = 0 + for name, module in pipeline_nvfp4_static.transformer.named_modules(): + if isinstance(module, Linear): + if hasattr(module, "weight_scale") and module.weight_scale is not None: + if module.weight_scale.numel() > 1: + static_quant_modules += 1 + print(f"[NVFP4 Static] Quantized Linear modules: {static_quant_modules}") + assert static_quant_modules > 0, "NVFP4 static model should have quantization scales" + + # Create spatially-correlated test inputs (mimics real VAE latent patterns) + # Wan 2.2 TI2V-5B specs: + # - VAE compression: 16x16x4 (spatial x spatial x temporal) + # - Latent channels: 48 (z_dim=48) + # - 720P resolution: 1280x704 -> latent: 80x44 + # - Text encoder: UMT5, max_length=512, dim=4096 + torch.manual_seed(42) + + batch_size = 2 # For CFG (positive + negative) + in_channels = 48 # Wan 2.2 TI2V-5B uses 48 latent channels + time_dim = 1 # Single frame for unit test + + # 720P latent dimensions: 1280/16=80 width, 704/16=44 height + height = 44 # 720P latent height (704 / 16) + width = 80 # 720P latent width (1280 / 16) + + # Text encoder: UMT5 with 4096 dim, typical sequence length ~226 + text_seq_len = 226 # Default max_sequence_length for Wan + text_dim = 4096 + + # Create structured latent (not purely random - simulate real VAE output) + base_pattern = torch.randn( + 1, in_channels, time_dim, height // 4, width // 4, device="cuda", dtype=torch.bfloat16 + ) + hidden_states = F.interpolate( + base_pattern.view(1, in_channels, height // 4, width // 4), + size=(height, width), + mode="bilinear", + align_corners=False, + ).view(1, in_channels, time_dim, height, width) + hidden_states = hidden_states * 2.0 + hidden_states = hidden_states.expand(batch_size, -1, -1, -1, -1).contiguous() + + timestep = torch.tensor([500.0, 500.0], device="cuda", dtype=torch.bfloat16) + + text_base = ( + torch.randn(1, text_seq_len, text_dim, device="cuda", dtype=torch.bfloat16) * 0.1 + ) + encoder_hidden_states = text_base.expand(batch_size, -1, -1).contiguous() + + print( + f"\n[Input] 720P latent: {hidden_states.shape} " + f"(batch={batch_size}, ch={in_channels}, t={time_dim}, h={height}, w={width})" + ) + print(f"[Input] range: [{hidden_states.min():.2f}, {hidden_states.max():.2f}]") + print(f"[Input] encoder_hidden_states: {encoder_hidden_states.shape}") + + # Run forward passes + print("\n[Forward] Running BF16 model...") + with torch.no_grad(): + output_bf16 = pipeline_bf16.transformer( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + print("[Forward] Running NVFP4 static quant model...") + with torch.no_grad(): + output_nvfp4_static = 
pipeline_nvfp4_static.transformer( + hidden_states=hidden_states.clone(), + timestep=timestep, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + + # Compute metrics + output_bf16_float = output_bf16.float() + output_nvfp4_static_float = output_nvfp4_static.float() + + # NVFP4 Static vs BF16 + cos_sim_static = F.cosine_similarity( + output_nvfp4_static_float.flatten(), output_bf16_float.flatten(), dim=0 + ).item() + mse_static = F.mse_loss(output_nvfp4_static_float, output_bf16_float).item() + + # Output statistics + bf16_range = (output_bf16_float.min().item(), output_bf16_float.max().item()) + nvfp4_static_range = ( + output_nvfp4_static_float.min().item(), + output_nvfp4_static_float.max().item(), + ) + + print("\n" + "=" * 70) + print("RESULTS: NVFP4 QUANT vs BF16") + print("=" * 70) + print(f"{'Method':<25} {'Cosine Sim':>12} {'MSE':>12}") + print("-" * 70) + print(f"{'NVFP4 Static':<25} {cos_sim_static:>12.6f} {mse_static:>12.6f}") + print("-" * 70) + print(f"BF16 Output Range: [{bf16_range[0]:.4f}, {bf16_range[1]:.4f}]") + print( + f"NVFP4 Static Range: [{nvfp4_static_range[0]:.4f}, {nvfp4_static_range[1]:.4f}]" + ) + print("=" * 70) + + # Assertions - NVFP4 (4-bit) has lower precision than FP8 (8-bit) + assert cos_sim_static > 0.95, ( + f"NVFP4 Static cosine similarity too low: {cos_sim_static:.6f}. Expected >0.95." + ) + assert not torch.isnan(output_nvfp4_static).any(), "NVFP4 static output contains NaN" + + print("\n[PASS] NVFP4 quantization accuracy test passed!") + print(f" - NVFP4 Static: cos_sim={cos_sim_static:.4f} (>0.95), MSE={mse_static:.6f}") + + # Cleanup + del pipeline_bf16, pipeline_nvfp4_static + torch.cuda.empty_cache() + + +# ============================================================================= +# Wan 2.2 FP8 Pre-quantized Checkpoint Fixtures +# ============================================================================= + + +@pytest.fixture +def wan22_fp8_checkpoint_exists(): + """Check if Wan 2.2 FP8 checkpoint path exists.""" + return CHECKPOINT_PATH_WAN22_FP8 and os.path.exists(CHECKPOINT_PATH_WAN22_FP8) + + +@pytest.fixture +def wan22_bf16_checkpoint_exists(): + """Check if Wan 2.2 BF16 checkpoint path exists.""" + return CHECKPOINT_PATH_WAN22_BF16 and os.path.exists(CHECKPOINT_PATH_WAN22_BF16) + + +@pytest.fixture +def wan22_both_checkpoints_exist(): + """Check if both Wan 2.2 FP8 and BF16 checkpoints exist.""" + fp8_exists = CHECKPOINT_PATH_WAN22_FP8 and os.path.exists(CHECKPOINT_PATH_WAN22_FP8) + bf16_exists = CHECKPOINT_PATH_WAN22_BF16 and os.path.exists(CHECKPOINT_PATH_WAN22_BF16) + return fp8_exists and bf16_exists + + +@pytest.fixture +def wan22_nvfp4_bf16_checkpoints_exist(): + """Check if both NVFP4 and BF16 checkpoints exist.""" + nvfp4_exists = CHECKPOINT_PATH_WAN22_NVFP4 and os.path.exists(CHECKPOINT_PATH_WAN22_NVFP4) + bf16_exists = CHECKPOINT_PATH_WAN22_BF16 and os.path.exists(CHECKPOINT_PATH_WAN22_BF16) + return nvfp4_exists and bf16_exists + + +# ============================================================================= +# Optimization Tests +# ============================================================================= + + +class TestWanOptimizations(unittest.TestCase): + """Runtime optimization correctness tests.""" + + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def setUp(self): + """Set up test fixtures and skip if checkpoint not available.""" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(42) + if not CHECKPOINT_PATH or not 
os.path.exists(CHECKPOINT_PATH): + self.skipTest( + "Checkpoint not available. Set DIFFUSION_MODEL_PATH environment variable." + ) + + def tearDown(self): + """Clean up GPU memory.""" + import gc + + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.synchronize() + + @torch.no_grad() + def test_teacache_multi_step(self): + """Test TeaCache correctness across multiple timesteps (validates caching behavior). + + TeaCache is a runtime optimization that caches transformer outputs when timestep + embeddings change slowly. This test validates: + 1. Correctness against HuggingFace baseline + 2. Actual caching behavior across 20 timesteps + 3. Cache hits occur after warmup phase + """ + if not os.path.exists(CHECKPOINT_PATH): + pytest.skip("Checkpoint not available. Set DIFFUSION_MODEL_PATH.") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path." + ) + + from safetensors.torch import load_file + + print("\n" + "=" * 80) + print("TEACACHE MULTI-STEP TEST (20 steps, validates caching)") + print("=" * 80) + + # Load HuggingFace baseline + print("\n[1/4] Loading HuggingFace baseline...") + args_trtllm = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline_trtllm = PipelineLoader(args_trtllm).load() + config = pipeline_trtllm.transformer.model_config.pretrained_config + + hf_model = ( + HFWanTransformer3DModel( + patch_size=[config.patch_size[0], config.patch_size[1], config.patch_size[2]], + num_attention_heads=config.num_attention_heads, + attention_head_dim=config.attention_head_dim, + in_channels=config.in_channels, + out_channels=config.out_channels, + text_dim=config.text_dim, + freq_dim=config.freq_dim, + ffn_dim=config.ffn_dim, + num_layers=config.num_layers, + cross_attn_norm=config.cross_attn_norm, + qk_norm=config.qk_norm, + eps=config.eps, + ) + .to("cuda", dtype=torch.bfloat16) + .eval() + ) + + # Load weights from checkpoint (auto-discover all shard files) + import glob + + transformer_dir = os.path.join(CHECKPOINT_PATH, "transformer") + shard_pattern = os.path.join(transformer_dir, "diffusion_pytorch_model-*.safetensors") + shard_files = sorted(glob.glob(shard_pattern)) + + checkpoint_weights = {} + for shard_file in shard_files: + if os.path.exists(shard_file): + checkpoint_weights.update(load_file(shard_file)) + hf_model.load_state_dict(checkpoint_weights, strict=True) + print(" āœ“ HuggingFace model loaded") + + # Load TeaCache-enabled pipeline + print("\n[2/4] Loading TeaCache-enabled TRT-LLM pipeline...") + args_teacache = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + teacache=TeaCacheConfig( + enable_teacache=True, + teacache_thresh=0.2, + use_ret_steps=True, + ), + ) + pipeline_teacache = PipelineLoader(args_teacache).load() + transformer_teacache = pipeline_teacache.transformer.eval() + + # Verify TeaCache is enabled + assert hasattr(pipeline_teacache, "cache_backend"), "TeaCache backend not found in pipeline" + assert hasattr(transformer_teacache, "_original_forward"), ( + "TeaCache forward hook not installed" + ) + print(" āœ“ TeaCache enabled and verified") + + # Create FIXED test inputs + print("\n[3/4] Creating fixed test inputs...") + torch.manual_seed(42) + batch_size, num_frames, height, width, seq_len = 1, 1, 64, 64, 128 + + hidden_states = torch.randn( + batch_size, + config.in_channels, + 
num_frames, + height, + width, + dtype=torch.bfloat16, + device="cuda", + ) + encoder_hidden_states = torch.randn( + batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda" + ) + + # Run multi-step inference + print("\n[4/4] Running 20-step inference with TeaCache...") + num_steps = 20 + pipeline_teacache.cache_backend.refresh(num_inference_steps=num_steps) + + # Simulate diffusion timestep schedule (from high to low) + timesteps = torch.linspace(999, 0, num_steps, dtype=torch.long, device="cuda") + + hf_outputs, teacache_outputs = [], [] + + for step_idx, timestep in enumerate(timesteps): + timestep_tensor = timestep.unsqueeze(0) + + # Run HuggingFace + with torch.no_grad(): + hf_out = hf_model( + hidden_states=hidden_states.clone(), + timestep=timestep_tensor, + encoder_hidden_states=encoder_hidden_states.clone(), + return_dict=False, + )[0] + hf_outputs.append(hf_out) + + # Run TeaCache + with torch.no_grad(): + teacache_out = transformer_teacache( + hidden_states=hidden_states.clone(), + timestep=timestep_tensor, + encoder_hidden_states=encoder_hidden_states.clone(), + ) + teacache_outputs.append(teacache_out) + + if step_idx % 5 == 0: + print(f" Step {step_idx}/{num_steps} - timestep: {timestep.item()}") + + # Compare outputs at selected steps + print("\n[Comparison] TeaCache vs HuggingFace at different steps:") + test_steps = [0, num_steps // 2, num_steps - 1] + + for step_idx in test_steps: + hf_float = hf_outputs[step_idx].float() + teacache_float = teacache_outputs[step_idx].float() + + cos_sim = F.cosine_similarity( + teacache_float.flatten(), hf_float.flatten(), dim=0 + ).item() + + print(f"\n Step {step_idx} (timestep={timesteps[step_idx].item()}):") + print(f" Cosine similarity: {cos_sim:.6f}") + + assert cos_sim > 0.99, ( + f"Step {step_idx}: TeaCache cosine similarity {cos_sim:.6f} below threshold 0.99" + ) + + print("\n[PASS] TeaCache multi-step correctness validated!") + print("=" * 80) + + # Cleanup + del pipeline_trtllm, pipeline_teacache, transformer_teacache, hf_model + torch.cuda.empty_cache() + + +# ============================================================================= +# Parallelism Tests +# ============================================================================= + + +class TestWanParallelism(unittest.TestCase): + """Distributed parallelism correctness tests (CFG Parallelism).""" + + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def setUp(self): + """Set up test fixtures and skip if checkpoint not available.""" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(42) + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + self.skipTest( + "Checkpoint not available. Set DIFFUSION_MODEL_PATH environment variable." + ) + + def tearDown(self): + """Clean up GPU memory.""" + import gc + + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.synchronize() + + def test_cfg_2gpu_correctness(self): + """Test CFG Parallelism (cfg_size=2) correctness against standard CFG baseline.""" + num_gpus = torch.cuda.device_count() + if num_gpus < 2: + pytest.skip("CFG parallel test requires at least 2 GPUs") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path." 
+ ) + + print("\n" + "=" * 80) + print("CFG PARALLELISM (cfg_size=2) CORRECTNESS TEST") + print("=" * 80) + + # Load standard CFG baseline on GPU 0 + print("\n[1/3] Loading standard CFG baseline (cfg_size=1) on GPU 0...") + args_baseline = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda:0", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + parallel=ParallelConfig(dit_cfg_size=1), # Standard CFG (no parallel) + ) + pipeline_baseline = PipelineLoader(args_baseline).load() + config = pipeline_baseline.transformer.model_config.pretrained_config + + # Reset torch compile state to avoid BFloat16 dtype issues + torch._dynamo.reset() + + # Create FIXED test inputs + print("\n[2/3] Creating fixed test inputs...") + torch.manual_seed(42) + batch_size, num_frames, height, width, seq_len = 1, 1, 64, 64, 128 + + latents = torch.randn( + batch_size, + config.in_channels, + num_frames, + height, + width, + dtype=torch.bfloat16, + device="cuda:0", + ) + timestep = torch.tensor([500], dtype=torch.long, device="cuda:0") + prompt_embeds = torch.randn( + batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0" + ) + neg_prompt_embeds = torch.randn( + batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0" + ) + + # Setup standard CFG config + cfg_config_baseline = pipeline_baseline._setup_cfg_config( + guidance_scale=5.0, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + ) + + print(" Baseline CFG config:") + print(f" enabled: {cfg_config_baseline['enabled']}") + print(f" cfg_size: {cfg_config_baseline['cfg_size']}") + + # Verify standard CFG is NOT parallel + assert not cfg_config_baseline["enabled"], "Baseline should not use CFG parallel" + assert cfg_config_baseline["cfg_size"] == 1, "Baseline cfg_size should be 1" + + # Run standard CFG denoising step + def forward_fn( + latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors + ): + return pipeline_baseline.transformer( # noqa: F821 + hidden_states=latents, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + ) + + with torch.no_grad(): + baseline_output, _, _, _ = pipeline_baseline._denoise_step_standard( + latents=latents.clone(), + extra_stream_latents={}, + timestep=timestep, + prompt_embeds=cfg_config_baseline["prompt_embeds"], + forward_fn=forward_fn, + guidance_scale=5.0, + guidance_rescale=0.0, + local_extras={}, + ) + + print(f" āœ“ Baseline output shape: {baseline_output.shape}") + print(f" āœ“ Baseline range: [{baseline_output.min():.4f}, {baseline_output.max():.4f}]") + + # Cleanup baseline to free memory for CFG workers + del pipeline_baseline + torch.cuda.empty_cache() + + # Run CFG parallel (cfg_size=2) in distributed processes + print("\n[3/3] Running CFG Parallelism (cfg_size=2) across 2 GPUs...") + cfg_size = 2 + + inputs_cpu = [ + prompt_embeds.cpu(), + neg_prompt_embeds.cpu(), + latents.cpu(), + timestep.cpu(), + ] + + manager = mp.Manager() + return_dict = manager.dict() + + # Spawn CFG workers + mp.spawn( + _run_cfg_worker, + args=(cfg_size, CHECKPOINT_PATH, inputs_cpu, return_dict), + nprocs=cfg_size, + join=True, + ) + + # Get CFG parallel output from rank 0 + cfg_parallel_output = return_dict["output"].to("cuda:0") + print(f" āœ“ CFG parallel output shape: {cfg_parallel_output.shape}") + + # Compare outputs + print("\n[Comparison] CFG Parallel vs Standard CFG:") + baseline_float = baseline_output.float() + cfg_parallel_float = cfg_parallel_output.float() + + cos_sim = F.cosine_similarity( + 
cfg_parallel_float.flatten(), baseline_float.flatten(), dim=0 + ).item() + + max_diff = torch.max(torch.abs(cfg_parallel_float - baseline_float)).item() + mean_diff = torch.mean(torch.abs(cfg_parallel_float - baseline_float)).item() + + print(f" Cosine similarity: {cos_sim:.6f}") + print(f" Max absolute difference: {max_diff:.6f}") + print(f" Mean absolute difference: {mean_diff:.6f}") + print( + f" CFG parallel range: [{cfg_parallel_float.min():.4f}, {cfg_parallel_float.max():.4f}]" + ) + print(f" Baseline range: [{baseline_float.min():.4f}, {baseline_float.max():.4f}]") + + assert cos_sim > 0.99, ( + f"CFG parallel cosine similarity {cos_sim:.6f} below threshold 0.99. " + f"CFG Parallelism does not match standard CFG baseline." + ) + + print("\n[PASS] CFG Parallelism (cfg_size=2) validated!") + print(" āœ“ CFG parallel produces same output as standard CFG") + print(" āœ“ Prompt splitting and all-gather working correctly") + print("=" * 80) + + torch.cuda.empty_cache() + + +# ============================================================================= +# Combined Optimizations Tests +# ============================================================================= + + +class TestWanCombinedOptimizations(unittest.TestCase): + """Test all optimizations combined: FP8 + TeaCache + TRTLLM attention + CFG Parallelism.""" + + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def setUp(self): + """Set up test fixtures and skip if checkpoint not available.""" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(42) + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + self.skipTest( + "Checkpoint not available. Set DIFFUSION_MODEL_PATH environment variable." + ) + + def tearDown(self): + """Clean up GPU memory.""" + import gc + + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.synchronize() + + def test_all_optimizations_combined(self): + """Test FP8 + TeaCache + TRTLLM attention + CFG=2 combined correctness. + + This test validates that all optimizations work together correctly: + 1. FP8 per-tensor quantization for reduced memory/compute + 2. TeaCache for caching repeated computations + 3. TRTLLM attention backend for optimized attention kernels + 4. CFG Parallelism (cfg_size=2) for distributed CFG computation + + We compare against a standard CFG baseline with relaxed thresholds since multiple + optimizations compound numerical differences. + """ + num_gpus = torch.cuda.device_count() + if num_gpus < 2: + pytest.skip("Combined optimization test requires at least 2 GPUs for CFG parallel") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path." 
+ ) + + print("\n" + "=" * 80) + print("ALL OPTIMIZATIONS COMBINED TEST") + print("FP8 + TeaCache + TRTLLM Attention + CFG Parallelism (cfg_size=2)") + print("=" * 80) + + # Load baseline on GPU 0 (no optimizations, standard CFG) + print("\n[1/3] Loading baseline on GPU 0 (standard CFG, no optimizations)...") + args_baseline = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda:0", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + parallel=ParallelConfig(dit_cfg_size=1), # Standard CFG + ) + pipeline_baseline = PipelineLoader(args_baseline).load() + config = pipeline_baseline.transformer.model_config.pretrained_config + + # Reset torch compile state to avoid BFloat16 dtype issues + torch._dynamo.reset() + + # Create FIXED test inputs + print("\n[2/3] Creating fixed test inputs...") + torch.manual_seed(42) + batch_size, num_frames, height, width, seq_len = 1, 1, 64, 64, 128 + + latents = torch.randn( + batch_size, + config.in_channels, + num_frames, + height, + width, + dtype=torch.bfloat16, + device="cuda:0", + ) + timestep = torch.tensor([500], dtype=torch.long, device="cuda:0") + prompt_embeds = torch.randn( + batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0" + ) + neg_prompt_embeds = torch.randn( + batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0" + ) + + # Setup standard CFG config + cfg_config_baseline = pipeline_baseline._setup_cfg_config( + guidance_scale=5.0, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + ) + + # Run baseline standard CFG + print(" Running baseline (standard CFG)...") + + def forward_fn_baseline( + latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors + ): + return pipeline_baseline.transformer( # noqa: F821 + hidden_states=latents, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + ) + + with torch.no_grad(): + baseline_output, _, _, _ = pipeline_baseline._denoise_step_standard( + latents=latents.clone(), + extra_stream_latents={}, + timestep=timestep, + prompt_embeds=cfg_config_baseline["prompt_embeds"], + forward_fn=forward_fn_baseline, + guidance_scale=5.0, + guidance_rescale=0.0, + local_extras={}, + ) + + print(f" āœ“ Baseline output shape: {baseline_output.shape}") + print(f" āœ“ Baseline range: [{baseline_output.min():.4f}, {baseline_output.max():.4f}]") + + # Cleanup baseline to free memory for workers + del pipeline_baseline + torch.cuda.empty_cache() + + # Run with ALL optimizations combined in distributed processes + print("\n[3/3] Running with ALL optimizations (FP8 + TeaCache + TRTLLM + CFG=2)...") + cfg_size = 2 + + inputs_cpu = [ + prompt_embeds.cpu(), + neg_prompt_embeds.cpu(), + latents.cpu(), + timestep.cpu(), + ] + + manager = mp.Manager() + return_dict = manager.dict() + + # Spawn workers + mp.spawn( + _run_all_optimizations_worker, + args=(cfg_size, CHECKPOINT_PATH, inputs_cpu, return_dict), + nprocs=cfg_size, + join=True, + ) + + # Get combined optimization output + combined_output = return_dict["output"].to("cuda:0") + + # Compare outputs with RELAXED thresholds (multiple optimizations compound errors) + print("\n[Comparison] Combined Optimizations vs Baseline:") + baseline_float = baseline_output.float() + combined_float = combined_output.float() + + cos_sim = F.cosine_similarity( + combined_float.flatten(), baseline_float.flatten(), dim=0 + ).item() + + max_diff = torch.max(torch.abs(combined_float - baseline_float)).item() + mean_diff = torch.mean(torch.abs(combined_float - baseline_float)).item() + + 
print(f" Cosine similarity: {cos_sim:.6f}") + print(f" Max absolute difference: {max_diff:.6f}") + print(f" Mean absolute difference: {mean_diff:.6f}") + print(f" Combined range: [{combined_float.min():.4f}, {combined_float.max():.4f}]") + print(f" Baseline range: [{baseline_float.min():.4f}, {baseline_float.max():.4f}]") + + # Relaxed threshold: cos_sim > 0.90 (compounded numerical differences from 4 optimizations) + assert cos_sim > 0.90, ( + f"Combined optimization cosine similarity {cos_sim:.6f} below threshold 0.90. " + f"This suggests an issue with optimization interactions." + ) + + print("\n[PASS] All optimizations (FP8 + TeaCache + TRTLLM + CFG) validated!") + print(" āœ“ All optimizations work correctly together") + print(" āœ“ Numerical accuracy within acceptable tolerance") + print("=" * 80) + + torch.cuda.empty_cache() + + +# ============================================================================= +# Two-Stage Transformer Tests (Wan 2.2) +# ============================================================================= + + +class TestWanTwoStageTransformer(unittest.TestCase): + """Test two-stage transformer support for Wan 2.2 T2V.""" + + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def setUp(self): + """Set up test fixtures and skip if checkpoint not available.""" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(42) + if not CHECKPOINT_PATH_WAN22_T2V or not os.path.exists(CHECKPOINT_PATH_WAN22_T2V): + self.skipTest( + "Wan 2.2 T2V checkpoint not available. Set DIFFUSION_MODEL_PATH_WAN22_T2V." + ) + + def tearDown(self): + """Clean up GPU memory.""" + import gc + + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.synchronize() + + def test_two_stage_pipeline_initialization(self): + """Test that Wan 2.2 pipeline initializes with two transformers.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. Set DIFFUSION_MODEL_PATH_WAN22_T2V." + ) + print("\n" + "=" * 80) + print("WAN 2.2 TWO-STAGE PIPELINE INITIALIZATION TEST") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + try: + # Check if this is a two-stage model + has_boundary_ratio = pipeline.boundary_ratio is not None + has_transformer_2 = pipeline.transformer_2 is not None + + print(f"\n[Pipeline] boundary_ratio: {pipeline.boundary_ratio}") + print(f"[Pipeline] transformer: {pipeline.transformer is not None}") + print(f"[Pipeline] transformer_2: {has_transformer_2}") + + if not has_boundary_ratio: + pytest.skip("Checkpoint is not Wan 2.2 (no boundary_ratio)") + + # Verify two-stage configuration + assert pipeline.transformer is not None, "Transformer (high-noise) should exist" + assert has_transformer_2, "Transformer_2 (low-noise) should exist for Wan 2.2" + assert 0.0 < pipeline.boundary_ratio < 1.0, ( + f"boundary_ratio should be in (0, 1), got {pipeline.boundary_ratio}" + ) + + print("\n[PASS] āœ“ Wan 2.2 two-stage pipeline initialized correctly") + print(f" āœ“ boundary_ratio: {pipeline.boundary_ratio}") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + def test_two_stage_transformer_selection_logic(self): + """Test that correct transformer is selected based on timestep.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. 
Set DIFFUSION_MODEL_PATH_WAN22_T2V." + ) + print("\n" + "=" * 80) + print("WAN 2.2 TRANSFORMER SELECTION LOGIC TEST") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + try: + # Skip if not two-stage + if pipeline.boundary_ratio is None or pipeline.transformer_2 is None: + pytest.skip("Checkpoint is not Wan 2.2 (two-stage)") + + # Calculate boundary timestep + num_train_timesteps = 1000 # Default for Wan models + boundary_timestep = pipeline.boundary_ratio * num_train_timesteps + + print(f"\n[Selection Logic] boundary_ratio: {pipeline.boundary_ratio}") + print(f"[Selection Logic] boundary_timestep: {boundary_timestep:.1f}") + + # Create mock tensors for testing + batch_size, num_frames, height, width = 1, 1, 64, 64 + seq_len = 128 + # Use standard Wan model dimensions + in_channels = 16 # Standard for Wan models + text_dim = 4096 # Standard for Wan models + + latents = torch.randn( + batch_size, + in_channels, + num_frames, + height, + width, + dtype=torch.bfloat16, + device=self.DEVICE, + ) + encoder_hidden_states = torch.randn( + batch_size, seq_len, text_dim, dtype=torch.bfloat16, device=self.DEVICE + ) + + # Test high-noise timestep (should use transformer) + high_noise_t = torch.tensor([900.0], device=self.DEVICE) + print(f"\n[High-Noise] timestep: {high_noise_t.item():.1f}") + print(f"[High-Noise] {high_noise_t.item():.1f} >= {boundary_timestep:.1f}: True") + print("[High-Noise] Should use: transformer (high-noise)") + + with torch.no_grad(): + high_noise_output = pipeline.transformer( + hidden_states=latents, + timestep=high_noise_t, + encoder_hidden_states=encoder_hidden_states, + ) + print(f"[High-Noise] āœ“ Output shape: {high_noise_output.shape}") + + # Test low-noise timestep (should use transformer_2) + low_noise_t = torch.tensor([200.0], device=self.DEVICE) + print(f"\n[Low-Noise] timestep: {low_noise_t.item():.1f}") + print(f"[Low-Noise] {low_noise_t.item():.1f} < {boundary_timestep:.1f}: True") + print("[Low-Noise] Should use: transformer_2 (low-noise)") + + with torch.no_grad(): + low_noise_output = pipeline.transformer_2( + hidden_states=latents, + timestep=low_noise_t, + encoder_hidden_states=encoder_hidden_states, + ) + print(f"[Low-Noise] āœ“ Output shape: {low_noise_output.shape}") + + # Verify outputs have same shape but different values + assert high_noise_output.shape == low_noise_output.shape + assert not torch.allclose(high_noise_output, low_noise_output, atol=1e-3), ( + "Different transformers should produce different outputs" + ) + + print("\n[PASS] āœ“ Transformer selection logic working correctly") + print(" āœ“ High-noise stage uses transformer") + print(" āœ“ Low-noise stage uses transformer_2") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + def test_two_stage_with_custom_boundary_ratio(self): + """Test overriding boundary_ratio at inference time.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. Set DIFFUSION_MODEL_PATH_WAN22_T2V." 
+ ) + print("\n" + "=" * 80) + print("WAN 2.2 CUSTOM BOUNDARY_RATIO TEST") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + try: + # Skip if not two-stage + if pipeline.boundary_ratio is None or pipeline.transformer_2 is None: + pytest.skip("Checkpoint is not Wan 2.2 (two-stage)") + + model_boundary_ratio = pipeline.boundary_ratio + custom_boundary_ratio = 0.3 # Override value + + print(f"\n[Custom Boundary] Model default: {model_boundary_ratio}") + print(f"[Custom Boundary] Custom override: {custom_boundary_ratio}") + + # Verify custom value would change boundary timestep + num_train_timesteps = 1000 + model_boundary_t = model_boundary_ratio * num_train_timesteps + custom_boundary_t = custom_boundary_ratio * num_train_timesteps + + print(f"[Custom Boundary] Model boundary_timestep: {model_boundary_t:.1f}") + print(f"[Custom Boundary] Custom boundary_timestep: {custom_boundary_t:.1f}") + print( + f"[Custom Boundary] Difference: {abs(model_boundary_t - custom_boundary_t):.1f} timesteps" + ) + + assert custom_boundary_ratio != model_boundary_ratio + print("\n[PASS] āœ“ Custom boundary_ratio can override model default") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + def test_two_stage_guidance_scale_2(self): + """Test two-stage denoising with different guidance_scale_2 values.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. Set DIFFUSION_MODEL_PATH_WAN22_T2V." + ) + print("\n" + "=" * 80) + print("WAN 2.2 GUIDANCE_SCALE_2 SUPPORT TEST") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + ) + pipeline = PipelineLoader(args).load() + + try: + # Skip if not two-stage + if pipeline.boundary_ratio is None or pipeline.transformer_2 is None: + pytest.skip("Checkpoint is not Wan 2.2 (two-stage)") + + print("\n[Guidance Scale 2] Two-stage model supports separate guidance scales:") + print("[Guidance Scale 2] High-noise stage: uses guidance_scale (e.g., 4.0)") + print("[Guidance Scale 2] Low-noise stage: uses guidance_scale_2 (e.g., 2.0, 3.0, 4.0)") + print("\n[PASS] āœ“ Different guidance scales supported for two stages") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + def test_two_stage_with_teacache_both_transformers(self): + """Test that TeaCache is enabled for both transformers in two-stage mode.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. Set DIFFUSION_MODEL_PATH_WAN22_T2V." 
+ ) + print("\n" + "=" * 80) + print("WAN 2.2 TWO-STAGE + TEACACHE TEST") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + teacache=TeaCacheConfig( + enable_teacache=True, + teacache_thresh=0.2, + use_ret_steps=True, + ), + ) + pipeline = PipelineLoader(args).load() + + try: + # Skip if not two-stage + if pipeline.boundary_ratio is None or pipeline.transformer_2 is None: + pytest.skip("Checkpoint is not Wan 2.2 (two-stage)") + + # Verify TeaCache on transformer (high-noise) + assert hasattr(pipeline, "transformer_cache_backend"), ( + "Pipeline missing transformer_cache_backend" + ) + assert pipeline.transformer_cache_backend is not None + print("\n[TeaCache] āœ“ Transformer (high-noise): TeaCache enabled") + + # Verify TeaCache on transformer_2 (low-noise) + assert hasattr(pipeline, "transformer_2_cache_backend"), ( + "Pipeline missing transformer_2_cache_backend" + ) + assert pipeline.transformer_2_cache_backend is not None + print("[TeaCache] āœ“ Transformer_2 (low-noise): TeaCache enabled") + + # Verify both have get_stats method + assert hasattr(pipeline.transformer_cache_backend, "get_stats") + assert hasattr(pipeline.transformer_2_cache_backend, "get_stats") + print("[TeaCache] āœ“ Both transformers support statistics logging") + + print("\n[PASS] āœ“ TeaCache enabled for BOTH transformers") + print(" āœ“ Low-noise stage benefits MORE from TeaCache") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + def test_two_stage_with_fp8_quantization(self): + """Test two-stage with FP8 quantization on both transformers.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. Set DIFFUSION_MODEL_PATH_WAN22_T2V." + ) + print("\n" + "=" * 80) + print("WAN 2.2 TWO-STAGE + FP8 QUANTIZATION TEST") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + quant_config={"quant_algo": "FP8", "dynamic": True}, + ) + pipeline = PipelineLoader(args).load() + + try: + # Skip if not two-stage + if pipeline.boundary_ratio is None or pipeline.transformer_2 is None: + pytest.skip("Checkpoint is not Wan 2.2 (two-stage)") + + # Verify FP8 in transformer (high-noise) + found_fp8_t1 = False + for name, param in pipeline.transformer.named_parameters(): + if "blocks.0" in name and "weight" in name and param.dtype == torch.float8_e4m3fn: + found_fp8_t1 = True + print(f"\n[FP8] āœ“ Transformer: Found FP8 weight in {name}") + break + assert found_fp8_t1, "No FP8 weights found in transformer" + + # Verify FP8 in transformer_2 (low-noise) + found_fp8_t2 = False + for name, param in pipeline.transformer_2.named_parameters(): + if "blocks.0" in name and "weight" in name and param.dtype == torch.float8_e4m3fn: + found_fp8_t2 = True + print(f"[FP8] āœ“ Transformer_2: Found FP8 weight in {name}") + break + assert found_fp8_t2, "No FP8 weights found in transformer_2" + + print("\n[PASS] āœ“ FP8 quantization enabled for BOTH transformers") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + def test_two_stage_with_trtllm_attention(self): + """Test two-stage with TRTLLM attention backend on both transformers.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. Set DIFFUSION_MODEL_PATH_WAN22_T2V." 
+ ) + print("\n" + "=" * 80) + print("WAN 2.2 TWO-STAGE + TRTLLM ATTENTION TEST") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + attention=AttentionConfig(backend="TRTLLM"), + ) + pipeline = PipelineLoader(args).load() + + try: + # Skip if not two-stage + if pipeline.boundary_ratio is None or pipeline.transformer_2 is None: + pytest.skip("Checkpoint is not Wan 2.2 (two-stage)") + + # Verify TRTLLM attention on transformer (high-noise) + first_block_t1 = pipeline.transformer.blocks[0] + attn1_backend_t1 = first_block_t1.attn1.attn_backend + attn2_backend_t1 = first_block_t1.attn2.attn_backend + + assert attn1_backend_t1 == "TRTLLM", ( + f"Expected TRTLLM for transformer self-attn, got {attn1_backend_t1}" + ) + assert attn2_backend_t1 == "VANILLA", ( + f"Expected VANILLA for transformer cross-attn, got {attn2_backend_t1}" + ) + + print("\n[Attention] Transformer (high-noise):") + print(f" āœ“ Self-attention: {attn1_backend_t1}") + print(f" āœ“ Cross-attention: {attn2_backend_t1}") + + # Verify TRTLLM attention on transformer_2 (low-noise) + first_block_t2 = pipeline.transformer_2.blocks[0] + attn1_backend_t2 = first_block_t2.attn1.attn_backend + attn2_backend_t2 = first_block_t2.attn2.attn_backend + + assert attn1_backend_t2 == "TRTLLM", ( + f"Expected TRTLLM for transformer_2 self-attn, got {attn1_backend_t2}" + ) + assert attn2_backend_t2 == "VANILLA", ( + f"Expected VANILLA for transformer_2 cross-attn, got {attn2_backend_t2}" + ) + + print("[Attention] Transformer_2 (low-noise):") + print(f" āœ“ Self-attention: {attn1_backend_t2}") + print(f" āœ“ Cross-attention: {attn2_backend_t2}") + + print("\n[PASS] āœ“ TRTLLM attention enabled for BOTH transformers") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + def test_two_stage_all_optimizations(self): + """Test two-stage with ALL optimizations: FP8 + TeaCache + TRTLLM.""" + if not is_wan22_checkpoint(): + pytest.skip( + "This test requires Wan 2.2 T2V checkpoint. Set DIFFUSION_MODEL_PATH_WAN22_T2V." 
+ ) + print("\n" + "=" * 80) + print("WAN 2.2 TWO-STAGE + ALL OPTIMIZATIONS TEST") + print("FP8 + TeaCache + TRTLLM Attention") + print("=" * 80) + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH_WAN22_T2V, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_COMPONENTS, + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + attention=AttentionConfig(backend="TRTLLM"), + teacache=TeaCacheConfig( + enable_teacache=True, + teacache_thresh=0.2, + use_ret_steps=True, + ), + ) + pipeline = PipelineLoader(args).load() + + try: + # Skip if not two-stage + if pipeline.boundary_ratio is None or pipeline.transformer_2 is None: + pytest.skip("Checkpoint is not Wan 2.2 (two-stage)") + + optimizations = [] + + # Check FP8 + for name, param in pipeline.transformer.named_parameters(): + if "blocks.0" in name and "weight" in name and param.dtype == torch.float8_e4m3fn: + optimizations.append("FP8") + break + + # Check TRTLLM + if pipeline.transformer.blocks[0].attn1.attn_backend == "TRTLLM": + optimizations.append("TRTLLM") + + # Check TeaCache + if ( + hasattr(pipeline, "transformer_cache_backend") + and pipeline.transformer_cache_backend is not None + ): + optimizations.append("TeaCache") + + # Check two-stage + optimizations.append("Two-Stage") + + print(f"\n[All Optimizations] Enabled: {', '.join(optimizations)}") + assert len(optimizations) == 4, ( + f"Expected 4 optimizations, got {len(optimizations)}: {optimizations}" + ) + + # Verify all optimizations on transformer_2 as well + for name, param in pipeline.transformer_2.named_parameters(): + if "blocks.0" in name and "weight" in name and param.dtype == torch.float8_e4m3fn: + print("[All Optimizations] āœ“ Transformer_2: FP8 enabled") + break + + if pipeline.transformer_2.blocks[0].attn1.attn_backend == "TRTLLM": + print("[All Optimizations] āœ“ Transformer_2: TRTLLM enabled") + + if ( + hasattr(pipeline, "transformer_2_cache_backend") + and pipeline.transformer_2_cache_backend is not None + ): + print("[All Optimizations] āœ“ Transformer_2: TeaCache enabled") + + print("\n[PASS] āœ“ All optimizations working on BOTH transformers") + print("=" * 80) + + finally: + del pipeline + import gc + + gc.collect() + torch.cuda.empty_cache() + + +# ============================================================================= +# Robustness Tests +# ============================================================================= + + +class TestWanRobustness(unittest.TestCase): + """Error handling and edge case tests.""" + + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def setUp(self): + """Set up test fixtures and skip if checkpoint not available.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + self.skipTest( + "Checkpoint not available. Set DIFFUSION_MODEL_PATH environment variable." 
+        )
+
+    def tearDown(self):
+        """Clean up GPU memory."""
+        import gc
+
+        gc.collect()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            torch.cuda.synchronize()
+
+    def test_invalid_quant_config(self):
+        """Test that invalid quantization config raises appropriate error."""
+        with pytest.raises((ValueError, KeyError)):
+            args = DiffusionArgs(
+                checkpoint_path=CHECKPOINT_PATH,
+                device="cuda",
+                dtype="bfloat16",
+                skip_components=SKIP_COMPONENTS,
+                quant_config={"quant_algo": "INVALID_ALGO"},
+            )
+            pipeline = PipelineLoader(args).load()  # noqa: F841
+
+        print("\n[Error Handling] āœ“ Invalid quant_algo raises error as expected")
+
+
+if __name__ == "__main__":
+    unittest.main(verbosity=2)
diff --git a/tests/unittest/_torch/visual_gen/test_wan_i2v.py b/tests/unittest/_torch/visual_gen/test_wan_i2v.py
new file mode 100644
index 0000000000..34d232893f
--- /dev/null
+++ b/tests/unittest/_torch/visual_gen/test_wan_i2v.py
@@ -0,0 +1,1491 @@
+"""Optimized tests for Wan Image-to-Video (I2V) pipeline with module-scoped fixtures.
+
+Run with:
+    pytest tests/unittest/_torch/visual_gen/test_wan_i2v.py -v
+
+    # With real checkpoint:
+    DIFFUSION_MODEL_PATH=/path/to/Wan-I2V-Diffusers pytest tests/unittest/_torch/visual_gen/test_wan_i2v.py -v
+
+    # Run only smoke tests:
+    pytest tests/unittest/_torch/visual_gen/test_wan_i2v.py -v -m "unit and smoke"
+
+    # Run only Wan 2.1 tests:
+    pytest tests/unittest/_torch/visual_gen/test_wan_i2v.py -v -m "wan21"
+
+    # Run only Wan 2.2 tests:
+    pytest tests/unittest/_torch/visual_gen/test_wan_i2v.py -v -m "wan22"
+"""
+
+import os
+
+os.environ["TLLM_DISABLE_MPI"] = "1"
+
+import unittest
+from pathlib import Path
+from types import SimpleNamespace
+
+import pytest
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+import torch.nn.functional as F
+from PIL import Image
+
+from tensorrt_llm._torch.visual_gen.config import (
+    AttentionConfig,
+    DiffusionArgs,
+    DiffusionModelConfig,
+    ParallelConfig,
+    TeaCacheConfig,
+)
+from tensorrt_llm._torch.visual_gen.models.wan.pipeline_wan_i2v import WanImageToVideoPipeline
+from tensorrt_llm._torch.visual_gen.pipeline_loader import PipelineLoader
+from tensorrt_llm.models.modeling_utils import QuantConfig
+from tensorrt_llm.quantization.mode import QuantAlgo
+
+
+@pytest.fixture(autouse=True, scope="module")
+def _cleanup_mpi_env():
+    """Clean up the TLLM_DISABLE_MPI env var after tests complete."""
+    yield
+    os.environ.pop("TLLM_DISABLE_MPI", None)
+
+
+def _llm_models_root() -> str:
+    """Return the LLM models root, preferring LLM_MODELS_ROOT if set; assert if no valid root is found."""
+    root = Path("/home/scratch.trt_llm_data_ci/llm-models/")
+    if "LLM_MODELS_ROOT" in os.environ:
+        root = Path(os.environ["LLM_MODELS_ROOT"])
+    if not root.exists():
+        root = Path("/scratch.trt_llm_data/llm-models/")
+    assert root.exists(), (
+        "Set the LLM_MODELS_ROOT env var or make scratch.trt_llm_data accessible to run this test"
+    )
+    return str(root)
+
+
+# Checkpoint paths
+CHECKPOINT_PATH = os.environ.get(
+    "DIFFUSION_MODEL_PATH",
+    os.path.join(_llm_models_root(), "Wan2.2-I2V-A14B-Diffusers"),
+)
+
+# Skip components for different test scenarios
+SKIP_MINIMAL = ["text_encoder", "vae", "tokenizer", "scheduler", "image_encoder", "image_processor"]
+SKIP_WITH_IMAGE = ["text_encoder", "vae", "tokenizer", "scheduler"]
+
+
+# ============================================================================
+# VERSION DETECTION HELPERS
+# ============================================================================
+
+
+def is_wan21_checkpoint() -> bool:
+    """Check if
DIFFUSION_MODEL_PATH is Wan 2.1 (contains '2.1' in path).""" + return "2.1" in CHECKPOINT_PATH + + +def is_wan22_checkpoint() -> bool: + """Check if DIFFUSION_MODEL_PATH is Wan 2.2 (contains '2.2' in path).""" + return "2.2" in CHECKPOINT_PATH + + +# ============================================================================ +# MODULE-SCOPED FIXTURES +# ============================================================================ + + +@pytest.fixture(scope="module") +def wan21_i2v_pipeline_bf16(): + """Load Wan 2.1 I2V BF16 pipeline once per module.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("I2V checkpoint not available") + if not is_wan21_checkpoint(): + pytest.skip("This fixture requires Wan 2.1 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + ) + pipeline = PipelineLoader(args).load() + yield pipeline + del pipeline + torch.cuda.empty_cache() + + +@pytest.fixture(scope="module") +def wan21_i2v_pipeline_fp8(): + """Load Wan 2.1 I2V FP8 pipeline once per module.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("I2V checkpoint not available") + if not is_wan21_checkpoint(): + pytest.skip("This fixture requires Wan 2.1 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + quant_config={"quant_algo": "FP8", "dynamic": True}, + ) + pipeline = PipelineLoader(args).load() + yield pipeline + del pipeline + torch.cuda.empty_cache() + + +@pytest.fixture(scope="module") +def wan21_i2v_pipeline_fp8_blockwise(): + """Load Wan 2.1 I2V FP8 blockwise pipeline once per module.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("I2V checkpoint not available") + if not is_wan21_checkpoint(): + pytest.skip("This fixture requires Wan 2.1 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + ) + pipeline = PipelineLoader(args).load() + yield pipeline + del pipeline + torch.cuda.empty_cache() + + +@pytest.fixture(scope="module") +def wan21_i2v_pipeline_with_image_encoder(): + """Load Wan 2.1 I2V pipeline with image encoder once per module.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("I2V checkpoint not available") + if not is_wan21_checkpoint(): + pytest.skip("This fixture requires Wan 2.1 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_WITH_IMAGE, + ) + pipeline = PipelineLoader(args).load() + yield pipeline + del pipeline + torch.cuda.empty_cache() + + +@pytest.fixture(scope="module") +def wan22_i2v_pipeline_bf16(): + """Load Wan 2.2 I2V BF16 pipeline once per module.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("I2V checkpoint not available") + if not is_wan22_checkpoint(): + pytest.skip("This fixture requires Wan 2.2 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + ) + pipeline = PipelineLoader(args).load() + yield pipeline + del pipeline + torch.cuda.empty_cache() + + +@pytest.fixture(scope="module") +def wan22_i2v_pipeline_fp8(): + """Load Wan 2.2 I2V FP8 pipeline once per module.""" + if not CHECKPOINT_PATH or 
not os.path.exists(CHECKPOINT_PATH): + pytest.skip("I2V checkpoint not available") + if not is_wan22_checkpoint(): + pytest.skip("This fixture requires Wan 2.2 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + ) + pipeline = PipelineLoader(args).load() + yield pipeline + del pipeline + torch.cuda.empty_cache() + + +@pytest.fixture(scope="module") +def test_image(): + """Create a shared test image for I2V tests.""" + import numpy as np + + img_array = np.zeros((480, 832, 3), dtype=np.uint8) + for i in range(480): + img_array[i, :, 0] = int((i / 480) * 255) + img_array[i, :, 1] = 128 + return Image.fromarray(img_array, mode="RGB") + + +@pytest.fixture(autouse=True) +def cleanup_gpu(): + """GPU cleanup fixture.""" + import gc + + gc.collect() + torch.cuda.empty_cache() + yield + gc.collect() + torch.cuda.empty_cache() + + +# ============================================================================ +# DISTRIBUTED HELPERS (for CFG Parallelism tests) +# ============================================================================ + + +def setup_distributed(rank, world_size, backend="nccl"): + """Initialize distributed process group for multi-GPU tests.""" + os.environ["MASTER_ADDR"] = "localhost" + os.environ["MASTER_PORT"] = "12356" # Different port from T2V tests + os.environ["RANK"] = str(rank) + os.environ["WORLD_SIZE"] = str(world_size) + + dist.init_process_group(backend=backend, rank=rank, world_size=world_size) + torch.cuda.set_device(rank) + + +def cleanup_distributed(): + """Clean up distributed process group.""" + if dist.is_initialized(): + dist.destroy_process_group() + + +def _run_cfg_worker_i2v(rank, world_size, checkpoint_path, inputs_list, return_dict): + """Worker function for I2V CFG Parallelism multi-GPU test.""" + try: + setup_distributed(rank, world_size) + + from tensorrt_llm._torch.visual_gen.config import DiffusionArgs, ParallelConfig + from tensorrt_llm._torch.visual_gen.pipeline_loader import PipelineLoader + + # Load I2V pipeline with CFG parallel + args = DiffusionArgs( + checkpoint_path=checkpoint_path, + device=f"cuda:{rank}", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + parallel=ParallelConfig(dit_cfg_size=world_size), + ) + pipeline = PipelineLoader(args).load() + + # Verify CFG parallel configuration + assert pipeline.model_config.parallel.dit_cfg_size == world_size, ( + f"Expected cfg_size={world_size}, got {pipeline.model_config.parallel.dit_cfg_size}" + ) + + # Load inputs on this GPU + prompt_embeds = inputs_list[0].to(f"cuda:{rank}") + neg_prompt_embeds = inputs_list[1].to(f"cuda:{rank}") + latents = inputs_list[2].to(f"cuda:{rank}") + timestep = inputs_list[3].to(f"cuda:{rank}") + # I2V-specific: image embeddings (if present) + image_embeds = inputs_list[4].to(f"cuda:{rank}") if inputs_list[4] is not None else None + + # Setup CFG config + cfg_config = pipeline._setup_cfg_config( + guidance_scale=5.0, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + ) + + # Verify CFG parallel is enabled + assert cfg_config["enabled"], f"Rank {rank}: CFG parallel not enabled" + assert cfg_config["cfg_size"] == world_size, f"Rank {rank}: Wrong cfg_size" + + expected_cfg_group = rank // cfg_config["ulysses_size"] + assert cfg_config["cfg_group"] == expected_cfg_group, ( + f"Rank {rank}: Wrong cfg_group. 
Expected {expected_cfg_group}, got {cfg_config['cfg_group']}"
+        )
+
+        if rank == 0:
+            print(f"[CFG I2V Rank {rank}] Loaded with cfg_size={world_size}")
+            print(f"  cfg_group: {cfg_config['cfg_group']}")
+            print(f"  local_embeds shape: {cfg_config['local_embeds'].shape}")
+            print(f"  Using {'positive' if cfg_config['cfg_group'] == 0 else 'negative'} prompts")
+            print(f"  Image embeds: {'present' if image_embeds is not None else 'None'}")
+
+        # Verify prompt splitting
+        expected_embeds = prompt_embeds if cfg_config["cfg_group"] == 0 else neg_prompt_embeds
+        assert torch.allclose(cfg_config["local_embeds"], expected_embeds), (
+            f"Rank {rank}: local_embeds doesn't match expected embeds"
+        )
+
+        # Run single denoising step with CFG parallel
+        def forward_fn(
+            latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors
+        ):
+            # I2V-specific: include image embeddings in extra_tensors if present
+            return pipeline.transformer(  # noqa: F821
+                hidden_states=latents,
+                timestep=timestep,
+                encoder_hidden_states=encoder_hidden_states,
+                encoder_hidden_states_image=extra_tensors.get("encoder_hidden_states_image"),
+            )
+
+        with torch.no_grad():
+            local_extras = (
+                {"encoder_hidden_states_image": image_embeds} if image_embeds is not None else {}
+            )
+            noise_pred, _, _, _ = pipeline._denoise_step_cfg_parallel(
+                latents=latents,
+                extra_stream_latents={},
+                timestep=timestep,
+                local_embeds=cfg_config["local_embeds"],
+                forward_fn=forward_fn,
+                guidance_scale=5.0,
+                guidance_rescale=0.0,
+                ulysses_size=cfg_config["ulysses_size"],
+                local_extras=local_extras,
+            )
+
+        # Validate output
+        assert not torch.isnan(noise_pred).any(), f"Rank {rank}: Output contains NaN"
+        assert not torch.isinf(noise_pred).any(), f"Rank {rank}: Output contains Inf"
+
+        # Return output from rank 0
+        if rank == 0:
+            return_dict["output"] = noise_pred.cpu()
+            print(f"[CFG I2V Rank {rank}] āœ“ Output shape: {noise_pred.shape}")
+            print(
+                f"[CFG I2V Rank {rank}] āœ“ Output range: [{noise_pred.min():.4f}, {noise_pred.max():.4f}]"
+            )
+
+        del pipeline
+        torch.cuda.empty_cache()
+
+    finally:
+        cleanup_distributed()
+
+
+def _run_all_optimizations_worker_i2v(rank, world_size, checkpoint_path, inputs_list, return_dict):
+    """Worker for the I2V combined-optimizations multi-GPU test (FP8 + TeaCache + TRTLLM + CFG parallel)."""
+    try:
+        setup_distributed(rank, world_size)
+
+        # Load I2V pipeline with ALL optimizations
+        args_full = DiffusionArgs(
+            checkpoint_path=checkpoint_path,
+            device=f"cuda:{rank}",
+            dtype="bfloat16",
+            skip_components=SKIP_MINIMAL,
+            quant_config={"quant_algo": "FP8", "dynamic": True},
+            teacache=TeaCacheConfig(
+                enable_teacache=True,
+                teacache_thresh=0.2,
+                use_ret_steps=True,
+            ),
+            attention=AttentionConfig(backend="TRTLLM"),
+            parallel=ParallelConfig(dit_cfg_size=world_size),
+        )
+        pipeline = PipelineLoader(args_full).load()
+        transformer = pipeline.transformer.eval()
+
+        # Verify all optimizations are enabled
+        assert pipeline.model_config.parallel.dit_cfg_size == world_size, "CFG parallel not enabled"
+        assert transformer.model_config.quant_config.quant_algo == QuantAlgo.FP8, "FP8 not enabled"
+        assert hasattr(pipeline, "transformer_cache_backend"), "TeaCache not enabled"
+        assert transformer.blocks[0].attn1.attn_backend == "TRTLLM", (
+            "TRTLLM not enabled for self-attn"
+        )
+
+        if rank == 0:
+            print(f"  āœ“ All optimizations verified on I2V rank {rank}:")
+            print(f"    - FP8 quantization: {transformer.model_config.quant_config.quant_algo}")
+            print("    - TeaCache: enabled")
+            print(f"    - TRTLLM attention: {transformer.blocks[0].attn1.attn_backend}")
+            print(f"    - CFG Parallelism: cfg_size={world_size}")
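+        # Sketch of the CFG-parallel data flow exercised below, consistent with
+        # what _run_cfg_worker_i2v asserts above: each rank runs one transformer
+        # forward on its half of the prompt split,
+        #     cfg_group == 0  ->  positive prompt_embeds
+        #     cfg_group == 1  ->  negative prompt_embeds
+        # then _denoise_step_cfg_parallel all-gathers the two branches and
+        # applies the guidance combine, so a CFG step costs roughly one forward
+        # pass per rank instead of two sequential passes on one GPU.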
+ + # Initialize TeaCache for single-step inference + if hasattr(pipeline, "transformer_cache_backend"): + pipeline.transformer_cache_backend.refresh(num_inference_steps=1) + + # Load inputs on this GPU + prompt_embeds = inputs_list[0].to(f"cuda:{rank}") + neg_prompt_embeds = inputs_list[1].to(f"cuda:{rank}") + latents = inputs_list[2].to(f"cuda:{rank}") + timestep = inputs_list[3].to(f"cuda:{rank}") + image_embeds = inputs_list[4].to(f"cuda:{rank}") if inputs_list[4] is not None else None + + # Setup CFG config + cfg_config = pipeline._setup_cfg_config( + guidance_scale=5.0, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + ) + + assert cfg_config["enabled"], "CFG parallel not enabled" + + # Run single denoising step with all optimizations + def forward_fn( + latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors + ): + return transformer( # noqa: F821 + hidden_states=latents, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_states_image=extra_tensors.get("encoder_hidden_states_image"), + ) + + with torch.no_grad(): + local_extras = ( + {"encoder_hidden_states_image": image_embeds} if image_embeds is not None else {} + ) + noise_pred, _, _, _ = pipeline._denoise_step_cfg_parallel( + latents=latents, + extra_stream_latents={}, + timestep=timestep, + local_embeds=cfg_config["local_embeds"], + forward_fn=forward_fn, + guidance_scale=5.0, + guidance_rescale=0.0, + ulysses_size=cfg_config["ulysses_size"], + local_extras=local_extras, + ) + + # Validate output + assert not torch.isnan(noise_pred).any(), f"Rank {rank}: Output contains NaN" + assert not torch.isinf(noise_pred).any(), f"Rank {rank}: Output contains Inf" + + # Return output from rank 0 + if rank == 0: + return_dict["output"] = noise_pred.cpu() + print(f" āœ“ Combined optimization I2V output shape: {noise_pred.shape}") + print( + f" āœ“ Combined optimization I2V range: [{noise_pred.min():.4f}, {noise_pred.max():.4f}]" + ) + + del pipeline, transformer + torch.cuda.empty_cache() + + finally: + cleanup_distributed() + + +# ============================================================================ +# SMOKE TESTS (No Checkpoint Required) +# ============================================================================ + + +@pytest.mark.unit +@pytest.mark.smoke +class TestWanI2VSmoke: + def _create_model_config(self, boundary_ratio=None): + """Helper to create test model config.""" + config_dict = { + "attention_head_dim": 128, + "in_channels": 16, + "out_channels": 16, + "num_attention_heads": 4, + "num_layers": 1, + "patch_size": [1, 2, 2], + "text_dim": 4096, + "freq_dim": 256, + "ffn_dim": 1024, + "torch_dtype": "bfloat16", + "hidden_size": 512, + "qk_norm": "rms_norm_across_heads", + "cross_attn_norm": "layer_norm", + "eps": 1e-06, + "image_dim": 1280, # CLIP dimension (HF naming convention) + "added_kv_proj_dim": 1280, # Added KV projection dimension for I2V + "boundary_ratio": boundary_ratio, + } + pretrained_config = SimpleNamespace(**config_dict) + quant_config = QuantConfig() + + return DiffusionModelConfig( + pretrained_config=pretrained_config, + quant_config=quant_config, + skip_create_weights_in_init=True, + ) + + def test_wan21_instantiation(self): + """Test Wan 2.1 I2V pipeline (single-stage).""" + model_config = self._create_model_config(boundary_ratio=None) + pipeline = WanImageToVideoPipeline(model_config) + + assert pipeline.transformer is not None + assert pipeline.transformer_2 is None # Single-stage + assert pipeline.boundary_ratio is 
None + + def test_wan22_instantiation(self): + """Test Wan 2.2 I2V pipeline (two-stage).""" + model_config = self._create_model_config(boundary_ratio=0.4) + pipeline = WanImageToVideoPipeline(model_config) + + assert pipeline.transformer is not None + assert pipeline.transformer_2 is not None # Two-stage + assert pipeline.boundary_ratio == 0.4 + + def test_retrieve_latents(self): + """Test retrieve_latents helper.""" + from tensorrt_llm._torch.visual_gen.models.wan.pipeline_wan_i2v import retrieve_latents + + class MockLatentDist: + def mode(self): + return torch.randn(1, 16, 1, 64, 64) + + def sample(self, generator=None): + return torch.randn(1, 16, 1, 64, 64) + + class MockEncoderOutput: + def __init__(self): + self.latent_dist = MockLatentDist() + + encoder_output = MockEncoderOutput() + + # Test argmax mode (I2V default for deterministic encoding) + latents_argmax = retrieve_latents(encoder_output, sample_mode="argmax") + assert latents_argmax.shape == (1, 16, 1, 64, 64) + + # Test sample mode + latents_sample = retrieve_latents(encoder_output, sample_mode="sample") + assert latents_sample.shape == (1, 16, 1, 64, 64) + + +# ============================================================================ +# INTEGRATION TESTS - WAN 2.1 (Require Wan 2.1 Checkpoint) +# ============================================================================ + + +@pytest.mark.integration +@pytest.mark.i2v +@pytest.mark.wan21 +class TestWanI2VIntegration: + """Integration tests with Wan 2.1 checkpoint.""" + + def test_load_pipeline(self, wan21_i2v_pipeline_bf16): + """Test loading I2V pipeline from checkpoint.""" + # Verify I2V pipeline + assert "ImageToVideo" in type(wan21_i2v_pipeline_bf16).__name__ + assert wan21_i2v_pipeline_bf16.transformer is not None + assert len(wan21_i2v_pipeline_bf16.transformer.blocks) > 0 + + # Detect version + is_two_stage = ( + wan21_i2v_pipeline_bf16.boundary_ratio is not None + and wan21_i2v_pipeline_bf16.transformer_2 is not None + ) + + print(f"\nāœ“ Pipeline: {type(wan21_i2v_pipeline_bf16).__name__}") + print(f"āœ“ Transformer blocks: {len(wan21_i2v_pipeline_bf16.transformer.blocks)}") + print(f"āœ“ boundary_ratio: {wan21_i2v_pipeline_bf16.boundary_ratio}") + print(f"āœ“ Two-stage: {is_two_stage}") + + def test_image_encoding(self, wan21_i2v_pipeline_with_image_encoder, test_image): + """Test CLIP image encoding (if model uses it).""" + # Check if model uses image encoder + if ( + not hasattr(wan21_i2v_pipeline_with_image_encoder, "image_encoder") + or wan21_i2v_pipeline_with_image_encoder.image_encoder is None + ): + pytest.skip("This checkpoint doesn't use image encoder") + + # Encode test image + image_embeds = wan21_i2v_pipeline_with_image_encoder._encode_image(test_image) + + assert image_embeds is not None + assert image_embeds.dim() == 3 # [batch, seq_len, embed_dim] + print(f"\nāœ“ Image embeddings: {image_embeds.shape}, dtype={image_embeds.dtype}") + + def test_fp8_per_tensor_quantization(self, wan21_i2v_pipeline_fp8): + """Test FP8 per-tensor dynamic quantization.""" + # Check transformer for FP8 weights + found_fp8 = any( + param.dtype == torch.float8_e4m3fn + for name, param in wan21_i2v_pipeline_fp8.transformer.named_parameters() + if "blocks.0" in name and "weight" in name + ) + assert found_fp8, "No FP8 weights found for FP8" + print("\nāœ“ FP8: FP8 weights found in transformer") + + # Check transformer_2 if two-stage + if wan21_i2v_pipeline_fp8.transformer_2 is not None: + found_fp8_t2 = any( + param.dtype == torch.float8_e4m3fn + for name, param in 
wan21_i2v_pipeline_fp8.transformer_2.named_parameters() + if "blocks.0" in name and "weight" in name + ) + assert found_fp8_t2, "No FP8 weights in transformer_2" + print("āœ“ FP8: FP8 weights found in transformer_2") + + def test_fp8_blockwise_quantization(self, wan21_i2v_pipeline_fp8_blockwise): + """Test FP8 blockwise dynamic quantization.""" + # Check transformer for FP8 weights + found_fp8 = any( + param.dtype == torch.float8_e4m3fn + for name, param in wan21_i2v_pipeline_fp8_blockwise.transformer.named_parameters() + if "blocks.0" in name and "weight" in name + ) + assert found_fp8, "No FP8 weights found for FP8_BLOCK_SCALES" + print("\nāœ“ FP8_BLOCK_SCALES: FP8 weights found in transformer") + + @pytest.mark.parametrize("backend", ["VANILLA", "TRTLLM"]) + def test_attention_backends(self, backend): + """Test different attention backends.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("DIFFUSION_MODEL_PATH not set") + if not is_wan21_checkpoint(): + pytest.skip("This test requires Wan 2.1 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + attention=AttentionConfig(backend=backend), + ) + pipeline = PipelineLoader(args).load() + + try: + # Check transformer attention backend + first_block = pipeline.transformer.blocks[0] + attn1_backend = first_block.attn1.attn_backend + attn2_backend = first_block.attn2.attn_backend + + # TRTLLM for self-attention, VANILLA for cross-attention + if backend == "TRTLLM": + assert attn1_backend == "TRTLLM", f"Expected TRTLLM, got {attn1_backend}" + assert attn2_backend == "VANILLA", ( + f"Cross-attn should be VANILLA, got {attn2_backend}" + ) + else: + assert attn1_backend == "VANILLA" + assert attn2_backend == "VANILLA" + + print(f"\nāœ“ Attention backend: {backend}") + print(f" Self-attn: {attn1_backend}, Cross-attn: {attn2_backend}") + + # Check transformer_2 if two-stage + if pipeline.transformer_2 is not None: + first_block_t2 = pipeline.transformer_2.blocks[0] + attn1_backend_t2 = first_block_t2.attn1.attn_backend + attn2_backend_t2 = first_block_t2.attn2.attn_backend + + if backend == "TRTLLM": + assert attn1_backend_t2 == "TRTLLM" + assert attn2_backend_t2 == "VANILLA" + print( + f" Transformer_2 - Self-attn: {attn1_backend_t2}, Cross-attn: {attn2_backend_t2}" + ) + + finally: + del pipeline + torch.cuda.empty_cache() + + def test_teacache(self): + """Test TeaCache on both transformers.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("DIFFUSION_MODEL_PATH not set") + if not is_wan21_checkpoint(): + pytest.skip("This test requires Wan 2.1 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + teacache=TeaCacheConfig( + enable_teacache=True, + teacache_thresh=0.2, + use_ret_steps=True, + ), + ) + pipeline = PipelineLoader(args).load() + + try: + # Verify TeaCache on transformer + assert hasattr(pipeline, "transformer_cache_backend") + assert pipeline.transformer_cache_backend is not None + print("\nāœ“ TeaCache enabled on transformer (high-noise)") + + # Verify get_stats method + stats = pipeline.transformer_cache_backend.get_stats() + assert "total_steps" in stats + assert "cached_steps" in stats + assert "compute_steps" in stats + print("āœ“ TeaCache stats available") + + # Check transformer_2 if two-stage + if pipeline.transformer_2 is not None: + assert hasattr(pipeline, 
"transformer_2_cache_backend") + assert pipeline.transformer_2_cache_backend is not None + stats2 = pipeline.transformer_2_cache_backend.get_stats() + assert "total_steps" in stats2 + print("āœ“ TeaCache enabled on transformer_2 (low-noise)") + + finally: + del pipeline + torch.cuda.empty_cache() + + def test_all_optimizations_combined(self): + """Test all optimizations enabled simultaneously.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("DIFFUSION_MODEL_PATH not set") + if not is_wan21_checkpoint(): + pytest.skip("This test requires Wan 2.1 checkpoint") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + attention=AttentionConfig(backend="VANILLA"), # VANILLA more stable with all opts + teacache=TeaCacheConfig( + enable_teacache=True, + teacache_thresh=0.2, + use_ret_steps=True, + ), + ) + pipeline = PipelineLoader(args).load() + + try: + optimizations = [] + + # Check FP8 + if any(p.dtype == torch.float8_e4m3fn for p in pipeline.transformer.parameters()): + optimizations.append("FP8") + + # Check TeaCache + if ( + hasattr(pipeline, "transformer_cache_backend") + and pipeline.transformer_cache_backend + ): + optimizations.append("TeaCache") + + # Check two-stage + if pipeline.transformer_2 is not None: + optimizations.append("Two-Stage") + + # Check attention backend + optimizations.append(f"Attention={args.attention.backend}") + + print(f"\nāœ“ All optimizations: {', '.join(optimizations)}") + assert len(optimizations) >= 3 + + finally: + del pipeline + torch.cuda.empty_cache() + + def test_fp8_vs_bf16_numerical_correctness( + self, wan21_i2v_pipeline_bf16, wan21_i2v_pipeline_fp8 + ): + """Test FP8 vs BF16 numerical accuracy on I2V transformer.""" + # Get linear layers from first transformer + attn_bf16 = wan21_i2v_pipeline_bf16.transformer.blocks[0].attn1 + attn_fp8 = wan21_i2v_pipeline_fp8.transformer.blocks[0].attn1 + + # Get qkv_proj layer + if hasattr(attn_bf16, "qkv_proj"): + linear_bf16 = attn_bf16.qkv_proj + linear_fp8 = attn_fp8.qkv_proj + layer_name = "blocks.0.attn1.qkv_proj" + elif hasattr(attn_bf16, "attn") and hasattr(attn_bf16.attn, "qkv_proj"): + linear_bf16 = attn_bf16.attn.qkv_proj + linear_fp8 = attn_fp8.attn.qkv_proj + layer_name = "blocks.0.attn1.attn.qkv_proj" + else: + # Use FFN linear instead + linear_bf16 = wan21_i2v_pipeline_bf16.transformer.blocks[0].ffn.net[0]["proj"] + linear_fp8 = wan21_i2v_pipeline_fp8.transformer.blocks[0].ffn.net[0]["proj"] + layer_name = "blocks.0.ffn.net.0.proj" + + # Get weights + weight_bf16 = linear_bf16.weight.data.clone() + bias_bf16 = linear_bf16.bias.data.clone() if linear_bf16.bias is not None else None + + # Create test input + torch.manual_seed(42) + hidden_size = linear_bf16.in_features + batch_size = 1 + seq_len = 14040 + + input_tensor = torch.randn( + batch_size * seq_len, hidden_size, dtype=torch.bfloat16, device="cuda" + ) + print(f"\n[Compare] Input shape: {input_tensor.shape}") + + # Compute reference output + with torch.no_grad(): + expected = F.linear(input_tensor, weight_bf16, bias_bf16) + + # Compute FP8 output + with torch.no_grad(): + result_fp8 = linear_fp8(input_tensor) + + # Compute BF16 output + with torch.no_grad(): + result_bf16 = linear_bf16(input_tensor) + + # Verify BF16 matches reference + assert torch.allclose(result_bf16, expected, rtol=1e-5, atol=1e-6), ( + "BF16 layer should match F.linear reference exactly" + ) + + # 
Compare FP8 vs reference + max_diff = torch.max(torch.abs(result_fp8 - expected)).item() + cos_sim = F.cosine_similarity( + result_fp8.flatten().float(), expected.flatten().float(), dim=0 + ) + mse = F.mse_loss(result_fp8.flatten().float(), expected.flatten().float()) + + print( + f"\n[{layer_name}] max_diff={max_diff:.6f}, cos_sim={cos_sim.item():.6f}, mse={mse.item():.6f}" + ) + + assert cos_sim > 0.99, f"Cosine similarity too low: {cos_sim.item()}" + assert mse < 1.0, f"MSE too high: {mse.item()}" + + # Test transformer_2 if two-stage + if ( + wan21_i2v_pipeline_bf16.transformer_2 is not None + and wan21_i2v_pipeline_fp8.transformer_2 is not None + ): + print("\n[Testing transformer_2]") + attn2_bf16 = wan21_i2v_pipeline_bf16.transformer_2.blocks[0].attn1 + attn2_fp8 = wan21_i2v_pipeline_fp8.transformer_2.blocks[0].attn1 + + if hasattr(attn2_bf16, "qkv_proj"): + linear2_bf16 = attn2_bf16.qkv_proj + linear2_fp8 = attn2_fp8.qkv_proj + else: + linear2_bf16 = wan21_i2v_pipeline_bf16.transformer_2.blocks[0].ffn.net[0]["proj"] + linear2_fp8 = wan21_i2v_pipeline_fp8.transformer_2.blocks[0].ffn.net[0]["proj"] + + weight2_bf16 = linear2_bf16.weight.data.clone() + bias2_bf16 = linear2_bf16.bias.data.clone() if linear2_bf16.bias is not None else None + + with torch.no_grad(): + expected2 = F.linear(input_tensor, weight2_bf16, bias2_bf16) + result2_fp8 = linear2_fp8(input_tensor) + + cos_sim2 = F.cosine_similarity( + result2_fp8.flatten().float(), expected2.flatten().float(), dim=0 + ) + print(f"[transformer_2] cos_sim={cos_sim2.item():.6f}") + assert cos_sim2 > 0.99, f"Transformer_2 cosine similarity too low: {cos_sim2.item()}" + + def test_fp8_vs_bf16_memory_comparison(self, wan21_i2v_pipeline_bf16, wan21_i2v_pipeline_fp8): + """Test FP8 uses ~2x less memory than BF16 for I2V.""" + + def get_module_memory_gb(module): + return sum(p.numel() * p.element_size() for p in module.parameters()) / 1024**3 + + bf16_model_mem = get_module_memory_gb(wan21_i2v_pipeline_bf16.transformer) + if wan21_i2v_pipeline_bf16.transformer_2 is not None: + bf16_model_mem += get_module_memory_gb(wan21_i2v_pipeline_bf16.transformer_2) + + fp8_model_mem = get_module_memory_gb(wan21_i2v_pipeline_fp8.transformer) + if wan21_i2v_pipeline_fp8.transformer_2 is not None: + fp8_model_mem += get_module_memory_gb(wan21_i2v_pipeline_fp8.transformer_2) + + print(f"\n[BF16] Transformer(s) memory: {bf16_model_mem:.2f} GB") + print(f"[FP8] Transformer(s) memory: {fp8_model_mem:.2f} GB") + + # Verify memory savings + model_mem_ratio = bf16_model_mem / fp8_model_mem + + print(f"\n[Comparison] Model memory ratio (BF16/FP8): {model_mem_ratio:.2f}x") + + # FP8 should use ~2x less memory + assert model_mem_ratio > 1.8, f"FP8 should use ~2x less memory, got {model_mem_ratio:.2f}x" + + +# ============================================================================ +# TWO-STAGE SPECIFIC TESTS - WAN 2.2 (Require Wan 2.2 Checkpoint) +# ============================================================================ + + +@pytest.mark.integration +@pytest.mark.i2v +@pytest.mark.wan22 +class TestWanI2VTwoStage: + """Tests specific to Wan 2.2 two-stage denoising.""" + + def test_transformer_selection_logic(self, wan22_i2v_pipeline_bf16): + """Test boundary_timestep logic for transformer selection.""" + # Skip if not two-stage + if ( + wan22_i2v_pipeline_bf16.boundary_ratio is None + or wan22_i2v_pipeline_bf16.transformer_2 is None + ): + pytest.skip("Not a two-stage checkpoint") + + # Calculate boundary + num_train_timesteps = 1000 + boundary_timestep = 
wan22_i2v_pipeline_bf16.boundary_ratio * num_train_timesteps + + print(f"\nāœ“ boundary_ratio: {wan22_i2v_pipeline_bf16.boundary_ratio}") + print(f"āœ“ boundary_timestep: {boundary_timestep:.1f}") + print(f"āœ“ High-noise (t >= {boundary_timestep:.1f}): uses transformer") + print(f"āœ“ Low-noise (t < {boundary_timestep:.1f}): uses transformer_2") + + @pytest.mark.parametrize("guidance_scale_2", [2.0, 3.0, 4.0]) + def test_guidance_scale_2_parameter(self, wan22_i2v_pipeline_bf16, guidance_scale_2): + """Test guidance_scale_2 for low-noise stage.""" + # Skip if not two-stage + if ( + wan22_i2v_pipeline_bf16.boundary_ratio is None + or wan22_i2v_pipeline_bf16.transformer_2 is None + ): + pytest.skip("Not a two-stage checkpoint") + + print(f"\nāœ“ Two-stage model supports guidance_scale_2={guidance_scale_2}") + print("āœ“ High-noise: uses guidance_scale") + print(f"āœ“ Low-noise: uses guidance_scale_2={guidance_scale_2}") + + def test_custom_boundary_ratio(self, wan22_i2v_pipeline_bf16): + """Test overriding boundary_ratio at runtime.""" + # Skip if not two-stage + if ( + wan22_i2v_pipeline_bf16.boundary_ratio is None + or wan22_i2v_pipeline_bf16.transformer_2 is None + ): + pytest.skip("Not a two-stage checkpoint") + + default_ratio = wan22_i2v_pipeline_bf16.boundary_ratio + custom_ratio = 0.3 + + print(f"\nāœ“ Model default boundary_ratio: {default_ratio}") + print(f"āœ“ Custom override: {custom_ratio}") + print("āœ“ forward() accepts boundary_ratio parameter for runtime override") + + def test_two_stage_with_all_optimizations(self, wan22_i2v_pipeline_fp8): + """Test Wan 2.2 with FP8, TeaCache, and TRTLLM attention.""" + # Skip if not two-stage + if ( + wan22_i2v_pipeline_fp8.boundary_ratio is None + or wan22_i2v_pipeline_fp8.transformer_2 is None + ): + pytest.skip("Not a two-stage checkpoint") + + # Load pipeline with all optimizations + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + quant_config={"quant_algo": "FP8_BLOCK_SCALES", "dynamic": True}, + attention=AttentionConfig(backend="TRTLLM"), + teacache=TeaCacheConfig( + enable_teacache=True, + teacache_thresh=0.2, + use_ret_steps=True, + ), + ) + pipeline = PipelineLoader(args).load() + + try: + print("\n[Two-Stage + All Optimizations]") + + # Check FP8 on both transformers + fp8_t1 = any(p.dtype == torch.float8_e4m3fn for p in pipeline.transformer.parameters()) + fp8_t2 = any( + p.dtype == torch.float8_e4m3fn for p in pipeline.transformer_2.parameters() + ) + print(f"āœ“ FP8: transformer={fp8_t1}, transformer_2={fp8_t2}") + assert fp8_t1 and fp8_t2 + + # Check TeaCache on both transformers + has_cache_t1 = ( + hasattr(pipeline, "transformer_cache_backend") + and pipeline.transformer_cache_backend + ) + has_cache_t2 = ( + hasattr(pipeline, "transformer_2_cache_backend") + and pipeline.transformer_2_cache_backend + ) + print(f"āœ“ TeaCache: transformer={has_cache_t1}, transformer_2={has_cache_t2}") + assert has_cache_t1 and has_cache_t2 + + # Check TRTLLM attention + attn1_backend = pipeline.transformer.blocks[0].attn1.attn_backend + attn2_backend = pipeline.transformer_2.blocks[0].attn1.attn_backend + print(f"āœ“ TRTLLM: transformer={attn1_backend}, transformer_2={attn2_backend}") + assert attn1_backend == "TRTLLM" + assert attn2_backend == "TRTLLM" + + print("āœ“ All optimizations working on two-stage model!") + + finally: + del pipeline + torch.cuda.empty_cache() + + +# ============================================================================ +# 
ROBUSTNESS TESTS +# ============================================================================ + + +@pytest.mark.robustness +class TestWanI2VRobustness: + """Robustness and error handling tests.""" + + def test_invalid_quant_config(self): + """Test that invalid quantization config raises appropriate error.""" + with pytest.raises((ValueError, KeyError)): + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + quant_config={"quant_algo": "INVALID_ALGO", "dynamic": True}, + ) + pipeline = PipelineLoader(args).load() + del pipeline + + def test_mismatched_image_size(self, test_image): + """Test handling of unexpected image dimensions.""" + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + pytest.skip("DIFFUSION_MODEL_PATH not set") + + args = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda", + dtype="bfloat16", + skip_components=SKIP_WITH_IMAGE, + ) + pipeline = PipelineLoader(args).load() + + try: + # Check if model uses image encoder + if not hasattr(pipeline, "image_encoder") or pipeline.image_encoder is None: + pytest.skip("This checkpoint doesn't use image encoder") + + # Create image with unexpected size + import numpy as np + + small_img = np.zeros((224, 224, 3), dtype=np.uint8) + small_image = Image.fromarray(small_img, mode="RGB") + + # Should handle gracefully + try: + image_embeds = pipeline._encode_image(small_image) + assert image_embeds is not None + print("\nāœ“ Handled non-standard image size gracefully") + except Exception as e: + # Some error is expected + print(f"\nāœ“ Raised appropriate error for mismatched size: {type(e).__name__}") + + finally: + del pipeline + torch.cuda.empty_cache() + + +# ============================================================================ +# CFG PARALLELISM TESTS (Requires 2+ GPUs) +# ============================================================================ + + +@pytest.mark.parallelism +class TestWanI2VParallelism(unittest.TestCase): + """Distributed parallelism correctness tests for I2V (CFG Parallelism).""" + + DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + def setUp(self): + """Set up test fixtures and skip if checkpoint not available.""" + torch.manual_seed(42) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(42) + if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH): + self.skipTest( + "Checkpoint not available. Set DIFFUSION_MODEL_PATH environment variable." + ) + + def tearDown(self): + """Clean up GPU memory.""" + import gc + + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.synchronize() + + def test_cfg_2gpu_correctness(self): + """Test I2V CFG Parallelism (cfg_size=2) correctness against standard CFG baseline.""" + num_gpus = torch.cuda.device_count() + if num_gpus < 2: + pytest.skip("CFG parallel test requires at least 2 GPUs") + if not is_wan21_checkpoint(): + pytest.skip( + "This test requires Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path." 
+ ) + + print("\n" + "=" * 80) + print("I2V CFG PARALLELISM (cfg_size=2) CORRECTNESS TEST") + print("=" * 80) + + # Load standard CFG baseline on GPU 0 + print("\n[1/3] Loading standard CFG I2V baseline (cfg_size=1) on GPU 0...") + args_baseline = DiffusionArgs( + checkpoint_path=CHECKPOINT_PATH, + device="cuda:0", + dtype="bfloat16", + skip_components=SKIP_MINIMAL, + parallel=ParallelConfig(dit_cfg_size=1), # Standard CFG (no parallel) + ) + pipeline_baseline = PipelineLoader(args_baseline).load() + config = pipeline_baseline.transformer.model_config.pretrained_config + + # Reset torch compile state + torch._dynamo.reset() + + # Create FIXED test inputs + print("\n[2/3] Creating fixed test inputs...") + torch.manual_seed(42) + batch_size, num_frames, height, width, seq_len = 1, 1, 64, 64, 128 + + latents = torch.randn( + batch_size, + config.in_channels, + num_frames, + height, + width, + dtype=torch.bfloat16, + device="cuda:0", + ) + timestep = torch.tensor([500], dtype=torch.long, device="cuda:0") + prompt_embeds = torch.randn( + batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0" + ) + neg_prompt_embeds = torch.randn( + batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0" + ) + + # I2V-specific: Create image embeddings (or None if Wan 2.2) + image_embeds = None + image_dim = getattr(config, "image_dim", getattr(config, "image_embed_dim", None)) + if image_dim is not None: + # Wan 2.1 uses CLIP image embeddings + image_seq_len = 256 # CLIP patch count + image_embeds = torch.randn( + batch_size, image_seq_len, image_dim, dtype=torch.bfloat16, device="cuda:0" + ) + print(f" āœ“ Created image embeddings: {image_embeds.shape}") + + # Setup standard CFG config + cfg_config_baseline = pipeline_baseline._setup_cfg_config( + guidance_scale=5.0, + prompt_embeds=prompt_embeds, + neg_prompt_embeds=neg_prompt_embeds, + ) + + print(" Baseline CFG config:") + print(f" enabled: {cfg_config_baseline['enabled']}") + print(f" cfg_size: {cfg_config_baseline['cfg_size']}") + + # Verify standard CFG is NOT parallel + assert not cfg_config_baseline["enabled"], "Baseline should not use CFG parallel" + assert cfg_config_baseline["cfg_size"] == 1, "Baseline cfg_size should be 1" + + # Run standard CFG denoising step + def forward_fn( + latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors + ): + return pipeline_baseline.transformer( # noqa: F821 + hidden_states=latents, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + encoder_hidden_states_image=extra_tensors.get("encoder_hidden_states_image"), + ) + + with torch.no_grad(): + local_extras = ( + {"encoder_hidden_states_image": image_embeds} if image_embeds is not None else {} + ) + baseline_output, _, _, _ = pipeline_baseline._denoise_step_standard( + latents=latents.clone(), + extra_stream_latents={}, + timestep=timestep, + prompt_embeds=cfg_config_baseline["prompt_embeds"], + forward_fn=forward_fn, + guidance_scale=5.0, + guidance_rescale=0.0, + local_extras=local_extras, + ) + + print(f" āœ“ Baseline output shape: {baseline_output.shape}") + print(f" āœ“ Baseline range: [{baseline_output.min():.4f}, {baseline_output.max():.4f}]") + + # Cleanup baseline to free memory for CFG workers + del pipeline_baseline + torch.cuda.empty_cache() + + # Run CFG parallel (cfg_size=2) in distributed processes + print("\n[3/3] Running I2V CFG Parallelism (cfg_size=2) across 2 GPUs...") + cfg_size = 2 + + inputs_cpu = [ + prompt_embeds.cpu(), + neg_prompt_embeds.cpu(), + latents.cpu(), + 
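+            # NOTE: order must match the inputs_list indexing in
+            # _run_cfg_worker_i2v (0: prompt_embeds, 1: neg_prompt_embeds,
+            # 2: latents, 3: timestep, 4: image_embeds).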
+            timestep.cpu(),
+            image_embeds.cpu() if image_embeds is not None else None,
+        ]
+
+        manager = mp.Manager()
+        return_dict = manager.dict()
+
+        # Spawn CFG workers
+        mp.spawn(
+            _run_cfg_worker_i2v,
+            args=(cfg_size, CHECKPOINT_PATH, inputs_cpu, return_dict),
+            nprocs=cfg_size,
+            join=True,
+        )
+
+        # Get CFG parallel output from rank 0
+        cfg_parallel_output = return_dict["output"].to("cuda:0")
+        print(f" ✓ CFG parallel output shape: {cfg_parallel_output.shape}")
+
+        # Compare outputs
+        print("\n[Comparison] I2V CFG Parallel vs Standard CFG:")
+        baseline_float = baseline_output.float()
+        cfg_parallel_float = cfg_parallel_output.float()
+
+        cos_sim = F.cosine_similarity(
+            cfg_parallel_float.flatten(), baseline_float.flatten(), dim=0
+        ).item()
+
+        max_diff = torch.max(torch.abs(cfg_parallel_float - baseline_float)).item()
+        mean_diff = torch.mean(torch.abs(cfg_parallel_float - baseline_float)).item()
+
+        print(f" Cosine similarity: {cos_sim:.6f}")
+        print(f" Max absolute difference: {max_diff:.6f}")
+        print(f" Mean absolute difference: {mean_diff:.6f}")
+        print(
+            f" CFG parallel range: [{cfg_parallel_float.min():.4f}, {cfg_parallel_float.max():.4f}]"
+        )
+        print(f" Baseline range: [{baseline_float.min():.4f}, {baseline_float.max():.4f}]")
+
+        assert cos_sim > 0.99, (
+            f"I2V CFG parallel cosine similarity {cos_sim:.6f} below threshold 0.99. "
+            f"CFG Parallelism does not match standard CFG baseline."
+        )
+
+        print("\n[PASS] I2V CFG Parallelism (cfg_size=2) validated!")
+        print(" ✓ CFG parallel produces same output as standard CFG")
+        print(" ✓ Prompt splitting and all-gather working correctly")
+        print(" ✓ Image embeddings handled correctly")
+        print("=" * 80)
+
+        torch.cuda.empty_cache()
+
+
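+# Reference for the numerical contract the CFG tests above and below assert.
+# This helper is an illustrative sketch only: the real prompt split and
+# all-gather live in the pipeline and the spawned workers, and the assumption
+# that rank 0 holds the conditional branch belongs to this sketch, not to the
+# pipeline implementation.
+def _cfg_combine_reference(cond_out, uncond_out, guidance_scale):
+    """Combine the two CFG branches into one noise prediction.
+
+    Standard CFG (cfg_size=1) runs both branches sequentially on one GPU;
+    CFG Parallelism (cfg_size=2) runs one branch per rank and all-gathers
+    them before applying this same combination, which is why the tests
+    expect cosine similarity near 1.0 against the baseline.
+    """
+    return uncond_out + guidance_scale * (cond_out - uncond_out)
+
+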
+# ============================================================================
+# COMBINED OPTIMIZATIONS TESTS (I2V)
+# ============================================================================
+
+
+@pytest.mark.parallelism
+class TestWanI2VCombinedOptimizations(unittest.TestCase):
+    """Test all optimizations combined for I2V: FP8 + TeaCache + TRTLLM + CFG Parallelism."""
+
+    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+    def setUp(self):
+        """Set up test fixtures and skip if the checkpoint is not available."""
+        torch.manual_seed(42)
+        if torch.cuda.is_available():
+            torch.cuda.manual_seed_all(42)
+        if not CHECKPOINT_PATH or not os.path.exists(CHECKPOINT_PATH):
+            self.skipTest(
+                "Checkpoint not available. Set the DIFFUSION_MODEL_PATH environment variable."
+            )
+
+    def tearDown(self):
+        """Clean up GPU memory."""
+        import gc
+
+        gc.collect()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            torch.cuda.synchronize()
+
+    def test_all_optimizations_combined(self):
+        """Test I2V FP8 + TeaCache + TRTLLM attention + CFG=2 combined correctness.
+
+        This test validates that all optimizations work together correctly for I2V:
+        1. FP8 per-tensor quantization for reduced memory/compute
+        2. TeaCache for caching repeated computations
+        3. TRTLLM attention backend for optimized attention kernels
+        4. CFG Parallelism (cfg_size=2) for distributed CFG computation
+
+        We compare against a standard CFG baseline with relaxed thresholds.
+        """
+        num_gpus = torch.cuda.device_count()
+        if num_gpus < 2:
+            pytest.skip("Combined optimization test requires at least 2 GPUs for CFG parallel")
+        if not is_wan21_checkpoint():
+            pytest.skip(
+                "This test requires a Wan 2.1 checkpoint. Use DIFFUSION_MODEL_PATH with '2.1' in the path."
+            )
+
+        print("\n" + "=" * 80)
+        print("I2V ALL OPTIMIZATIONS COMBINED TEST")
+        print("FP8 + TeaCache + TRTLLM Attention + CFG Parallelism (cfg_size=2)")
+        print("=" * 80)
+
+        # Load baseline on GPU 0 (no optimizations, standard CFG)
+        print("\n[1/3] Loading I2V baseline on GPU 0 (standard CFG, no optimizations)...")
+        args_baseline = DiffusionArgs(
+            checkpoint_path=CHECKPOINT_PATH,
+            device="cuda:0",
+            dtype="bfloat16",
+            skip_components=SKIP_MINIMAL,
+            parallel=ParallelConfig(dit_cfg_size=1),  # Standard CFG
+        )
+        pipeline_baseline = PipelineLoader(args_baseline).load()
+        config = pipeline_baseline.transformer.model_config.pretrained_config
+
+        # Reset torch compile state
+        torch._dynamo.reset()
+
+        # Create FIXED test inputs
+        print("\n[2/3] Creating fixed test inputs...")
+        torch.manual_seed(42)
+        batch_size, num_frames, height, width, seq_len = 1, 1, 64, 64, 128
+
+        latents = torch.randn(
+            batch_size,
+            config.in_channels,
+            num_frames,
+            height,
+            width,
+            dtype=torch.bfloat16,
+            device="cuda:0",
+        )
+        timestep = torch.tensor([500], dtype=torch.long, device="cuda:0")
+        prompt_embeds = torch.randn(
+            batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0"
+        )
+        neg_prompt_embeds = torch.randn(
+            batch_size, seq_len, config.text_dim, dtype=torch.bfloat16, device="cuda:0"
+        )
+
+        # I2V-specific: Create image embeddings
+        image_embeds = None
+        image_dim = getattr(config, "image_dim", getattr(config, "image_embed_dim", None))
+        if image_dim is not None:
+            image_seq_len = 256
+            image_embeds = torch.randn(
+                batch_size, image_seq_len, image_dim, dtype=torch.bfloat16, device="cuda:0"
+            )
+
+        # Setup standard CFG config
+        cfg_config_baseline = pipeline_baseline._setup_cfg_config(
+            guidance_scale=5.0,
+            prompt_embeds=prompt_embeds,
+            neg_prompt_embeds=neg_prompt_embeds,
+        )
+
+        # Run baseline standard CFG
+        print(" Running baseline (standard CFG)...")
+
+        def forward_fn_baseline(
+            latents, extra_stream_latents, timestep, encoder_hidden_states, extra_tensors
+        ):
+            return pipeline_baseline.transformer(  # noqa: F821
+                hidden_states=latents,
+                timestep=timestep,
+                encoder_hidden_states=encoder_hidden_states,
+                encoder_hidden_states_image=extra_tensors.get("encoder_hidden_states_image"),
+            )
+
+        with torch.no_grad():
+            local_extras = (
+                {"encoder_hidden_states_image": image_embeds} if image_embeds is not None else {}
+            )
+            baseline_output, _, _, _ = pipeline_baseline._denoise_step_standard(
+                latents=latents.clone(),
+                extra_stream_latents={},
+                timestep=timestep,
+                prompt_embeds=cfg_config_baseline["prompt_embeds"],
+                forward_fn=forward_fn_baseline,
+                guidance_scale=5.0,
+                guidance_rescale=0.0,
+                local_extras=local_extras,
+            )
+
+        print(f" ✓ Baseline output shape: {baseline_output.shape}")
+        print(f" ✓ Baseline range: [{baseline_output.min():.4f}, {baseline_output.max():.4f}]")
+
+        # Cleanup baseline
+        del pipeline_baseline
+        torch.cuda.empty_cache()
+
+        # Run with ALL optimizations (FP8 + TeaCache + TRTLLM + CFG=2)
+        print("\n[3/3] Running with ALL optimizations (FP8 + TeaCache + TRTLLM + CFG=2)...")
+        cfg_size = 2
+
+        inputs_cpu = [
+            prompt_embeds.cpu(),
+            neg_prompt_embeds.cpu(),
+            latents.cpu(),
+            timestep.cpu(),
+            image_embeds.cpu() if image_embeds is not None else None,
+        ]
+
+        manager = mp.Manager()
+        return_dict = manager.dict()
+
+        # Spawn workers with all optimizations
+        mp.spawn(
+            _run_all_optimizations_worker_i2v,
+            args=(cfg_size, CHECKPOINT_PATH, inputs_cpu, return_dict),
+            nprocs=cfg_size,
+            join=True,
+        )
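+        # _run_all_optimizations_worker_i2v is defined earlier in this file;
+        # per the docstring above, each spawned rank is expected to load the
+        # pipeline with FP8 quantization, TeaCache, the TRTLLM attention
+        # backend, and dit_cfg_size=2, run the same single denoise step, and
+        # have rank 0 publish the gathered prediction under return_dict["output"].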
+
+        # Get combined optimization output
+        combined_output = return_dict["output"].to("cuda:0")
+        print(f" ✓ Combined optimization output shape: {combined_output.shape}")
+
+        # Compare outputs (relaxed threshold for combined optimizations)
+        print("\n[Comparison] I2V Combined Optimizations vs Baseline:")
+        baseline_float = baseline_output.float()
+        combined_float = combined_output.float()
+
+        cos_sim = F.cosine_similarity(
+            combined_float.flatten(), baseline_float.flatten(), dim=0
+        ).item()
+
+        max_diff = torch.max(torch.abs(combined_float - baseline_float)).item()
+        mean_diff = torch.mean(torch.abs(combined_float - baseline_float)).item()
+
+        print(f" Cosine similarity: {cos_sim:.6f}")
+        print(f" Max absolute difference: {max_diff:.6f}")
+        print(f" Mean absolute difference: {mean_diff:.6f}")
+
+        # Relaxed threshold (0.95) since multiple optimizations compound numerical differences
+        assert cos_sim > 0.95, (
+            f"I2V combined optimization cosine similarity {cos_sim:.6f} below threshold 0.95"
+        )
+
+        print("\n[PASS] All optimizations (FP8 + TeaCache + TRTLLM + CFG) validated!")
+        print(" ✓ All optimizations work together correctly")
+        print(" ✓ I2V image embeddings handled correctly with all opts")
+        print("=" * 80)
+
+        torch.cuda.empty_cache()
+
+
+if __name__ == "__main__":
+    unittest.main(verbosity=2)
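+
+# How to run (sketch; the checkpoint path below is an assumption for
+# illustration, not a path shipped with this patch):
+#
+#   DIFFUSION_MODEL_PATH=/path/to/Wan2.1-I2V-14B-480P-Diffusers \
+#       python tests/unittest/_torch/visual_gen/test_wan_i2v.py
+#
+# or, selecting only the multi-GPU tests via the pytest marker used above:
+#
+#   DIFFUSION_MODEL_PATH=/path/to/Wan2.1-I2V-14B-480P-Diffusers \
+#       pytest -v -m parallelism tests/unittest/_torch/visual_gen/test_wan_i2v.py
+#
+# The CFG-parallel tests skip unless at least two CUDA GPUs are visible and
+# the checkpoint directory exists.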