From f48e08b34fd63b6b3b6d39cf7710f4e779b1f97d Mon Sep 17 00:00:00 2001
From: kewei <2512235663@qq.com>
Date: Mon, 27 May 2024 15:11:03 +0800
Subject: [PATCH] 0527
---
Model_Architecture_Discussions/llama3/LICENSE | 21 +
.../llama3/README.md | 1135 +
.../llama3/images/42.png | Bin 0 -> 791272 bytes
.../llama3/images/a10.png | Bin 0 -> 649183 bytes
.../llama3/images/afterattention.png | Bin 0 -> 296198 bytes
.../llama3/images/archi.png | Bin 0 -> 866105 bytes
.../llama3/images/attention.png | Bin 0 -> 207123 bytes
.../llama3/images/embeddings.png | Bin 0 -> 481788 bytes
.../llama3/images/finallayer.png | Bin 0 -> 818316 bytes
.../llama3/images/freq_cis.png | Bin 0 -> 833456 bytes
.../llama3/images/god.png | Bin 0 -> 1269810 bytes
.../llama3/images/heads.png | Bin 0 -> 818922 bytes
.../llama3/images/implllama3_30_0.png | Bin 0 -> 49767 bytes
.../llama3/images/implllama3_39_0.png | Bin 0 -> 27612 bytes
.../llama3/images/implllama3_41_0.png | Bin 0 -> 26436 bytes
.../llama3/images/implllama3_42_0.png | Bin 0 -> 28024 bytes
.../llama3/images/implllama3_50_0.png | Bin 0 -> 27587 bytes
.../llama3/images/implllama3_52_0.png | Bin 0 -> 26433 bytes
.../llama3/images/implllama3_54_0.png | Bin 0 -> 28027 bytes
.../llama3/images/karpathyminbpe.png | Bin 0 -> 806344 bytes
.../llama3/images/keys.png | Bin 0 -> 440484 bytes
.../llama3/images/keys0.png | Bin 0 -> 432739 bytes
.../llama3/images/last_norm.png | Bin 0 -> 1027925 bytes
.../llama3/images/mask.png | Bin 0 -> 482770 bytes
.../llama3/images/model.png | Bin 0 -> 674648 bytes
.../llama3/images/norm.png | Bin 0 -> 316073 bytes
.../llama3/images/norm_after.png | Bin 0 -> 304528 bytes
.../llama3/images/q_per_token.png | Bin 0 -> 495555 bytes
.../llama3/images/qkmatmul.png | Bin 0 -> 193872 bytes
.../llama3/images/qkv.png | Bin 0 -> 509106 bytes
.../llama3/images/qsplit.png | Bin 0 -> 564238 bytes
.../llama3/images/rms.png | Bin 0 -> 348921 bytes
.../llama3/images/rope.png | Bin 0 -> 528606 bytes
.../llama3/images/ropesplit.png | Bin 0 -> 411043 bytes
.../llama3/images/softmax.png | Bin 0 -> 195571 bytes
.../llama3/images/stacked.png | Bin 0 -> 392799 bytes
.../llama3/images/swiglu.png | Bin 0 -> 619341 bytes
.../llama3/images/tokens.png | Bin 0 -> 500210 bytes
.../llama3/images/v0.png | Bin 0 -> 192702 bytes
.../llama3/images/value.png | Bin 0 -> 204710 bytes
.../llama3/images/weightmatrix.png | Bin 0 -> 388974 bytes
.../llama3/llama3-from-scratch.ipynb | 2221 +
.../llama3/params.json | 11 +
.../llama3/params.txt | 9 +
.../llama3/requirements.txt | 5 +
.../llama3/tokenizer.model | 128000 +++++++++++++++
46 files changed, 131402 insertions(+)
create mode 100644 Model_Architecture_Discussions/llama3/LICENSE
create mode 100644 Model_Architecture_Discussions/llama3/README.md
create mode 100644 Model_Architecture_Discussions/llama3/images/42.png
create mode 100644 Model_Architecture_Discussions/llama3/images/a10.png
create mode 100644 Model_Architecture_Discussions/llama3/images/afterattention.png
create mode 100644 Model_Architecture_Discussions/llama3/images/archi.png
create mode 100644 Model_Architecture_Discussions/llama3/images/attention.png
create mode 100644 Model_Architecture_Discussions/llama3/images/embeddings.png
create mode 100644 Model_Architecture_Discussions/llama3/images/finallayer.png
create mode 100644 Model_Architecture_Discussions/llama3/images/freq_cis.png
create mode 100644 Model_Architecture_Discussions/llama3/images/god.png
create mode 100644 Model_Architecture_Discussions/llama3/images/heads.png
create mode 100644 Model_Architecture_Discussions/llama3/images/implllama3_30_0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/implllama3_39_0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/implllama3_41_0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/implllama3_42_0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/implllama3_50_0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/implllama3_52_0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/implllama3_54_0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/karpathyminbpe.png
create mode 100644 Model_Architecture_Discussions/llama3/images/keys.png
create mode 100644 Model_Architecture_Discussions/llama3/images/keys0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/last_norm.png
create mode 100644 Model_Architecture_Discussions/llama3/images/mask.png
create mode 100644 Model_Architecture_Discussions/llama3/images/model.png
create mode 100644 Model_Architecture_Discussions/llama3/images/norm.png
create mode 100644 Model_Architecture_Discussions/llama3/images/norm_after.png
create mode 100644 Model_Architecture_Discussions/llama3/images/q_per_token.png
create mode 100644 Model_Architecture_Discussions/llama3/images/qkmatmul.png
create mode 100644 Model_Architecture_Discussions/llama3/images/qkv.png
create mode 100644 Model_Architecture_Discussions/llama3/images/qsplit.png
create mode 100644 Model_Architecture_Discussions/llama3/images/rms.png
create mode 100644 Model_Architecture_Discussions/llama3/images/rope.png
create mode 100644 Model_Architecture_Discussions/llama3/images/ropesplit.png
create mode 100644 Model_Architecture_Discussions/llama3/images/softmax.png
create mode 100644 Model_Architecture_Discussions/llama3/images/stacked.png
create mode 100644 Model_Architecture_Discussions/llama3/images/swiglu.png
create mode 100644 Model_Architecture_Discussions/llama3/images/tokens.png
create mode 100644 Model_Architecture_Discussions/llama3/images/v0.png
create mode 100644 Model_Architecture_Discussions/llama3/images/value.png
create mode 100644 Model_Architecture_Discussions/llama3/images/weightmatrix.png
create mode 100644 Model_Architecture_Discussions/llama3/llama3-from-scratch.ipynb
create mode 100644 Model_Architecture_Discussions/llama3/params.json
create mode 100644 Model_Architecture_Discussions/llama3/params.txt
create mode 100644 Model_Architecture_Discussions/llama3/requirements.txt
create mode 100644 Model_Architecture_Discussions/llama3/tokenizer.model
diff --git a/Model_Architecture_Discussions/llama3/LICENSE b/Model_Architecture_Discussions/llama3/LICENSE
new file mode 100644
index 0000000..b0b882f
--- /dev/null
+++ b/Model_Architecture_Discussions/llama3/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Nishant Aklecha
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Model_Architecture_Discussions/llama3/README.md b/Model_Architecture_Discussions/llama3/README.md
new file mode 100644
index 0000000..df048f3
--- /dev/null
+++ b/Model_Architecture_Discussions/llama3/README.md
@@ -0,0 +1,1135 @@
+# llama3 implemented from scratch
+in this file, i implemented llama3 from scratch, one tensor and matrix multiplication at a time.
+
+also, im going to load tensors directly from the model file that meta provided for llama3, so you need to download the weights before running this file.
+here is the official link to download the weights: https://llama.meta.com/llama-downloads/
+
+
+
+<img src="images/archi.png"/>
+
+
+## tokenizer
+im not going to implement a bpe tokenizer (but andrej karpathy has a really clean implementation)
+
+link to his implementation: https://github.com/karpathy/minbpe
+
+
+
+<img src="images/karpathyminbpe.png"/>
+
+
+
+
+```python
+from pathlib import Path
+import tiktoken
+from tiktoken.load import load_tiktoken_bpe
+import torch
+import json
+import matplotlib.pyplot as plt
+
+tokenizer_path = "Meta-Llama-3-8B/tokenizer.model"
+special_tokens = [
+ "<|begin_of_text|>",
+ "<|end_of_text|>",
+ "<|reserved_special_token_0|>",
+ "<|reserved_special_token_1|>",
+ "<|reserved_special_token_2|>",
+ "<|reserved_special_token_3|>",
+ "<|start_header_id|>",
+ "<|end_header_id|>",
+ "<|reserved_special_token_4|>",
+ "<|eot_id|>", # end of turn
+ ] + [f"<|reserved_special_token_{i}|>" for i in range(5, 256 - 5)]
+mergeable_ranks = load_tiktoken_bpe(tokenizer_path)
+tokenizer = tiktoken.Encoding(
+ name=Path(tokenizer_path).name,
+ pat_str=r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+",
+ mergeable_ranks=mergeable_ranks,
+ special_tokens={token: len(mergeable_ranks) + i for i, token in enumerate(special_tokens)},
+)
+
+tokenizer.decode(tokenizer.encode("hello world!"))
+```
+
+
+
+
+ 'hello world!'
+
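+one note on tiktoken behaviour (standard tiktoken, not specific to this repo): special tokens are disallowed during encoding by default, so if you ever want to encode them yourself you have to allow them explicitly:
+
+
+```python
+# by default encode() raises an error on special token strings; allow them explicitly
+tokenizer.encode("<|begin_of_text|>hello world!", allowed_special="all")
+```
+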
+
+
+## reading the model file
+normally, reading this depends on how the model classes are written and the variable names inside them.
+
+but since we are implementing llama3 from scratch, we will read the file one tensor at a time.
+
+
+<img src="images/model.png"/>
+
+
+
+```python
+model = torch.load("Meta-Llama-3-8B/consolidated.00.pth")
+print(json.dumps(list(model.keys())[:20], indent=4))
+```
+
+ [
+ "tok_embeddings.weight",
+ "layers.0.attention.wq.weight",
+ "layers.0.attention.wk.weight",
+ "layers.0.attention.wv.weight",
+ "layers.0.attention.wo.weight",
+ "layers.0.feed_forward.w1.weight",
+ "layers.0.feed_forward.w3.weight",
+ "layers.0.feed_forward.w2.weight",
+ "layers.0.attention_norm.weight",
+ "layers.0.ffn_norm.weight",
+ "layers.1.attention.wq.weight",
+ "layers.1.attention.wk.weight",
+ "layers.1.attention.wv.weight",
+ "layers.1.attention.wo.weight",
+ "layers.1.feed_forward.w1.weight",
+ "layers.1.feed_forward.w3.weight",
+ "layers.1.feed_forward.w2.weight",
+ "layers.1.attention_norm.weight",
+ "layers.1.ffn_norm.weight",
+ "layers.2.attention.wq.weight"
+ ]
+
+
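+quick sanity check (a sketch i find useful, not required for the walkthrough): summing the number of elements in every tensor of the checkpoint should land at roughly 8 billion parameters, which is where the "8B" name comes from.
+
+
+```python
+# rough parameter count over every tensor in the checkpoint
+total_params = sum(tensor.numel() for tensor in model.values())
+print(f"{total_params / 1e9:.2f}B parameters")
+```
+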
+
+```python
+with open("Meta-Llama-3-8B/params.json", "r") as f:
+ config = json.load(f)
+config
+```
+
+
+
+
+ {'dim': 4096,
+ 'n_layers': 32,
+ 'n_heads': 32,
+ 'n_kv_heads': 8,
+ 'vocab_size': 128256,
+ 'multiple_of': 1024,
+ 'ffn_dim_multiplier': 1.3,
+ 'norm_eps': 1e-05,
+ 'rope_theta': 500000.0}
+
+
+
+## we use this config to infer details about the model like
+1. the model has 32 transformer layers
+2. each multi-head attention block has 32 heads
+3. the vocab size and so on
+
+
+```python
+dim = config["dim"]
+n_layers = config["n_layers"]
+n_heads = config["n_heads"]
+n_kv_heads = config["n_kv_heads"]
+vocab_size = config["vocab_size"]
+multiple_of = config["multiple_of"]
+ffn_dim_multiplier = config["ffn_dim_multiplier"]
+norm_eps = config["norm_eps"]
+rope_theta = torch.tensor(config["rope_theta"])
+```
+
+## converting text to tokens
+here we use tiktoken (i think an openai library) as the tokenizer
+
+
+<img src="images/tokens.png"/>
+
+
+
+```python
+prompt = "the answer to the ultimate question of life, the universe, and everything is "
+tokens = [128000] + tokenizer.encode(prompt)  # 128000 is the <|begin_of_text|> special token id
+print(tokens)
+tokens = torch.tensor(tokens)
+prompt_split_as_tokens = [tokenizer.decode([token.item()]) for token in tokens]
+print(prompt_split_as_tokens)
+```
+
+ [128000, 1820, 4320, 311, 279, 17139, 3488, 315, 2324, 11, 279, 15861, 11, 323, 4395, 374, 220]
+ ['<|begin_of_text|>', 'the', ' answer', ' to', ' the', ' ultimate', ' question', ' of', ' life', ',', ' the', ' universe', ',', ' and', ' everything', ' is', ' ']
+
+
+## converting tokens to their embedding
+IM SORRY but this is the only part of the codebase where i use an inbuilt neural network module
+
+anyway, so our [17x1] tokens are now [17x4096], i.e. 17 embeddings (one for each token) of length 4096
+
+
+note: keep track of the shapes, it makes it much easier to understand everything
+
+
+
+<img src="images/embeddings.png"/>
+
+
+
+```python
+embedding_layer = torch.nn.Embedding(vocab_size, dim)
+embedding_layer.weight.data.copy_(model["tok_embeddings.weight"])
+token_embeddings_unnormalized = embedding_layer(tokens).to(torch.bfloat16)
+token_embeddings_unnormalized.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
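+by the way, an embedding layer is just a row lookup into the weight matrix, so heres a sketch of the same thing with plain indexing (no nn.Embedding needed):
+
+
+```python
+# same lookup via plain indexing: one [4096] row per token id
+token_embeddings_manual = model["tok_embeddings.weight"][tokens]
+print(token_embeddings_manual.shape)  # torch.Size([17, 4096])
+```
+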
+
+## we then normalize the embedding using rms normalization
+please, note after this step the shapes dont change, the values are just normalized
+
+things to keep in mind, we need a norm_eps (from config) because we dont want to accidentally set rms to 0 and divide by 0
+
+here is the formula:
+
+
+<img src="images/rms.png"/>
+
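+written out as an equation (this matches the rms_norm function below):
+
+$$ \text{rms\_norm}(x) = \frac{x}{\sqrt{\frac{1}{4096}\sum_{i=1}^{4096} x_i^2 + \epsilon}} \cdot w $$
+
+where $\epsilon$ is norm_eps and $w$ is the learned per-dimension norm weight.
+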
+
+
+```python
+# def rms_norm(tensor, norm_weights):
+# rms = (tensor.pow(2).mean(-1, keepdim=True) + norm_eps)**0.5
+# return tensor * (norm_weights / rms)
+def rms_norm(tensor, norm_weights):
+ return (tensor * torch.rsqrt(tensor.pow(2).mean(-1, keepdim=True) + norm_eps)) * norm_weights
+```
+
+# building the first layer of the transformer
+
+### normalization
+you will see me accessing layer.0 from the model dict (this is the first layer)
+
+anyway, so after normalizing our shapes are still [17x4096] same as embedding but normalized
+
+
+
+<img src="images/norm.png"/>
+
+
+
+```python
+token_embeddings = rms_norm(token_embeddings_unnormalized, model["layers.0.attention_norm.weight"])
+token_embeddings.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+### attention implemented from scratch
+let's load the attention heads of the first layer of the transformer
+
+
+<img src="images/qkv.png"/>
+
+
+
+
+> when we load the query, key, value and output vectors from the model we notice the shapes to be [4096x4096], [1024x4096], [1024x4096], [4096x4096]
+
+> at first glance this is weird because ideally we want each q,k,v and o for each head individually
+
+> the authors of the code bundled them together because it helps parallelize the attention head multiplications.
+
+> im going to unwrap everything...
+
+
+```python
+print(
+ model["layers.0.attention.wq.weight"].shape,
+ model["layers.0.attention.wk.weight"].shape,
+ model["layers.0.attention.wv.weight"].shape,
+ model["layers.0.attention.wo.weight"].shape
+)
+```
+
+ torch.Size([4096, 4096]) torch.Size([1024, 4096]) torch.Size([1024, 4096]) torch.Size([4096, 4096])
+
+
+### unwrapping query
+in the next section we will unwrap the queries from multiple attention heads, the resulting shape is [32x128x4096]
+
+here, 32 is the number of attention heads in llama3, 128 is the size of the query vector and 4096 is the size of the token embedding
+
+
+```python
+q_layer0 = model["layers.0.attention.wq.weight"]
+head_dim = q_layer0.shape[0] // n_heads
+q_layer0 = q_layer0.view(n_heads, head_dim, dim)
+q_layer0.shape
+```
+
+
+
+
+ torch.Size([32, 128, 4096])
+
+
+
+### im going to implement the first head of the first layer
+here i access the query weight matrix of the first head of the first layer, the size of this query weight matrix is [128x4096]
+
+
+```python
+q_layer0_head0 = q_layer0[0]
+q_layer0_head0.shape
+```
+
+
+
+
+ torch.Size([128, 4096])
+
+
+
+### we now multiply the query weights with the token embedding, to receive a query for the token
+here you can see the resulting shape is [17x128], this is because we have 17 tokens and for each token there is a 128 length query.
+
+
+<img src="images/q_per_token.png"/>
+
+
+
+```python
+q_per_token = torch.matmul(token_embeddings, q_layer0_head0.T)
+q_per_token.shape
+```
+
+
+
+
+ torch.Size([17, 128])
+
+
+
+## positional encoding
+we are now at a stage where we have a query vector for each token in our prompt, but if you think about it -- each individual query vector has no idea about its position in the prompt.
+
+query: "the answer to the ultimate question of life, the universe, and everything is "
+
+in our prompt we have used "the" three times, and we need the query vectors of all 3 "the" tokens to be different (each of size [1x128]) based on their positions in the prompt. we perform these rotations using RoPE (rotary positional embeddings).
+
+### RoPE
+watch this video (this is what i watched) to understand the math.
+https://www.youtube.com/watch?v=o29P0Kpobz0&t=530s
+
+
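+the core idea in equation form (my summary of the video): split the 128-dim query into 64 pairs, and rotate pair $i$ of the token at position $m$ by the angle $m\theta_i$:
+
+$$ \begin{pmatrix} q'_{2i} \\ q'_{2i+1} \end{pmatrix} = \begin{pmatrix} \cos m\theta_i & -\sin m\theta_i \\ \sin m\theta_i & \cos m\theta_i \end{pmatrix} \begin{pmatrix} q_{2i} \\ q_{2i+1} \end{pmatrix}, \qquad \theta_i = \text{rope\_theta}^{-i/64} $$
+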
+
+
+<img src="images/rope.png"/>
+
+
+
+```python
+q_per_token_split_into_pairs = q_per_token.float().view(q_per_token.shape[0], -1, 2)
+q_per_token_split_into_pairs.shape
+```
+
+
+
+
+ torch.Size([17, 64, 2])
+
+
+
+in the above step, we split the query vectors into pairs so that we can apply a rotational angle shift to each pair!
+
+we now have a vector of size [17x64x2], this is the 128 length queries split into 64 pairs for each token in the prompt! each of those 64 pairs will be rotated by m*(theta) where m is the position of the token for which we are rotating the query!
+
+
+
+
+<img src="images/qsplit.png"/>
+
+
+## using multiplication of complex numbers to rotate a vector
+
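+why complex numbers work here: if you view a pair $(x, y)$ as the complex number $x + iy$, multiplying it by $e^{im\theta} = \cos m\theta + i \sin m\theta$ performs exactly the 2d rotation above:
+
+$$ (x + iy)\, e^{im\theta} = (x\cos m\theta - y\sin m\theta) + i\,(x\sin m\theta + y\cos m\theta) $$
+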
+
+<img src="images/freq_cis.png"/>
+
+
+
+```python
+zero_to_one_split_into_64_parts = torch.tensor(range(64))/64
+zero_to_one_split_into_64_parts
+```
+
+
+
+
+ tensor([0.0000, 0.0156, 0.0312, 0.0469, 0.0625, 0.0781, 0.0938, 0.1094, 0.1250,
+ 0.1406, 0.1562, 0.1719, 0.1875, 0.2031, 0.2188, 0.2344, 0.2500, 0.2656,
+ 0.2812, 0.2969, 0.3125, 0.3281, 0.3438, 0.3594, 0.3750, 0.3906, 0.4062,
+ 0.4219, 0.4375, 0.4531, 0.4688, 0.4844, 0.5000, 0.5156, 0.5312, 0.5469,
+ 0.5625, 0.5781, 0.5938, 0.6094, 0.6250, 0.6406, 0.6562, 0.6719, 0.6875,
+ 0.7031, 0.7188, 0.7344, 0.7500, 0.7656, 0.7812, 0.7969, 0.8125, 0.8281,
+ 0.8438, 0.8594, 0.8750, 0.8906, 0.9062, 0.9219, 0.9375, 0.9531, 0.9688,
+ 0.9844])
+
+
+
+
+```python
+freqs = 1.0 / (rope_theta ** zero_to_one_split_into_64_parts)
+freqs
+```
+
+
+
+
+ tensor([1.0000e+00, 8.1462e-01, 6.6360e-01, 5.4058e-01, 4.4037e-01, 3.5873e-01,
+ 2.9223e-01, 2.3805e-01, 1.9392e-01, 1.5797e-01, 1.2869e-01, 1.0483e-01,
+ 8.5397e-02, 6.9566e-02, 5.6670e-02, 4.6164e-02, 3.7606e-02, 3.0635e-02,
+ 2.4955e-02, 2.0329e-02, 1.6560e-02, 1.3490e-02, 1.0990e-02, 8.9523e-03,
+ 7.2927e-03, 5.9407e-03, 4.8394e-03, 3.9423e-03, 3.2114e-03, 2.6161e-03,
+ 2.1311e-03, 1.7360e-03, 1.4142e-03, 1.1520e-03, 9.3847e-04, 7.6450e-04,
+ 6.2277e-04, 5.0732e-04, 4.1327e-04, 3.3666e-04, 2.7425e-04, 2.2341e-04,
+ 1.8199e-04, 1.4825e-04, 1.2077e-04, 9.8381e-05, 8.0143e-05, 6.5286e-05,
+ 5.3183e-05, 4.3324e-05, 3.5292e-05, 2.8750e-05, 2.3420e-05, 1.9078e-05,
+ 1.5542e-05, 1.2660e-05, 1.0313e-05, 8.4015e-06, 6.8440e-06, 5.5752e-06,
+ 4.5417e-06, 3.6997e-06, 3.0139e-06, 2.4551e-06])
+
+
+
+
+```python
+freqs_for_each_token = torch.outer(torch.arange(17), freqs)
+freqs_cis = torch.polar(torch.ones_like(freqs_for_each_token), freqs_for_each_token)
+freqs_cis.shape
+
+# viewing the row at index 3 of freqs_cis (the rotations for the token at position 3)
+value = freqs_cis[3]
+plt.figure()
+for i, element in enumerate(value[:17]):  # plot the first 17 of the 64 complex rotations
+ plt.plot([0, element.real], [0, element.imag], color='blue', linewidth=1, label=f"Index: {i}")
+ plt.annotate(f"{i}", xy=(element.real, element.imag), color='red')
+plt.xlabel('Real')
+plt.ylabel('Imaginary')
+plt.title('Plot of one row of freqs_cis')
+plt.show()
+```
+
+
+
+![png](images/implllama3_30_0.png)
+
+
+
+### now that we have a complex number (the angle change vector) for every token's query element
+we can convert our queries (the ones we split into pairs) into complex numbers and then multiply by freqs_cis to rotate each query based on its position
+
+honestly this is beautiful to think about :)
+
+
+```python
+q_per_token_as_complex_numbers = torch.view_as_complex(q_per_token_split_into_pairs)
+q_per_token_as_complex_numbers.shape
+```
+
+
+
+
+ torch.Size([17, 64])
+
+
+
+
+```python
+q_per_token_as_complex_numbers_rotated = q_per_token_as_complex_numbers * freqs_cis
+q_per_token_as_complex_numbers_rotated.shape
+```
+
+
+
+
+ torch.Size([17, 64])
+
+
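+a quick sanity check of the rotation: token 0 sits at position 0, so its angle is 0 and its query should be unchanged, while token 1's first pair should be rotated by exactly freqs[0] radians:
+
+
+```python
+# position 0 -> angle 0 -> no rotation
+print(torch.allclose(q_per_token_as_complex_numbers[0], q_per_token_as_complex_numbers_rotated[0]))
+# position 1, pair 0 -> rotated by 1 * freqs[0] radians
+expected = q_per_token_as_complex_numbers[1, 0] * torch.polar(torch.tensor(1.0), freqs[0])
+print(torch.allclose(expected, q_per_token_as_complex_numbers_rotated[1, 0]))
+```
+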
+
+### after the rotated vector is obtained
+we can get our queries back as pairs by viewing the complex numbers as real numbers again
+
+
+```python
+q_per_token_split_into_pairs_rotated = torch.view_as_real(q_per_token_as_complex_numbers_rotated)
+q_per_token_split_into_pairs_rotated.shape
+```
+
+
+
+
+ torch.Size([17, 64, 2])
+
+
+
+the rotated pairs are now merged back, giving us a new query vector (the rotated query vector) of shape [17x128], where 17 is the number of tokens and 128 is the dim of the query vector
+
+
+```python
+q_per_token_rotated = q_per_token_split_into_pairs_rotated.view(q_per_token.shape)
+q_per_token_rotated.shape
+```
+
+
+
+
+ torch.Size([17, 128])
+
+
+
+# keys (almost the same as queries)
+
+
+<img src="images/keys.png"/>
+
+im lazy as fuck, so im not going to go through the math for keys, the only things you need to keep in mind are:
+
+> keys generate key vectors also of dimension 128
+
+> keys have only 1/4th the number of weights that queries have, this is because the weights for keys are shared across 4 query heads at a time to reduce the number of computations needed (theres a quick check of this mapping right after this list)
+
+> keys are also rotated to add positional info, just like queries, for the same reasons
+
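+to make the sharing concrete, heres the query-head to kv-head mapping (this is just the head//4 indexing that the multi-head loop further down uses):
+
+
+```python
+# 32 query heads map onto 8 key/value heads, 4 query heads per kv head
+print([head // 4 for head in range(32)])
+# -> [0, 0, 0, 0, 1, 1, 1, 1, ..., 7, 7, 7, 7]
+```
+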
+
+```python
+k_layer0 = model["layers.0.attention.wk.weight"]
+k_layer0 = k_layer0.view(n_kv_heads, k_layer0.shape[0] // n_kv_heads, dim)
+k_layer0.shape
+```
+
+
+
+
+ torch.Size([8, 128, 4096])
+
+
+
+
+```python
+k_layer0_head0 = k_layer0[0]
+k_layer0_head0.shape
+```
+
+
+
+
+ torch.Size([128, 4096])
+
+
+
+
+```python
+k_per_token = torch.matmul(token_embeddings, k_layer0_head0.T)
+k_per_token.shape
+```
+
+
+
+
+ torch.Size([17, 128])
+
+
+
+
+```python
+k_per_token_split_into_pairs = k_per_token.float().view(k_per_token.shape[0], -1, 2)
+k_per_token_split_into_pairs.shape
+```
+
+
+
+
+ torch.Size([17, 64, 2])
+
+
+
+
+```python
+k_per_token_as_complex_numbers = torch.view_as_complex(k_per_token_split_into_pairs)
+k_per_token_as_complex_numbers.shape
+```
+
+
+
+
+ torch.Size([17, 64])
+
+
+
+
+```python
+k_per_token_split_into_pairs_rotated = torch.view_as_real(k_per_token_as_complex_numbers * freqs_cis)
+k_per_token_split_into_pairs_rotated.shape
+```
+
+
+
+
+ torch.Size([17, 64, 2])
+
+
+
+
+```python
+k_per_token_rotated = k_per_token_split_into_pairs_rotated.view(k_per_token.shape)
+k_per_token_rotated.shape
+```
+
+
+
+
+ torch.Size([17, 128])
+
+
+
+## at this stage, we now have the rotated queries and keys, for each token.
+
+
+<img src="images/keys0.png"/>
+
+each of the queries and keys is now of shape [17x128].
+
+## in the next step we will multiply the query and key matrices
+doing this will give us a score mapping each token to every other token
+
+this score describes how well each token's query relates to each token's key.
+THIS IS SELF ATTENTION :)
+
+the shape of the attention score matrix (qk_per_token) is [17x17] where 17 is the number of tokens in the prompt
+
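+in equation form, this is the standard scaled dot-product score:
+
+$$ \text{qk\_per\_token} = \frac{Q_{\text{rotated}}\, K_{\text{rotated}}^\top}{\sqrt{d_{\text{head}}}}, \qquad d_{\text{head}} = 128 $$
+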
+
+
+<img src="images/qkmatmul.png"/>
+
+
+
+```python
+qk_per_token = torch.matmul(q_per_token_rotated, k_per_token_rotated.T)/(head_dim)**0.5
+qk_per_token.shape
+```
+
+
+
+
+ torch.Size([17, 17])
+
+
+
+# we now have to mask query key scores
+during the training process of llama3, the future token qk scores are masked.
+
+why? because during training we only learn to predict tokens using past tokens.
+
+as a result, during inference we set the future token scores to -inf, so that after the softmax they contribute exactly zero.
+
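+in matrix form, the mask we add to the scores before softmax is upper triangular:
+
+$$ M_{ij} = \begin{cases} 0 & \text{if } j \le i \\ -\infty & \text{if } j > i \end{cases} $$
+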
+
+<img src="images/mask.png"/>
+
+
+
+```python
+def display_qk_heatmap(qk_per_token):
+ _, ax = plt.subplots()
+ im = ax.imshow(qk_per_token.to(float).detach(), cmap='viridis')
+ ax.set_xticks(range(len(prompt_split_as_tokens)))
+ ax.set_yticks(range(len(prompt_split_as_tokens)))
+ ax.set_xticklabels(prompt_split_as_tokens)
+ ax.set_yticklabels(prompt_split_as_tokens)
+ ax.figure.colorbar(im, ax=ax)
+
+display_qk_heatmap(qk_per_token)
+```
+
+
+
+![png](images/implllama3_39_0.png)
+
+
+
+
+```python
+mask = torch.full((len(tokens), len(tokens)), float("-inf"), device=tokens.device)
+mask = torch.triu(mask, diagonal=1)
+mask
+```
+
+
+
+
+ tensor([[0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -inf, -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -inf],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
+
+
+
+
+```python
+qk_per_token_after_masking = qk_per_token + mask
+display_qk_heatmap(qk_per_token_after_masking)
+```
+
+
+
+![png](images/implllama3_41_0.png)
+
+
+
+
+
+<img src="images/softmax.png"/>
+
+
+
+```python
+qk_per_token_after_masking_after_softmax = torch.nn.functional.softmax(qk_per_token_after_masking, dim=1).to(torch.bfloat16)
+display_qk_heatmap(qk_per_token_after_masking_after_softmax)
+```
+
+
+
+![png](images/implllama3_42_0.png)
+
+
+
+## values (almost the end of attention)
+
+
+
+<img src="images/value.png"/>
+
+these scores (0-1) are used to determine how much of each token's value vector gets mixed into each token's output
+
+> just like keys, value weights are also shared across every 4 attention heads (to save computation)
+
+> as a result, the shape of the value weight matrix below is [8x128x4096]
+
+
+
+```python
+v_layer0 = model["layers.0.attention.wv.weight"]
+v_layer0 = v_layer0.view(n_kv_heads, v_layer0.shape[0] // n_kv_heads, dim)
+v_layer0.shape
+```
+
+
+
+
+ torch.Size([8, 128, 4096])
+
+
+
+the first layer, first head value weight matrix is given below
+
+
+```python
+v_layer0_head0 = v_layer0[0]
+v_layer0_head0.shape
+```
+
+
+
+
+ torch.Size([128, 4096])
+
+
+
+## value vectors
+
+
+<img src="images/v0.png"/>
+
+we now use the value weights to get the attention values per token, this is of size [17x128] where 17 is the number of tokens in the prompt and 128 is the dim of the value vector per token
+
+
+```python
+v_per_token = torch.matmul(token_embeddings, v_layer0_head0.T)
+v_per_token.shape
+```
+
+
+
+
+ torch.Size([17, 128])
+
+
+
+## attention
+
+
+<img src="images/attention.png"/>
+
+the resultant attention vector after multiplying with the values per token is of shape [17x128]
+
+
+```python
+qkv_attention = torch.matmul(qk_per_token_after_masking_after_softmax, v_per_token)
+qkv_attention.shape
+```
+
+
+
+
+ torch.Size([17, 128])
+
+
+
+# multi head attention
+
+
+<img src="images/heads.png"/>
+
+WE NOW HAVE THE ATTENTION VALUE OF THE FIRST LAYER AND FIRST HEAD
+
+now im going to run a loop and perform the exact same math as the cells above but for every head in the first layer
+
+
+```python
+qkv_attention_store = []
+
+for head in range(n_heads):
+ q_layer0_head = q_layer0[head]
+ k_layer0_head = k_layer0[head//4] # key weights are shared across 4 heads
+ v_layer0_head = v_layer0[head//4] # value weights are shared across 4 heads
+ q_per_token = torch.matmul(token_embeddings, q_layer0_head.T)
+ k_per_token = torch.matmul(token_embeddings, k_layer0_head.T)
+ v_per_token = torch.matmul(token_embeddings, v_layer0_head.T)
+
+ q_per_token_split_into_pairs = q_per_token.float().view(q_per_token.shape[0], -1, 2)
+ q_per_token_as_complex_numbers = torch.view_as_complex(q_per_token_split_into_pairs)
+ q_per_token_split_into_pairs_rotated = torch.view_as_real(q_per_token_as_complex_numbers * freqs_cis[:len(tokens)])
+ q_per_token_rotated = q_per_token_split_into_pairs_rotated.view(q_per_token.shape)
+
+ k_per_token_split_into_pairs = k_per_token.float().view(k_per_token.shape[0], -1, 2)
+ k_per_token_as_complex_numbers = torch.view_as_complex(k_per_token_split_into_pairs)
+ k_per_token_split_into_pairs_rotated = torch.view_as_real(k_per_token_as_complex_numbers * freqs_cis[:len(tokens)])
+ k_per_token_rotated = k_per_token_split_into_pairs_rotated.view(k_per_token.shape)
+
+ qk_per_token = torch.matmul(q_per_token_rotated, k_per_token_rotated.T)/(128)**0.5
+ mask = torch.full((len(tokens), len(tokens)), float("-inf"), device=tokens.device)
+ mask = torch.triu(mask, diagonal=1)
+ qk_per_token_after_masking = qk_per_token + mask
+ qk_per_token_after_masking_after_softmax = torch.nn.functional.softmax(qk_per_token_after_masking, dim=1).to(torch.bfloat16)
+    qkv_attention = torch.matmul(qk_per_token_after_masking_after_softmax, v_per_token)
+ qkv_attention_store.append(qkv_attention)
+
+len(qkv_attention_store)
+```
+
+
+
+
+ 32
+
+
+
+
+
+<img src="images/stacked.png"/>
+
+we now have the qkv_attention matrix for all 32 heads of the first layer, next im going to merge all the attention outputs into one large matrix of size [17x4096]
+
+we are almost at the end :)
+
+
+```python
+stacked_qkv_attention = torch.cat(qkv_attention_store, dim=-1)
+stacked_qkv_attention.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+# weight matrix, one of the final steps
+
+
+<img src="images/weightmatrix.png"/>
+
+one of the last things to do for layer 0's attention is to multiply the stacked attention output with the output weight matrix (wo)
+
+
+```python
+w_layer0 = model["layers.0.attention.wo.weight"]
+w_layer0.shape
+```
+
+
+
+
+ torch.Size([4096, 4096])
+
+
+
+### this is a simple linear layer, so we just matmul
+
+
+```python
+embedding_delta = torch.matmul(stacked_qkv_attention, w_layer0.T)
+embedding_delta.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+
+
+<img src="images/afterattention.png"/>
+
+we now have the change in the embedding values after attention, which should be added to the original token embeddings
+
+
+```python
+embedding_after_edit = token_embeddings_unnormalized + embedding_delta
+embedding_after_edit.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+## we normalize the embedding (after the attention edit) and then run it through a feed forward neural network
+
+
+<img src="images/norm_after.png"/>
+
+
+
+```python
+embedding_after_edit_normalized = rms_norm(embedding_after_edit, model["layers.0.ffn_norm.weight"])
+embedding_after_edit_normalized.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+## loading the ff weights and implementing the feed forward network
+
+
+<img src="images/swiglu.png"/>
+
+in llama3, they used a SwiGLU feedforward network. this network architecture is really good at adding non-linearity when the model needs it.
+
+its pretty standard to use this feed forward network architecture in llms these days
+
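+in equation form, the swiglu feed forward matches the three matmuls in the cell below:
+
+$$ \text{ffn}(x) = \big(\text{silu}(x W_1^\top) \odot x W_3^\top\big)\, W_2^\top $$
+
+(side note, if i recall meta's sizing rule correctly: the hidden dim of w1/w3 comes out to multiple_of * ceil(ffn_dim_multiplier * (2/3) * 4 * dim / multiple_of) = 14336 for this config)
+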
+
+```python
+w1 = model["layers.0.feed_forward.w1.weight"]
+w2 = model["layers.0.feed_forward.w2.weight"]
+w3 = model["layers.0.feed_forward.w3.weight"]
+output_after_feedforward = torch.matmul(torch.functional.F.silu(torch.matmul(embedding_after_edit_normalized, w1.T)) * torch.matmul(embedding_after_edit_normalized, w3.T), w2.T)
+output_after_feedforward.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+# WE FINALLY HAVE NEW EDITED EMBEDDINGS FOR EACH TOKEN AFTER THE FIRST LAYER
+just 31 more layers to go before we are done (one for loop away)
+
+you can imagine this edited embedding as having information about all queries asked on the first layer
+
+now each layer will encode more and more complex queries on the questions asked, until we have an embedding that knows everything about the next token that we need.
+
+
+```python
+layer_0_embedding = embedding_after_edit+output_after_feedforward
+layer_0_embedding.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+# god, everything all at once
+
+
+<img src="images/god.png"/>
+
+yep, this is it. everything we did before, all at once, for every single layer.
+
+
+# have fun reading :)
+
+
+```python
+final_embedding = token_embeddings_unnormalized
+for layer in range(n_layers):
+ qkv_attention_store = []
+ layer_embedding_norm = rms_norm(final_embedding, model[f"layers.{layer}.attention_norm.weight"])
+ q_layer = model[f"layers.{layer}.attention.wq.weight"]
+ q_layer = q_layer.view(n_heads, q_layer.shape[0] // n_heads, dim)
+ k_layer = model[f"layers.{layer}.attention.wk.weight"]
+ k_layer = k_layer.view(n_kv_heads, k_layer.shape[0] // n_kv_heads, dim)
+ v_layer = model[f"layers.{layer}.attention.wv.weight"]
+ v_layer = v_layer.view(n_kv_heads, v_layer.shape[0] // n_kv_heads, dim)
+ w_layer = model[f"layers.{layer}.attention.wo.weight"]
+ for head in range(n_heads):
+ q_layer_head = q_layer[head]
+ k_layer_head = k_layer[head//4]
+ v_layer_head = v_layer[head//4]
+ q_per_token = torch.matmul(layer_embedding_norm, q_layer_head.T)
+ k_per_token = torch.matmul(layer_embedding_norm, k_layer_head.T)
+ v_per_token = torch.matmul(layer_embedding_norm, v_layer_head.T)
+ q_per_token_split_into_pairs = q_per_token.float().view(q_per_token.shape[0], -1, 2)
+ q_per_token_as_complex_numbers = torch.view_as_complex(q_per_token_split_into_pairs)
+ q_per_token_split_into_pairs_rotated = torch.view_as_real(q_per_token_as_complex_numbers * freqs_cis)
+ q_per_token_rotated = q_per_token_split_into_pairs_rotated.view(q_per_token.shape)
+ k_per_token_split_into_pairs = k_per_token.float().view(k_per_token.shape[0], -1, 2)
+ k_per_token_as_complex_numbers = torch.view_as_complex(k_per_token_split_into_pairs)
+ k_per_token_split_into_pairs_rotated = torch.view_as_real(k_per_token_as_complex_numbers * freqs_cis)
+ k_per_token_rotated = k_per_token_split_into_pairs_rotated.view(k_per_token.shape)
+ qk_per_token = torch.matmul(q_per_token_rotated, k_per_token_rotated.T)/(128)**0.5
+ mask = torch.full((len(token_embeddings_unnormalized), len(token_embeddings_unnormalized)), float("-inf"))
+ mask = torch.triu(mask, diagonal=1)
+ qk_per_token_after_masking = qk_per_token + mask
+ qk_per_token_after_masking_after_softmax = torch.nn.functional.softmax(qk_per_token_after_masking, dim=1).to(torch.bfloat16)
+ qkv_attention = torch.matmul(qk_per_token_after_masking_after_softmax, v_per_token)
+ qkv_attention_store.append(qkv_attention)
+
+ stacked_qkv_attention = torch.cat(qkv_attention_store, dim=-1)
+ w_layer = model[f"layers.{layer}.attention.wo.weight"]
+ embedding_delta = torch.matmul(stacked_qkv_attention, w_layer.T)
+ embedding_after_edit = final_embedding + embedding_delta
+ embedding_after_edit_normalized = rms_norm(embedding_after_edit, model[f"layers.{layer}.ffn_norm.weight"])
+ w1 = model[f"layers.{layer}.feed_forward.w1.weight"]
+ w2 = model[f"layers.{layer}.feed_forward.w2.weight"]
+ w3 = model[f"layers.{layer}.feed_forward.w3.weight"]
+ output_after_feedforward = torch.matmul(torch.functional.F.silu(torch.matmul(embedding_after_edit_normalized, w1.T)) * torch.matmul(embedding_after_edit_normalized, w3.T), w2.T)
+ final_embedding = embedding_after_edit+output_after_feedforward
+```
+
+# we now have the final embedding, the best guess the model could make about the next token
+the shape of the embedding is the same as regular token embeddings [17x4096] where 17 is the number of tokens and 4096 is the embedding dim
+
+
+<img src="images/last_norm.png"/>
+
+
+
+```python
+final_embedding = rms_norm(final_embedding, model["norm.weight"])
+final_embedding.shape
+```
+
+
+
+
+ torch.Size([17, 4096])
+
+
+
+# finally, lets decode the embedding into the token value
+
+
+<img src="images/finallayer.png"/>
+
+we will use the output decoder to convert the final embedding into a token
+
+
+```python
+model["output.weight"].shape
+```
+
+
+
+
+ torch.Size([128256, 4096])
+
+
+
+# we use the embedding of the last token to predict the next value
+hopefully in our case, 42 :)
+note: 42 is the answer to "the answer to the ultimate question of life, the universe, and everything is ", according to the book "hitchhiker's guide to the galaxy". most modern llms would answer with 42 here, which should validate our entire code! wish me luck :)
+
+
+```python
+logits = torch.matmul(final_embedding[-1], model["output.weight"].T)
+logits.shape
+```
+
+
+
+
+ torch.Size([128256])
+
+
+
+### the model predicted token number 2983 as the next token, is this the token number for 42?
+IM HYPING YOU UP, this is the last cell of code, hopefully you had fun :)
+
+
+```python
+next_token = torch.argmax(logits, dim=-1)
+next_token
+```
+
+
+
+
+ tensor(2983)
+
+
+
+# lets fucking go
+
+
+<img src="images/42.png"/>
+
+
+
+```python
+tokenizer.decode([next_token.item()])
+```
+
+
+
+
+ '42'
+
+
+
+# thank you, i love you :)
+
+This is the end. Hopefully you enjoyed reading it!
+
+If you want to support my work
+
+1. follow me on twitter https://twitter.com/naklecha
+2. or, buy me a coffee [https://www.buymeacoffee.com/naklecha](https://www.buymeacoffee.com/naklecha)
+
+Honestly, if you made it this far you already made my day :)
+
+## what motivates me?
+
+My friends and I are on a mission - to make research more accessible!
+We created a research lab called A10 - [AAAAAAAAAA.org](http://aaaaaaaaaa.org/)
+
+A10 twitter - https://twitter.com/aaaaaaaaaaorg
+
+our thesis:
+
+
+<img src="images/a10.png"/>
+
diff --git a/Model_Architecture_Discussions/llama3/images/42.png b/Model_Architecture_Discussions/llama3/images/42.png
new file mode 100644
index 0000000000000000000000000000000000000000..a369a41f4cf9482b4cf6a9544b296355357ba265
GIT binary patch
literal 791272
zcmeFZby$?$7d8qA3KF6yDT1W5LAQXUgwhQnozfjDB7$@XLw5~B&Cml_NDeae(A^C~
za~|}4UzMNd`>yl<@w!wPnEmX%@?Q5^dxBMzWC$-)UdF({Ae57pdWwN@Nf-kI=K}6U
z;FSub1}E?zrqfdy35?>-n+w1guW`q$%(Mm)+XTy9;L_f;!od}mS;@?t`oI0A
z_;`!?Uw8QX)}_H10dsji&dxV~fAw!i!xi2q_&@F+*iU2L4chga(D1OZ|J!lJiLw4J
zKyrhC99YRsi^T|i9^0~9nBs14r2;qW(~$wr^b&ab?Fc5UY#fRxigkqq_<@h8zzg|+
zm}xWFhpysBzJ`f+?mj+SHXxl8*oW1V;-J3?-TyMT)wli6HLuE^nIzZ23+4J&EwAB=
zTH?}nF&f>TtHZ0lUns2EnW9Ta)R3|EhrD!su5RGKjys$XZn6}|9y`Xp+G^KS7Sg_mm3o%by%F(d^NUFQ4N~E{ehwl7r(9~TLbQixR_sHAHBp`405=c{>k%Mp;{7tt20ET^vDxvN*Nee87h*bQ~s!;X8*7N-jl
zRO*L!I4(_yZXaNl(r~8T{^8tRAdCQWdsdsjMhGJetgDJfq8@XCgLMt+#d^F=KKBAw
zFvj_@12A#h3%|uf;I=u~7ku{@X?R;!hhUgVUvB?6{K6ga@q{qtYfD}vz0gZhrm
zqd36?jW{+MG5gEN0is}5vWIC3MuY9wJo=+H${=uk-x_btexlKz+l-(E7W8k@{fibc
zu`jX`he_%8YV*@M_AZbxa=RKyW5P
z+GK28VdR441C76|8E~6Kt07n0U|53-@LtD>4U?ed8a-T0>>B*X>@9KG59EhqU}Apn
zSP`p&-anky2n##Ko9J(Wh?x9!<0c``waYRyBDs%ak{%ZbU3&P91(!f;ygpYcLhsmT
zw}%RPK{V*X)w2lQE#u$#z$b-={?T88Avql;sB?FLZ}IcoR*NoaJmGisY62|WZN|H%
zHpSPcu}wRQnJk~{px_?{!T^`c0-~k)kf`Uyvv1A^!@F7mv%7DXg$G!11|RCzKHwi{
zbGQXKj7npH5ZPn)h@rTMx(cYTzxfODvp0*^;1av%v_+dG{m)8(AxSc+yj%;Ip%G
z?fZ0g8Mm&p0V=bGl$qYYu@U>Si@dJ2OGkP@a^sU9Td<@DY=SpDF@WwbX^J4WakPe)
zQ1}s3{4DN^IhA}zYU~5DJFOkW4JZChO*vsFSDthIIM)y!aIO)0^TxlOONNd3vh`3t
z$Ad;g2sM~-j{dGn071ivd%#Hp{q@6RzOF5Pz|a6|+QHf5D(mt48cO^Z|1^4wqa#RX
z$I&!g)JSZY(01Ej+z{)L$HUTwF42Lpb~6_}L+NW^S(SH6F?E
z;Q}%@1ThyK8cN_+T(8sC-tLTAiG72CwZPVOH4I#Dh1KM*d~|@ohdj!lyFFsx0Yja9
zA9ns~TDqZAZfu6VhZ*(#NO(c$hiXp&r#wC^A^UZVA72H;kd?ULuC#q@iHjr62ZLk#
z4ZIX@`D{NKZ18KqzsFHm(THT_
zcIpNuc9h|G*_tR<;)Qp|b|>hN+?Fr+ZBxm<^S!M56Q6X4(b3J*->5Zd#0n%4e7ig?
z4JQXKd@24XTT4v{E|(^A@5Z`^KfH!E5Qtx@^TzkD|7QsQ9P=Wb(L_#Q91jmqneVaZ
zK5FcQ$-1v7Qoq(!PE|F!0tTyT(;A;sSJ1i3?|bB4I^Km(z(G*a)n$HJj61XKY=aFj-t@iU^9@`p^cHpp~7U;9+Mr=Tl9!
zfe*j}r+|WkVwwR9%e8}m%lHpLs83}LPJVV~;;)`?QHsottU@o?FWW|y#AaK8vM_CZ
zUD5B{3s<+llB^#_d2`njMZW*(xCB~8>wb&m>m2<5>w;lWe|3ev{{FTcMbPvYGDGLv
zb8#HH!@Q8F?=Nfjx8`aWj5)e=>)cDPlaVQScvRzaIypIw6l(H#uzyw4))q)Z4Y{wh
zNvK+r`g%Tu)K~iVz>5lcb0)xrg-)?x(mfqUYe~shLj71z3mr!)?Nf_%pso6CSek{x
zJbdLP4m~gR@x@l^6$`D+%*@yDFJI2e%KGeMsHQeOimEo)DXbqt18_s!C=Kgii^6fo
zuYq+XUCY9&?Jz95d@hV)e&Oa%U?|>t7ORHoO~ud0s?XT$LC&e3e=7?Kl?^cJak@Z8a_>WfF5^*vNpx_-7z?bbPjSH_siF>dtb?tjT
zs(=Is3h(R`Jj%bevj2MTRscN-zt1kYfQNLcP)lE3g{6^F&@`|a;P
zqpPeo010D!DhPf+j$hf1b_AQ9-)pmlc_zhc^#w%3!=h<$&^(o6%>&^z{h37B`NnHl
zM_*hPb+SK&4v!7OY{`d-7hj6w<_@bU$8{)ePtJE}z7xkyXuh+pNN+&_Yia%)juO~-
zPao?F$Mv0;PYN$XcO*FopsY`HRiPCXYButd@7`TTAJr4bi*s+}GoZcL`h*h4GBj3l
zX@uMe8{W7|Zq$wxU|;e~4%qceYu-Uu*c6>)Hpy32;Uo#1tS%WbE#K;^+(qDe8C~GH
zC2QW!$UOC}ndE!H6VRRhgZpafYbwG^0ig*dKMvllc?M!qMd;7c+HZ+Q_A2IKh$DVm
zal?tHcD*OG@P5S(f5Ub*SIZ+Z{ppu?3vXpnQ(q_v$YN>y90CTo^HBx5Sp{%?o8q30FWUF3xpjv=
zUG;D^d4IH@%{r)yVy#YiC}63RUWbkeei~6I_WZltVE3&!8BNW$SCG~CgIS^$f&tu@s@F|0kiG%ed%=vXT
zOs@a9_l4;q&(~@5uZQ)O#fq+KmAm~=$z5$CA#3lH>kE5cy!cjyz3V6OyDe;}#q6AX
z{h|WrP|+n168vdf=J!Yz*K#7fo7TNHQ*g6mAA98B*OLU}{?U@!C!4*)aTi2iSVcM!
zEW}hEQY{Nu-YGI(t;$%9T?+QGLL%CnKvpwuh~=ooH$=w|L-I6E53|$`x8O*o0_)P}
z3+0R8K_8733t}(A`P;njiwzOB-3Y{m0}n0usz3MqA;E}%xkQSd%K<|IKa=&hmy4qZ
zpLx$LDqx#reZh}lso^6~2mb+b0M20un|;2_g}&i%WUY1VWS6fJo`s`xxqCzQpve!Q>Pc*3Hl5_Jp3!Y5kw=wvC`X*%l5%!OGk_^!YA
zNipJ`e}AjFwYd`Q-`&-wyx5#m2wNJdbPOL}pL=h2+EaFO|JYoqJED`BRB3ego5uc3w9wQX(e(R{
zw(Ym*iN^9%p4{br7GY$j)h(nn}U$5wY
z4!*JFmrD5cHt$=g?5dj7oriRiD|*OEXuIKcyw-i}d_JYz_8ndkR!I<||m7IF5w(r=zn=O1i+uI@;O`eK(*!j~+eZ
zB;R3`!jr6LcG%M_+&x;h@2fIu#o}v=w55ECPss7S+)O=a%*!G~@BMc2ktyVibrU9kS^`ztoi#>@I_t_$wZFb&z=Ljr?c
zvvzjk+uY=5KHRKc{`{geTF4LTH+!sgp?R!$AphYDo6KbUR37|IcH!Nx9%-fez9*lF
z_(o5AO4em{%;QH`?)jp+>2C{1;qN*Ysq6V}#uq-OWh*^F=a2Cf`klwj5Uv_X=(_gc?mCiba;
z!77%2n~_t$Nc*YlfVwcX@y^i42bN%z*g~RagQ(K?HSREtTgUjpX(%
zugy_hZj(r#a?$Az3FC@hnA)69d*gZqu9vMEYGsi{5m`N!qn>7tU~y^oe69t+5)%9WS$hTF$9qs55waV46G8-Z9hjK>hx9`T*p{`W4E(tA7
zJ$w$+nA3=HK2~|@(!~0xz68l{Tg2an+BPU9{?4jCr})h+-#o>a=wi3Qm;u-68U!>l_c4qnI{vn1
z%G29(G<+Jlc)LRV4OWb6K3vN#NGg=`XR)J0jpQ6YL#3pr14|t2c|H=k;xIGybo;(m
z11}}sN2?@)&=pC+*cV>r>Pz-K7?lf-3fu5Azz=|lCKNz_5synVg`)lNd3Kpg3Z)bJc7I`A!MX_;l@6>HMwaXYpB?^x!?)DT&
z+71o#jk-%Ffv#+?gbx
z%A}M;ep=QziZ8LkLoL=!%Id4q&wUi{#H0X!6Jk)lbH_GufSKo8IZXZ4UHn#dsm-Bq
z9&9j@hZ?Nyz6W;ehgP84PiL_exlYHB%t94biYO_FQUE$N?H9?UA@Q5W?;#23E-#C9lev`wq
za_D_B3vvr24xcr~P;qQ2m&Lf1wbw1%a>R@WCDxFpVF#9f518fcMdnyf%L|
zt?#=QaF5IV`rW1nOl{2D@dk&*Ecz(q0D2E>U4#`BitU%xeTZCW+$#bu4{cI1KyL0B#2kZOYvq_14kOvZqEi5sLh+!
zSssMU(9{^@&a{z=yUm-^ve8fIj5fXU)`uWTA)^$9olx=3O-2$TJfW3t*xGBBN1G4z
z4R?Tq>eLX#r_R%M3;eivnf~!D-3S)m<(j`>GmjkN&6&06oGGL`uRRiD^
z%QcRk?T?D~)K5T^mBqzMR@T;Ea=z*}_|^k8ZNU?i8N@Y(2gKPA!s;)!UV1Y`nX&76
zR!M)jXY%~yX#bh&cG&LSp0bicM543ozyj?y8UF5O*-J`n*QyBKzSHpiL5nLs(q#QWf2;?ivG
z)P0v$1E1s$rhnu4BV+>+octnY
zVcOyMa9@VX5m!**+uDtGIZ`UL8)Ezy0K?Z&t72AmbJ$P)n6s$vQ
zwG^nMK0n(5nt}EkIH}I*Xf3Koi*9@-8O>iszt%?;h;tAm3%V2}Id{uFTM8PwUZas<
zg|38aF@sjvZX=N^XQ~^|)Ko`w=v1c#rHJ>RZ_n>n=
zsk;u!sf5Sa9$m>RQUq=hh%iHr?79>MLJi6n;bkueuD6czu0hS!tp*f`gszk<*plQ{
zK}UFb>}4+q>Bd(`7cv?4S{kk)EnlGfc11YXd?k0RZcl$IESNb>8O)w|&K||ydl`um
ztq?DBA#5KV>ZbTI!i(}C?H=ME;wc#^=q+mNzZD_2Sd;Ln*M_iz&$(biD;(;nD@OLH
z`WTXF8Cy=G;Q0J_R7-unobp4%+V(ri8jFYMZVG+tOK9Q7m-dBrJ39CQxiU>`XYSJY
z60!2arv
zI?%FyQVyNz9IIV;rHi%ghzbCD^Bwu@hUZ7c(8Es84wuovin-R9`HE=o(_^$(ZVkY!
zY**|^fmwS#e%CHHt7b`$=g^&n&CSewG34Ntge44D1KHIBx{Pc?E;#DtikPi~akhS@?58o5sIcL?D+w#1h8MRLQ@4A&Gg@D}LS3r*~>
z_x_5e)`nAG-$x5L&3@aTOQl(Rs~B(nFi+i2XI)!geMc~SwVV~oi{rnMr|wRa
zHM3ZgW>Eg_BV4C(_Aa$iIcrZqMS$T(SE05zS~Zz>cXSRd>$l{n(>0KlZXBd>m&gsJ
zj$})CXm;HwRG%9-ACCw$M7i_`QJpAv)M~J<#F$$4sjD!s+xRruAuOUehyZo(EwbZufss#
zxh_njEWVciRv?%6!xYa&7J-=<)#+J~y=zmOEc(^q?TBS8QmweSxEec8^$hc*7t(bo
zUPU_vg^)ulTU!^*JjDcSfPM(!<>6@=t@qNmu!4|A73AHIFmz;9w>fH9dnZ-z=6^GaW8PEFBu!iGETgN%15aWxOY#m#RiOJVd
zk0}geIGPt~5>(q9XOIn^`mm*nl23!|&c~Db)~0@Q+l{0uzRSZcj-xNKk2U0V{5?kO
z`@W67UX%ruxhL*=4Gri~6yB&pwrx#Uz4P|)(5?4ILqIj)DS<2#dD%3SWc?*bFgnCS
z)>9>yXjdm)qx_%21X(7`la83j%CF$(l3W(lALlO6i~$QDZzDIu-0CFZCp!>@Cr_T}
zfbP0uUt}{<9URhp@xrF#Gf+pjv@oaMY)AsrSf%HwzXQr^AMHnZftkBjwhP_a8&lCM
zigq6lKGa%!rW$aUB4C_m&y3V#kUS!mlP_9!eXU(Wj#dXpHcFhlpX7LNbSIQKIHpfq
zx>y3-&SGG(2Hs*lP4SC?gZQz7%$+
za|2d%Aic#);8@GGpc-v4{EC-q47=5(R>)60g_lLTx^8^XjJpMA-FE1IF7`9eO9Cz?
z5R<5fe;)WhysX7I#M7@wj}YDpP4d|t+yvYEw1FS;e`|X|3G_>(8~d)TRNKw%ZA@Kp
z(%4)s26*RqSYI`fE!d6}fAYSIVi$a<_!amWvc%K2EJciRXcKxV3+(hxr%?X*>rA=n
zLVJ`!Kd-G7p#rElTtKfwq9K)Q>~>$S#oRNH6Vr6t+}Qn3IB~}#n#!>*wNz653~@vmjtWIN3EG=sg055x)<#UYL-`G_Ic~`_FPMnT=wT
zQxAG_r>o~uendWdze8OXE-q!EOBX{iuwg)l$8#(rf%8u-&R!j0S#}=!`#Ues@5#VbnEsHIxki)
za8YH`-^3SkD#zM{mGyv@D>|iYnjWMv6
zZSHxXKlk53&xD|ZH$eH)ZNw#Bc&A78skSyt!$B_uR|vUC(r9#*ysoh-gT4Cslj@`0
z)Y{LE2FlwdWW1Lili6gv+Umhz^dwDU~X<*JLthO}-t28peAi)t645DA)#
zIdnt|4Oh-cbFsP-&DDM`drCv0ftTNgW{C(jAB;HdteP)5cp}?Rj-jjx+9_-a6oUHT
zp@FS6h+;3|o3>N)w@-z;LWSmheXWXDY@))GM6_Aom5&T%#rYNY7Yu@EhVOV;n=JX5
zVf$?EE98V27QX(cd}D|P1~Tr85FVUs)kpARk-CohogOopS?+bP-nU^
zpHEi_rSm&*4_oFZ208A~Ks0L|Dl_>(F%0qv8bTJ^vJs8{Ip^xhI7Rd{$ZvR@RR~l;
ziC&`2P+~(MMK4u2@~{hCfyrjVbwI#akI=SPA-u>QuO-)Srv@aw%VrN+NCD3zI63PK
znUB5=tl`ZrRSz^y)2wd=M%K`yfAV&}cb&AMRq%%XJLgp+kyaQ8o{ZB*h2av?vlDHadt%7!=1v8mI76wHTPVzZ~_b6c;u7e
zOp>+E8~h&Vvdd(?I(|bu?G=~hbq#wZtnr2F=bTYNWN#IztvNLEQS4M`9#>EqS|8vO
ztCUCR0#G7`Plr
zfr*M^{NcI(3`yBwskt+uNYcgr@LCKk7A_T4Rr;}`S-$N2e7WM1D+6&WatShUQG3@rWsNSUi4(vZ>%qA-GfR*Bwo7I4%}xeOZx1z9T9A{Li%8MXqtA
zUz6K0S=Tt^Q^Gmb0^KeQ%tMns8L@*W9@CLB)2(cf7%Ka^Coh}e7s4_1q9lpM$H{Sr
zcL$WJH<~y$4RzGr<)R*6wSH)>;HA~?vh2`gXso1?t3X&2Q52uFNS;SM!okr@xa2&p
zXnU2g|Krmb&;%AgD21nV(9kt8@OZ2589e2XBwR#e4eZCO4_2R^T~wG|6Q=L8mtK<;(j)A38(*)nOWgvZhyFKgYgH{LOV2y}T}
z*{&k=qM9i{8Ahs_leVmhy*LtAo~oiJsnUG4#1^)EJ?v~ADy$yEFGFw_;eYG*j0EK7
ztjI#N=_%}SL2>v0jFU=NHUhAGT4-!P?D_NO-}>RZJPG$rTjy^P*Z$$Uu5#YQWds=TNxnVDX@{oh!atmWBwgKr(dj~TdML3dmW|Fy*J{f%%xb`;Du
zT7j!xjg^fp+$FvW0KWtQ;&OH>7nCC}?5s#S@NfoTU|qOsVH+;1cBQ?8fMD7o>_-fj
zr3cpL_8cGpW~P2Wg3&ZiA6S?9itxHWkRQLDv0yo6)cZ1x)K|$#1LPP7C;Jj73ODxS4?Pmc|P3*Yi8x
zd8($y{n%}??575@;EhdRU4x}E?!#yw>|AQY{Xsc|$pQra+bVwYKevey5P;8a2-kzz
zjj*=GaYW;D)pdc;wWiwy1d}6FZnOz
z%mBbb#>KVs%mz7K05G>!J@JN$K}bkf^msv8!Om`#SU;id45s3qjh9zzB&-zM#**r5
zMx3q8wSG{1vC?(b@a{e~ACDC3LS
z`NKH>->m~zO8A2!1~0kI!S^!!VcPRh;DT5l_bL
zVW%`kK-P_t#s?4P<5NXS^@|Qd&!j_d)HXc|eO6iFDm6Xq!T8CEr1;$#=D%BTc6DKA
zDs$_cgj$%4kOOD*9AF;|d?U`$d&fuLg$7fu9zQh7!=6=ck2gi)&xlLb3hz0_S};jU
zVgwiy4*+xIY<&H#RUaSkPYXo9nIO^wECCq$s=!~*E&hIjz3@+A`0vBSyK$xHu2wG<
z*Jc3}?kM1q3T9?z-P`ZO0N$$$f1mBXF5!zh3}I`kC(kkv{Ad@XkpVtspfd3KJVjxI?Yc6=h#vKV6s{gEn~Mp3P;el^
z!VSG2;6buEeN=7o+)c6YXnjq0I0lqygX>IoCdZO32t=dB&|s7e+2%N1_Q)g}266r|
z=rlfXnb`f7uHWjpx#I{rtO*CLlcPDYwiFSs>CHB-c%!=?=;%$zfx8T;;^KwE4d~Iq
z`}qtCx4g6ESNr5Ec2%+(R0n|Rw+qB2EE%aNHD*4Mk|%=B!Nc|qCU4p)yg02{r%o%
zpiJP}y8zxi`bxtq>jYxr2HJDL#O(#H)o8>RRA+haQgLm`)6Ng-!t>TI=+3Gl@kJmI
z=bSt^7oRb(=xNvIGs9bfAT_gGj))5;VGe7guEqfd3Zffd-0;|3vYu_@ClsOawQca8
z&xL#g>YM!C$2mb#hEWFM<|VN{F1E!5TOhPOF{pcYi%h+>Aw^8vrbF+a+RO)UFJtIn
zsKxcqV*(-ot}Cd!!H-w$II;Ja1H$v3MSp>W<1~y*Bd(G3ej4P8>EWn^!2s)#7>6IZu<7RRt
zmKpY^je5W>mZq!v(XYaF#to9QC*NhlFO!|QytI}-&Le5^B-Bql`rlYiR!ZZ$8*I-3
zYS6Jy+q~stAik=-e<(0cf06y}L&**yQEttY5j8G1;ZjM%rD=r*LuQR6jimV7XKch|
zt3*rddPrb8?BX550B4ds>&tSoOAfC66|E2Kd%hJX@|M;l6%nMi-$BGyrt)iudLKwp
zL^m56=Az7BZJe$zB
z*KV%M1#1Xs#?5|n*FWroYzaT;b<*);Cl>}x)j*v5ZuvK|{Nw%k>?PLyoYhld
zLeGI2Y@drMFvI?usIz2!86RIY;Z-RR+-Y&xKYza?)5_CR$_4RIw;M&*(M51#sqy9h
za}%KP8^DW%0Zrc@qy4`?3Arg&M@L8Z=;&(SjV18X84NgFtyOMqYnw5lIn=TbDj@_+
zc3#!LD_5D)^}kds!_fGAUgq~6`*Ok1
z;))jRoXHV{1NU;)s~pw;RC>U5;yeh%?M8~JscBGX4p0r*)hsZzw6;bee0R%(md(#n
z<9~Jw1ln)Ft%tDC2X5z*40#|{t=pPwJtrrpXcn4^*CwK-Mo_!TA-!;2cp8KE_ay*n
zhrR_i^Y*S2$+^vZzf%H?p!Tl-Gw*IozWg`iP{`ly3oOOogWFKO36(!v_m9P9RvN!(fslGmk;eM;;=e4zy^3MQa
z;e#I8oZ<4*-rU?Arc4`Anx#5-z}si{>z*oco;yT@JeEd_B{(C4PPFL4C8BToZX=8d
z2?>sartx1YQBE9p&QlOVgc!xUIl^=5U&nP$
zbu)KgZBjocZ(?#6toQHVKT9`%Hg1`oqwXM``Gm~v*P$+I~T{xiW>+9>C+}T^G*+w5OOjmA>h}%Ml
zjg2&9W_}7x))-6xftGSgv&)>cXb8#vtS@Tt?q=Km1r^aUsc334@9gYwu2;w_6)t-3
z4w^FpmVra2Hx5H3CZ9DdkLG2>qJZleXV_BDG*@#!nDM+k{$70m?w~PZ*$Xmh4s)?~
z@9XQU;Sp>OrJz3>Ki3-Y07$LYmCCWzoYMM*91yOM%!E@;+wXpM_WqwB{{U2bDm)9pHY;-ehfUZO1l_N@7={PY=eUeqYhQ
zQ5-vO{pQkMzFL+r_ucJC=4t)#;)$}$pcj_t@*xH?=yp&_!zi39j#o$BV>EX
z<+rc!u`|ol%K1m}@o$~f{lxz=!r6N<6o9zDd){~nmXO=3bMsjftg_+Q7VQgIZy6Bs
zSU5Rd)l(GW*aend`2StypLZI$;{eAsFf2X?x$a)Z$@rev)7u*iT&n3e^wU3!4Ses;
zE`vOI5(z{rpdU4S@`PWw2H>iHJ5^Q;#*Gw~9exVndfD$1Y*fJ=aE1YQOP(S7PPTK!
zVjcLGIdz{trK=qCt!3A*i79cqlU9B4k2l@LK5Lize`Eapl)(ODxwME~WIC`q7V7D%~J|KBS=*O8?Q=iGjJisP@G_x{mpG8Yxrq
z_tYpz{?NlY-vYk1qyw(dF02?c
z(7lkHci2uuPE57G+l5iZ86bl(OZ)wLH7r5A!Z`Q!Zc)s7@yxX$^h>Elm|O)|;{xKX
zY~IX=oe-)YX=s~%9NBPyYZlD)flZW9e3b*@5Z7;o
zn12=lwiqCZU337iHeeXO9oZ-CW0)ycv?;xOH&xl2BJf%s|D*e4-ct8x_0my3L3%F<
ze?BQIy|G?26>;)K_6%Z6#{%do&r{Et^gk~(V!*LU9O`2(j^5pAKwef#F5o4OX}45h
zxN8T)W^O0(E54Wn7oQu8+#;Ky-?ynE1CHPzg|%v4zcW|=={DZV82dy)M|bJ*4Lq8G
zoKu2+8XcA-x&qAC(
zM#s@6#{A^iu(O+u(P>rSb+Sn0j~FcbBtX1HszqSS3NJ=6ewW8h9;G1G6}ecG`p-D+
zKbgT3kW-eQl=H8ffJRc-&Lf?(hhv5
z742|MLjIt(;dncQySoRLNj+eJ{vDr<1GCe8*q>8vb*&cDEa+=_98x7E`MXt1S9|
ze53&ISs@pG9dUspEx^2Lk+IUuJnfm1r|w!j2dNs1^_P?S52DV8`7Hr(WU5m*j*sVF?1Gd#y&m_}0PrN=u8q7tn+rK`{82L3h
zGZVS}gZcZJ?gO~51Mc=Is59c16I{dAA*!!3^!L{V#f@H{G48mD
zsPikF16yb8F3{ttXVLtnigVESpKpOn0=QvaK2?95Ix9fDB2!%u*1$@=lo}!Hlq}0l
zryar>-9DHbBK5Hw_9D=ZWMlv6T2h_LkpPYy_!^;`Z1ZfaVCdVuBj1(}HBx}x$@I)w
zC?r?d`*kEwA==|YEt%btY2M4Da?GVZ8uM((IegUHAPZd?Le$ce&JXOCN$F6Hcu+*o
zhKSj>Up0J1a!vr!@qp0hJq@;hW#}^|{w<70VM?-6Hp|tmQdAoCY$7@^9n9uSg31jr
zH#mV(x_7R)+LZa2m{{%4L4*#A0;cx_WNx~@u|~@;Jt%!~==e_0qF^DX){J|Y(-twz
zxLa_qWIE~OVGG0h&7)@V3)6jMr*|R`d{!w>wnXK|1>6-o|CFfo`!nR=q2_%3zYvEJ
zFimPPcBF?cK}v`a@{VXax+VYf86>}{sqxO)2ljlq5?=gU{T}fcYeHnHwuYoVt_X)r
zHqGKC5C>dsuY5kio!r8)F+dfsEF5(vNU1f><#Z^TU;cWoRP%2%0u1C*=2wB=P0r!1
zcvdzqoP*!)&2uieTEp699v=#UD7RH+lV*z<^E7A2qdjMzDg5X1s{%B=JBmE(Js1OG
z7o(kz+3(u@Y2*P-XLdCj_TYC*rF}ws?Y-X|f};A-Q>UBi4w=j;H55v7B{NCOwdQ!QY%Bgg!d8xWZi2O?rYr&Uy5hSEe(VaA#a2j
zhK6XrtJKy8a(%cBFB0A`eeR_xvTR)e{hmx?Yu{(p8nIV~FLi|J)6tK*IVDEQn5ZS2
z#NP{94S6GJ=PgPY`en(Qf&D7aRi1k^hSG7qGyL(No_)UZRzB+ymG(QB*Bu?
z7D#dOK?u!QGsr?M!!{XyP{7=LknXcn2oXRJF6*r{SbHj?hj&00d5WnX(F0qXbKzGF
z&FoU+`3tD6s7ghxY3Mp!N>8kM!OGO$gC3p>uMxR!^w99d_>HBCK+F>TCUf2Z7F
z@wjjS}Pqq~+JNnk!1(xWqv8P@YlDv>L>vtk8QEL#MTt1x~b#I4ZVPHG7qW&Q5jX0&e
zM^oBGF?f+)OOnr<=L=Adq(!KXfXsuACMgk5t4<}SIU>C@()BPwI7k8tf*Iy}@
zc=sh=o1Du?kN6ki7Lk<#t?>cPhD>8Tb=%0kxz#9HpV%7bi(#&H7duXlp00o}@Y`Ykxy%c;T;GQMDir_K2)V6&Y*%N0*3OJrbs7@ut9v;j-Zuwp
z2FZB%x>Bw&he8S07S$)rY6}s&DWn(9Q8gVh$#y3sb#&`}And*0Nf`bMTSKE|?iQg_
z)e)@6_lAWf1(#sK-&&ulc>lDAg+$`V7`aO|5ZQ*+cwJqIl
zc89up4WABpz{9486_?}{taGX;3Q0ZWu9A3iB7(OzX5}>u+vC#c$Wgbpg&gZ&sI_Vl
z+4_`7=b};Jamg~TA9irh(q~3FXt-JEGpk6ZB9!vh$yXdL4>E7*qrj=jTAAth?;jl+
zHRUpCPYz$Rvl(kv)LG1$No|M=d*zf%*8
zYwIy_rMq;Z#ZLX*MrILRi5=uk#3@G;o>LN-tC9gObRr#Y?K8t=btJ1fPdS!v@2m%0
zh}OQ)JIm~wb9?N1OZ~W^$&Hxc=E1pUui1kk*n}ma%9@LMRd5jDj91~bjqefj!QfE$
zq3KG3vjuI9^?UABZ7%ICE{CREGS7=`IdX-*Cv}KyoV3BVQRE@i*?O#T~ew$4D}goT0&y_wWM`=zbqa
z6dA8O4Wn-1vW1*-Xhd4}x3+($b*%G(T0!4l^ei>H!%8h=5E-Ul$e8K)qOvWh&`lP_
zcEvd`ney#L=*uoctzt5QdG6GmYsEwJIf}g@)kejpGa^xLqs+DHTQ0Tp_fDAVyxz~g
z)!EOTh*Lk_f7iqr}iP
zz|bMm0>aSU-QCYH-K48&b?P}(Mk%Lr7*y0J{kdzkH+Qo#
zI~S3%R%l#L8L0)2ShN|v+wC|Mf@5RM0af{WRT}z3Fo(#)3u2R2+omSem1(*yMD_Sp
zoZg%5Ope3yLDeL&fR3K8?aA#;VcY7~CiYKOOqCv8YiPBY?Ps~K2{f0JV#M`L<7!)vHhZS2`ud{X^R!La4dv+kFTb5#DsXl_
zf@+=@&u(wCL8?-2-Y9GR>#EdCD0gA_lg*d0^gB!|#}B$A^0JOk7J3%DbQndU?^=21
zmw7oBzNjgr-1AvpjQh3l6q}7hCO?p+z%op4ZY$&U-_Z~G$#GS?Qv>{zQU!LK5uZz%5B-3*Uebi+2qDR^Yh^S`lnz%K}hWglZ;FvLK#wk;=Ql{0TW#I^%}^
z?aa{{vIkYRco}lmvhOE+(`b#8SNJjS&fmWICRh4GV2Q4%oUmz*u4rxNdm+v0sVrP8
znz3T3jTvSqnDywhIeZ~sq+o37xmm0{e$N}~ZyzlH_oE5)_66gm<#K~!2S|B7dlB6P
z{k;mX0aN)iRvPACYx2;F?R2a{c{AsxkesgC*Y~`N>QgaZHVrZQwwA4pT02W(xBb+-
zBIei{W0Q|nR9-1=y!%F3=ers^H>9^v0RU(*&~2|6Lzu-%@r|a4aA;SlIL^fF-@zt^
zmQHF^7wz+8eOiuTR)&MV7~PB3vV>Uq_nyq{$0cvKX!DCe9XLkYdOEV5^V1v;%_?=m
zr{bLbJY7_!Qaop}jjeF*eRm#ypn*_lO1q}!YN{y%hIK_}_q4$T^<;QNuGBqWZvpzb
zm~~c%Cg^=Cd?c~@RF4H*OeToIqCL?A{q#(1R;02yue)UiRfUME%!HxMtpbOM6YxOQ
zL!HZ&VCjMaMT_>+f{ZAe-lBA+tj_K9G`!W`ZLQw--D&2QZRG=5*2Dx{FxWxS;`QHL
zhbp5m
ziXvg{3St?>$tW2JAd*O9sg8G1JSAj%isGY-#k$G9g1CdUnNpR_wXzbTPdJ~Xbv0L%
zbGd$9?6T`gs|krvHwuf9RUJ6!JT7e=?6a0)t=!a~p0g?nIa~q>LsR^?NJEq}rObZj
zPWol1nJ}Sag}2Yy4sq(IXE?m1{n+UPw7lpb&^kP9ofup)vLE+vcB^i7&wGKj?Q<84
zYs)Re!VnSIx_xO%OpENf0=+0)rcq;p(b-?4Uf6ql*fF*8>WfOg
z|LB@eS)98p69jSHktr3yTxJX!OxU$VVA;A*n3~F0#S*W2*{bwHX38zChhJt<73y|z
zYG^ZwXx0|O!(WX*dSz(NegS?GB>{OJ_7b$Bd4bJ;r4%j0?eE2wrsyZlyJff9D?C*O
zvlkDhEHaV(S}0b58zf<299AgcE_ZQ|K8Nls!%EU6q-{BHVBt7%A8)UGGKIAoBMt^c
zfo4yoUppPVEaS_5>@V%jb#vNOMZ5jKY|?Olmrqqv2mQb=ea#$ozUxh9;MlOvjb7@7xGmIY?d^rO><6^A6l2j
zC0~`Q;KZuN^{CMbkPP-KVxG|f!^d8{hwW^)1k5Q8_gv{_Gd*tfZ*7+zT=1eEP#E5I
z2o|3hEb*VaV{L+SO&q+yv34A7W#U|e5i^`3x?)rDVXb&@r;n>B&RCeiPt;Kt(eJsN
zi69NXJ81uK8A8^pt=p8R%y_M#-C}SVAAGGlU8ll81Tr|#P;P$vG(Dp5#1m8J<+9ZB5v!BBc%HGe&3o}T-fZTy>J1`SK4U-cOx*BmO)$*~uJbhkWI&p99b
zX4XVNJJz3@+s0?lN@}=B*_C)s*7LYnLMe+~z$W_HEwwtx_b(b%*26%}phufq4tf-u
za_O=8CFjp)7VLnE64)v$zb}8Cpg$+BTbtnKV;M$K77Zjxl5=ci(7_%Mz>@Q6*
zc>TqwyOi0=#!Djib){bX9_KJ;b%OR4`3$1pRNeI-Kw4v;3dIz-2}@xX7bT{*9V(ST%K{$P9W&0@H;Sq`Ex@35weE-rx%g+a`T<^OX5|44x3h4wnsc
zyQ=8?QIyoN=Z#C~#B^->oiC5M1O+m(@Cl9hAGy1r$%A3A_4ZL8FB=}iV`;{&F33mQ
z40S^+|LAU0Qz`SZU83}Swrb?^qoHUSO6%OM+9bW&T5+aDP?(txzH1Uwf94w5$!-8I|wHwDj%uuzk|M+u#YTRUltfuEAA^g
zAQ#{whpi@h-^&=l(oT`n9u&udC=h2{U7mFN&|0(3W@R(*#q?#?K#$-!nm*r;6j*E2
zY08I0zR`uKSVA`IC@{E830#mxci)A^$%Ze$x8lucbo{|bEjaIv%$8-GTH)Zb+Hco`
zI~5Xkh4!Hj&_z)}R)K3_YllA~y6=7eSU~hdWA_whOTP9_MRj7g{Y{iZ1U1-s}~JI%G7gCkoj5Q9NthuwF+t{ZA^d+@pKyUZB)hza}1
ziL1h(rjEyVhTFc)(nc{*5AKeIt{M?_#q66Yu)R#yHE~FP
zIcfPD{@CHmt~7SvZ{w&6TMH6sfOk_E=`fUDp2ECDb$`
z1sys&0sHpV`P9Ljpl@1vzGX!bl}t7BT#_LF>_mc<@VI6_(1^7c!vkKO#;E7L6`R=W
z9J08l#Vyy+8?Kr}e7emp5$@70p1l0=T0n$#cePf-pipn-_wI?DFFBE)<`0}cZ!Kv%
z^3T~k{LIFbq0nhKnmzwvb%bFtzF4A^^Js)lEf=<2=SB5c*153U*xc4@i$o)bT61@<
zefdg~gJ{%o@xK0gxXfF9ErH{nT$0>z9D%iazW2?wB0mI>z7?P2$yFIR>B@`@|7^Yd
z)9nL?W4_@j5q6I;&Qa(kLy2};a9LNha?-RrG^R(Ct-x(Mjfs2KnHI(^@7y|hTdLk8
zKynard(pn1@|%_^i>v&R?#w6it(y@X0~yI)WxH9SxfnC@=#v%Cb*UfE6AcfqFa1*5
zZP7(>6akBQ597LB%0%Xc>(0Sy6bUuWaRM#YOm)jSCiIjbbksIQD?-W7TMj_Eg!eCxGjBfbXy`~U>77T|2|a{tcBac
z+1gVmL=9jnyLcVBhecEz$+a#WvjizbU{QD)+ZfLe#5%meBCF(cX(_+?vsj&1^;j(4
zpF77N?vmo(s2*(X>1GHri@e$;{-$WLBcxUWvCZu~UFx!=9-Y0KmSd=49blDtu)UrD
zy*lT(lWnC9-D$E330Ha;SsG`2$~XgCiNi-_xJ&MLaqV_Fhwl=xi=uc>m)sxVaOaVY
ztM583EJCSL?K{g?D+{&7#>?b)4x13>gGy|}P9pesZ^)FrpnP^LxUq$00}9vZ$vTzx
zurv$V!XW2?^mi!PIuA^`X!0%wKz%L8Y@p?K3$-USpzPi9tWz4p$6FEYzuuk3?uo?_
zeRAaLGi+E3h-HX`AHUxdi;ZiYlWY&lj9x|j{FQOl2HLJV+po&`Jm36O98<1L{e7lu
z+F-?CUS;c0^r*ALSmn3vxzOXo7DK5Xrd=cN5qFWHkQzD7&urYs22G`trxxm=X3L10
zt>^E{YK*vU_I2Lmu(qnUgbMuPxgk?IVDGRg9=l;t#q&a`Uhmtpfm@O*4JTEwYZZ!hlZp*#>Q2t&EE;S1uDL5eDoq8x