From 7b6c296b496d5372b4294e4b1037ccba00ed3161 Mon Sep 17 00:00:00 2001 From: songw Date: Fri, 16 Jun 2023 15:24:12 +0800 Subject: [PATCH] music-gen --- .gitea/workflows/build.yaml | 47 ++ Dockerfile | 12 + app.py | 304 +++++++ assets/bach.mp3 | Bin 0 -> 160496 bytes assets/bolero_ravel.mp3 | Bin 0 -> 161280 bytes audiocraft/__init__.py | 10 + audiocraft/data/__init__.py | 8 + audiocraft/data/audio.py | 215 +++++ audiocraft/data/audio_dataset.py | 525 ++++++++++++ audiocraft/data/audio_utils.py | 174 ++++ audiocraft/data/zip.py | 74 ++ audiocraft/models/__init__.py | 10 + audiocraft/models/builders.py | 218 +++++ audiocraft/models/encodec.py | 302 +++++++ audiocraft/models/lm.py | 527 ++++++++++++ audiocraft/models/loaders.py | 90 +++ audiocraft/models/musicgen.py | 361 +++++++++ audiocraft/modules/__init__.py | 20 + audiocraft/modules/activations.py | 96 +++ audiocraft/modules/codebooks_patterns.py | 539 ++++++++++++ audiocraft/modules/conditioners.py | 990 +++++++++++++++++++++++ audiocraft/modules/conv.py | 245 ++++++ audiocraft/modules/lstm.py | 25 + audiocraft/modules/rope.py | 124 +++ audiocraft/modules/seanet.py | 258 ++++++ audiocraft/modules/streaming.py | 135 ++++ audiocraft/modules/transformer.py | 747 +++++++++++++++++ audiocraft/py.typed | 0 audiocraft/quantization/__init__.py | 9 + audiocraft/quantization/base.py | 107 +++ audiocraft/quantization/core_vq.py | 400 +++++++++ audiocraft/quantization/vq.py | 116 +++ audiocraft/utils/__init__.py | 5 + audiocraft/utils/autocast.py | 40 + audiocraft/utils/export.py | 56 ++ audiocraft/utils/notebook.py | 32 + audiocraft/utils/utils.py | 234 ++++++ requirements.txt | 20 + 38 files changed, 7075 insertions(+) create mode 100644 .gitea/workflows/build.yaml create mode 100644 Dockerfile create mode 100644 app.py create mode 100644 assets/bach.mp3 create mode 100644 assets/bolero_ravel.mp3 create mode 100644 audiocraft/__init__.py create mode 100644 audiocraft/data/__init__.py create mode 100644 audiocraft/data/audio.py create mode 100644 audiocraft/data/audio_dataset.py create mode 100644 audiocraft/data/audio_utils.py create mode 100644 audiocraft/data/zip.py create mode 100644 audiocraft/models/__init__.py create mode 100644 audiocraft/models/builders.py create mode 100644 audiocraft/models/encodec.py create mode 100644 audiocraft/models/lm.py create mode 100644 audiocraft/models/loaders.py create mode 100644 audiocraft/models/musicgen.py create mode 100644 audiocraft/modules/__init__.py create mode 100644 audiocraft/modules/activations.py create mode 100644 audiocraft/modules/codebooks_patterns.py create mode 100644 audiocraft/modules/conditioners.py create mode 100644 audiocraft/modules/conv.py create mode 100644 audiocraft/modules/lstm.py create mode 100644 audiocraft/modules/rope.py create mode 100644 audiocraft/modules/seanet.py create mode 100644 audiocraft/modules/streaming.py create mode 100644 audiocraft/modules/transformer.py create mode 100644 audiocraft/py.typed create mode 100644 audiocraft/quantization/__init__.py create mode 100644 audiocraft/quantization/base.py create mode 100644 audiocraft/quantization/core_vq.py create mode 100644 audiocraft/quantization/vq.py create mode 100644 audiocraft/utils/__init__.py create mode 100644 audiocraft/utils/autocast.py create mode 100644 audiocraft/utils/export.py create mode 100644 audiocraft/utils/notebook.py create mode 100644 audiocraft/utils/utils.py create mode 100644 requirements.txt diff --git a/.gitea/workflows/build.yaml b/.gitea/workflows/build.yaml new file mode 100644 
index 0000000..ba0d002
--- /dev/null
+++ b/.gitea/workflows/build.yaml
@@ -0,0 +1,47 @@
+name: Build
+run-name: ${{ github.actor }} is upgrading a release πŸš€
+on: [push]
+env:
+  REPOSITORY: ${{ github.repository }}
+  COMMIT_ID: ${{ github.sha }}
+jobs:
+  Build-Deploy-Actions:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "πŸŽ‰ The job was automatically triggered by a ${{ github.event_name }} event."
+      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
+      - run: echo "πŸ”Ž The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
+      - name: Check out repository code
+        uses: actions/checkout@v3
+      -
+        name: Setup Git LFS
+        run: |
+          git lfs install
+          git lfs fetch
+          git lfs checkout
+      - name: List files in the repository
+        run: |
+          ls ${{ github.workspace }}
+      -
+        name: Docker Image Info
+        id: image-info
+        run: |
+          echo "::set-output name=image_name::$(echo $REPOSITORY | tr '[:upper:]' '[:lower:]')"
+          echo "::set-output name=image_tag::${COMMIT_ID:0:10}"
+      -
+        name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          registry: artifacts.iflytek.com
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      -
+        name: Build and push
+        run: |
+          docker version
+          docker buildx build -t artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }} . --file ${{ github.workspace }}/Dockerfile --load
+          docker push artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
+          docker rmi artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
+      - run: echo "🍏 This job's status is ${{ job.status }}."
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..5943d24
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,12 @@
+FROM python:3.8.13
+
+WORKDIR /app
+
+COPY . /app
+
+RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple
+RUN apt -y update && apt -y upgrade
+RUN apt -y install ffmpeg
+RUN pip install -r requirements.txt
+
+CMD ["python", "app.py"]
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..8ee7f22
--- /dev/null
+++ b/app.py
@@ -0,0 +1,304 @@
+import argparse
+from concurrent.futures import ProcessPoolExecutor
+import os
+import subprocess as sp
+from tempfile import NamedTemporaryFile
+import time
+import warnings
+
+import torch
+import gradio as gr
+
+from audiocraft.data.audio_utils import convert_audio
+from audiocraft.data.audio import audio_write
+from audiocraft.models import MusicGen
+from gradio.themes.utils import sizes
+
+
+theme = gr.themes.Default(radius_size=sizes.radius_none).set(
+    block_label_text_color = '#4D63FF',
+    block_title_text_color = '#4D63FF',
+    button_primary_text_color = '#4D63FF',
+    button_primary_background_fill='#FFFFFF',
+    button_primary_border_color='#4D63FF',
+    button_primary_background_fill_hover='#EDEFFF',
+)
+
+MODEL = None  # Last used model
+IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
+MAX_BATCH_SIZE = 12
+BATCHED_DURATION = 15
+INTERRUPTING = False
+# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
+_old_call = sp.call
+
+
+def _call_nostderr(*args, **kwargs):
+    # Avoid ffmpeg vomiting on the logs.
+    kwargs['stderr'] = sp.DEVNULL
+    kwargs['stdout'] = sp.DEVNULL
+    _old_call(*args, **kwargs)
+
+
+sp.call = _call_nostderr
+# Preallocating the pool of processes.
+pool = ProcessPoolExecutor(4)
+pool.__enter__()
+
+
+def interrupt():
+    global INTERRUPTING
+    INTERRUPTING = True
+
+
+def make_waveform(*args, **kwargs):
+    # Further remove some warnings.
+    be = time.time()
+    with warnings.catch_warnings():
+        warnings.simplefilter('ignore')
+        out = gr.make_waveform(*args, **kwargs)
+        print("Make a video took", time.time() - be)
+        return out
+
+
+def load_model(version='melody'):
+    global MODEL
+    print("Loading model", version)
+    if MODEL is None or MODEL.name != version:
+        MODEL = MusicGen.get_pretrained(version)
+
+
+def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
+    MODEL.set_generation_params(duration=duration, **gen_kwargs)
+    print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies])
+    be = time.time()
+    processed_melodies = []
+    target_sr = 32000
+    target_ac = 1
+    for melody in melodies:
+        if melody is None:
+            processed_melodies.append(None)
+        else:
+            sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
+            if melody.dim() == 1:
+                melody = melody[None]
+            melody = melody[..., :int(sr * duration)]
+            melody = convert_audio(melody, sr, target_sr, target_ac)
+            processed_melodies.append(melody)
+
+    if any(m is not None for m in processed_melodies):
+        outputs = MODEL.generate_with_chroma(
+            descriptions=texts,
+            melody_wavs=processed_melodies,
+            melody_sample_rate=target_sr,
+            progress=progress,
+        )
+    else:
+        outputs = MODEL.generate(texts, progress=progress)
+
+    outputs = outputs.detach().cpu().float()
+    out_files = []
+    for output in outputs:
+        with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
+            audio_write(
+                file.name, output, MODEL.sample_rate, strategy="loudness",
+                loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
+            out_files.append(pool.submit(make_waveform, file.name))
+    res = [out_file.result() for out_file in out_files]
+    print("batch finished", len(texts), time.time() - be)
+    return res
+
+
+def predict_batched(texts, melodies):
+    max_text_length = 512
+    texts = [text[:max_text_length] for text in texts]
+    load_model('melody')
+    res = _do_predictions(texts, melodies, BATCHED_DURATION)
+    return [res]
+
+
+def predict_full(model, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
+    global INTERRUPTING
+    INTERRUPTING = False
+    if temperature < 0:
+        raise gr.Error("Temperature must be >= 0.")
+    if topk < 0:
+        raise gr.Error("Topk must be non-negative.")
+    if topp < 0:
+        raise gr.Error("Topp must be non-negative.")
+
+    topk = int(topk)
+    load_model(model)
+
+    def _progress(generated, to_generate):
+        progress((generated, to_generate))
+        if INTERRUPTING:
+            raise gr.Error("Interrupted.")
+    MODEL.set_custom_progress_callback(_progress)
+
+    outs = _do_predictions(
+        [text], [melody], duration, progress=True,
+        top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
+    return outs[0]
+
+
+def ui_full(launch_kwargs):
+    with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as interface:
+        gr.Markdown(
+            """
ιŸ³δΉη”Ÿζˆ
+ """ + ) + with gr.Row(): + with gr.Column(): + with gr.Row(): + text = gr.Text(label="θΎ“ε…₯ζ–‡ζœ¬", interactive=True) + melody = gr.Audio(source="upload", type="numpy", label="ζ—‹εΎ‹(可选)", interactive=True) + with gr.Row(): + submit = gr.Button("Submit") + # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license. + _ = gr.Button("δΈ­ζ–­").click(fn=interrupt, queue=False) + with gr.Row(): + model = gr.Radio(["melody", "medium", "small", "large"], label="ζ¨‘εž‹", value="melody", interactive=True) + with gr.Row(): + duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True) + with gr.Row(): + topk = gr.Number(label="Top-k", value=250, interactive=True) + topp = gr.Number(label="Top-p", value=0, interactive=True) + temperature = gr.Number(label="Temperature", value=1.0, interactive=True) + cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True) + with gr.Column(): + output = gr.Video(label="η”Ÿζˆηš„ιŸ³δΉ") + submit.click(predict_full, inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef], outputs=[output]) + gr.Examples( + fn=predict_full, + examples=[ + [ + "An 80s driving pop song with heavy drums and synth pads in the background", + "./assets/bach.mp3", + "melody" + ], + [ + "A cheerful country song with acoustic guitars", + "./assets/bolero_ravel.mp3", + "melody" + ], + [ + "90s rock song with electric guitar and heavy drums", + None, + "medium" + ], + [ + "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions", + "./assets/bach.mp3", + "melody" + ], + [ + "lofi slow bpm electro chill with organic samples", + None, + "medium", + ], + ], + inputs=[text, melody, model], + outputs=[output], + label="例子" + ) + + interface.queue().launch(**launch_kwargs) + + +def ui_batched(launch_kwargs): + with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo: + gr.Markdown( + """ +
ιŸ³δΉη”Ÿζˆ
+ """ + ) + with gr.Row(): + with gr.Column(): + with gr.Row(): + text = gr.Text(label="Describe your music", lines=2, interactive=True) + melody = gr.Audio(source="upload", type="numpy", label="Condition on a melody (optional)", interactive=True) + with gr.Row(): + submit = gr.Button("Generate") + with gr.Column(): + output = gr.Video(label="Generated Music") + submit.click(predict_batched, inputs=[text, melody], outputs=[output], batch=True, max_batch_size=MAX_BATCH_SIZE) + gr.Examples( + fn=predict_batched, + examples=[ + [ + "An 80s driving pop song with heavy drums and synth pads in the background", + "./assets/bach.mp3", + ], + [ + "A cheerful country song with acoustic guitars", + "./assets/bolero_ravel.mp3", + ], + [ + "90s rock song with electric guitar and heavy drums", + None, + ], + [ + "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130", + "./assets/bach.mp3", + ], + [ + "lofi slow bpm electro chill with organic samples", + None, + ], + ], + inputs=[text, melody], + outputs=[output], + label="例子" + ) + + demo.queue(max_size=8 * 4).launch(**launch_kwargs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + '--listen', + type=str, + default='0.0.0.0' if 'SPACE_ID' in os.environ else '0.0.0.0', + help='IP to listen on for connections to Gradio', + ) + parser.add_argument( + '--username', type=str, default='', help='Username for authentication' + ) + parser.add_argument( + '--password', type=str, default='', help='Password for authentication' + ) + parser.add_argument( + '--server_port', + type=int, + default=0, + help='Port to run the server listener on', + ) + parser.add_argument( + '--inbrowser', action='store_true', help='Open in browser' + ) + parser.add_argument( + '--share', action='store_true', help='Share the gradio UI' + ) + + args = parser.parse_args() + + launch_kwargs = {} + launch_kwargs['server_name'] = args.listen + + if args.username and args.password: + launch_kwargs['auth'] = (args.username, args.password) + if args.server_port: + launch_kwargs['server_port'] = args.server_port + if args.inbrowser: + launch_kwargs['inbrowser'] = args.inbrowser + if args.share: + launch_kwargs['share'] = args.share + + # Show the interface + if IS_BATCHED: + ui_batched(launch_kwargs) + else: + ui_full(launch_kwargs) diff --git a/assets/bach.mp3 b/assets/bach.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..16d0da76cdae45a067c0d3360503509768fa0b34 GIT binary patch literal 160496 zcmXV0cRbbK|G%y+*|IXTw@?Vz-m-VHvy#XtE~6k@wMWK&AC@dg2zd)1=aAciAIDR$2Y7 zwD{U`_!cC2j0%kAglB3cv)&JP_Grp(*Utb{;6KdpXlstHUnM)~4T1SjjvS=u014ki5U2mss=P&61hJ*CX;o<1P^| z{2&D-loh!|t(lnnDiGq-SsrrNnMR@=-|O-W#vecLygIa{d1RkCP5${k1D32Qsd*{| zrxsdzr(j3ibb^a{cZwU#lZq!FY$&PRA^r^H_h4ojVmPd~izi`Lg{=B#QYj)5QYvOh zOP&&!N~Q9{pa&~IYQ{XUZ2DoFxBm;bXDlDjmgupB!45hzINhJvcWaS)BVNq75O7^uZY0vjPnFuka?1<7W?Dve2)H>?gLj zxnnHc3-lB@owR$v%}d9QNNe&}(Ou#_C(VYUA(fPFge+&KtPalZiU_FHrKDjx5YT;u9 zTXV+>o@4Hf$qcjlNZNOBs8MNDs9r|c^4@?(&yhS+pdjmy;-%n+VzL|w6}?jJoT!Z& zkFFL@T^neoHogbnT?1|K!rrUCbN0MjtA}`EGw-BJ~+ z*c41^y~TZ#F}mQ3l2d(q-jwH!3i zm+c>-qR0jP#wkN9$txv=rX%`QRVOz}ans6F6uWDJ-!g7YgzEhYvfeQurL)rxHK8!t zlD%YHWyrIG^_7p zQ?$HLArJ3czLQd__8LZRy_Jb3qQ9F&P&)b#A!-b|%AvcHbSf^+oq1aTj& zx>HV5-&xCC^PJngFOZ1>ofh63Bk;dM5m8B##c~BdwK}yqi9TPgA*1~E1 zpUBEyqxn`?7>noI$=Si#!O29{sxd3nOL|%*=6DGJs`RV(tLEXL8i14J6Q=1W?M!sX zGcTM>ROY?Z3&|i+jD}IPw}OS+f>t{WKl6O6tw>1x^^WeR9`;S?bTJ%*c(fJL?O@$& z(Q;W@BdE3?D1q)cQVC5z!c_y?#GN1v0O;&yP$re;AgO}#&uvY-sDA85nY(Qs*a$#2 
z88a&vgp6z8vtUthP~GcRFTuTbHB#2CH`YTGR>fw_r%X4$QI3w~Ia6!2aaFaWj5|7_$>;{81B*#V)29|< ziIBXEj^*LFHl=J=UTO(kXlG4>x$LxsRop$C#~nOcxT^_JGJ-&YQ*tn;`kEXPL8)%} zNNoKwnW{G1CPJ1dYGw9b=;{2fb9+e})&bS(nafhh;~`CB9@j{yF1g9ji_%TO8hwqB zGX`sbG!Yzd5MBuZWRmI)^bU?b>hHMT>i_n9XBFY^$mOyu~NSxGU%dvD<2P1x5r=6)AXs#m-JJM?{C z$wjyw`hhUa!WXzx?%gJ?8HiA8SsGx~jeQ1CFT>x<`(hEt(pU!pP`^hNc&l#B24EkN zLycP!D1;LtUX&9yKgPqdpDW7BdsJ26e?z^j_zKh z%cOU#5M%__lm8H+gb+r0OkLeJpEU9qu~({Yp>*6cJW#b%wW_U`&Cd4n6nNZez3pkc zeLHr;dmwapWhpx{f1aB)Jhy9o%3SZ0pfmz$xjrVwR_yiF(cab`=);ZSTlE&&6M_`p_-1a;3Ncih{9VH?s^1`3Ql~$Z58=3h>?_=rnAy)r|ZMh#ZBF z*ApaF37CL*>jQO4vxlsT{#>q`s&{3c;F?vnC+`R2s*3@+1At>;0MP$^^p+~O@tNw3 zR4a*#j0Pm9{oV_>&5;0fNtu@dLVS$lBcj*Fd$YhJD302WuG;m3s1Wy??AFYrlm{2QN zm$|<+{O~WzQAI#^^ejA)jOK0>wjoZ{1xd8Q_SJ2FO(Mnj+T&_%@3E=nJn*Rx(V~wl z;koZ*>}|$u0X+gW`VLtXFkBSJ@@3=cG1$S?xErAr%|dSea`!K}y_I^CWWKugxdce^ zCTD;UQ1GLcgI5GS)Kptcei3ibb~HBJ{$zZH{51ErToYc=9{)s2PhnO%V$v+1((&e_ zu}ZT&w-V7*v5-khgzt1n-&6jhxht@{b+h`8I<8DN^!IknW}$8JR6=w{m1?^0VH*D} zjS3ro+XtzcSy8U{$Mdz6ZkUr=`W7%e-@q~dCsetAFHQGVQdEZSaH;u}alu{Gl>NsC zbo{Po69f*JF#e?;!IPJFT@ZhdW|{63pO6|IQv@M%cTor6iF=nX=CYdMQHu^S!+0a> za@+2x;BJWXYy)2gHI2+nuCLiOcvs0G0p-FWpO*;c0flY9HTjDk?@wVKvv$#NtoF+f z!m!3#2~Z!709hAyiWS2NgOs^j2{hsc;paA3>V|0;PSXHF$pww{Ta>Tn-A>U(QrkVT zD$j#;;r<~+34!j~TY4fzHypgY#gufuCGqju;Srkht8I`uE*BQ~o|X1|0FD!x=CbN9 ziE)+VClFD-TvN#^8iZ&}V00;h++~}EdF<-+Yd(Of;y)rjxEgKLiy&pO4wHtC<(WOC zJV5E#v$Va|pevV5Ya_pOSvfvR>8>2Q&CaX6k#9oltwo^U48E`P->wNh`Op+s;N!|4 zq4Kt{_@Z7D$xw{dhnF54jfep7TkUd9>z9Y$DMPtSs+Y1%+ke@FM&ewpzceASEfie2 zyz!fncWlyf_}YGoug7oFGn#(BDLT9i#-m5)-x`mA zbt*bSO|c~zTHc}a*%+Z`bwB<)>Bx*IIy(1~7dU>{>zA^L(+6fTQmlqNW=5uzU; zU!|JwBz8|j{`%*d_dQUIIi?At=`mBjF^S5#fHnU8OBANx{f+_v=`E=!l`-5K5};}Y zv;o*U0MX&X&Ckcrl`nH@_BrV*VjS2&a=?(ZMoBL3f4YmtWlz3S{`~0VCsFa9OaI+J2{KpWv)-ePw#UqDBs165sBt+7E} zvP)`MpJwOkdg>l3{xeYmgaGnic!!L1F`@ltc}2Wn4vV)`?Fz~-|7NSKEEr3Oef;Bh zc3|em;}Erv9Qv&7^UWnUn3~GzNWd%toeDc%1DI;m&Ql08B!c3;B{}W1Swv7!8olSr z)<^Ri>YVF```HV0!SR`i@>gTa?qjsJLphSrjJm{E?Fb_yV}-QRiYM31kZK1OPS4#e zrX?lrcFHBxc+XY5n$7x1A;f9F-)1A5LlZlet^71c1sV0cd7)G#MIQbG=u7FnN`r{b z*a)f9bY{L8$l6OvxKw|TKZ}iuK#EY}xLn2w=?9+D@a2ioa(wqWg{JdK>k+RmVAZ~% z;uPaB<%l3~d4y(EcbF@ar-VIzIZi$s^Yy-Tal}p2&nveTVSm@; zR6hOV5F-Ryschy=9re>}FYkv^-MfUeE4z)}(o#$Dz0)ssQE-nTCA~6-+zwai)G03q z1^WAIIs*lvxZ21>xJ@zE5`dZs6f5X)$7DvS43pET`Ao8LF;~PweqEOzE$Ne8OlA98 z+1-jF61g4X`>oCC+*C9HYez8>CV{#O`cY=5Z1BZSY&~Wj0Q{Q3;jHtL z5dPCV-Gq$`a{aBy<4L+CnYW`EbFvI5P9h=eix#WhEk=GRuDNX^F;@rU7IK13!O`IA zX)l@8@0=3UJ0Gi!?q|>$Sj=fRHz+H}HQP z5{`h*DVu&Hk4`hF9w}5xu%)QEw5o19iBv8YmB9$prPl)i;9Xi0K@e?1<0hXjEQzA# zMs!UP_+QUhRioE+KkfTD4-4azH=cde`ZYJ)QDxCSdONJ%u7fzRnwzZ$hQ8*2G(q5x zQ%jG(d0a6XK_D6lp_Mqf!6lKC)TkeKeMtKC@VlqqH3QeYR9y@D;m4SdQ`L6wFr;@M z{1X1*u*Vua4E>u3Z@Nz%SYLaqd`h|VL4aSN^vQU&;5BClBEowJa4Ap8L(T^2xp=MBg#ITac|P9bI11jYrAPa9WNmsKGoIy$nn*a(~1C6t>*`)W&+=f5@TnWzeh>ksEstjln* z3*={X-Md5ElE(@|Qh&7*Vg5YFhWlD|ySJAUTZ}kBQvkHU81OxkhJy$60BOy@l}^K( zcwc{*K9h%_0i}K9GhNAmQ|PqVAlp!)PDV@^Yq4BBek{&7eh0?o0v zaXMhwH}_-(O71TxxV0^`YphMogdWSB)gtGNH_241!DK1*DHST*A`uwgJ=CwpI&9eC zEPgx`pTLR|K)zvq&4ZF#l(*jtdtHScZtv#=YQiK4+P5 z4$_iY2gGT_rRVn)2~Z0MIDm#T0yIXgsiS3Y(zvzvhtUm9E+SeVCBiZ?F%5zST`K;j z)tB|BJyB!7&H1PzLyuq7SLm|_iruJ@qMCn*xQOzV)Q4)pJL&{L7O~IF93C$TFj@WO z!Ej2IRO6nTfAyxXpLuP`)hzPf;!Kle8Na8dY`pr`UxN0vmmuzm8oPwI25lq zoPT?`qkI0LS!1ACyO+F1m}wc!0G;Y?iu5OTvkFraM5Ve6hWYVjN@jaKl6UbL<7e@O*g#*zN02=wIWP)Kg_|IQO*(JMf zL0CJ0d!Y_+5tm&RRrQ`~_fEX-zab+?gBT>g+TM-2^`4w0>>omQ5NN*g%(v^75Hn%h z5vl6%5!zc$v4Tf`b!s2Coh86NJd$Ex5#OeLOt`P50GRSs{(hj1u-T6ODEFc@ZtP_M zG;?(5wfj-!p;1w>ThN<9)%3h+3WZTK 
z3Z>SXF2|rJIP(vk?QEoHHCgVDDkt5=1`YA$yD`Jhd63ODcXZCNHh3Qa20kI;-JE1- z)O$@mcc_HAgtNYi3JI8Aj(byQVJc~GiR3p&nax1{EPD(9Ld_-2|#ionLM?J&SmE--*B z1UvjjtS6~Tb*}T9I;HA^_|i9r246MlMIzh3I$t**cc1=T!^~yiK%E`*Wr@d}yDXFL ztS6;k!JE(~P|wswCC-#~75d@`z8Y!gOC$hl2NCN||Dv2mAOuq^LC8aL-@O2*G92I? zF{T-_z4@b8L9WZo>nVOsRU@L4>V(08Ah*itKloGxWvG!Aynv;H=q8`eK zuEi-{hP%F}EZGtF! zw({%J{t7bth_<|WzbD$o_ZQ>lYupEcJ^F15%|w^EE;$wNYQz#5q%|P=$C|KCZh(W zcc0vuJlo?sjIlv6xJL6}`Dtppvyh8&7o&)0IT8;R``OUoAXUDLIh|y zM-QGqZ|PaC0h^o#lZb%IA8TM&QiYCykd;nrqq3ean%LiF@Xt*_F)M0pw-fg3D~S+~ z%n4&UXDDmjFS|EHzn6D!^Aj$XT4cwImx6e?O#T-=U)JfUb3>{TNCEhtFCZ9z*%DVS z2oc?@zw|R+JKWRCM&m0AvBImB#g*a1gstyhFM##?>J!FgvT+n|RSfKKeVPtCDXF+N z*t{GYJ`rwV5lD%!>Wy{1m1wH!a|S*FX>=$?8ZR)^zBYh>j#N(<{7sl)d`AR>Phx)sBI(tK%y~5rG*pXSCZ;VG3M3u1vZzB5}Y{ ziJf5a6jBaeP$9$L{q^f(h;YIDGLjFAOCggH<3g4w@(kUpAtgBr`pjl9PjA&@Z{hl) zsdR<E2n+W50=QfAIc<63DGl#4N9hwzCxb6 zv1KP$!1v-De==+j_>JaZtgSUW(*}I|;q@#^Q~%uB{LTrm%kX*8u6pB{k7JLviFg<9ji;cbS0?F<5Yg z|ATvPu@%sdQ6(p@-iWYaM_P=);ly0F`235{K^Jc?pg{E#k>rtor2qWpi2Vef3=q4z1e-l@8pY zPtVK@)%vp#OoSi9(o2xaoy-W@$Depi`d+>%&N^HWh((-Euit$iF=Kl%;OV9G=iN2} zJ)@)}N`h*ti*8@EWUP&Iq1 zayu&x6-m<1wBP?WlD*HY5$&F>;i%{JLn*l|F)-M(y4~6@S9m|*tQLoh z=x_l{&rItc|JI(U%E&UvFaCI@5~SqT@bCM<}8ki5gBIn^kakv&N@n3=>!w-KVCzp^Rsd_ zJ|a*8NC)&l2n@vmrvD+-76DzbU)Vjkhw<>rD|9;WN@Nc-*;mq!%oeI^JF7azH#+lh zrheZIRznxHYcEx@GQ))7$7`75)9b`UYNnqlRWh~x4ts7LFkf&A-G=iRzHhYo`Ic1X zGl%jAg(@3AqP$}N6UkWR8cV|8Ax5FzCE_9DWNQF{e(eMDK_2>1)_z~X-yO5dFG|}X zVcOIM{J(cEQXJDf@LttYNq9nOFs(v-ExpHYRjBZoj$yUh{%V8U`={3o|CW4D6NsHy zS$%a)QaTeXQ4o9ncJ%03n-BisLC16k6DFm2FvAg2_TDuRwFM?i;Kl06!ae++9fYrf z?n$l|V;sPYq`~bd!Ln~{%(@%Kr=2le$%@W=zn=V5Xqs(o(DHmO%N!q$J4VP~!9M+H zPFS3vNOs}a}`wE&Eef0?Evo@d6{u85A@7l6Q5TPU;ip7Eo{fc;U!nM;K z7Ut}#!g;BWRZ7eM5Ne5lZkd=*9au!Wi*%1ERfihlhk9 zz>y@pegj-iV4n$SxVEV2)+`kM$NTiD30J1Vsp3F|fqa75Tx`{wRGj=LW6>)i+SQN% zvH<&IC*stsSSJE2Y)JG{uj^z^QRn3hK#~g!Z}XVn=MT`5nohb^jVr#WkVPh-Sa+@d z8goG(HzUEu58l{Zk0`q!3a`g$@n&R+=~8FmL|$}SEFbyPuNeRJPxF~hU_)ri z+=p#sKdF~_k$@3EGN+!DzCXD90*WA-*xu`4Py%Ry{@b{-5UOH8pY(m(2p?@K2Bl~q zB`cY-DG}#>^sW)pq9d)z*(yBDu-U|Ric5+i#SG{&=5U=WP+G*4`vPw8=@yqf3}x~x zo-pjV*d*c~Ld_89jONT9b)@4XeR$W?f)TEP_q{p*RIDAALMHy`>;`$;5KZh zQ$eL3(+$ut6diG2qN(a=@ze>F5(QraWzW@QwDjUH6JD0Oec>mU=ATN}wuvwim~!*W zGcm2pJ+<{-Hk`1bc9u)&aE<%ZX+5hQrIL!3E3#3nVS55*a6D|dRFH$uN9$Y&L@E8% z3$m)>$F2B7B2qk5NOD7jycPT&3hu(EAB8dwGjf$HWFc4Qw_kp3o0#lTl-RC6+`aq= zW91&FDNNR!EV_kd)!yolJRP;H;*6#|hmUkF3lmhP73*)@uV0Hi#}p7`J?iLXp}Jn-vdEI{iW%StEJ991|n8Q z1pgsa4}p$ZEjS%mh`R4}*Evn2g->xFp5#CCuYpO~Sf@ejeQln;@j_vbOv^FJa`V!qn`i!hsTej!r~((%}$TyN>$aj!zrF}8*r(7Tf8Z_ z?@qT%eZG{Ao+o_Vo3Xa1*Mc}f`T+F}UP|Bnk`urH_)Ckcr1rdl?uw;~G#zwum_Ncq zlHp|>f$TfwPv|_oO~>@_&ocw^kHi{0cpDQb5;NvH#C(e{)}oeg4!v>FvB-WttM;lC z1~T9^Kp(ZlYo0tzqV+BXNOZpnj|!FY%ql}< zQq!)v-jce_WHdc_io*<+q}jkmZl6eNo93>P4z;Sz=-%*Wf49#j1?i|vxR)ab;38iUdr(pw%Pcjzgp2eOAk_V8*HHyyXld>pH?)PcjR_NZ0iie z5xMRI!M8WPv=|!S3HhRTMCm0;o`D|tr|N^bF)#;CK}qyA!{uF?3ZtoxkM*&)1i}HP z@IabJv?NOJT|HULIvxM?nb+ZP-1@rr*!6pma7-;yn)(LPIXprxfkJimk+!K|PRuR% zf(d(HDLps?CSr&~2fzTstzA4wo%4ES+2{17m?n0-SkQmM0r2c-a6!1zY{%}gPQ&W5 z0{`p1_)TjSb(?^`Dud*Zf7y)kKuU+WyqB43cYdh^3V|dq`qbad?0*PFK*vlLc9EJJ zc6#@el&W7R6l$9_@adUE3hwU4b9i->N#Q$!F}hd6eb3BSyG-EAdtj%xL*@x@y5PJ% z&1X!^-8fy&|Gu=*y9N~{?dTr!e=os#2$LGZ_6<_g20yRLzBX&;I)#II=MkjHHMJ`^ z7zXGy-$Q&r+k%;Dpu(7sW~ZN8TTZ$yyabno5Y-YgZU#%=ao>{@zl`>K%OP&Bn4C_S zf;sOi`=_Rt`K)bqnbk^KYMqXLhrRBfG1o@x(N0rWJ_5hp#oiI2~?xk1mSF<(=~_wqHvV;gNg= z3~M^3*nh`r6Om=q))6FwKGVL4O~e(bi3CV@ock(f2vNyI~u1Y^-Gy;QW-ahKo@)u#ldS-K9(wv5(4- z4dD*J%ZwOzToqWtTbA;v;aTJwG-VwRX+9jr6DlBfz(0iWT5i^4arZzo%t2V8(5bE< 
zAz9O`LG*~$PpDotHm`ddDJe)xK*!=`{cP#QEy(@2(mhplG~|ZYvR-zjQh#Jp)V=RA zxtT@-*8*8$_D^Cys;wahx01gxCxw-0g7SQvqj(Ga8+Alh zs0B{9!j17YI`~V9A1yHgU`XOLs+{}P+OQ_yo}lo>kr+PYWG@Nw@DJE}T0TjBHs&qx zoiuE}PI4h{VhcgL?y|O8#9_WVNx~N!@{Fn5^@cA9*Fn_md<+c-7n!n%0`X4F7~&j! z2@UNHpxkq_Owi>uVER#O=@AImk9emFfvO4c+B+P2PqEX+1gvCz6#p~N`sVP2KQ8p$ zLDjn9wI2Y~1A7wZ%j%po{5DEhY2m16u=$$~*g0H)wE)0#5MXjZtGSk(n z*q2^IqfXwTtF26d^~-Q-<*_1Fxji?P== z&Ll&-!k%>cJ_03XV3rN<+52J~L0Q24QHb@e??o6-I}2l= z;QMz^Ca+y<$b0`Zf3IFeV1S6qRW0mNltHCA{}y;`fubj%XTnm!Pp^T!e@wutKL30i z$En6U)ow6ooc1%=8J8yq!Fvl$ZycALn`bghT5|7Pso)}<7$*Gp-g?HVJyC(y9&Q(A z%CSoM8UA+^XrM{*c-U%6OeO92KMIXNpcAGGo=DAoiv)!qN;ORhZ#3=W+M55`3W=Sb z6&{W)62*{C_Sq~m(D)vgh@#<2h~q;rjKx)|s_{a&Yu@?{Wm|s^D4*mH;#z!fo)q<@ zYEJ~`iK&x{#NryANRs)#?v@NWV!%mb%E_7^4{lB3%1uSEM=2bEg#o~TD8JRfEB|Cx z@_lR^vjgdPo$0$N-uw)CoB6Gco=?$Py91Wfe7)~uL?|tua$CpqHE&gKkbRxZ>gQ#A zUr95U*o?ls;rb#hoapH!+RLmy` zm)e|r;XCOuB`ZIx_rUHuO(L2Eci#FJZ>jcqLJD4PeI}E13-sSI_BvkR-W}4?%mVZ! zFZruDO|+#2>sq`lczC)Aekp*qmSk=xL;XgcuS2(%k$2$^DzZ+0m)b;<4$H_0a8sn}q=~ zEjlI;A=(w`Lnx_|x!croN&63>HOYRu?Ow~+DzbpDRqa>|9r{=A>u9HIX9yU&{8WP zN8L6gH-wOJPx;g_{5PGZ8ngS-VjHqjujI;beJnOY+8ETYiy$2d?GV#ze+Zk4f9;&i ztlt9%AZj`EI?|quQ?AHm!|61{37?J8qRuM3GzD$sLf;vY1ChxrCU>MH9N?Iqd>wCotvscppFb;Y)3!eEtaq z)S;;BTA1FNZ{tTAUI>J@yg-)D`x#4bl3m7j!X2^mJ3wqw28=)&>TZ2(&8|DUVT1Pw zV!d@1^9XuG?qW|64MxLwT{GkakoN%o9~YXKbIXFGj3@Mr$`pf-Q7|A}K*^@7PWOGn zs&dtHNs@FTt%HC^BdfeOJlj%qZ^iYzkS_KiX5;q^RdgL@G~=1NaZY3*OD(*jQH!&H zz8RQ>*p9xc*8PXj5n_T@T(Q{Gxgww-Ly&(OVMd~q=uVZ*@GM{ zO8Ch#XA6cEuL@?#@6>revuggb#g4zW#knii{Zplvf?tSZE%7b=jWy}`X^(g}goqgb z$I236u0?GFN#cfg37>V+e|hPpurDX(2j~{usZB2(`hgkLvlWc=I6r$MtBLB~UB`QR zm>bdFKUmEFpie_#pI8yqv{Fb%;$ZyTb|fB&kR+ zHW`qoqmX!nT-PDcod@&3=@>93dJ0&nI{)O{VEYD{lk~J2WIJALtK@_(tP9{L^T_bc zHrc*VfTfEgifIma5+g*$_=lOE~_Pf)kTSqQXT)A9%Zg z-@aDI!Mv+#MW{Z=XDrY8w;B8ne?jsxOh#~S`XJsm!CtxD@INo%9>KoVlGSN!yUd*$z-4m9locfY&+b!G}dnwfDZn1e-LdyieB0~x!&#?rp2EE|=QKF}Ve)TEVsvZ+q`UnR*>9;&G89TK|K8eTDM+U}Ma zAY%IU=f>pWYT4Q;LD}ky5Bg9M`n~=8#dTw)QTdquUn6i-*D1lnellaq|;tzP(txk%wcVu!)QiDG_mK#`;r=B39NEa zi3#G|7inD53hn_Re16D3>03CQp!sog@Fg{&+G8%Ln0W4r+%keK#Quj$)4`virh7li zN*^=iEHpy|QaC5_-~UjTs;*^|9H%&jpS`3W9T)~DW4M-9ThK_2b-*G{2K(~sNKZ_l z^AmyXlhi!b%}u8OoLX>2Lm+BaZGF*(pAyE25>;-N!HDW0uLQnz=MRcM8~imLo(QqQ zWMx@?Q4e~O+xJBEK6750F;$6TW%KU1+i@h{JuG||(&c(&uMIoNM`8*eH(@5rZ~o)Z zHUzq1GGivG8Q|^ReV|N{oFJuXR{ieS9#-?GPA0=f>fDM4ULqyvWPuYQD zuko7E*m&BQ^V>{afG_z~^IdAi$9-CgEMf6OlZ3F)g4+bA;L^hIPamFoQ1T#P21nrq zf8xYRqvYQ0fOWa>n@*o7w=SO*quHOl7K&kJ4Z8W>L)xBi>Ha!VzErlo zpBZiU_w)z56ZA>j_knUYHtJ7I`ncQDB5)OTIHq%cI)cZr9rLpjak5-mRAJ{i3Uyw6 zF=00BEqyfzJ?NhD(vG~A#919cDmqxw<63EXCKSkvbm9Q1F0zXgQtde ziYAichxCvg$)%f3Zj{@A!RHsCA|+nsu%m<>zwelZ)wqH8fA_O0mTYltcX3gk{}4h( z5b~QE&jgsibN8<6#cR0)H*Krxc4OMqTG6(%I({YD7We#v4W6ejlGgT|hd)9hwX)d? z5`rQH3cvYTC+_n^OjHGWDUVd$x;{7X#8t2H*ZZ^SYjStzgZ&$(;;bpk570#~=3U$O z3TZzF0iYnP3r3>Xn%VKPEddwXyZB(Xn?@@tBPco&UrLC-y6(fHF79Bx*EDuBjDajC zdoA9bk;$=b&A)#0$440?@66NHphhR3yN010Wv}Dq@ouXgA{ifatdlwsIcc)fXpVLC zAqN^XL71C&r+++ToauV|Q;w+2jXX(Y0U!->Xd|Mz$#*(Ec3jzfN({f{&9+^$%r{JV z=OO-K+1(>D$slu^vxZ%Rh?4!a8moZ`rzR2neT8d-(~e3N3m0Vm2!NYl4si-50Y)X| zs%!h^XN^blMj@YGA4^|4L=~fvjw29=iXOiSA?#r37YQ!1ZQOuO=iyMH{*r-T!KOZ? 
zN^*VUjORzz*x6t$+sE7y61DKXv4GV=}*Y z-E7v;%htu@7M3L2e#7B*;iv)@UQR?q zUo*gWCyn9f`D@=;m9Jo2J90~eI#RSxT@0*;Hb2Xl3|yCK8jO)dsk?D z32>u6M4JsK=ferW+khJS%IFfE#o2NS@gw^HyT|Tj3xly+YnwV^JjID(FDE`&>i>xM zo4@+V;B7D@9n%(8K*6P-FJ{nG5L9eFtYfMbZ!HaKreMeKGV>}rZJBI||LUS@pDYA+ zqL*={P_7qsn$z~sr%6o7Bb*Hl-yL`<^6!kU?FGt9x*-54w4kB784lc@LScFx6zVVU1wQ(d{7WZ`cVerP$$>UlOcg?M=}aLHV^z)k!^Xfpy@U@|=; zc5l|kGlAcm-ZH7O{NLF2F|^G)Fs8$@S{q-J094g{v&~0SdoaTdtHs?qjfI^SKA$)i zpodDP6Wtdd2sVoIp}H3Kwc1frb{$^sX+<>*kb4Sw z=z7tyuHl(>nXQGJEw%MV?WB-SEp4;ds(!uIa2O@57a?pUf!ThfKF@|{)StWO*6{=Z z!@KxN4Ysv8@VB|#cO1SW$$+0kV1}r~_{WL4tj9fKG13jZkl*o@-saCK-c}M)f0q`r z8q_@reat~g2DM+k9ypXaHt>lG>G3szcLM% z3WuJO*IB)s^;_au+BU0m!;cPoPChM{Innil(*Kn^P`;vYiG5NN%u>7In=q+LSD z4=JQhf~K~SLEHOu4}>C26V5DC!fDkL4$_u7I-N;OhhMu82LgZ$fG*M^t5Hq3 zndeWcKbu`B)vF~j$emBdul>z`)HU%hpFHI$)xVZ zzg^pecV3Rc!xF}zBc#63Up{O&;-W|}WSi&aM%)&s;;G;BN}j|8K zb`=$jkyaivVzCek*K7RD2C*JD=!aEDKicG4f#1(B{!43JwYT1xh~D>NTf;dn-AS#Y zs0Pc(J<<@cCkBQOdeDV-o5VAGw;m4`wXUJfTe--%OrkjUJ1Q2p^iwI-zVqUz5{q2e z9}Sk}%1gDaaEzsFoar{)jvXNFc&+Nen)%rle%ve4P(y+D@J`9cKqi>P2WQ+Z-01`; zOPSKkVdbgps4+n+YSh%yu;-Oz=XjU2zy>SxIwwo?|4VffGJd)@(=kDuf#c%2&ztxB}S;mCe3%gZW z+MQ%?9;7=0!GnI)T3RcIQ)X|w|K!NGnpqHEEr@L{X%3;WU_L`LCGu5ufV--kcIO3|%h_)tv%F473@eM9K?T0tQwtvPQuV61QAJ9q?a3$X&qp`g zaCYJ<5q_W~?RpFH3+=OKA-ENaqOqmbar+Z}aT@=Qq&r>L+gIT`BT}P+AY9iw2nCy) zRRi0mC@dU+OlmN5`#)_%LsPv5l-}D%9RIODEh#9Nlqx%Xt5~KY&&eDg+xFoOca3fW znTWV7p{d~gLWES*W@=Fq(zS#QFAnd4O?(mp=$N&Mz-PR<53BoJ6&_Fnu+VM5n^Lv= z+FIiQ$JEW|rR9#=G*^^ymj*YewhAtVYpW+e|1|Z?3*|2d>G#|Q#GJ;%3>mCcb|{+W z`bHHjzo&LnWDNuq`%XWJn)>pUh4z%44^g7_zj3+k2trDWdCvp;01rI{JpXGQ#ba4aV#2e!XgEX!wHU2ZEV`1RARBe7!#^x`k$+iE)8Y~x(B*}u=7 z7!#zFF>;DJng@?oyF0s%ll4Zf!>|5moO9&ZX+En(>Piz)erg1JN4`n1;H!7-uF^;Z z)0p8>pjROb7f^ zgc~(j_og7rj1%jgyK+PN-byQuM>%~>l9t^ZN!a@Q$7Z?=b&^y#+s-vnbnUhW*z@ga zq>IQdNvRIE76>`oOa)-H&uo|bBmN28B0II2>td&wPm^uU%cp9h{ee`dD%^OO7DJQJ zxW^|vOAI|03Hffp!`%gu9CO%FP8M%TbCwN8CxR7xw|eB?xFsMC@7sdE|9ZcB?n7VO zw|kN;*1=pPyJW+G+@})T*y#BG)Fa>grrICU`*nK^=b!13c?o^+OFMJd3u|BYF0k@o zN?K^vPs`hi4{_-`DnCiIK4`4D;9R@b#GvtyLr4f=oT>GmL`Il%onE6;Ra2rLlXgQD z6@j6UAl{erh!qoQsmA;?z3VmqtXv2z4Sw>G(CD1}^B+%?pjtT!WtZCG>{C`BgJ5*A zga|&$Jz7c4tIKHZSpi)Hi7o2>l*?E$nDM&wqz&mh%>$jGo zOFFfKit(+NUEpXZiu>(JPS#=^=DbNtmMG~Jw(3WJiv?F|{T4swmP$pKAju|QD=dlWjgapo#(HmMKz#E{G&ys{nmfU{i@?@%fm2UQ_;j9(D z;lOD!y2j;5RTh&d}Vn&%P(4}!Ky2&=iba-+`ul*Zt1~@6W+%d1) zqt4P~dYx_99QZ2i_%)tO_lX=;icpZF)%D8%2IO^!UL~huEiQeTgKxVEbF?Jz1q6I9 z<(=W5e3NFvBtjPjDFsH=uOry6u&{r52+tvysqOB8rQ$Om)<&ng_(UIF{J30w$=}TIB*jozZ>f{6ChyGA_#Z`Fcs|Zj=V;?(Xgm5s+?>265@`lKj%$ zU6Rt>g2aj-(!C(H_p|=~&zpU@pS}0oGuO?b>qlShlniv>A?pshuVawa0=;lCHw=P z!QV;!!PZuutvI5~gJu`d#AsHUCIhOL(lb~^(^11dq5^j@Civ}EbWw;K*ojFipE}AD z7)+(3PWCWkR$d5|F&`eRFq`N<@YKWkx3rFSS35 zaTj^1*?@(IPs-nvZ|I=nVhIWVjc2L(UnQF4Bg4KZzi0ZK^D~S^!gniM6{U5c z7=b0K8=ZOAhbsmQ1E5H6;oSfn3l?awskPo!4%-Pt4A2E0YlOwgeui*&Jl8MmyYf>L zRF;GdCFOZWhd8{=_}#Bps+-c&zqmz@d>u`n=Z$zAg%CQk@w=o7ENgk(>sUAF-=ddE zEBuxOsOl)IG0m+685)WI=QJBXK0cfMRK)!cp`Cxz2fbxHae1SusW>SD`FIwKe?k&` zjfT3m+v>OvH~o;C$WX0v{iErZ5^YZ`fEorL><XbL3i_}fzgN9MzbDx+jm1la>;)uC9v2XXgPxjmj-l>cC}O}0U=eY1al#Bf>IC3q zH1K@Ced1ob&x8}Tt{g}73F8PGYw00v=4tvSS;m`{9@fpvVGtB>DDDIOy#u2OdG9nR zX4i~Yg-i*vJf;|H%0E)YfWuyd3wUz#^uTwdAh2Rx*rUPHy}p!RsT)Z-Z={n4JLT`@ ziLS1h(04y=u#R(-nB-T+D{Fqb7226(A;^)%x?iwC$L}Z;MFe|H_rqd``hg#YQ^oHO zG4a}|t2g>A9ue&z@O7U*VE7FBA3|p#NCifVCp`KdcB(xW|F~^zB^`JS;k&S~dEYc# zyTZ$gjL(Igfc}Vf9$GPC@;6WIa+MD8t#u2kfP`sL$nL{9!NI=OGe3H65AMuhSVG&( zy?{<{Hd5xmLTW>exXQYP6b$%F^Qr%k_!Yd;0Dzxe8h3_FOeDD6W4%<7ap`2*O=-#H zG^jF33V9NSq@fIMC--4yTr-=>s7>!?WG(bgxDJ&};>E39dcptgkPV0w$X>{@l7CHF 
zA5rwsOnkZ3_dP9tEtAm-Qm49yQa!HbAnldhf6vzf6yRYx003){Tg?|uV8ks_&IK=m zm2-t;-@t22a~xBM$I=Z=zlZI3WMT2avtHpnoz*>X4nBSrQU2W;V@ht~Ge(ca^^=3h z?Hf#tS@=;Y6Qs6qRTX{PCkU?^R(8fVL-8>GsG7P_z-FrEB=L)Y%Ijp)BAd+|THE0W~<~Kw!-EO{j zLL$ma7l2+cF0SznxcYxXaCkq|u4}k^;F@5;syb^`6PVy*p?6B&L7Tu*1BDO4ZwdjR z1fGyY{Z_A~Ue3ei9T3A+`q%dp?Hj&Q=x)s}7_gSs8aDN+D11yLX|4orJfE*p;6mrg~$PQ1U=?cAA81Uwhg;~4+m-ZKhq_Hc|ujX(_ zFm~n|l9(b5-$if_2F}nw0k^EIc-s6$yhzvr%MG%{Rg{jB_^dZuyV|*z6)yK&ZL&>x zs6AUmC-&jP>o6z8l3HM-G+iJ4G&4IMAt>z>n4+3s)SW|%>wVDxw8Q3sEVwup&ixGl zKt1ro68Mq&FNO|4h zqrW%6ihOBABwG0OPm5EfZ?oBRBb8q855Hf^fgJH7;EUWPOlh(T8z()Rg|}0~(#VsQ zFK&t1+l|r=o$x4{3x2ailIB>zXuc@x0(k=OS02zcA9)qRZf0<<+8s%Iffp_mIsZ0w z77ZO7>GujoOD%%nJX^ttkSu-kVG157?TL9oU zs_|<=D`eV_{yKG)YS^*6q$LS~{)}oSoeIK^&IN4l2yYrzA3;0JG?q6jT4~?9aQO%J zM-6XX{(26;Pp0r-un`;0=RRBTBQ=1I=pWKrJTh12&KboLJr&L5{-Oki8hDTp*rr*k zOjQ^7#A2_!LF{T&sROMUe0jK9BhNUYOU^{D=$S>OE3z#?H9=8bnx}{>!Rqf=*!5V% zca|Rv{a?p=9)d(>zW7965aQ^R8~lN(ArV@tSI6JE7}9XUf0q>qz{sRtzz>Cw1wba8 zOREMSUQ<6WOF@CD+(T{NVPu-LrZBcbQuV})1XQB`!fq=A2C#nVTW)LOaXTxWq&D=- zgoNTVW|zx`e(K;0QP-e;Zv4I=4VOs`y29le%K&g){?c3f{d*?v`etQ6@N60&(kfa! zG-_s=?{0PEXgi+gRFdQ>qIaGyIm2cAF0-(QwJh=>Xi>w|@`$#=E+Vjb^otG=<)iXg zBaPKZl|yXcW#Dr5OZU|85l>ett2|K@d^+$ZzzRrgye+87Q8^csh9<%;UPwDf1Lz_( z=e56OZ!_YLTCUY>JjkJCsj2VVkvY;8;SI6N$n}vPqBc;XqM)oz3%-rn6)At&O5Lea zeUWAdo`QqIfWZvPPPC5 zjEJ2-2CLZWv&s4B!{;pEZJ=2Cr!69yjL6ovn=8yJl2yl2u6kKwVPc=OGbU`6q6*mb z97>yN{8H6J(=$~KHT3Z@3|{9JsQ{Ubz`*xEuq|1$->8_xobMj7n749jhJ}gPL_J5o zL%~;=N4w_Cw6e1FzuTnoa~NptdCe;sO(Z@kyx?_VmO70sQ=Ln-<~qK37L6yvmt@D``@rr zfi1{w8QgC?aET<6mS}WYub5d}#J~)$s)A)=dvW(l!5~l^E{GR?>XTt{$^45iV7htw zFUvtdNH{vnC)oP(CdWOqA6~h{2dh|A7HD57RUNbBV0_$W03UO#LC)c&Ju!L_A|4ae z%Y}7K&J&mNbfYG0jhj5rI-L*&t4AN7zB%}_H^O9Ip@g9Kc*qrr6`dALvCi|0Z6FFf zDOeyvV9fBC>4ecxE({-z5&G17`Wydecfbb$nB4*V8WOCLX);2#*BJ~bYtoN6D(Z*^ zDOz!4%f2Nfzmgc#cGYE%Ec8t)sR=^+>@kkdIvA~)^J4norGRW}_!KeTOmi$>SQh-S zNC%4H@AS?rkRmnPyYZvn=T$KLUDPd-&I92MB{S@U4T--EiF`S5M~YO%G}WH}xVf-+ z{v%dSRu&_C+kFY!m`8nzMZ-zHM_R_%38>BV{#sMqjyewqLoZk;W>_T4TYyp#m<5{t zV}7XU*3*y^Um_V&Z|;UK>E5npvQt!7RBJj>los`M&+vVloA?l&8P&Eq!b_Ck$!$^5eV^0 z=lcn+)2H{CxqVhOF7a0?dX?p-_*tw@J~1_PmCeVj;6i&K8vLS#5~fX~&R9s6PHdNi zx?)c4m=n)%*kb&0ek1r8pOhe}j6krq4MjBW3i~hX)4D$5ZxmSCtEmZm^$e7M|J@$= z7x-UkmH9vr&#GBA&YHesP&ycqzZJ3;k%g1uo@MA4pEjVMw24>9YiP&PDn_&ONp`!5Z7$^pk<@*Xm zdj|Rp9LG>hvsI)RfqGB~zYu$P@IN356tAjyL7I9ayj?}Z`0iH=$LrK zVyXvT%f%!hD5?Cbw$rNFZtqH2{~Z;Nc1+PUC}ipH`+1bfJ)1sL25OY>BC%M9_WOc9 z{PCMkwc!Ph!pkahw%M=b&GDyU-QP4$%uZfe!<+bz;A02?e?X=iqKPDl32)gCe+ZB# z8py8G;QiJ{ zL-J!TmL0nmFi_mD3|GmH1)UvQtUWuyzllhwyPRUgT>W1N!Hex&h51^x{37Gpo->K^ zqyzyaxHHy0Pjj1-PxgPjst4SE1IiCXn|s0VVw+STB6~!g8ACb+d(o36Uz(my(2vLj z(|9-71XqVhi(19%&^+Pn=>Y2i7PgfUl2tz&;SsI&Vl83T_ni&(IyevBLGq!*4!pPn zjqqeXP=BEu^t;v_=A~biCTXu{iVK5+iYF)(O6=m5A_!cytrE6VmL9CoNS94dk%ozC zF}%Zsp1mZwRtSd)OACA`{$r3)76=AwK+0=uXb!gF`u+>J-H*j%Do_=aVKMf8mFUIc z1dBHEU@rO$%8Z0O7=rbRdDNJZ{rHG(?=jq*yfiD%S;VcRFZe{l*%J5HyB;@7v%R)v zbBZI1 zLO1e9t`19o=o_PUGM>tuFLsbN>9C&`eF&7-w{D)-^&SMScRJ>6g4ZcE8|sBfLC4W% z?&4tmom$A@nB%@yUGUgg`?718Xm~M0H=h zfs$TjJMp3?^RYnGe|%gj34RN8H+PNaQnmY8+3s8*rT&=8}?-?l83(=$Y3yOY@y2(_#PNj zInB-g1dAV|z5HlH4{ij?2btY579vfKM#|LaeLvs_Dpd!d^Bh2|(XUnXvwt*O^uu^u z=&E4~qdQs?8xFgKfEUW>MUMDnC?@zjpnZSn-I8m(Q`tgWBv~Q#82oHP^hl627H%~l zAiN@snjtlQg=E*K1r6r>@-2HUiSYy86{s?G5SP=7 zMj)P*5oC#%LNkgQtt2g-6fWhD?1?XHI7DqsvpuL!^w+oMXI)AyXnt+j|KyljVb)Gs zmuBSXR%w|=qgz^dd9Autk(t9!eO6U>N+xazt|@{e1Sd`@3{6h3%|DsPi6&Yd8zw-F zj4G>1Q@xl^`Jq%(;1y2lUJ*JFSN)nL4HT|=-9CitCqIUN)9ymzPyh6A={p80!A<5aAax^XD0U z{^@(#jS6%TK(!3930S7$*f}XIvl+NdOi#w@G5ljIl*e;3rVdq+!_ohY~}~ 
z)wSi_7w5_f(4Ox7wWh$|UFkaVU8c8U^C5pcUi7kjs;6eIt*aiCKM|QdwgLr_wSO$Nwk=n*$IMy_n9E4*O4Y_;^SXBPrhhMB}ut zHdLeP-v*x3_;vDmvbWK+Mv$_lqRhjfN#tjhaaKGe6j4L7i3#f1W>6bfH*)KJ6_>~- z?cdjJihWT!nHlo^K^b$o$D(*PPyIEvvQ+^xt)sAph^xDzG84k}cs>BIuo8ahOu+`f zhk(W7d!fvzaXO5v_a=(P82ZE9ee7DC#PFWQDpXECMU9oUM0EB!Gm;|17xQ5DO2+%4 zJyB_toLqO+w@4@?=Igo9F&Z*Qqn8ybVv_6X6*S{%!zaA^DcBv5Zu`%38?^xMCgYHj zQR@l3M||&_XL4Hc@N0MIC?67g~^FQb&T8VraWmg*>+5=j^V}Z!vSJef3B0 zFK2%%jhpms>+3O3BIOtieJYKd)f=uelax;vkUL6>dzXOlrq$pU8RQ7&AY%c~GrQLSK^TLV?l$l}jx z;J3x>Uj_)2>u#&c!E9Dhc!NUMc4wEt?OzO?h9LRs89QSuoI2K+nmS?R$M?Z$IRQPL z_w+0*HB=r(@Xg4`my3YHyCg;;%@^R&-y;M%?D7qF2KC+rIR1iEVoZ%y?F;z5e&Q_1J8}{g{Ti^OIdGO zt!8uTl^3Yd@nJb(2Xnh|*((X6D8U61+~r5P;^I^znJ7|{4i0A~mHf)KzvTOG=y6Ny zbeWDa7EE55D(KRQ%7YZlp~?aMX-t}v?{qH!6JS%BP{nI{dpR-lrXd&+(G7A0#Gwas z#so0jC)M91ZZv{On@WesW$eQ>=|$=Bnq# zO0m0DUEh1~>+Tz%2NyMZiZGT5KUJF&`~+U0(z0EsqG|-wdIbemLcgtPCUwe~Zlpu3 zaIDTm{Oayu4Bl7-Wf6`VLsppNPH?S4=KKBJ8qAz7+H;0Ls$ICCrQG@2f%mbG=0ChF z$+D5@6y!4OB0(&J`ddtN+0@uP_Q`(;?S~-#(pf#h)%7-0RSmSQ2~6}fx2}`EY0h9i zd&#O{;*&dOLuQeL04EHvDprEZCV;1rEt5~+_tVY(i-h9XU zlaAQZ+{q4Ip1As_GoG6pkbp88#-^kpdI(Kg8o=#&tW-^&$CSh-1ZU=CBxmP{KJTQ% zE57Rfqf+JRr*-EyD4PE%z+D)a5VUF1@KN!)Hu77hI^>7H7Y8fn3*iY~zt8yV7q?XQ ztJZNZiCL2Zm@jylK~oH1MWZZwZ;Rxdi^!izZFU~3vXPVYWtnN&vOOL0^wLhp9tnBA z-@Jd>fmbMiY&Sn*DJAPK1>?&B^*1%ynE@{VJh zVn|_7?*8>{L7bOffwx0t7OY~}k{Ye?ZOdw>6}|{=)f~am zi;`faA_INCmN&Wv*UEe4mqJs#o z)Jva}0)y4=z_pZ9=i88Ffp({DI$Ac1@5!-dPgH=PxY^mUh5%P>X5q`3)SVDOcUcQM zQEcccAHd8AM^Hjbr4iIn|8GS-+h%pFiGx6?DZ7kqK=1Gk@@-U(LNMOSV{2)JBz4IBS zd9~%UEo%CW4>?BoZ16g4y0gC2*U)oio1tta5SRuEyv1Dho2S0{UU4puLIU>&te_n< z0zVIYw?is)N9F?fz!&fxBmnL*0k5{mANS6~0DfvV`N)v!5>Ajj!sYdc!D^qKPw<2L2VgDSmUQ&21}#<#|$s%1wW6%2I4C8k&9$~CQLC5d;gXkJ3^e< z3`6#1+%;i8ky%ogP3(udi4dVIi~=AkOsh~CFG`Y$MSqs_{vC$bU$%zFlR{XYgn(Oo z3wI-+R_a9u=(^-}7r)z|2{_En*Ji_Es;q_QmGXhdnyhz}CH%Cyj}wc;quTOXd@%&P zK9(PxcXsj2h#IF%ihur9gXWq!7e1_pHGjKKh{Nvewn(LD4P^sh_~*2oD`BNY1Q7W8 zf%6R{lM6_a>d)jb{511_S&lCA-8$Sub4K?l5bma6n9%N}aK@=ClizUEp3O=(B<)v^ z&y<_Ylfb{8^8WKZ8^<;4!3wG@nzmiHa6ip!Pc9zpIWj@CwY7YTLd>K}o2qrY)RM16 zAJu#8V%H=?BG33q**Rzj=>Z+XtRNERj@Bo1&!?aExE}mNx zf2N~@WZmK36$^+jqjCvx>w(=K4fho;Sh6%u(-SUHUPu+!r)hB3`VZD0b%hZ7=7zSU zxM5u2QA`VQ9^Qf{=FRbPZF}Rd?iBW zFI~5va#fxgrj%8`EEk0DGiBPo>_CRSw!5m0Q=yNbfLGxNmOhFn;%?kZ$nZ_G)7zEo zG@(uan)Ln2%`ym(Ijy}I*jXlcluS%7KCh{&k9YF^N@1IVr|Y1K_$~%d=I_rWxsa%S zypYs3P#OtCL%KL>YGrM>hxaEY@i z5Q-?~b0cYn=yQa+l6r!$0%tMVj3V0*(thdC^z3l)ZzxK;*yCx>EK-&r3;TK593u82 z@IrBziBkUJvxH(y7uMA8-sqT}Zl^D1Y^d~~x?8%Mlq@R1TGcAbeXj_jNLyq;+S-9nF0kD9YW0|CVg zSajB*;@-rwJnFFXBBcg!&hgy-F%o>tDh2bqo$5SW+j*Hb*c-HUj|s_<#UK?iAcq)+ z{~1$1mA18F*y?PXT>*s=WSw%TBkO@mGrZJhWL~{>zg?mTP0$7k9>2&(;gke0mHXUM z!;0^AG!`gNmRsTHDd7XDNz!y`OT?#M?|hrNVaATjv>AMAs&`rVn*QB-U#emvm-1^C z(1|+}705&ZFiM|ytiW&kl%PX<9rEjB*k(O@CeZJQL9br>+KRYeR zC}CB0Pwn52(##X5t zEtkW9oHj?0?xcTVnJ|Sw=!RYKAc6W;tmmB><%k%En;S}+xzO~z0_Xq{kDJx3 zzK`^_)@zj8AQ)z02(JU}9yv2^|JHj|tB!BzI9T^4pE$LXn=ju|LD3@c?}ph={QH;C zd{ji8r~GWTrOY8mV|C1oGt2^pDwnZ-Igx)`4BB#d_i!$i(}8MOnQ0sF;L513t&!zq z%pvGrzz!_Af+64UBueq(%JLVG=#Y)`DQoEPR0h=`QB5pGAv=t|YOm}G@f+19OhPPY zsuKtnBL52j{I{g+@cUaM+TN(;hI9o0i?_w&X~sn0YglUsUc*jL)tKZ!Zag15iu-03Z=g<6BlBnmukufTDx_uT%tNr>1nwy}k{ep`W z>61-Zyb~QxgXmK8cYBAo=o3$(*4~XRGTx{xxuUfnulQ+h z2Y~Zo&+8G3=WD8jnOzS?n`}uv9lif*F3jaaGjC5~$S!P?hzLb+P$IoY5Lw8G`wa-4 z^T;S)RUK6n*c##?A|)9GBPcju`G>P%;EP^arg_f}O$1AA)0~p5a&;z1I-4sDA}K%) zEzW-=^t~}HLs|o0zlAS@vqB7&yzRG>=Vhv_Dp0GC@bxFr%{&BFE2}>(@Xeaf^cHc3 zBX$k{J(a_p42jcCIRn;p*B**0dG#8MGb9k;JC%=1ibjCWn1YhEc9Q`a4%`q(IYr`4 zdn*bBWV@&I1pFM$3$S7coXL7%(ax4ocKietebIc!ZiD}bZEUSdv}^i^QzsBZ;}lU9 
zhDFKy>)X}u6-%5_NWBURMI8!fwms>nFEAf5~7npOazwhkB z^H~i{#+ZU5B>xW~omIy(gM9N>PTP{NV&ga9ZVCcAA|IKL`Jp7gYKV}tBbagUR${SL zJhT-IV`JluYCezf%O2>>r*CpFC(gY)_Nlqr;2*;(R=V5*} z$JT@imL$}VlfgzG@lelN4~i-Qwr$APk0&;~l+H6Hn&+do*T}+K2AJ65b!2-UIO?y< z-(!bUR1FXSbyBI4i{o+4=={xsWRqU}y&dDS4^2y0#i|$6&u^FBvS+b5IpndyMV{G>H(a z0amk>@704=Z~xTTCswHP{MG>W#dgaKv?*+=i`lF>jj(GThJpn%H@=}3aETC@+y8oH z2lB0cw95;?{k8GL{oPza8WYvvtm2a=2xWz|x_z#5n~?CgW#j;O`mr%2Ea>s&1;Z~# zbKsVJ+u$EUXCNeWMZKdl@qKGnxD2HxcY27_q;k-RCpx#%C#Mh;u2+tMG8Ct(ULj!< zj;;99cvy!>)ac3_8}a?b8&M)9v!vGg8@F`j0E@O!Hbn8w+7(>X#i?Kw9&vvn?9eUX z>8S@z#fm6+VD7&_0XDDVen*SG1ZA>#tfTGn(QSj@h17UP6rE;Mrh6Zhz-SOqjbUT3 zz(l+ge?^lnYX-%fk70_Kw~j*|xdz2wm}DNkA0o4h&g@F-5fdHcF~djlB=-&18?*=u zc&SG2dUg+ek&@7;$#Iy8*yNsb_xpO=zz{$^@RyYdU4Ih4W1ZU+!AY(~P+CosqTHs8 zN2uOEblWt>2sz$XjhZyFrVekAEba={O& zD2rG-&WUJhibyh(h$2%cmL!b8=@0>?F8Ej)R#rvHi$nGTNJ0O81$1${j-wkCwbh=b zHmkYZ3;rQ=6oPo9uxyR3zh!Qh+xG!GG=Z4XppM^bBevw&`!373SbgUy;oR}Vd1j<> zvE5hZIPInkuH*lh1kzp_p7py1urv>TmTMNV5KT(6`s)qPpA=W19rY|b;glsttb;}- z0}E#!>!yU?vC{~_o4z=W^Md7zy}m=+9AGB@6|_03>7+7)<9|pwWnf%*g9(S;GqzgjD}T6)MMD~v7&tb=Ti787-W9zV^d|57Bgt#1 z=adGZK>jzbcmkT-LVR3Xcs^hm3RO^*cBDWYN&*tZ2|#rNfv+FZ;>qoisK)^#wgZ!}pDE zZ&HcvvYQ^qS5xuNJ~ssyoi><-WKnQL|Ne047h=xqs`ZDWzbDNTWB?e!noEI=fuonh zNzsn;O7#Da&^iDoeC6XATKJ-7_lKuK1HW=ih zDTtcVION(L$xzuVIt9uP<;f_&+;o4aQ^urYixho3QBfsOy!+wlBpbsziulVk$95Sn zJ94*v00NaQhp$+CIfUFI(~GYhn{zzxZ_K@X*E`XM29Pjg>gdQXlz1Qg7t$#>6g%-6 z1B!;m&&G=ylpC5%zFP7qbI>=hsB*d#QQ(f|))U>n8GQ1K6pM4zV=G&9kdg0xF za-3_R_N0FxTaQl^C=`WggMfxgUz~y_`;$~M(_8MM$si&m8Zt`dP)UmRjlS8G3W3yi zs()$q9nZ9s#b%;F!8IBCw_w*Gb+iC{;v%{dS%y_==zqtz>La@VFyPC{envF2(E%i;yP^EoQ z%QbBRW#^h{@2cupxL|)vPiZ*0;qrrCb$`p+X)lhCjw7nE2rC0JcOCx}jh&yR0bA(8 zwoX*;(4Y)I_aJqW?HG0nL5wU-^@RnD8g1GY> z)(W{UK?jcO2W~Kc<3kxeSN^!2SD1vLnLz|5WT(LcAw>Qy+r)8)KYtoK@6UX3bHiR z3sf}!5Q1~tWQrQj{01%7Xp`F?5c4r7Nwq55^isp$*g>-dm1j}SSQxTGmm&SXaps;{ zk^`wq!z!X2{Nhc5Dp%{S>83(K+AT=R$_NyqVY;Yo+={A!gspM-I~UK*5e^e`6N_7N z0*9~g)eo%{q!#6!QG#GOv9KPMLO!aYmTmYAO?9eZ;3?0HWWKfv#EcjM0;zz|#EaXP zj5xZ|m}N2y>cXYBkPIqEs!V0nL~CMV zOZ0z9_8hdV9-u=LR!HG9EWOYdIIaGm%vLu%7<({k3wR*S9g~y+?`K> z`P`m0lT7W|TM2(TBLySZMH=)8_wV|&%7XqXpyQIe_C94-xr3x6Nb`8O^Ty-fw|!p5@=D; zJDticGs62G)(Ixu@=Vw=#@RRaV2@} zPyf3G?+AocDypjHURI=P`-4OLEG231grk1Y51sjo-TwDy&=hiM2JB zG($N?TQ*Tw!6ERKPNSZIRhTFVeZ>LijLC-(w#eW2W&0e1s_fk~?>#EYaoGny{tXy5 zBBXP=G>zXp)aU-DAIgb35r@ZZq*QXia?AtT@~tYHvmCS8B_>*M(aC+&n=hx!!f?Q< zt_6ANZzy*2)-TdH)IGd^KuH5Xk=JNba2BVK=UpPBQ>6=3$k4r3yWtA^kj!c;MkUm_0N5ksD=3vLg_N>FsE3>Mn;>wi zE#)%mn9Jm0H84=W0~MgxA4?-z;9*{uxDb7!N*9vYRXAC6mHUQ>n(cyYo0~% z8BUSzYh;F-hZ?G4IGm0WtESXxiopE)e5G!Kaot)Ap1d{>-1b3K_OP9~$^|iCdf?vU zJ(yKA-@nE>)wuivBIC(guJ3Q^G}a0ZJ%e)Fc!W@4^g&P@2Y|-pYKVhxZf4xp@$QE? z;?UXr7b*e7=+{anp? 
z`8_)#cTsVJKK=}gfBBW1PcbT>5#Q{j&hBL=OW{b1!e^B8QWuS2t!QitDOO%2SBl0h zroS0fj`Lu1{F01wnClcfVoF5yUl=3JXP|i{NQ{iENCElhca;H-+KSAT*Nf&%OWOoy z_##v_|2v1^Vk||?6ZVLd_l(EtR+rB4FO*Vs%$@krUn+xRS&hTWR>Dm6IX^RJmhmvh zF(^U_$jXR0tUasj;ExH_TSOQGkZ}b znVV;o@?_@egWNlYzO*J|}t|6t++{aH%Suxur>i z(sccHQz4{^2GmhEKqll_toGJc-6jJez5+7cNvZt8jXV^KR8zqjF@jjEz*PN|Zec)v zBd2RAWB@sldeQ%u41KsyBA3~OJ2I41np`I`IW2}ArOSKMBBDezWO8I4S>^8xI!>G9 zV^Y*P3x&{ds`X(t;Xmt88bI^W#?Y{L4|61KBuD2A1u=Jm2e$iAby=9hrC{gGqjvHi z5rVej6J9@?M5q0ZcKLc7^R?h=PqDu>9gR$J)j z;x`$wpuXa@{732ipFf{A{#XQk`(^t*<;c{BGM7+Vg~PP+)?!t6iI40LF#iLh>>}Ar zq){Ig9`=6-5!`VuIpaEQy;JxhF5#4!puQ|tC($AkM!^h?ArYhsWDUPOF{$6?bIDeb z9$Z03u0gD3^(wDdWd>GW-Icz)T$+Z( z-3yKlDB?Uz24i@h0)hM#N5f!q z2#0kk&M#qCzidhBk|Rt5FC1s+K9Sf(xq6Lti3VivQ`fsE-}r2K=P3bXbS%M{yhIl|3oaK-i>a`m}x=TLF&c}GC>fBrT-$4cxtxa_% z?mkK&y8lB6359dkSv0)K-1hi2oKu)yIf4(P>aN0btKm8YHU48pHRKwq(MIE@pE4xK zpo1S#xv%2@nIM&z5)rkHyb+r2PpT&dRqXIPi+uBuMd((k`4Xo=(v)fn$IULe_@JhA z6d?Ozm8jT?2&wM7*uCjmCi7cr-Faa=idJmA&XDqV{=!?EVkJ}2iNQ_@9|e)w+uQ^X zb#Sq>B9_1CQ30*YWChOciDhELhMc~mW5g<%+kE`JhPiPI)iXHAwcwdv?GgvoFN4Im zzK?MRK?1ZHvjz<0wF2=hWeGTqvtCXu^G2%rg7JLS$onqnISd5WjmVZ#83*5lD0f*t zZMKET%or5!s1go0fcs3H!Jv1UM7+=<+WuC=*Dv9Zp`V)Vqr3a0?U^^F`=-_B**3^gnXpqub_^A#XYHwp zmjsV<^G&RRuAr9ULR!uBquUe*aueGT9n|{(`=}^4U7Brm`$eHmXd8J_@3e-j?h+2Ot-ig>-C@nr=*F}F0vmQYd{Bs8 zBXZ*~Sy57In?H7`dkXW~#(t{3w3C! z=?0;Rc6KK~S!(~5d5|xv&n|D4tT-+cq3_Z&*}uqXwLBW@IJ-#tXGx54=}X9N@~@@= z_b$_qk$7s;f!-e3No(z7Z1EAkDC@MZG=(&KJ^G6nQCD;`&DAA_kn=uQr`mqna5qkr zg1LRu=Hw|GL-N4nKnjr^o+IxNkv&N0ZRd(B`SLzwez2+pLF*2c0-Y{XGm(~sal*z! zG;}{%ouAwxbU17)yg$fzQ%s_f$-5GDdd? z0-XkplC_hPXMtXCmIV;~mC4x5nHZ&iyei@BS=y&F-s`^2U?_5eJmt?iQJs4+I{&6> zwminj`WHgSA&4)Wi+i{(`BqGDYP&WOeif-uU0?Z7NUj|;r@>+4bYe%dbx5I68v8>r zDT0YHBIJ}*R=t8g9=9V|{3Mt>PsC*}QAG#o?g*-~mGhd<9xpw6mVJ<-ch}5*de{Zc zq($lk$ZTp8tJ#5_9-F_coqiX@+^b#0 z@?kkA3B~BGfS6@@#cNEhf-}q&vml~1l3D{wB7oEeD)U=PPWi|iqoVO8dPP^Jnt4^- z1`}?;Z=Pr6bFw6i5)nUyP+RmEMog`n@`I})S{I7NL~dV93cgfCrVS!gNwkoa}}Z9Z63s?1uQnPNsP3)J!J$$&{KeWHgP#;Nph?-@zTbmX{mdqR=~ zL$lpyrk%&m*DeHB#(Fw7TxX7A6iF3@cp}5qyEbxVmuh6z%LX>^%d99L3!KzU*?6r9 zE^%54l>77z^B84+;QT@?)$UV%T~xeZpdN_L;P5ruB&oDfPr_bRx8w+4{J}T$waJzf(^4^tNXa3W8rIlyd@k$Q_>d}#lzmTvs{V4edRzs< zYVV8sWB=X{I-RR>0uisbYbtS{6M7n~kM(rfTwC3+J?%`1by9W*=p{@({d&et)?u|LC%2V(~ESg>^^Dz$cn(Pof5)dAY_VWX@&jyN}hcodeOU57O|7ba)UamTn7KCV0yY?K`3DOThQK?w!hEEMLGu7@- zHi)IM?Cz(Md}5?`C+(RmeKj4QvoN!&@^ksG#z%kZQ-qUz^mxF*3#gvo`pRr&R{pXO zI-{zSIR{b&|8A&tOrMYr;hp@bUu;zMehEdHL6||aTp`-;EC)fJ`6nnPiyK|L9A0UK z^Hm0vYf0QPbzvpR@Z6R!*jz3n+S3;kMoZj9N1Iqu$T2A%GJd=cN6O=+fMm11pORpY z!EXmk*2#tuf5pHOZZC_E+#?#TN5B}?4!HYpd-%>DRnh^OJ%NFHiDvH?&fy_5Wjeb> z;zUx0S%CWpiD|GZ>ogxL5hG`(9Z`N(Yly>nIg9$QG?W!HSJ&!;>DnJieU-9^ z#?#>pqKjc|tRrg_wgc?Rweau7E0bz^UoTvrR$rvgfdFvm$y0Un`f&jj0>4N~bwlyK z(TRxjA42c}-cQciATB-2_b3~EA5>$o_$xW;l6Vnfzt~o%Ybw9Pv1-8GZvR2<1iO>1 z#bc0h=!`~m2SOtq!`=zkhapUEDR#`3qqeXioASsZN72rg#0#saznnPIsM{>(gl3r* zZl_|?`U<(a-aYc`a1-Hz_v6`b4t4&_uOSl?>hlo1dWAlyQ;P-40Z;CyL`4!|c?^t>@P;Y=rM+PH2&! 
zBorciGL=U;RiSCr$`7oZJyK5=V zFTp*UBBZVA5!g`E3=(;UkmuZ4(J7|76LO-z>Dbjr>Khw*8tUQ4rRpebH(hqmUf`no zy}uWCA}*?<`|@dIhymY#Nq5IX-3(Y#Ja$q^wyih5L#lY3|^cPYNEQ2kgl!kEc<(~^D0c+FzQpC63S{J ztT~`oUNW$FFbaS&I2GrK#_HPB+UkCnR~e~&`fl~cq?cWpb`XLUirj*DBQ`krE?ON= zc*T#*X*?RC`K`iwd$}%h){fv2-h{SgkjeD1rFiJQFyo}J>nsk!N9T+B9Kn{2WsQ;q z;HkP{-cWrV2}5Aaf0sx^b(QYA#S;vP@3wZrXQv3G6N6~aqQ{M_zsKU~%&_olNFui1 znMOCZE~QwO(x{-%`Z*@h`fBy@L?=Gp9@b8_v8aNqv35X9qtqI#X3Al3uF^b~;)BQY ziaBz~Q5zmZ|AycImqrkmKFxdEqeR=pG|baJj=CWogwJ2fp&4V+Rl818=K@DxL3lXX z(r!P7x_DABJSI0|A~UTr6${^IFOV&sLOpn|Bh%A9B298P(5z;12znDS@PGF?OpzDn zm^ONlxHIEGHa;YuU%W6`Z5ItH;ZZEWk!BV~refA9Vj)}4kY0N=ClIrKC-BZvR!57V z*l-7gr+dQ)QLe%Gg=(+5a-nI!mAy^zwnR^fLI615+`~q_-4rG%E++2OOPkJ zsrr8`on=&8UAKjU7Kh?m+}+*X-J!T^ixqcwcXxL$1zMoEJ1tV&-3jOB{l?9Y{LIM6 z%-Uz~wby)R5cX+J^ij%ji}9Go@t-JOp68Ebi|EOSmaW0=l=XPEVv7PnaomA-3ijoO zd36JLhCt3urvk|BEm*5>#!{35x2CPWIzcDbDvzo1-~?k*3IrPuv+Zs}2u~Chg<`Pa zhQduUfYF*cuedH_sJwdSA58rh@S-F2E5pD3qAM%TixWgk6$(_pUgV_WVV#2AjGDh* zOyhD4@Tnh$ozQp;|NkmC4LJ_MI0jkoulZY@>mg_La@Gkeiu>lcbVe7JP__U0&{9{#>qM^n;8-xL>7RI|=TOLoSXyJ9a1s>F4VJM+!R4 z?@0_&VKI4DSRGwf9Cy+(UMidt-x@qWx4095OvS3cC$-*O|MTT?@blr8Y^Y%|Z*Dem z+t~!&(&jq(^grM;13iTmCw5+Z-aoZ}#&NJ1ScRTwmMP+|ab&WbatB^ielr_@g+qA% zpoa2?0mb+i>}MopID*@LJcK0~tLz8IUZoV=apM$e9T>lME|QN=u!EWP_(>!m5LM3G zedhNtLGYK0Mp``s!6`Se&?38$uu%H;cjnfTIbHEA8WrOgJFQSQk0fTiNp+*H<-k>dHJ(>JZext$yz z)V&A@JW6^AbqIizMSX$Xw1%{!2{dj-!#481$)-vAFvxAe5*56OL8 zUJlqWN250HmTL}H?Wnlj7$nC2ywABkR=mB}~8SJHC}$+vm#ye?aI^jAZ5v z53_jf;+f9eitqwZra!m!Xcs*z>6knw0+sk?st#Rvtb8VMz*Wd5#ILz(;a6PHpo85zcCivm|Wj?yV(71Vox@l`*qr{ts z7CERk4Z_5uu){{pISplZTXluN|GMr(yw{^)bx2@G-&KBvu5|R{c9ty@2Zg?OFHH9d zvIM#Iz7Zvw!S=a8{ihJbKJ?5oX5pqKW@61CYz`kE$J8rZ$L-!6m1*rC-k}DX{80P| zY@Ek1;~c?}i)kf)BZ)7vem^Z0#<0q@?_c`*VgNPR|$povpUQ98?C z-ILgRwN;1Ep^-InBQPowWmeS!`eRIB$^oi%zZ3ZZm7Ml4SpQF06nWjO*KOlzm(S~z z+il+u5}O|6hAOD;=8{HtjI!K9<0IwIzrx*V!peCnCIo#MeFkBn>DYKR@c4+6zYy#B zF~^BaSg2yReOUinkkv|0L%mxTyi67b<#yPl9Lt=Q^&#ndjiSL~2LXl*obF5WWlx#A z3Y4)r4aZ!M)@iad#e$vp^zD$Nv5&f<<{Y=7J+QO{A|n<9ZP6Dyx*~CwhvgH~q;Pb> zJC)H&cq&VxooXYa3`J3jYkiCD0k5CA%g{7B*ozp#DHYhf_ZJ(|G#lBrRw}X&nAv_d zn$y@D?TBSPU9t$Mocm89hy`zyQu!6xu}TYWt>4Tx6J<1>tbS*AFe0<;I{TAe=k6R( zMTWo|F`yt5T=4aP4yr-KbTH#SGq)q?pq+lA8K=lPmC+6NV*x9NbLF#YT18-cQu^+;`=oZk>%Q|VKn(^^^VoE)8Oa6)oVOWC$ z#LXRk>%M54l&Tr~w__vKGE6=m##!THI3|o!lKJ23pI^O?(F{-*F8$pgvr5RH@*mHI zKc(ysV8;19O0CADIYWTfaG7eB-0IUIr7a0Ukm=h-4H z85I%zun|Mx`6s8#Ku#@f>+RMwI8?!90#}%@+B|-Tu7wvk)#AlYt{SKH@?}V}K&{5v zk*K;TP~&;Cd4ASp?XKmYM!_&p714nzR|22r&;XS2l_MH({w!i%Ud7&HgQa(z4IT&{ z6l2A@ocpI9onu_f)p;hS4vEi*oWPQHX2Jx_?moMm8dhtuO4ACU|9)BQYnyGkIFKv+ z#L|1$H?hf7&Wwk`{hv7nGI9#NOew#slv-pieQa!1*@9x)%Usu|C?0@s)gDE-ibM51 z1U$3Zz zOk>`>P z>eZ5j9RY6*C;xMfYENDWv%^X!1sj`B3J$Y33itV@+?!@4$50%HxLX-!V#XW>w-p&a zzS~}l4n%mCTgwx5s9|G3DS?JwlkWd`f=^bLL0iJe4!lZWT8H+cA_k{rq1X6 zma&(NK8yy&td1Uvr-GW3Y+J{#aD)}*|H|(dYu6ODPJ2=QbWo(jYH`m=5q%+pR|`~E z)1T70AZFYf~Kl(UfA6Am80U2s(h1b_WlZ$7dh&`zv_+4KHx@+)`o zVjqNF%`XAs$zY>WT8CNwPa((!9*AY~l+h8xJp957RbID1f ziT*jn(q0HLdwg3GgyrLueVa^>J|@wIE8y#@suxF9oWO5j=DTth_-E-=fB@LMJOUvx zQ#Vn?Q^0(8)K*z6*3jP+_!!kp9+#aixBcxG*ihteJjLcWo6ltrXMT)|E_Zr1$dQf9 zvH3h>5ghU2GWY%)B;{NejS)+PKF#&jQ5~*@#p#8+@fYVn8z?Fyq>q@KO^p+A;3fm# z0FAQkuYPhmG){ZAe>H{li1xmHSUC;zgvvgTRdbejrkze~#kWPxA!vEU4UU@b-}?Db zaJ}7alC<2;4;ru~F`uWT#{9r24u3jyg*iTdf}&zAZlg-!`L@!8+p_zeURP9BLzGd> zfI?M?5}hI-&nMnWsq}}4AuswwbFr|415u%P(IN1@x!QN#^S20=5;PNmZli#S#U836 zT(Q~_%d|0^Gc2&L3>Q=TKXVGi3I3OI{FKSC$;i@sOGu#^S#OZBuH8M@AJ5V$i$Rah zqmJo)#d~2hNX{$|OMGNqe9jAd%7AF(ud_1WC2=rK&K@aICwv9g9Oma~O@&V+4GcD3 zMu^Nv`gsZAoRLo!%Fs%X 
zkeyOabf$6Kp@(u|`;6(O;ka^CBYGw28anw)!Z{66ssrn@>S!i(`s`mBJV(WL=_{y7 zbu=#ym{)TaqT)Xr9evBN9me~wh9Ki|Oq7Zas)k}#mUhNwkm!-``HXe!^-#WeR@}ig zVohHpV9u1hNhGTaMFz2H5cPS|3@OEMm0*Smy@Hr=>L5AmP4~z_m~s4mFj;qqDa~A7 znFt~|(LFZ(NbLn$w)H&nc_e}7>}&i}e15ci-v%Q=v7DL6u*8mWRo(22#l(GQ0o zE-$I)VL>1mIznd|+Z)&7mJt$+JzhsbO-gBfAyGl%_<_d;3z=ff&EL~YVD)r~%6 z*@kQI=lw?%Np1_uc2kRU_!_M=h*;O!U&)x{QH#jJ8is!ucTC^4N!u45Bt*O}G2Qk3SAis;^ahdcahJ zCmQ)Z)Exz^6!rW19lk$Y6DoTO{!<8YV!dNmdS%w?`fTalC^X)T{3n-^8MQ^+AG1<8 zvYu-vAD!)+(RQ-Lz^#c3@B-lokD>>{M z1&YXfX5f5lSBGlb`H^X+ZaK!~V#{0=Nx--5n3 zwZi3~(3$89%+HVhUJt8-WCDqy3>W0J$Q(cu`iVAy8^$(9E07-TtaWqQme=3ip#U-&#A5R8DZ@+mU$ta;?xjG09SDyWjb4vr71nZ&9% zGQCTr8C`HvwC9qbe5XCu+czg?zS-^F`!*i#gLE$Wh3U1q6pihoy(sV_Jg89fk@Lr+ zqPz;n8}C{#-nRxQc`KYcdP!s|g25~NnLCDxCz-jibj)y(DI(?3WqCR7UOvsy=j%z< z@oHb26TKomar_AmT2ar~?~NJXjHemp!|iDT+`nf>+h!pl{8RW%IKwNKmWYJRQ7k8@ zb8nN|YH3%#7p$@av+STLE(JC+xNO~VZvCfvA47%{0ix#Gm636M6HDgWcF)=KuLE6^ zeI;4B&z0WKA6SW?Gn}q(zLu|ef4jJ3ZXJwAY3|n)$bGW!s)`T*g)zRd|tY~ zZhWV1xu$ig5bk5Irk%P!qE3>J(lohv(V@qgqZOegj#hU#``ptfD|)Q8#Nj~|)GOqz zE7?wMZy(o_p*N;&<88qiwjc^gtp&mOi>RKm3aR;oAA?0%{m{O@iq;9K50*95 zT&HKS)Z<`$y}LY@)Rry1$(~a^IZ`JoJ$k5-qoEjsmod!@oNbQbs+bqpSm3>(C<>Mz z#$Hc$n}UwkU0kDZ=)0-8sqD|GDiSAv)s|H|gIiF*wqv$&xyK6Lp3^<;Q(g>bOD3eK zM@o}ZQ_g#3;GYtRbg}T#5hGw3>w8Y4xukL6Y%qiGrSU%o_iKdr-&hwjp^1@$=#`#4 zG1_BP+O_+YrR(^#!i!m5gP z#jeOCVEO4mPT4t3mS98W_Ku3P2QUHy3DD)oycUfQ|)suYI}cd)xT2@TiL80Ix#`mTL8{{aRz#;^?LGLqMhiZ_>Bs!SY{dnei0i8HYuIn z$!Fk07KM4hh=J`7X$)I+{gxfK`~MV*fQE@Tp8mz9UF5G=Lt;-`gG%is8n4tfxXw~} zojyQZIJvZqXXUGZ`g_|xSM0tH%lKL=cHV*`(w*vMdE;S4d&nwK>MOx^9{&DF^#dFcX( zlbMplv)9ViOW_l2l7;Au`KK3qC{dM-T9nv`GdjZx!d)7HkTgjo*1WwfP;hG~F#1w6QbF+X+uNe$rmp8GLlDLjc8dz-o?(&#svlz;k9>*iZMGWKnGCYT3d6G@e)>m z@X+#mL@mB8p-93Gz97H4i*EzK`4uV;?nSEsz+K!p%9&@%I!MHD!tt)RaU}UM{ zuCB$8&wnca+FgUJAe6DBjCD5_gq|b-X*V&uXVlKxS85p{N#j&IWa>*%GPL)U$&##* z$|I{biY0;jL{CiK6#)X?Eepeb%(3o^@1NM9$xuMz@?FD=w;{*P75Uv#|Ewb79)DsT zhc`I(XHNaoZlFg_YQoa!LE@S==J_578}}akr0TFQSZ1n${D; zHN%?qiOYO55CpW1q+!0<7^Y@l7J&gHksg0^?3R+PxTWA#M&{4l@T*eT=NrV|ewuJO zk04JmEV}a`>X{LRNcHzq-rwVi>D0B1(jv-Ip6t&#g(oKR6vTK;YYx?vR(uyu_WshB zJ>0h)7JLKjR320w+JA9=n7P?J7TfeF7ni~HEL1u^!poHe+n~p*ETpIFaPYUyOA9F2 z7K^HyI|pKAsNX1av8I<}egHze+tdE5Aq$)WR#_09GYc>h2O z022)}QeZ)U{_bcUg!HlZxdZ$mC`=6Hc47j~u3gT>Nv-<%Q(furo2v_RSO8?A(^fp& zn!YH|l?t)&=)=gjWyFR)j}$rl1kb1GEuGw!>~=Z}C`lga`PN0ti`yTr!JiM&$yM!f z>%{Y%GdTV61T4l0xm#Fz&wHtB~gj-A21mx@VYKINP8Q(s~-*Q%uoYmzNFTWzKzMUSK9> z1!rsjtsiU^1*t-4)aDXQ0u%!>HKpLTs7B0(&lP+A+i%DFrb^~_(}rHg+pF>5EjWFP zKNK8x2{w6OzGq{9ptVudkjN_{l*)@1+GfG_#m&XHtRHQ4nu?(RB)j1$cJ-ik zeLKx}XN4*;Us@IP0bW~bjx6#;2|FbqbofX1uL=3O!>UgK7OK-r+IKQ9q7Z%@a4EV4 zOo3l*)<=rb48zz@5z3UrB4X*%j?twslTYFG-VzwJixW@~XfU~zA}@$P$1ELp5eRy< ziN!ccjJ1iUx1qWpwfqK#uFw1PiRQV`-yf%b+MT7vI;2oCQC>Ig;HFzPf%&t!W8&W?#f;YeYfnU2-iAAX?^CY#R28$Yat zmdL9vU$3jz`Qg^Lr&-XFutf8fWt0=~B*r0z&~eIcJy$V3y6$*7c};W0|Mh61ZF4B#Ha$sAYccauq+3jIfde;R z9MjI!99{Hf)tLO{=ZQ<;0ZQItKG!Em$nu}WZi&ypfKA_P;7$M5d*Ho=iLsXZx55|d z3vVPz$BrZh1jC;X9FgC+a+HNgWms~wVp+fQ`JLnlsd8TbHn1iE@0lg4*pR-?q~wmc z|H{Xk%g?p&FQDdr|68b^3gUQqpmw=)sSWH1foN0L_#C(x|5~{r7Vg|{nQyDZp+&7X z+p_f=5shtWwkSR4MMoa9O`zkyaXFMA=o8}&UvvR?)6-mL;qv;}IeJY<=TzOFnu+T+ zV5Bd(7)1Qix33@YtqrdVl4V-V54;5Ka8+oqDi8_mD@rvo$Vy>Ui~^A`1h!S3>7dQ zDgRglYoW0)dWAj6=|Y+K(UeWal~c6XOP7E3PJj&h-_!w>v`A#(&lBGA>921dumkGt zE{tZ6Pp{8^6QSYKm6P&d3n1po_rgXnH*Dam<(mdP<7L1S0?CUGIxw4Wt^cu1o*rRD z7GX3Ov8Wgd1kJYbfh}(SKODLxDhT=!QbLC;#JcnfZ=!JN@%pstXLMcSs)Y_$w=oBP zb|N%C4OMU33Ymx^RCOvQGm&<6lwg{Czk>sZC`F3)dKy$30D=j?u=! 
zAMOFsMu=3vi<;@ex+u4ASqnju_!mVaeD|O{%Ca1M7g^e$hn>~jDjfq}mOUyrfuBYE z&ZKQlerShhl5KAE4E_FGi1jmmE~xi>(U$0ujR;Ki&Rspi`sxhg*?M@~Ie>(qpq$Xb zfuJBy2MxvLIPyZ)mPfv(a8b1mvHOYUgr8eJ$uY^-82fi^FV5hA+b3w_@U>pa@R5Uj z^7i$^>c4Q3=6sdiAR*6-8kS}Jzu896N=a(kt|lfMo>O! zjhdPzMF@p;4z`Ns%94AkO49i-f*}t*anA8{QvV2sXbJlFQFmKJOeX{}yvp9xG~91j z!4GVAvNE-zaBu%fS27#LA_)3%J4K!0qn@;5aQ5t0s&>Lvn{p&R43dH?-SkKF|Tl-RpnBa|o7L!Vao~@<1+l#+;WfTPD zxVvd7Iq(M3LBGFB>Q9&p&M9}M@O{E}+;DUn_6+k2ykbuMRJn+{{DwxcGq{!dQWXrR z@Fnumneo-bq+Xecr)L-QXFK*U?q@XZH{M~vc=zc0qCMgu5UiNJqK$upz&Vsc>eO^X znI(^!0wWQ=emW8xgUY{+{cxsd&P|%9xasU2Z?xpi(9G_#~9IJxZ(ZAq*nrctU zH$B@tn>w61MQJEUDE2sqJI(x{hU>%MlByiu3TY>8!GA%vDktQ-DOA^2i7mbEFipEL zlYcx{n4z8#epwnc3FV3W*YuGh2SInTtlJ$LacM+OC<~ni#0(mmRko=os}M6o6jD&+ zyw{{W1gw5>D#G=c}0{X#yMn$FN z&#;P)k`mWHXNS+HLndx|I3ix^N{#)S6$^@z5m5O@Y3f2ND90YWL4|9jhl8)15UrqW{ZKeJ^$Dp zB{Qa;GK6r|X&0~kJU0JNA)X-UDi!_Xb4GXdgIrX-ZY9 zaofzifu2)L>5T@?5@)}A-NO2$ej!ifUn_hZeYbf5R!Yjsg5aoG(sa`kKJc$@4kU5K zS^Uy&~_AqT*SlJiIectcZo!l9bSDVlmMI}f2yB|DP-1>6**@kxv$aZ z)RCqmRR3d6rcA14&zu3nGmHH$-1F}KVH(A~G~wI%CPwS2E89aI2>c!9XWep^Nm4NE zJjKpVxRKzFCZf@jjKW6zf}(m$f{v}O?OlmsH*&_9auoRXr!@B;YPFQOG|4j#XZNOd zV9wRx!JkszhsV?RLoj6YD@95L5`xE|W%pzZ(nOqyH;dtmiJMc6+>ssBV61n4ApXZ+ zfAhJKAHra64}<*R2=093XpM&=M$+yxP@^HIS-ez1RsfqUcKSJ4=E8*|fIcYXp2hM$A?m_1@ zDgDHAX$}7LSm*UZ0K6N&$Yw8qk=o7m?LH8%p9Q2P%Rh5mBBhQ0jHdt4ctZej7f$3N zK#|ijjx{aP{EXm~jn@CS6ZVN}lajaV3HRUzjF9JmI_8$y=)E0VqyOPG5CBvkz>0y9 z9mZV-QMP1y&?@3}a#G3PzviVosM;9YzC~H8%rIEl?`q}MRi&RkCm-K#mNb^h zfA}Hkq4}_P8+c8sVFIWERsAinv^2Xs1;EcA$D*h2*a%b_8k)%-#E@$uXJ1TImK#63 z3Fl(-FLtFqPr|9g<6Q8n|0!ezAqA@#9G?krX^d|;375ph=;a&A);*}JQ=C9TA4V_S z2MJ_Q<+L28%{Cun6>VQ%!dFgM91EEX^4^W_-;#Y6&U+G&Uhj*aDqCF|$^Y}AN}KGM zikS(JS6BUh7X8Sf`SAx41V0mbkw91Yz@P8;+jXc&xrJ!IH8jBYcKGWk*>8`?VU}l@ z_0-O1aNrZC1C|Sz);XczTNc7wKf~pTlo;eQ5qZ`F3@V$Xx>bxoIH~2>lsif%r$y|c zzoo}f>80Jutmzta)fBZ|bAvULwEB>5VHZgS|G?uliuWFmdG`eZwhAT!3C|)woj)-3 z)^9DJP0(Nvn&B}Qxib)YMT48I7977&b*#*+bOse$Z#V~@pYH=c6Mw|;W7?Fu4y&gO zv;!Ec0CIg{;okeZXQ2HjYhC^O+wc0r)o6xtn<3p8flD);ArRz?(g4@XM4CUT=C3k? 
zn76aa!Q<61nvP+u;ugpW$?q=ec7mA&v7O z@N}i3Boo*UHH#7hfr3*#BI8B~5Ejh1(+0G2rNQqe9XMY6ab9K9R=+l_jn40V^Tp{7 zR0}14`OR9`!>T4?`lEPtV*M!VeeM&Htd2aUzMcmA=)C22ytp5+)kDo%Ju#`3HB3LTg7TF6pq1%RC zhHdU#1p9$^m%F!OtUw_D{b>RyDS;IOA-XX>Gy0b${n6W3)5@kWU4rWi3w5_g2(xCp z(ErK2y}LuXAlzJ9{+~kWAn0`n;NXnwlU z!l>hR3Dd9%D;U6DPtO01`5l=`{Omy8Jh%m{V2~w&;JEhM(M4L!5?>el^Ac~au#`mped z%O^_E2NEDk40qCxNpjCnO5bc0$vOJu9Ruhb1GrJ0sno|wcc&Nt-V3cr=Rg0b<5|R2 zg(b)gQjh24?y{EGeuzk3zQ=U<-2-Z36rjZvQjDqx^(Sv7{EI77h_|}67_NJA z_TptvtZdM&SyWjXN)nPWG!Nx*nz4 zonFvjt;^UvgQf>LW>TN+P_u${B1`hAihrBE6snqxsSWk^MDzsc<%l-^uo=0>8%z6A zdTTQEk_jAbspev;PiG!EQ?3MIfW&F?s}78li%s7=Gu@`tvk?41_q2>(lP@{OSIq{h zTE6~C`|?8Fw~j1CX(T!cHF@ya=SXm?q(#*PuM@a)F&PH{-b?mPr`SZoY0t;K!WR4@ z=sLvb-L@=~tRdJfM7X+e3 zmK82AviD!*Tc8h)1l{8M%n~Uu`v3+=Nz;0&wjtRGjMTJKBOn(POuSGis;9A#gGi=C zl(rCevON9SfayJ`qUDfJVjG2Brkq#eSgTc)vxusNKEWgyP96b6Hz~Qq#Y%^ymnr{H zS(m-;?p^U9gIxc$*NGota}z!%NR`h(Z{E|4EXPC?d^3`7^FZO6ZuuodqLuW!eCfqN zZ=_5oP?9?rAcxeDBxA7h=@#f?Um=M%Y;97tN$M((jP%~6=-kbqE$Z3j`Gz73c8C}fN1C5 zTk$0@3II|7SQNmLo*jU~%=2V|=SD!VMpB!^92;u5RH0u{kVgIz7xxR#A=f1En;`Ss zD$~;SO$wJCx5@$*u<$YzI5cxcL14v_xM)$iD-H=Yt^K$B&~k?hycM4MONVPMLV({F z8b5=n{y6V5ttXUVfl;Hdu-r_d9~SjPxQbfnJgfi2e@b1sUGDGnQm^>?*XKZTLS&uf zLgY)Q78-#lA47DkgiuxM;aut@@5_GdLpv`d-wAR)2Y`2AocB(t zXqL<8CM@6keK9ZxukhvG130RG>7Vp`Xa*RZ9+O?;PfpQVT|9e1QczI5%r?%J@}Lrl z&JzI!-saqiaxRx1GlkrrSQW@MF>=Tx`EUwYW})XL9eBUaUy;LD#U|MLi=tkV!y{@v zuicV5yD_qs-(6%~@=rp^6YE7aa9?tqGicC08Qj$mZZQ$5UM}S;H>p!+ETnZfKMc!+ zmCNRQt#VMaYP=}9vB}IZPPfe);}uF2lIIyk_N*p7W1t>(k*>zpedi9m#D#D7Afjfx zW3uYOfODCO`q3E3yJN*WeeOX2p31HnC8}-Bx@#;zd#&YiHXDwaB?p_=d1k$kDPfn3 z2}(v$VCXPlK>2#&|LI@0fJIAvVna@Jj9av1KnZH4XdbFnrK73CgzdW|qf-Vm;PfJT z;5KFRNq~dmDE7bS&~6ZPiSp|4u_l+E0wakqgIC-+x3&_y>c>2my8H0-_nv|Ymtzyi zAEcT`onF$3tL*E}I*@KwE%J=dNr-uX1&up&`1a}dqB@;QR#n#6$gV`wot7@Ytlvth zPXccf1@2~Sjk~wZJq4E6!{keXN#-1MazBwFhwN<1S((&X?cFW#Qf--q2}SR?6?vrq zX?#t@Kmr}Kf*du|5A=C^&(<;NL>6CT4b>vnP-B~cihLE|km zzm)J~0%PkUe$iuL35-VaBk{mq@t7?me>^{(?X2|9@>k(6pA+jK1Ttu%jBRb1q8EI- zm`Lvlf>(pum;p>#v=T~kzY&Mzz5Yr14@TCDtpA*>zo_#6GF5QG%m4(ZFCsW0kRDaH#+B*t;OSyWcFs@GT4lt)%yjLa=|)7Ujo zAd-`-O+}U1Kh>joH>fCaDi5PLw`ASd*~=MLdD$2ON0*4J7^>eaQh#q~M9jV@x%>I^ zUSJXVMDZL(v@>sv73+MNO5b?gssg4qLns#u-#?iwv=oWEQob07;UkhosB6$AaU$Zw zee^Uz)NCXC;%Ssg_Qo)hHI!@HaIZlGeV^nbOHh-Ra-i5hb-FW*;|N7DTHisdY&aw9 zmrxSek^gCPOTh57)U}XJGOwYMj>~Z$4V|v~=g;jx#XfGSL8UZ&=Cr&5`=yvF#lu5| z(ea$VWutBGO7KW8Q<4Qq13UUy-mG~7r&e&6C3AMmX0gMvJJ>FZFz+jw2PHlc!>-(o z{TNqvw#fXhhsklJn|`hIEphvmoU(C8oAZdDcsTZs9NUJ|xM`Q$A6~amzG^)c!l@58 z_&&~Fk3mbA0O_vcuNt(mU#T#R>+_}XKhPB0D`9>!z}lY{HQU=;I+Z%;7A2%CB_5qE z{F{)@@UPZ2{mcA6?h-_y4CR&MQ-MXp=$<#B{iQg6Nt4PtI+e{dB~> z<3MRxkQf=XVZxGpYS1#lj4P8R)jz+0W?3b|J_Eo#8RO7bNlcETiT5F!g^24Viz&b<0icDU< zV<#lcvKqOPDU!fd-~H?6M^UK>*%iMSJ-3H(wk#z6;wqV`X&esRYn?J~x$5^XJixS6 zRp=VjTSJmmDI=#eM-uf$t51?aH1i8uYNnN(^VwxD_!|gvrK&uZ8u~>#Qz?I!oJgpZ z$u{-#PXloG_TswHzXjHHGUR-;Us5?yY3byn13Pq*&*u{Jy9@Z(2{jGg^|G^_r<7=! 
ze;f{-`cB4A?i2I<1C5GL8~1ltDjJ^xUjh4Q%oJGYypA)9h{}&@cJ-3nNRI58E^goQ z*$dG6BiKkqu~BF6M^|sFBxt<|&P3+RlbOOy?9&OB)=qhS2~FYGMHI~H9IP4dGo8eh zvS*%XhiKmrVh15!{r_?~pux(Jz8ubX6I%Nzq2Fn-9*9#QN5~!op-UpNh668yas}uZzs`ja%Bt|(`l@Hc zRW2|P?V*==PbwG@fPP!^#by20AX)+|FP#jx-l>l7BlGkp<>8nyS3g_6H!jbCn#iCk zsk%0;W(S1xJBZ=^jS_R2V=YnHYTtF|Dl~m)_Nr)fyk_j(ewilM4?9cOOXuB4n}fe{ z`1|*yi3|cpzg6HhKJ5mH?_Tzv z`K#4;{Hy8vtNd6yj@uup9dw2dF*Vb!zi+LDW3MqmFZGrVKp$y-dlpw5R6wj#Da`TvV22E64F7RFB+ zB*@bs{*834G?xY|baaq0i+9`IWY5Y&vdfq3sF$A9f6jni)&1J?+ifkN!Bu|~cpP4q z1Oh3hjE}DrZDd82L9Q)7K9w{vf)I7WL@KR>r4{~S6zcQ%u)_@g|9l@44xtIF|~ zqqb~VYwZ@J@p~YzwFj|RSgDO4OprU^SryrvW8O0%t#I+^RPLWWGYM>@&||KOrKEh3 zBEVyWwj2B^iu`O8M=iL1b{=(5>^UDz1u<8PD($puD$-UNGpt#@VhHWNj1r7YLr^@< zfE9#fP^ZQ2-&QN1KFy>~1A3@a$!&IwXrKDXrs7DS~=b4gpai zEtnyh%Kw`$jLqtr$aIC_f>N_PS!}vFtDU^0|9ZUoO763!yP%=7q|C=4@Y;y-W}Ua7 z|Kkh>UH{w7NZQhWHFO#TL#n%ae5TQ6#)xz-%-|j;Os!wXbv+kc#{{9v9n_%-K-k2x z6M^q&X3!wWmVTq&VZsZx=JUBz6G6yTulj@4!zMy|=Zj?e8G+%U2K$BrgAhW{4?AO` z-m%hc3sqbn_i+TBjD0TNjQ2-LSXZ$=w2Q?xJRQ*!v6)MoeJ&j!>9w^#IXoqqPIBeR zO<-UZB(Us*wcuvQnUgdsuDF)d-Y&=}&9R84DU@_->nB^}^4l!}TZfMs3u*FqcDx|{ z`Jp!}>2u8PPHRft@a7UBJY1#zX-(UL#Sp>NJKqU96C^|&c(-7K`iY+jUTdQ`8B>bk zOHM-?tI}MV+~lzf6QulWOFr2B&V@xm z1r+Mht$&DUzPx>lXEB!+x_DS!6C8V;FY zqm_N~;pvu3<{W*GgSC!;H4R7M3IF%ob*rP^%(*aTI9u}W<(_S?TJ@%3v;GM(ID}E3 zPH%qlwREyED&oJ`cBz9P=sC$%$WCF;=$IkijG+XDpVln0>W1B)s>UvqA?ORboxS%A z@V8gK6oucZL31K0YK-;6V3Xj#bupDlpX*kCLn={>F}uojzb=eZ&I;$o)8A`2%1sx| zcm@VN^{Sk7ZW~ASn6Bx4e={0t7HunZvP(C@&iYOSLf-!2WUkaSw8ETKuc>^m1Wu!I zN9lNpF?Ey@<@wk5j9|)MV4x4hkRsA4Uwb}CzXV9xmv}HtV+35?BF6eS6rH*FNwHV> zYy!V)wfnV@$+cTg{?OQ+nBo*NV^n>jmRcVZyRwSK-o3vy0q8^EpztN&0^bx_Bj)Pc z!l}el!m`TR3z>(murmFO){4k0jSZi=AtZRjCdA=cJ ze9if1e_+HAej+ULpF+nW(1-GRcE>_Ymh=MaX7Qe}{?rzgb=A#LwI?2tRb4WHs^`E> zYaoPqFG2<-rpG*n1wZw5JdtFvn8gCK!peK+>JX_5owh+ZJ(dG7N{N%stUTTvG|d&NVZh|9`d1GoA~{RU<&+HHGY)7 zLH`!mYO@55m?Q^FjpZ6lOu!?Xx6CuFp_CfUC2g8}Ac{<6XHDT$43s3@6c=-kcP%TC$_O2i|QyO5#?Pgp&Ca^E!%Q zGqkm=^q9W=x{Bm-7IYu^Wa=_}Bn~Kobg59;JzcA=*Y-BP_TV-n2E2kpWzJu7SO}lT z93T66)K&}!ypY)qE=hGl!>@WTph#?{;)oeqb-M~fv5=h zKYn`zqq|P#x!*hsRHQ6DF;yFDz(eu3Uq6u?F?gS|>D8HJEM`n>Ie5Tk0*eM)ti6k* zd0UNXzMb!)%f|m(<&HrT@762FhfH^N3=A=5ho58eX)P+F?%6Hq94znXFKADIw`G)$ zSi3Au?b&H9%jA^k+}N%^x+z+$s0a3e?1=)LEDPBi0c@*fbVv@8qRvVp4+GsJKYaqO zMvUafd#CrgAY=A;m;PO&sCVNWqI6rGd(0N2LuR<#JSuhwp)h+to1XQ2^mY>{C?gnF zE&xT~8g@@}5ULP%2ZBgXr%Kz>8G!$YU}Fl4kRYRyDRAutV*CrZlkOR$Qlce>qjMO) zP8Airu;|jMW?W9LydPPH!IMhb<@#lwm|T_f^pWmES{3jPUhnaRQOOY$APljTJSQzZ zTFI0UPkz*ls7yKMdOti#WZDoPhI4}~*G)f;Ai^-jkb55d+>NYa&r$l@f^+=v5rx&? 
zk~f7L`(pC1GjElTFG!iC1=@u}?iSff03ERL8fbDdN(KU9ISmpXUnCF{XP##hX^_7V zm1%WDiOCr06Zg9I-l$Gg9e0!Yu%dH1NCA(48&_`Ci+ybO78vt+& z+y?UA#Dqiktz^)w`T0W@@Y%fZXWKzdSsWjX4W6T2mvn^Af4Zy|1y%J@p8u1iLy=D~ z7~%DyHKc3d;m=ln@#Th+PWz#9oW{K2G6)Y@82&Ey?|gErtGSO7htHI0S-TzqUIDP7 z2_&=OCQ(HPJBpes#gz>s4hy7($Q}XJBm;e4-e>X$^zTZ_ZXv|C@RFuXf5 z$89^ril(mJ+&Xs`xS9KjBoL5&>s$;4_KAR9ybkdv=J!O^$AL*JI*oX$U0*Wg@6<@G z#CqCVQFEQogWHD1Mfs2OI7(LwTeMBL7TjJ3lX@F7(`YuZ@?%6NFtzGmOikH^t_4kt z_qs6_RG%@h0oA$p`g;nS8h-Q>KJ(W~P3YBG%P%$Kc#(RLU^IpBkPtddl&=Per76Xf zSe*Jt;>d0Bu$S9i2-30Z{Vwo9N|gl1ODLIRWU)`X6M;7Z59h^ezq>W-e~VGxfsxNk z-W|Y!bdtc~e+og&DS-9D0y_Vm9m={fPS7oq0r72c$g9au|yOUVBI3 zEhz{;=MoT8JwK44MZ|7*QqCV9-Au=RoM6DHTWrE7dFI6#A3Mn`DRs{v|6>s=5uwF) zxIkftdg*LcT*(%SEudO^&lU$)*a-$)MnBoje6_5WBsh#m=Z+I@NQOS}D(&nChJnd< znlQ0qqRj6%=peYGavgEP`HJM95iK=TEs=DgH8tDo_=(qDSsJKikd#DP7x;6{Sz~ffT+MLr><3KfkfjJ;>#-(u|Iq)s{RrD(I_}vHA95$&d({{P`i^c9#CYKf= z3!@X6{kw*bQ4y`d(T!XJ1k^olUva-`BYav|-K0Xi({OD3Q@^mo>i9k;x7PUw*V_B+v(FWZ%@-zr zUxtHYDBrZ6rxYa191oaF2c`Yw@ozdxpyH&whj| ziWSmD@9wfvz$yMYXBo>(51-K`=dbE-S1w0M>-3%f#vDMO+nAx{5w+mwhC2XU1;QH+ z4jr+|uUA4$-73mL^9HOz=+-~}VmBo-|Df1E3~V2_K4Wb!qZKs6Omg8~9jAL*Yd!bc zL8Qjr;`@)ClOZj0KeRR-|lzs2wvDSR?525`?4@AbAC1&w!spR*mV>T+9Oom83&1|o8Uy`|FwhVc? ziJf5HqI(>rH)T4obFQch#cg+OfpPke#xZ7Gd+@!-nR3s&=DyhzfA^ytdh_+wA3vgO zyj5JH*|2gTIXB0(X2Y))QnR-y7*v-ZT1}+fM9QkYR1(vj0qo0O4&58;0wR5MV0Lht=YYD8qtHT{;0* zf2+oX8r~ed2!5^vA42)@K4z*$>*931+|2%1@JdUol2_D|Kt^05@-BV#vyFm{j}I9d zJFepkvObC^)^~K07Ea+WPv_5b0QYKDlz6|}dxKT8M#Jm~_Oix!9miuT7mL0|5AxTm zbyo*m_iozB4vf*99hBY^DTa=>ETB~Cy~yRhF1E+3fW>&gBOZ|~z1L)JbDpnAX#;h9 zoFoq+y&PR*6!)b>xSVDx{zMorshN1-a0WO{QA*jJ*rW}3KOf`Ejzc_ba;m-RTBTT^=l=Bhmu&K6lg611 zw$e+xoMbnG6?uzMSz7+UVZSI_o~cP6Oc+1Xu7Qm2=Wl+-K3soQ|56RbDQfvQivF^s z&}Z@%c!Kn~$@u7qX5D^JGwa={ydZ2l6XsfoWgsH7y%CNoP_~{+UP7AE*lF z(4K(Vmz*vu(y`IgyJ;OKYW(TjJf*~hYs>kFD>sZ^zvG~BdnzzzyOy(2?9DcJKhLGl z&+K|rY9GK6{59w%Dp!O2>D)!^171Dg z&7Q>7T%jA6Je3?>z5CjoYX{cy8hL3+g!=qwT3>U$Cy((td?K%YSvw^;Ku3qk78TPr z2y?vVRnRq(*!-Zn$Dzfk%SwBtk&byo&z#I!{=Uw2gG|fz(BnL?1so`Y;qtK*7*)xt zIlqxz>J{dduQ#;5tZ2oB+dMXN@hvYuObXXUz|Rv&b`;}`D2JGH99~G8hVGbRnkEzn z2=B;&r{DhSf=0v)4$V9nL*$H4y?dJvcB!808&mFZi4AZD8A2qw z128=-dgK_dZy3(GGYq=aeY$&o(anBBFidC_?_`f#M%R^avzA-Tua30#N40JPwF+MG z*l^_Jmfo#v_Sy}5HVyio1$#85P<}dyJYaJmvbi@6 z8mWudd!h@3HvX$!ygL@d)sFz8MATOra=5dl=VpU!+~MuM;>j z)8S?;M@}G^QLfJsRSPX))JT3kz%k__qciEph1j|PpeG-&;a_7zbGNnohn%b#w-;93 zCOR&QW&Xowi#A*cTp`#4wB7?lN?->~|5C3|$GM-)KC?O>O5THEA)=hDNw^bhIRI*E zGk2Cz6&W)HL_K^Dd#{K4=Z@&Ih`lwWhX|^%Tq$Oo>(7B?`o{)ug~=V0LpmBS$q5=U z1oQyQw>tmRceR61&@?wZI1oXSlN{JiX%?v?c3RDRhEXvbCvFL);)mMp?d|-69n|6b zhg8!9h3D_VIsi>DFu5AV9K*OSu!-#dDOyE=LEeu!4Exsl4eGCKW{n)5LhI$hRRyzG zR-(!)I1N76DCT6-oQY2Ndv6}Hsv`;OYUa!!yfPQv1Ns$A$Kmd+xn9kN3BQusnr|sp zyXtwE4cnP;Z;`myrfg!sNi;X`dOTOg=o5+l>F7*ug4+QWHc8XKC|ll)5xY1|{^D~> z@=>EYkBB{6=Mkq`L#ga%>$ey;Y9&0MyGsL}V7*e85cuTMBsJYFc(@0?QQqw~`}eSk z%6CfAOZS)RKE-<~EBu);8w5{veD=|_Z9;I`n4~CLIY9o^Fnb#MUv(B+X?-pkrqq&g z4y%I}kBbjaH(53g#<%5aTtM1hT0-a5?w6M@Uk7v(wdpV5E*yZN4R zRWro(K%9QB+_hs@T^{_bx;E|&>j{;y#FodW6~bJp30=rF-dH#@#GswjQLSyK-YIJ+ zR{(*hjB&31)er(9Pc5V0=hDp{FR?y59r~o9G1=7Be{L)1IBARPE*mPP;nS-3@kz~P zyB%`n(RtaTE7#{{7WE&Rw9Ewfz^SOK@)ir{HX`^b!ll8in4)XRvExL^v*^Q87vf*` zl*Za)trQDmST>oed^JCfK&bBP(j-PDH&<8g(9E!F6VX`R$*Q~7q7TQ04x#4L$kw*? 
z>b|5MEx4AWCq?vBEN00N4D^fO6e~3Z?6ySb6 zGS5rE=A*D_HGM*0c2Z{-;R0uq39>zJ%&s#e63hPDUv2cfeK8EOwV?as((OIhMY(82 zAQMK+1rhr;QWnTdzp8vs1|Dk3j@P<=JUwt!Zp;ZR`1c=M9_^vI?Db z!buh+TtYbOzEdoTM3FYtTjnyVaGj<|c0;I#gqTbcN=^APIJtxp>ZO@b)TS2lv2(9C z$aX~mYNz1+=UpqAKuD7^)Fr|DD%-q00NGv+nUSz5b+S-NA%_=~lwLyYQ$(y{U|#^L1%7Z8^eb z@(*`mGoXa7Loh+ft3`>67vus8UUCUp6-_ z%G?5v@`z?B1WeW1v7%7UTNJkq?|$7z9@aC|Jfs7;Scua41!xzBFTh-%_khhcb#_jD zeVzif)9Q79+GN~X$EJOi@~rE4_J0Uv#v-q)Eof*N@!D%nOWC;_r6M^fX$js$m3D9i+?Fe6ud#4x=l>v)qvh09 zi9FIFA+}kv#4!@D`Z-DWo0@L)kFr`#AWTvR-WB~QUZNb>A|tr7&8 z*JZ%s2n_-(7GK`5otr-@&j24Y)%4GODkbWy9|{q5_Gmmz+vJ9ig%m|NocGAWj{Y;ZqeE183}CYnVeKUrL_H8#R4%IJ`YHgBAP6kJCTwxMmK zxdsKT+rX&q_l9`1&n&KN01IA02RZgdHletNq_8R4A}@R~L!=E?ow84j>c!ZWH^z<~0@)nzqhm<%-l z8-nY}!23>`&4_ZlcVo|OI%ZCls-uO4l7G)ELdgspH50gm<)#OQV6@ZBPSZ1Veb597 zJ%;2rC^AG$BA@Hc!5KsBHg}J|LI9YI{L}yg-wkqAwPc+O_#A*j)CoQ}_^4)Q#x#oE zz^Q-ACoyb!iUqSfv`IeOcBv~g<+G;a61<~9w@($EU+j%@j@~06<{bhs zd5?9KRp7mBf#s*0;PE!r@E?YhUG6;SE6cMnm=Tn%pFGM9Z7R3`;3=Zz7Mugw38ptu zy_^SN?wF_)ZZh8q$W9ewnY5qc%GSn_aj42O%QDZWlrS#*pgDbWYx@jUaVdintu3dQ zTAThONuRz3ro`(w$uXj0>ii{u@D?9KqZ|SOc=^Qk?a-h5BN!N{!{R5`wbnfi;|>Xk ztbXIBrj9CL=WC%sbMf+48?d`wo7TzJ8@P?ZC)W6R?CylzE1nH%>zYcQEzKdR>Fuy? zS?77s1n$?{8E^P+kY0lQIda1LBG`n0KagXXk6b*uBVo30KaQ{`DgSou8wJ~e*V9}t zjz@m`SbwAZq`&4ceIWOGn(-a4jcf?|iy7WTb4L3#T4bs@;UCb%vHN|PE)%@8(XCt_ zh@%^-TNwf-LLtBquxC%yiHy3R^(D+ZGq2A)v=Q}tGyZtstJC$BalC>uJ;fx?`;OZG zX(Wq4$b;hZO`1k{R%+}6c6GTg*;sU%g>>!Gs~g?&sv1oe{@xDS-D2`pM4}$$%kj4> ze*Xcv23nt>N)BT|C(i+3=tv4=LFMh$Q2=B_MO@-X^Q|;4#l`j`%`z0So37$2vE=sy z8_(S_EN=tcQbj_g0<<48r%@ufZ83ZN+~9diE#mV*%E2*whBg0T_-YKEi3dB*Kg0{-hAms;WAl&<^M0Tlg+11t1v+0 zmNB6u_%QG$q7_&gjsam|u&cfg=OU5*#xNQ)^TpYGbwT~n#gZJ+!-l7jpu4u zw{hjzp@v0kJys>k>2#5H1Mk;Eldt8g{a!by2wCUWUlZpZ7oU9>dSr@Mz3iEtUGzph1)S8EMlM zk1#mpq~5T@=gPms0#BlvK)hUUNyJJmOxAKVLx@U6DO`ajt%LL?P#KDnflwqvVqiSK zW@2ydc^MGF-Rw5N3jk^xE#koD9Ic*`-Ao-@|6Y}Fk@qzUl)GKq5Dz=|v)t<=O8cP| zWB2fIyMExFRp5tVG?o2$vh1R15y`Jmmvnd#43T=N(k~2$mOp`2ez6c}E#?2OhFU?$ zt>TLY8d9{DSkUc%UGCh*sF_Odw^1HKXH2SGI#Froi~2pxxq_B~HO7k8bVG1pD5330 z7Dclh{aBx0Ys2tw@w}moS|*ug{kvJwvky^_G#`ui9*~`iegH8g0Jraj_!BPIkIDhy zrjq&|CYr@63$g>zp<3A7@8o>R(C1-4p*ic_`y4289xBr8bBbnA_>>uOFph+;lt;7V zds;Oi$ImtLmXFT?|JsOHgXuqn0}Q~5gLuxl8@L$-*^b_LzKRHlL{@}a{C+KXwP>WA zq+i^V`m;1y5Ys9$L*A(59l6a54ap_N#OKKTxQ2-=1_h=sEbo-s7|qEfk-Ywr-}9%I zgH6T3fd2P~G>8H43jEye$P2P9?UnMiOseSESkyasC9pQPX1V2y;b-*S%g*;I8*Uin zs)4P(mA?+Vf2_4YCPu;wkf`}#yZ*{w4L;NaheYq*i+D(zD&+Ac-|cXdoz-IE_)ZeG zt1GYe^P!R47?U9QCWKe<$;{t6-gc|B_B0q5Jp|dXzyji;Y2&36j?0AqhtOaoa0jMM`uH*v+yb$gq!?>rMaN!$fvz4_U!-L5OG z-PO=G`h2J2wU#S?qlG4xH^F@tP;3UFYeRv?9wY18^9Q7Zvu~prF(PHNGX*_u6pWE< z$saM}DH|)O#-n6URuzAuF$_!|;Wt#60>6UE46O`Es@=Q6 zlp9@at7hKSFe0n7U`A-A2cUK(KE*#We&3?YhUXZiEYB#4^bVxMxGn3KSy*aA3?O9} z=hVT*1h2%c!uiee7idrRAuB*fOWPUd&)U!dQ|&2O7aI|u{;jdQ9o;_FF5ejJKutFGBS38XVLKUc3jkRQm=~9 zknAawcuz3C=ddH5hq@K%hXaaY^L4&H*Q_T$AT-)Cz{5d>GoG@uS^5seUDjuPRIE@N zvu~OXZmwoj=w>O6wIc8eGX(A5;=;yRS=r;Krq{2C4mE=bm{sOG&FTh^;1DNSYyl4h zA1m5@#Zosyo;NW$Ev1Nex@A6*Nj7y@skykLLYw!$xm=R(b&7aG=VPsn5h;KF;{qe* z2btTS&9diYg{dS4x7sDfeYS~4R7SPCD6zjMdQw)zIz^Cubwj^B@+u0m`#+b}S+$S6 zWt)0hd%Wi#0!B#xYZ^Pw%e9vDa&5GKP+Pp)eU5ev~(b@ zi8DX&hRhbMh+7VIty}vcrD$p6BK8A|uIp#SZ+tf2c-{3k-xEu$;rQf?c)r{S-&zHM zFZA1uT#prW7G3??wV#dyC$OliVLH{9!iZ#ae3DJ>?&>HDvJ2NDs$gL^)@&$H6$SvnAu-KS<}y=>K%R(9jr-02Zx#)2y7YGG&#MB;`=c7 z9_fxpEmdD?WT%B{=>v*HNMmu!r<$2z5Gr9_!l&i%I7W`@XF6@$*SsQP^i)rtD0@s@ zQ6^WWOT|so3Jfd;o*~DiTKUGlk&=R(u{Zji<~gY3aCHbNqODk?Ol^e=1wZ{m=uadH zpV|f@(D=+@ELqL%&?j|-#R@UY{Y}qDObWtzGaeIeY7?VTx-eoa;`@li84##}!!^nJ zEjAV7UW_Z!aLH#96r7oOhL_~*L5_l1TbjRiL8)1_fTuX?n*gU$#qI&ng6Z=7_|SWp 
z#R;Pb&}h;c)S$B`{jSf9s~L9AAfe1<$pzNmD;S##5~y^AI76)< zdsoYYm4<&gNxsG5>Mf@oi4Bu=(T-ZS%^v}zA zPeRu~M}I++#a-R4&K+mC&Eb5&tu3Uq!7aa`;DwYFDjpa4E-!>2lz~yvmW48NKdkbO ze^zM0q+roI{~jqP{{dR7&+Xfgk?5gOr=sG|tsD0NM~>k70HSLyw- zn~Rgq1RvzNo^wSRG__OW8^dJ~2FF`=mOLj|5gFSnF$d z#p#4?Y6vlhH7#~Rd_KXOA@ClUS}}tmKk5b}QKFv6CfG2VNmt6Pne#s`RWX}dqRc!m zH{f$E&h2Nu2~Ad3vFsKSt~}igKu@9fs)V35tF^9SoneeKzDo6Pjytj$QxC+9ez}r` zUg~}ySAZLTTi>}HQ&(Q!6=BctWNwFKt%Grmf}6867SSj2`F&V7xN_3f!{fey_=8h<@Vo4Vp~olk+2j(gW;q)c+#R&=w}i`7MKcH}H4 zVv5XMqC#Gk>fzir`*cQaS`hj-9Ku{1)*0NylJ#e0yUg{clfWby!)2f&2cHQyW%yH? zdAr%VvGhrENch{HyXSI3H2X=M&J>)*^L-3?UVIR!*n-!U{y*2RJKhX&s6|#2tggN5 z67q^fHYaj9-0gmxTtyLHxHCAYv_t;6>L(rf({kJ7`;`M82aoL0rQ$h~lh&nAwB)R^ zGD`gr3*oKV6XsUn{w`QTdWv1DPbop=z0n3r-H;+tJ+fk?I+KEyjGZGWb0ktzxvGs` zTT%d7=Dq*uBs%TIeHH~f**fIGDghH-s+9Vlh~nWk<>0*Kj%6aPlFVZRGz+E?jt@}z zEDk7P03vr=Kc@eK04YYciNlP%ghf+~`|z0m5-%gux$4|5bm57#Mfc-HD{SRl|#40UKNu_zXbHtvzsq?nf8`x|41_mCrfrhs+Z-nJts|?h}ZOp5V z=mWWnmj9ZoQ8qheP^-}z+lm*l6uk5uX6v{Yx>EhvN0bQjYGB7Hz9#o*_5WVjM+{=n z0#_E5RYRR{bl9bivu+BXAzziB0|5mYxEgS4M4hN;rq;V}6ciXJqn1sr)rE~A!GXeX zxj@4`^Fc^Ne-|vNuZa3m%s4ounm*(Fr!8@cW|v;$d=vai`weFK6WQCnQnhBUmvu^b z_1`0!xB(AwVF97_IVeYafX_m%9$@9Q5iLjmUET1TH_Y%HcXtOzEW-BNh^yTfUY}CM z=9%r!4-W(reWx|M=8keogWVw*g}P;Hblcwce?CIir!s+>Zh%fFS7-V016kM!Mr(S&DCGFB4&oCWLG_75 zzSS~5K2Z#?Olf*;S7(!|7wFQ=r}{ViLvvSrLj=WlUVI$x@=wYD9Q#g*9T(LgtH&lj zPBNAWw`x*LFIjqO3GT=u|KDansvon4RLi$ty(~b*TNzB9(*s0QTti!W zHRg_~#_T0mTC%VJC6(g*b$@_?B&hXPvQ%q2x1W%|wu8y7!wOpEtwi z)sziKv}Gu;9Nt@TL!fYo@5mZ( z7IS>K8Z=2V1hfo2xAClQB4Z;MDlhc46eRFyqXJDb$T~$-{IVv?imj{I0&kWDR@*-{ z%V-flXn(ZMEetLY+VD$@Rn=`@&f}60Fc#4 zj_1iwpeD}g%Xv;=(%;6U{Hk&zL zRzeseLWvd!*FyI{KvEwj{vz~6@ky%01b5M5Sf}&P>8pgMEG);41?GvFvE+XUA^gx$ ztc-VYINv&PG)dVt+9YfFn>9=BXIbVq1QwKKE;=qHCprgm48)GaZL7r(Q{@px7oe^f z58hsLdysxSFJzDoTw1X$UvA6|1scjuOd$Y_B7%&J(K9Rj3wiL)aaBA}$vJGSb@bj# zykJPVm6a8))713*4Jrd%-c)seBy7vq)B0HG@j9R7k$3o?MMjxwP`{x4n8JKVps4Ox z+|UVkS+|XCef9=%XOEGRQ%B>6`WA~(&0)2PBzNCwk;nb)PkE2(+Yf3eQk!lrIy}NT z@`sZYs=J`zP&>QJWt!iR^E)y?54=E$*fg-*KPx-|R;LW7qiw`mFzjqv9FRI2sR&rZ zO*`auC=D9}9XchP+S3A>^(gq8QT(~{9i@!Uq%b=r*x*E@f-sFn;VGh6XHc7<#~Zem zsWV_#{3{ml`5K|#{@2(B&-qrml}u@}x8hwEojkN}%Z}5=M-P-O9%s0L2CD~kT*4+& zZ---yIP3ewK;2=D3SQG>($UvL&3B!~-l(1N9gWU*`%W#M#$8*rr6}|Nx*TGbtFc=5 zz~cIBaU$O5B;T7Z6{w)4+AC*N-*g+FEPcas1iaB%5J05*)O+c^&b??`gCn?Hy1qZs zJD_GNvIMF`S?vC=yi>wX*o|H$^L$jh!s52_x6iT-yDGTRjQMu?_<@NM_81mTeOh3~ zp$_Kg(X9=afM-DAdH^7E3^zegD$D@-enk*1>fR?%8eC2eb)+MaA}28mTf+j%O>%Z2 z9pbeWRn4z>?l1P!v2@Z|+>}Sf$iMB~O^!8dG*!X8BO+gmLiYtu;dW3g&o+p84zqIdzU)W^keLlkmBDm`4HY*d=>FQ%Vyaav;gNSIFY7Fws zCzOvkzV=5bV|VGMK-2$dD=ttXVZnyj{c3^LKd2r7f!nMCRvE8@1%J?iq9;$j-++M} znU1oHDXOVZFiv9yDu@<@s^s3J8kIxt^9o_3pPXLUflusET?r!Ln`Jq%z=~fn=9>T6 zu>JK3Yu#y;q*IT?^(*K7;v+MX-?!J5Nk8`(X=-hXh!B!$Fytz>yzU=DyOAjTR>lT6 zoSukZF&uj=i*&R=#dE~(gIqbspS!ZWtJS0a?HpqZ@Ug@xc=|>c1%wAeHW_-Mnw+y= z${caWtncn&zGS@k$JQaPx95;E`v{Dq-GmSNKpd^Ox;oqKKTeNDPkLLn;uAk|8pziq zW+-9acJN8CBXU;;y#5&YsMmDXFBP&qG$4`e1Gcc&5`*i%JX@+6ulRu&XbsLEIZ)KNEEhgD1;SXeQj<}eG`Ug*lB2@l#n&0&}X^M}1z7{sf_Z@~}b9Aq*BPW1rLRJ+NqPpb+%)nG+dXAx$ z<9r9S;w0}~qy3oM4i2AK9vOfS!_Be;d&vr9yM3Oz8MEvKU6LOMuG{;W(SHBj&VEYF z5RfBzZknkc7QGEJU*=|`#YC@aC_)JttC;qyJTzY|HP5TzEW!RT^Z)k{3clsyDW4I@ zdAbjg%aQ!DLCA<@M4vDDgU1_$IORv5p1i~*ojyzhA*X;`cGrArl`1ys#IT}ULkghIlS-y$Q2MUpD6p=i8Y5s?> ze(YRY*UtZRi0Mm(K|U}Ws9MO$>etTWGJl-CG=ZPwh?S;D!R0yjvfs!sEhn~Z0y`?2 zkeF29#w!Hj*!DbsdC#qe2vKi^0IAd?oEwR#)j^i!!^}^oq>r>!%gQg3m~xY+WYr`e zJvL6q$*;HKubb^H9@(z#bn}Pib^miVM7Y^2&NP6l5aF^_GWQ6&oZ~J+;H9-{fI}4E zkkmM9SCyV(7U)pdrKeZS-4vL}Za-v5d%2?9{dWjB440Y8d%ifYTZZW2U2oLwRm9Fr 
z;Ef1cc&$9J3pD5IT(N|zShvfI$L|DX9tu%l@=sB1s31?eZ^D#}U)em}ees52KIy%L z_XXitEaGUf>$X3;KncY|>lVtvaMfTAXzd?^p1*1!dU`b48ZWcYZ^=D`sI#xO4j|ii z{iVv!$$|h`k$GASU%B~13~xI+(bg(em<-@QXNMW)0)E!&zw>BA)nVH>tB8?-tzOvp!g*gKrI)ao! zu@Q^XOJEKT27o=FF{Rr>79kSl5trarVwtjL7BFW>5Vystx z=Q&6>J+==C%eavj?X|5~70L?9g>v>%N|Uz8V|x?^IJ#c^FZAIT2>Hs&_!LL7(^#!Z z#;(yK%`VX3TuSG+5w~YxeCgoP&;L?!o`hR;m>|lE!bxNWnk8&(NbqKlh7QAN zP1Y-Kd;g|XMH{U=`?fk)wxE<-jmNuxtd#F(Su>1PDYpeaAZr=8!`pO z-MKNf2nE*#{82RORbs{LkLh%P-@p%2;2P!HV@`T7Y&hPqCtSBPwR1Ipq zdqkx!YSb0s+u%|P8OUu!NBMjRm8Qs@f7$m>1{cti*?!hk70SgKvHV)m=+Pj4=?IE{v z>kU=Q@v=KSN)RgVn1tr$Nb)isrmw>rWg@A>z4@WWB6Hk(U@|>#GrVLxHH0L#MpL#jKFq%d z{^}qIdWKqgmxA_=K@9@Aa!aE5?CkNR))gv&+d=8hOkAV=yNmKfqFA0MD^h)vM=_In zvKzZCg5_QEljkcwHWni0+5uO}b5rb?hFk4ko{Gezv;Pb#<{w{dLI)ak?%eIGL9|4~ z9p2Mr-;p&)j$v<>)kJp!bq^mujF6rLvmFy<;U!$yDqvN}6v>oy=n0X~uMWQ7UgE=y z0_}_EsvOxizYEKm2}+9gCdgs9qqXvj^6c< z(^;=ZHW71~pq0J}jc$rKEaDx;W8A!|5)Htfy`mA8SNAZ{5&nnJ79s@C${ewlvvlxi zY_+S(z-AKD(iH5H%l_cj6<@~cN|#Qzow}9dq3JqSuU8<6auK-Pb;@pjR=hQ~2YI>< z3T>-3pD}EU>T{u8ZoCIhqp(e571w6H)lH8uY$dHLT`}eTqE+{XLqNEL^iA0&G)lqO zwzg=jv+EK9*Qkm)G~5F@VJGNSu9xmL`0#O?q{6K6(Q69pm#8jt0%($BiaD(ONDQj=Ub*o z2V}RWiji49}&6Q6=D4MxM@6g)maZ`|hZ{4wjO-*p5QYU8s znwURvU+KjB^_`$|mZOByyqdqQz3jGUE-4!hyoW<7N<&poiZ%NNy)q9%-^@R;13kz@ zeb7}0foNGn35QRUYL%^%vW;ulNaCKSdV=6u;J;FPD>n3qe+aFCkRP;-C(jKz9h>@E zr0PWYYT&>w$CWLubn7ns8Li3XC~7j{)=5F+ zzOk+0fTmk8=C#=&Vs55^Tw#bvLBeHUh)o)Ajbja#UwD%=svot~?t9G)P^>MzbrY&` zjTaQf?SCEjmFh(?y!w?sIqp7(i#`Qf-QIw?%c!h(J51~68)Q067th^4X^nBqvfBD0 z66??jsgur!Z%|YKi}VRRbZw)A+ed$7(o?zII*>y&vZdxaJ?6>Vpr+9m`Wpo$j;%5J z)Q`@K-fOCW^>y}8L>SpKvR(t+MGN#1S6i1R^9AtX%wdFXK5zgwdHgazT?T>ackkKj<0#g$ ztI0?!4AkQkGT`(1a4r>J#;xZh_#9`b=((A=r*gH;x~OdBg#5zzsph}fP8 zI-b`jTaf5RkoGzq&MACR$B;78fU@{B_~u?nfAojbhZD0KqQOG1NtDanyz@W|Fz%AGtbda1#}kGsmrsFJ1fDXYO&*+mPf`p{Lk;g@e2rLKyQATGTByrvTs&O zK0OU4qL(UY!l#tN3Wv+hP!H(8Nqc3Kt?0aQAuJt9futsn$0>?Z^ zcfa1~>H7lFuril5yoYG#r1A7*g~5L{m=SVBEMv0(K`=&7e9j-?W}Z!Nc7~Vq)?v47 z^%fsoR$;)&aL@z=M7NUARL&iB*0$QYAuMn|4HKH+&@@Y8nzagg$DG&40jhrdC!V4qN1VEi+ijTjIB&i_kdW8nS4hB>x;G+q0TA(z!Qf*_Se|3nSW2@Ex5mR><$>%ShQyP0&G3XIZGIy~I}K*{6+1(eLUE zoUHF?DVXYc$=fbND45rciYrG%Z+apgbq?Y2U^oQGiOAqFo{*RiaI4>{S==w!rJ3smn%+b z5YlK7(nB(>Z`@B}=kEB$ikxdISB9-e6N^K7KK@W-vO_hUbvCVWqKT;*rf_mY#+yp- zejE1dg`ZCgLPbU8Xs3}7Mxv{zrQ~?2Jd;nfZ1cXF5sih%m~U^ z^rN$Qv5DkKeX#6St7SV>-=Z5clO1&0!qv-;7a>O%&G7u`;9%szZt3ycEh1Am{MKQr zY1@&Q;r@1f3&DdNu#=bBI-&`9n{--`t|K#RYy#|&a8-hAZ8Je(Vb8pWTG%7HE|;;d zz-jJd>#l^kZX|QbzJw9&A8m%~sKEjkXUa)2;a27qvfBP!Sjk46_1~|(PY$`2LGKT@ z4~Goq`HYUf&fnr~8^Fy5>IWQFoMa%$;O<^jMI_92Pu&-%t48vVwGJ4c-i>vog2FuS zcg47S*-4*&2Z+b<>rgob+uhhG@aNDQJ;d=Cr!^EAEta+=i1iG73` z49E*AtBD}5B&+vS|6Wga8NU>c;r7vnpR}J?$On3U#rnQ~e9ZGdhepQ<5b`OHnFp`LTf^hNR=abF zm%Wd6wF8!X4$P;%@ePCbs$@}=`Yk0sqcT%SVK+)#Y8MBuX1)3Gxk}e>9W1I~=nB_4 zUyVdI7v!Nl>tua{`QTT3=eo)k-s4W_;jY|YuHErozKEG2qs*M;fvjWJv&Go_2uHhl z-5<)y`DGIB3$JVt?^dUc@|sZ;Q_QSx%gO5g#;5zjbzn#vCS!!)%{D_M?*sbsy!uRl zsj_gNW5>@MuIQ!jEUo_fI?><0fuka2_TnhQFvul0g-Pr*e}2*Cve}dF_@GT7({zu4 z63WBNjihdu(2+w@(ADnqyD=Q#c)ncmM>ulZoQQ^p12y4B(t-}>gFL!KAX&6+!@=Ca z#-e@OpsKXd4zu@Q@dv8uITu=@HK7>cI_5$&Spkum0ZlxHMdB?{FuMbGBsHaqdpe~| z{|js}QjUkO%*DVB+-F}qc;DzjBVnNHHVxk8I{2%evDmdYp81R>)}*&2J|kAC?-Q;Y zEUu5>0ZbrcDwn{xb2=?vRG`wjvTkjQ#*PhEEfTlV{#Qdwk;t1`#%||^idLt^T2hTk zDX;yl>N?LCoy(4I^BRt7*_FVk-)&)M4L2{KTYqH`YcrqWEIlr!V!-D8XuC6r))=*w zA(w_+Tac7348`I)aCn5#n7r_k5wg)hS-qpv$O)KfevYmLwLc|>rAD7_aY!r>Qdnyrf;ZI~oVfUj9jMv$ZT z@{kE%Q80h7fFQHZ$NpYix(UQ-EORi~c5qOrR&ru^KjGz77_ z69IO>=Fj9{z&v~iHHwNi5~sMjG~c+AmU;5eO^)%=YVOO->C)jvyg=yX${Z4b(+M># 
z1S{YkZA$#zC?QGq&JG~^nk)Pza{M8G+~9{ZD}OCkWTjGK0SWiJ6QzgG*-sG=0CMYd za}2N9v}OJ%i#(nc@%7i)H;@R#c4$go51kOnRRasz&`wjOTw#q5{&3t?+lb+@NQwiU*2!pX-L-g%0M`fx>gn!u`14Ufhcl_q22*>M(mk#aoYi6;Rx%+_2kD8FzSh(6z!+KIflhi;vq?jOLx zCZYqM)_O#laRA^RrUe6R01sN9(J(ybZaj=is-T|#tZ^vV;OYdBZ~Mb+s%|NLj!NZg zI$R@-ywZ+1qTH_S?;iP1?3PF!WJ4a?Nr|T(D+?L&4%=NyrdeOMNo@U>l8}{R($e`>s#@0N-&pa{X$BBZNsi;WPE)8 z3x?}3UIu)Xo~=LT9)ZkTSva?+k8(CoVL}o&0Nn&|jL4uYF6LwAaeF1+|K&(UzaZD}Utok<#H{UDZFT3%PAD0I8^T;DncuT@Hu}bl`Mdg@q^vH1=YHUaEi2BnZ`8Y}vYr79;(A^D+HLZ)z=fbt|@~v}L3^ ziS=~x6gjW!x9W+u%R7GRA}nG2k^Q30dG`eS+}&d}B4R<)2Ws7)XSo8ztVe-DO3UFx z2JnPK69khPYEw|EZepZ>@q+2&n+sA0{g|a0<{2CU^(+a)R8qpslqAt3hK(!PHH*nC zy1Fl2Q@jtXPzsz%nuG8}g-f6wpVatPWhu72$Y$q{wGRN4Lq!mN6flGTW9cf~nqa^7 z=tjD`LApV@yFoxYB_|TTA~3p@?nZKSmy`n1-7SK^D2WjRwr~Ef?{B!DbDndZbBD{m zRYSjcMlpbD&8YWyP+TeME<;O^%Q#w7M~Pz?hbdxeYp7^k=WxSNsV)CPBvsUha(??~ zM=gRg_qO>!tT+xo{_|P@W2G*UTaOyR2E?~0oLA#{a~c4KJp%Fr|! z=|lTXwzS|2y_l+wTjd~cRgK>*7&XOu!H?|JiRtJ3#e@F-(`Y^i*dJ7NAUh71)T*}F zO8fXUeV%>XF!s{hDwL}@6u{S^J^?J_RzK>c&v7cU*2#LAS%5hh;7hsN344=@~p7db$aj2CL!M)R!f5 zL|V_BsINCBZX%xI@|NUH;X5tCb8?17`*J-WloL13W4V;%M9I>GU$s=fV?#Ny*2^71 zD=4M;kCRVC+&?~|2!b7|LAcP?yD5Xghxn`H{s~@qw0BsZZp9k~(wEwTN#9G&fKE!w zE5?`-7meMt_)1<5Q~>0x`cN9{J!}cBP8*s057f*I%G7=%ZUfPzUa`^d$wA7#8!&<@ z|KhBaaxP}=xq#k1^mqZeVYCmnh>JTv7t5hNtpu*dr8es^&bR9S5P|^EF|GA`jN}A$ zxWU%;#P9IyRFU`?Hc?Z0h~8G00o${Qc4Gmm4?xIz+pU7T>td}m$tQIovscUcfGnr2 zy!YjP_?IeDkU9sVh=+?5eHnND;W&`l(o1sQ{b5z~xIvE0Z)-NVY6`Dn!s=&)n6 z6``)q2sWI9LaWe`(l-zMNx8v}2uF!A$tm z_?7{rA%v&?kaQed37@d6$DVVapZdHKR9)m4RY0VKky*8K0|y1^gTJV8F^N9{FX(261Sx#+5kN5q9)KSA#*ssEw4plF!MOqLAqW|P}VV@nu44R8K zOunH5>~mc$ESCR%=FiD|SY4+kxp zQ5H*KIbJ!{r}z=o*yy*{0?iXE2vCxkIGz_%%a2;YY%lJh-di$B*6HajCHV`cjhKkN5Ukmh_6~j(dWX$(|U^$zW7T;(R%<$^kUL7PgMj$$_k%|h< zIol+B7B6Dk<=&U~2zWNODrOj0XI?x|$S=J%^UrRM_j9mEgS({7LylAXZhW$u(bcG4 zDM~h9S5jSMtqB4_uAv!}pxZN6*S6Q9@402K4l)njr2PDD zQ?{ZrENiJaf-r_v(wrP1eqJeBe}vK8Z7)5?-wE|Cf5ZD%Lx-Vg?p6yPA|@KIwZW4P zJUI!9{;z9An^Ghx!EJFB!Wjge+Gi?VGB)U_S`zGm=CV;Yg%l7Lk{}h<2##kN7F@@t z@4ACG3=-%6DGr(kcRRL8FQz5#8i+8h{aD{Fp>>~E@N9GUPa4wYY2XcMrtW}25DCG8 zO8svnV*F35Y-3~#ll5UHw`zI2`1C5qEdr=2u!E&A$Uk)9vj*9EtKk{pha^Pi4~{)Z+Hgo(i_AxJ^> zv{P4`p=wU<4y@fdYtQ?_gC&#Okr-h0nNo>CdRajpd1J?EEU?l*-)EbaGFPDM*lpU%Suw(yJ&^;92+N3d4jljz&b)8DiQl^D^UG;qc6ol zxt=Q*#v+kECg=rCT^cB~or374e^xa_-)b+|xTQJ|9e#jSp1)O(?yoXOt!B&$B>V-y zitnqduA0Q==0Ds}uv?5z^<`(@5tM7YHb@+QH2daTskrm6hW-N3baaWi#J7JbaGpcjPRHtbAd1DB-XxHCA&;s)s#WNF zSOWds9a^W1Thz|?5XF)w>b`6p<=_{Ucbr(r0x=7f`pjh^S#bApp5zjLx4Jsi4n1&5 z{_Yv13@_R^ag$Ln>|=g5QT{3oj6wbC+jdpPT|X&`UTM!yy03@Tm?VBWO3dv{VPctG z2$mPrL>0TgBJ<1lNsagf87FY}Y>T*- z@0d->dbUwRGXiLRu4W^WTb>li5r|@cIUKjzw~VqkDw3D3b}JIwkVjA)zr*FfTKHmm zyW%2h=<4mMXx!E50xCCiL)yl<;1uG90S@|uct#v=3fdH%{JH`=fp_0U=BIily$gv^ z=n$z&9A5pv&%Ba%<_t4BaZe`$vYe&U%t~i!98~@xbObPZ^&|HD8xP0ykvp#Ky7ZnrjM=tm7t z6)D#{?JcKSWhRpm0kG^=1xHEIXD6$Z(m$#VRnmU5&SJ#P+lT5&6Y{k&^&2}IjF-qz z&*z(6f-_mK81fF{mA3y5K~;*L?`bBl9o@M*Sy0GXGb7L~^A+`8P|Ye7iFsk;o_YO9 zpM1I{Y+W3g)cF-S0D2*xq}NugPmXxj#_r}Z8$ofjwyfMD6c~t?KB*yA$A~g zV#XilkBJLzwc#-xOWd=8TRSzKZ4NA(sq9=Y>|8sA(c6S&hnC6ggg!ANGY|-P3h*is zG$=Ic`;3Z2_LwTVDG=VrNN((hyzWGdbkmQqG)?ifufn&DZGE_~Mz;8Sw*FB*seoeejJrY^XfVW_9-E99M!J6`+T z(mwiqeyrFID6zn?>~+UXb|QYAwq2%~3={LJy4%BHYxBc;jK!@;;DQBUlYG{{VO(+n zCfc4oW?c&m6{RD7-6UfYi2_i}?p(-xgtQI53=f8;o#63WUp0b@=lFgr4YSQxkfc*x zgT@-mcI&4|6;J5f)=;awmCMv2z$zi)a;#1IkjTd_K$kn!NK}IMMgOg3z_T{I|0g0J znFCRtc=AF1XlW_H8c_ZIIq}q{>3+zKlTXWTh|@DHl7e_2=N)Re&)3IYD5W?%mMZR7 zRtOPOnf?)64#ll#@zW7Tm?HX0N~}}%WGOcHl<|~uG@4s%;@cOwF)T=rG>IF1#0IRQ zi^^;Md^nGQO2LI%4=rHJKNS0|695pPGyg{WIMDEubjae(SB^Kd-&PZP5Q6I)lch{r 
zH@ab604Pm&7sYu6J+lw>vu&wJc|#qN_0r$Pd7D(qJdaqk8t=oy8zWbEE!MJk)RT#5xctHK9&|Fu;}pO%D-<&d0VR_ z?nIJ+OWMiEmJ4S17K6MH)~_Qkth050#JeMg2cA)uU2=ZPCUqM z^+i9$2eXJx&k=m$41^X;zLnZrHr&uNVv19!Bzl+bncyBkj(b0aQ+-EJi6M_+N2Z@X zPD(TejWO7Iz3`a8R0-NRA^36QH;LEAU7+f~y&bQK$1T%{yU!aVY&Y|Hjzajnclhr5JH z2;sy-C}(7fe7fwWO0=r(aY*qq$=7KDil;dc*Ylr$*9Vnf$P_$lBM(yEcUBOdI;8Q^ zXLeuXn9)n0+^Q3#jFphDKIvAOKvpXFgW=Gvvc6tqVJ0GO>KW0nv<7EDOtGMZVgh1u z45T^dP{y%b*qEB(J@~yhW^023_)S_LI~|H@c54HF1}eo1ekMp(`L4pKGpsVpljaPi z=qvFS?Q0{282OJJlRUHTSkQtNpM#Vaw&tn|4Yxi?az%S_6oZ{RSl`!@_3aVxOm3pW zP_mr3CU8#ZwcU;!AJjx#(FFbj`&oq*&&`}fT@_tBTp#7CK3nyczB2gEZuM(wW z98a-a1BVGRfE3bcCVsW28~jeJ`O#+R5MG2^?q!Gar!M+(BFs-n!Hc zQeWTnYfV`lEplwQNmBvrW|fEa7Q5UsN8V3AH`ILuNboQcd_#{PuW{OkD=_jxnRZ-Mtx7zp zn+~lt6`uP6kTzw?}_t1(9&kJJKVe%}`bnzp`kE}MLh(;OSO?!@mX8Sr^ zS2MMi-(Ebm(Y(c6idXtY``yFSKq;dfW}YSIhRXPXI0wxKk3i$Mdo z5tg28u^i9fJ~_bRiG0gc;4SBaB9aN9K7inhS7PUt-Zx=0>s)U&>kYV>4&#t(=<;4Cx z(uvr#-a9+cCkGI41qNr^DT4P`4zKm%f=Bju+1A~aV28~p)iNRmnF6-t?5-T&Svcf{ z5!@%t-x<#8n8k6@UNLjlC6c4hbcUXdGxQ|A!EA>%(tt?sjQnezoK2P=xvWh_kkgT~A zWo{C^pT9zjO%zt;2~K6wDb!+xgO-W~6iuG+8XTs_;)jnr;eZd9mThbz-*vQ#@)*oDV$DG~1OI}O~d8!}0N zx^A+f#qmFRR4D_PS)A$=y)R_Yxp)akPXu58YF~k{gYw=?_XaDwLUw8@-}3bVk9DNXSahyD*D_SHPkz1#kPvh$YJn@d<+ego=gvTohp7UXP0rmf(Km zp%jLbPBF-6_LcRxWVtTtpdnP@r%!t!4~&ShXMLoWI$32i6m?2>(U@4q#FQNpD9`7R z;PnCNb=uP8T2bwmlPNgl2&{y=!@ByaYDEZDtDV<KT$0^vlIDUUv2{9s!sH2Tle_cp6;McKPIwn{1?k>(~7>O*m=29%3GyqZ= zch7hYKjoD`D>rieaP^76(&JTj_EqmGDY6X;G|y)3F@-i0kzu`EvXL@;99hr6&>UM+ zrHX{q+#TdRKN4kWpZ>VuyH{+U>u#;yvq*HN`KR$cV^{1OwPD?Bh01x$yJ^tOQ^|`9 z#L>ikao!X1!T)?ev;I=Ei&Z!+R^}e-k6j#&=XIC|lNXZB;Gd!A_Eb#T`Kv=~ihPRnFdG14>BSHu|gJ_K(Z zy?c2HT+?O#Qm3*3cyW)|c?{V>gd-3Q+INk38(}`)odroe?zI^}c8#nGoI#46^nk_$ zx{Vv{R+BqEV102YO0i}&xxHav%9|WqGpNMwHfTn23Q`XBtoAbE$5nUCGp=A={F<)v z+@YXg((YEtmqVNe$#bym5pjIe^q^u88>zsfu~_HsyRyDDqf2dt{K~eEkMUSRiOr;; zZVZe%;>xdrey#{(JN3)d6qhO6wXQCm|K~ZjxNCM}{la~psT)|1=wIsZKp^Hok~7ml zM8}m7l15*$xyz>Ye$LjUD>{y`8s&x&NpCcNtt} zOkmF~;(!h3lr-bOyi57!)K#7r)U?K`3yg}`{qCqG&cYoHCp$*u?n1xSXIz!|Gpn9K zPhSyta=B7UJEsp!GIqf!!KKJ*+Xde;si-x^q7fT9X#{#)}0?qn? 
z#X-Qm0#jx(yIB7D_4hZd8tPrCuYaUNK~mk89vv_32WYD#3b6NZ@@*ASZqe<`IQ$Rr zf)zEPb7A_^O1fqvS>SIrk~2c7&(8wzHooKIe+caZP`h*&=gxTjtnu>rWJ{A0%f+;7 z6>bKT>pgF7Gv&^p9eYholp-@{2j;MpcuWP|6}o{x9_+h6o8||&xh}}wThJN+_O>6o z*lepX8-}nwL4~(sXm4K;w@=94hZP!Geu*1?qsH~0S_5!QegW3Mhv-KfAf8y^MNmku zBvbGB?Bp)T*U$50Vlx7bi{Ahmcn%|pm|PVA>x)&x76a~?>I*--U0%hfIT*{K7am^Y zuAy{hOH}<5w&a?-mWy1>$M%q>dk5~x+ygY|_B&HCEoa`rv^R^bIz*aVENyU6+H1rX zu;U>w1OZ)dtIn}16e~ogHXLn&<65U=s+dGN6j$y$Gf2ln!WsXk)kh-Ks=I9e*M!$JA@>ZaeE4#Syp_ta z&V8vrfyQ#TgOHTgyGOY1^Wt!i(oG0DGk%&*LiK@lzSNwd*9Nhu)yZu{r$JEJu)nRt zyEcb)2V}l8?^hwL%Q6A5ZK_X7YFF@IQYsf{XNK@0)oO3Gw_{`o2Mpzi6$E17LYQeI{R-G28o-r_?MdS{TG1)`y^8r zs%|8$w)!*ks?UI%qvR*W@D#X^O?eB}ETGdVLfDAebyp~_$?$OYay>xCXO63MtqYc4 zP_8-iX*2l2AgtT8!M(sH=`QUvPVhARUk&X5Q2UYbP-iCj7XQdtXDH9?HjnAH&PcUq1 zm88uGbWI-T145K0MhdQfh0@hQ3t>NSc|s!7&iPxrxpv1~3ya^GR=(VlI&nqrD~yU( zu8=}D;8On-3@(FMj73+7#gKLSCP$VWtH55P0K^}`!8&Q5SHWrHaYSAk0ONt5m4D-= zKmNA}sV$~kwymz+l(?P{4w`jy{Olz7GAT3P#=UKysU-0+q6^QYrt}B+){#*Iw#8LGyzO>cY`q!w2 z;jkR>Tp7{V%iQ>K?vl|?BZdPMla>DqPPqV}IqI$;5z=!^$mKygG~%0zX;x0@4u1N# z^of@Pd1*gSJq0nyi@;BPLDx7Sc;}c~WSVBHVpJonODh<%fAjH2G;6~ynSB9B&LDwl z=0Qf4_PqLyT+3c@`_qZ{GGQyJ2LorXp&<(Ib{>=%(Q$fwEuVvU1i?*_R=84U52UuA zpo+>w;v{aP`UW5VG;yPcDKE_y`uba(f2<1?^#rwhd<2L8cP*=$oTc<FzLjP($`JBxh|e9-9&OBM1b0;dv1* zmes3SZ~PwZ%K* zc9$T7?=PN?4_c;}Tbf zZI_eHr)`|i8bYrAb#|xZ;_eCSe>H>*!Mo92IKGk@FsT7Q%YuVHhxzK(>a>)6t*COV z=3(0TTCpaNhb}8F*Q>R3_Pk_^SSV+BbHwvVj;GUv$JHJ$s7vRkmM|1ZR7$lrDLo)R zSl%~Kp!6ys*pxaV(ioUuUN=v+bK(ri$3{9Q9kJB(qDCBzKlg<#A~}hOSi0>qD8mWj zw>;u(uJsbdakZ~|{YxCSyZBtR<`fxLquVeDG`yFz@?f)r zyfni=_ieu|3`yK9X#9wpVT(o;C0lp{+N(@rK16nHB7b~^`tSEK)2I7BoN6WbK6`n* zLc0t%kaR44#ZzLbyzxk#TQTDpX$rlOKZ3Tvk%vOWAMbJKNe^k|ttlWBa3EYVIR8$? zg;lYG-OesuJM?#T!{?W%Qmx{As>;0njTX+BuOBjv`G`z++p^qVHTdXx{|6>jp|JdR zd{Har?#Ab?6y76djXh{oB&GX5tv*uVwPS7JcE&sT`X55&35j05w#Xy6k2Ob~ShPZ+ z`85&IY!vyn)m;spYL<=posEeH;^E@t^$A1A{?4d%(A2m8thU%b{mQ9kqgObPYhQNa z7s4R(3uoLzy&c#T&~Y00veLuujFd;co=TD*53ElaT!Dh{)zYO5c;Ul%u9xc8VJ8wh zH=r@NNsQi3CE`FHQHsfC-$)X$Cc;6X-)f@XQZcZ_Rn9Whz@t0A?+E+Euz^;|(f*&i z-2qa`)^HeQNVIVR%t;ca**Z?cG&;SXmZ3PN@TX+87>U76tsigon#_1>9OMpq7C>Ak zZJn6+qpLDlb!c7NGiqc)3G{I3EXjLGPwdgMhBRO3ZiD5mt3wHsjw}JM%A09#-rEO zSIvl^F@fYMaKz41Kk^^?cVZp4HhA^Uv{*N;`{JKCB}(E=t!RrOQ^o0DsJKo4 z@~04^5vHN>9he0w3l0h=TW0a+wwtj}&<@>t$lGo+k-+f731|VMQ6}B?n_}#H;r^dL z+Z{NLFIHA8LWzGc&AI(i<_q?A;G>qyL8=3kTwG+{+}XyZ?5I)#r)(?1@zn5gm1arW zkU!D|Lg}yuq=XJ+rYt~EN~ z{_c47s049~r7HJ*=Af_M0#=onq^Dis2pj7dW)ryEP#oXRc-H>V^VoP=Jru#rwyvt) z?q7@@+1X|7!uJro#N83$h$FIErYI?rpl|e@jh1bH$!f+r1(z)EVKtq-dcXC8VUewAh>qA^e+odju@F5RY!p8P6p&K6_2wAbYzIk zdZBrQN*A+YIQZD}X5g`Z`Zp<@0nnsP=T7)!lifXXd+p2f*)d85ScY{Z1!fA%O^66(*3}XRtjqXLOC3hWd zh?1(C-NXqF+?v%x#q>?k-}RVg3u~p~bP|gQnKu6(3~l2U!#rOHCFg442VZ00d9 zI#IyL79onF4`q1qI2N*FLc(~|ejda6LAY0+?*x7VHWYa@z6u75V$mL$CSlWf`^MG1A)T5-u!A*I*gINWuN({a2FQ!&;L! 
zO%b7{1r7a>vC6{PsVD&O_0-f{y1{cf5mEVtk62DX+q%5PufOwxe~p ze#|v#n~8ddcL9~Fr|>ArCj6_ai7A=tJa3pbU+TS>(&@WX!w4bitEf!ZhZpoYW4@B+ z!JxpZvX&~Eog`?nz^-8{R`aU`3n$yHK^L8|U&Z)iRC<&bQ?=yj-jq4c2$tZ!>^sS8 zZVF3*lh!uZ3nLGmn99nM_cqd=KgD7b{XECq;JzvbAOXj5@$NO9VV z-H@OZ+3EabMQP!Win+zV*~h)cSDzn%Gi>djZA%X2U4>}J2rJW|rjbMjfV?!6Uu4TH zWIApzj#GOs1JO)seXHB%@LgcxgT*|M74vEd;x=1b+^@MG6x^J7Eo>l#sOy$_ zDkJtz%QKzv*p?%G4tOv|u>R|EH=$_Ry3=_;*<#mYB%u+h1RoSLt`##O{>TNsjODR8 z=FxwIU)=3^|*-?Kq&?cyVeOO*K zQXxy?s_ZH>>M&Xm;zD`CA^}N0SEF8KIv1OwfqU`U>sx~>8I(aC2lVlGppd`67YSaW zMzxVeap(coKZ*U+EJV!cAYEvQ&JFKLuf1ub~NrQQ(Icc<{)cM7U|0RqfP3Rauv= z0E+GwWN{loP}fgn`T3C^|2C({e+YG7&tO4BR?xO$?Ai<5iZ?8wUfXJF7*qjn%ghum z-z;zV#NaZb%MQeBo+kxZ}>;bjOYxbZutiBV?{g<+n1`q=5l`eRx{Q^{KwgYozi+{k9uU@?2L!!7>1 za~B=v#T+WO!?{l;*T$6&T^Gy`-lZi!I?sIu8zLijiEAGfUd&k%j7OaDM+vq{`dWyt zELogKt8$k(EpQUvdOM<|RqMDE*#G3Kcffw8yaTQ={^({{`9thLvs=sd_47c6qLW)Z z#+(I0T5g!Ps+uoUht`q+RmAx+ay4@>)QRCr5fVgkes%C zs)A9!6i|I}0!@QSzUB+2|JUV?Ls1{p%sdE8hU{^>>*UZH5_#|Bb=+OUf-6p1Z{q`U zJ0nXMRRAmi0G|wT)XISWuS$Ul9dL^K84u~Ew)fZ`tJ2vhN4b&$PQ&ua2Py-q@7=aY zW|~`@W^Ni&48eo@whL&nktu6y{ZE({jAFjlZaP|@peLjsdERFpz0Q|Pmx9)!DJy8| zX)(Fn1`!**oYRV;imX1W6N^7vdwsI|GfTgvtfpzXCnUXJVU(}sgz1$u`WPM8=ZA|b zB?3lP;LKUJtr%S*W#Wd>AODifK85T59Nq`NSo@;9vGLmNCv(7f78Wc2IuCC->pLNi z#;3-^iH71T^yDA3$-gQXqEDC*Z7+r)Y{c+TAppjndy3If&|fz$+hv475m`|l0g9CA zOC~4fH!XZ)F{cv}oC$Fo!^WO3A9h;kRFU2DnN0UC-_fhzNI^B3Bri3lYqgUloE?Zs zm`*f2x_udr#YYo&$6dh`u)@eJ8M7pi-m6I*?yyt1QcO)xM$pDe(VL-te&2HxYU%M% zmT{Hk9hG?hW!>{Ic<1>ua&|x>gbcy!lQTO$GqJM88e4b4YQ-0lQq|EknTxBa@sG}w z<1Hvh6~)zwR|Ifu;yL_Ib#b^rrBE3tZaBoG`n>bqjCr2$?+P^0ZV}i4L(@bMQi=aZKmudCg0Z41R?r z1@&>T<<8B{Nvu_G?d@qXN@9f-54oFcSkzyWL5Mp#zn!0d_RT_TSTjNm!y`f^dvDNq z9@V6pRd1-Ikl`4&^P1$S`+HHGW5?U_c2s=kG>G{d)vvI*Bet;JGT_c)GO5Udmpa;K zxp>y2K|Z#ue6QuQ$P(}o(3uHId9}qw-8nnBX`?lYdYK=1W~!MXQk$OUtq@QvONz`b zB61;{Ph0%!l%rUUJrR8%#y=`KWDHaE!}!yX^@=0gkkCI^z1NOwM#aP5NaU#m7#Ax4 zAW_NPvVOPt*TmWE!D-^x+_l30*U+TyyfLY=%T}K8>WDuHyLJO;@O<6P( zY2D+~F5hQ$Ve0Sux`qo*oACzf?gdyCi9y8O9iIfi!k>zg|F)GIZUKik?|?V-lJHsb zzp938x++A_##yj3sEWC?oy#7F%1ZRxz|bL?o5-_{*?JCUTz|hA~9dHKH@?j_K5I){JntG#9m>PlbaH zm3KsI;uXF!k2#cQylFVne75gQ+;qVL`kss z%WD7aE!z6>wIh37<$-31{~yb&U$q_PBntd^r&+Q`wPs4oMb{I76-r(_WxZdB#H-&H z+gZ9?YE3$_}2uU`5g;>D_>d46e}$CG*NR( z`SHfCU}QcFc^!b#cn_9voomzwR5Ip_N^*k)5=O)JG!}26um^Yt41-vrS^;(e1$S!~ z291R(S-44$uuy*mGsMXnEyTx-`b_9c*P9M;WG-QujS+|m#WvLnm(~sFt!lU>oAs1@ z)5;NaB5@LQ$dnBP|CSS+^ny3xqSNIE6!tetDjQ2bL1{Q~N_k7|z~p=|d5(B#T1Rs; zA7;a~x|7z*yC=V+d0E*15CWjWWM>wH=uO^mc1zoX0~7hAbSqmgsiVL0K#~4`3Ne|e zp(wCX86m&KJk8=Z}t?GQeho8-)-0J3?{HtR#E&bblN*iOVA2|hK0s+o2yy9-3)?o!s z`W{Td9eG^uIDwW@4C%V)mmIAJ&YU_o8j-a+KDaXOdFW^;>4JR0OA6zy)?)1 zFcG6H=VVx0L|hNP8li7f98EWPGz!1%vv^Qdr_Od{a{1K%dYEMFRp9tgLBMrY9pjEV6&7Lric=G4GS5?aM0?% z?@srppdir5EjtqfB9_6A^R7CH>M!)QNFImIj4Tvdb}Sc^SIFAfYDAmEGPz~0zA$Tp zBC`L}k;2&3VN+1Ucn~l45)(iHqhE=ODvzbi+Ozj13j*vZjf7ekTDk-&-`v8&Dqto3 zUpfNb$JNdaFhF1>J-!;;->!Q4xMgvZ<>Sf>H|?Xc*hyFDlFjhU#+U%fU7G?`x4SwE zWwGz*NMhe_;N$$f7^4odoL!`rD=C&tc?`_Y?Jdu@l11(7%U#P zw|qwJM;Ox|ojLVvzRTSkuWJ3_J#^4 zvv;TnD#`>dX3pvSKSI~CbH`_VqBhR!JoYvJB_!VI$FyDwg%-UOLmFO;8Azo!auATR z;>u1QivHff8bC#=&rsWn*?l#`jOgOb7fk6+C0nzyV%5Fu!w;!mjfjlLtDCbWroL+I z#WSHKmOkEDy$50bJdvFp+_lo%*1ln0Tjd=ua||=&y5lR%30c2?1{ z+?PAdV=-tj+!d!uQNaU8^*kj=Uomz30jfB-79|Pw; zA6Gw(T3m@agpycIQK{+ASs!QWx~a$4k?uwuyHyo+se<;jPKy}xbk(6G4u7wr-;DkU z9fDud8UI1Uy3{9#(me*3R|FYH=fS5;o&S<6j|n;@RM=7Q-KkY$nWM|H~*6V^5i|lW2$DEJh&hc?d{9eE%y44Ta03xt+u5R)D5F6)p@g$W?}Q>sRR`J0peJIXzd%J z85h6!+Pp9PMYZDhf{agR8kaWy6pQ2-Mb?L@~PzlORCe~ZI_7OF`up79ry@<77h8_)=~+)>zK$ ztWkF1Es2a%~i#)}=3vlm3s@llPO 
z{Gi1a{KXxe+Oz9l4M9TD2vwKdF3EyyxhxCp%YlhRkNTCZuZ2F*^Y}-XidRMH1o+x$ z8PxMRqD~pz-k|Ha8@s0oWnbE9vcaJSDFi3r@#enuhmM(L%L0AxHCKgJ#T%Psi*)d0gW1B z{#BBL6fC<9Uzw|ZgvN?3!xS-N(HY082L&Q59U6*Fh00BWCV1)%BSXcsrfNY(s&^`- zFPKC>&v&=O#=jk4r@oes3)h^#-6wOnbycxZ-~~q!zhOgFXhI=Ydb!w&FyS4E@*XMh z1V|T%ys%f?#T2w-PQp8H?q>LO#Wh`n12FYB)+~s!O1y^w;N+~5 z_`cTY%k-;_C;8&|RV^I}qrxlHWLqN`!zAizE_KNtuf18qsp$b zvQU!iUb={4;kOkz7FwL_4s^b=YL4KtL#gudiBf#9YINfgbVk4i*oaWrP^g!R56lhe z*|DR^Urt>EA)Bt1B8E1HmKXzUtO>#4*hCH|3(Re&zh#)LJ(d?si;F)%<(ZG1=KHx4U88*QwnB_wwZnwwf}8k zDIPVb*~E_SzBOgE7-n@wgrblCson(^I~4K#6w2HA@PpN7mhk24uH_vV8mCl|8jI_t z$r5JIt?yz}Vkvs(?|&t92tZvIFhwp-t!&iRwjAQ$;@$V?)`|)F_|dvL=kRbOh99n4 zRgdsHJqpq}`OxbrYV7OH}y6 z01B-KL_5Xn-Guzm_zJr;1g5~dd(W|ex%K9sPVQ`9&nsV(qT3k|IVsWQSNlV&eM12l z003J1DsEa;tso^aEXM{*KWtJ2TZnT5jDf8w5*Y1mA3_>ItxvnvtbB@5E4eTe(BE}Q zMpoDBRby}_cetUdE?x%$C)sA$OoX^12aB)VG|nZC?WGGbQA}t`7zZ0?+JOU7L>Rj8 zLB)e4a~3KjBTJl%0sVJ-kH5GHzzJ{O70AsH(a8Zc%O))GnUXVt@`C9w1bW+LSchKk zT4Cu3;1zva`tPJLj)u}%;c9P4@qX2#GWC5=>kvF$8FQXK=t}@dVfkVh!)C6HZXure0bgWbypcbz367Cj>(tF&fbNYX1A&N zN`UbN|J^V9pz&O(f8P+Y$pHm)7L2W#jHA|Z?Q3Eazx9aJj)|anWVv!zm2$Ckpq9^y zTX+0U^Nj8wi;Pv*He8+kXmeDWl}?f8;ic=HA7pe0w>lT##1U z|5@QSs)2`234UNQM)E1f{|7SLpj<6DZME}N^dU6d0Pk0RXOuV_xvpF4fHMim(yv=1 z(o~`W>BB9%#G;n9(&v`dRz@`94!p?RFPnIO)czJ*7YlBKWCr&LZgRL6dWSNhJrOWx zSrDTWmSA?!Uz@S9$lJb+0L=8){s-g}ch1(bi`{F^Nch0((;~<$LPMVbl)crLaI{xO zec*xZw6jw7HQAUZf3U=#Tfs`BNOo``x^#?8SDoZ|XHCgE=Q3oJ0pOaoOa_z{=8|9C z)L1akLeAqCBMv+O^`1e!@Dy9l1lJk`wt>#P4+2XC!W?L@yyck5-CY1-e0DU)eb=t+ z`R%e~VLatBmEzK1|UP8s41eNS4^ed3_JW<{wSIJSRV5q+4Tz27|==6Z-?7u zm18iOeBplWczrq6uV$JbnARS19$$oJ3^}eb1~8ZaeGxRpBlt?lrHDF4OWCiyPQvf) zTH2~u>c0=0bbgsWg|XqW8~S$eE@o!CsM9q7>1*@#eXG+HCQS7|HW=kAiWV(;MpQ$| zmnSO&oXj%xf=6rLIeQp?5%d^wh+lv1?}p4{>#d>m(h(M~;-bC~i5DUjNZ0fPd@m^)b*qBl~3_<4Je`ml(o2OOPJSCU1F*{bn zz#WsQ&?v50w=7IyzKfcv%IQvAC0i0@9-Z&(yo8CyC+U8J(I=z{aq+_)UxNK8SW_E6lMK~wV2Bu#KD*3@6e3c_h$&_oO2IJh&#s( z^o!$Of@eltaPIesrhf<_X$rCYGui|ydWNd&TJ{j##Km52J_q%|@+{}i)jY{sEM2=y zT}OZUlhJfgQN8T!fLA@_H^VK! 
zUhKIS?C{EVykGl$p?T-)`)5GOY=mKgoUSA8?32PNDgkZOVUvfgK{*5+Fb8<^If912 zP?V+6ZlLAJbJSk>gKk{k+^g|T&!dOc=IPb_E=u#rcDT7#4Y>$5X4|LyF{eep@;wE; zf@iPu!K32u)x0-#=d_Z4l3(d8B`&7e{{4+--m|hLLIOq|2)%hrqaALzsrr6+w+&zX zRoED9L@^2$b#U6ZsS4n^$;W+Z&6AFvdVBk<#Ij`NvTmtgC|Dy8F^6cFJ9bn4{oMb@ z(pfk(`F?GD8zC`Zbi;trozl{bu7RX9k`mGg(vmW|rMtVNyOeH}?iNHr?_hsv$)z-e4-A^$tdMj;LRphK`yU|QN6 z$yip|_^U%UM38}Xp&?*jxLdmig$IN62FRYRH%BHSF#U1~^Gezv=19?B4Q#PVdJBoY z_KzSgFXNHkbg+s1%~QnsmI$Z2xjbfo?Mrqs+p+gdVlhYkRM$E>knpTB>YzGsUN8Id z`D01TX-UVG7@&0;y$^B)gg(zZ61d^(?)_@;1DDgPT|cJS`?IC!*}!$(5iWO%_)x(C z27zWd`rTs;!PcXEwgqk2#g4L2y_PWHzak zZ&f`&2!L!WOZ=PhqB86_8G1vwRy4AK_hM5g{S1Wj5^7_P=Ax<-&#SgG2A*B&u4+C; zH)#+4ejN6n+d!uk?2AFWZt&uCd#o{a2>~$-5&iatxy+&kN+{2CXJsB#)AtZkIg-y< z46dILX$jMm|ZeVaZt{!%j*j;Sp+_<(p=%m_5(OIvJ6{t zf!GEdFG$FDLecYnsz?wYCQe~SV|B*-=M;R15rV~|+@U7VV>~1hWdn~=b?jz26<7#z zO|`y>qz?=!6;VEHEZmeiJ#Kq2)24s`qMHREna{dlKxwv<=%V25yiCM*bHy>;oZ(yp zwD>>diN`LM7#eLVWvHXQWnCpTo16S!UpJYvSFf|SrQdo8-tUo_m2H}lO;vJGz?z_R zMv9!9^o3&L)wh8?E{-T=@Flk0hK-!aGTJy4mWtw4iXG&foBrc-F6(ROJIvJ6_N+8e zt3Zec^MFx(yRa62<(3!E+Z`zy16n2S+Y0_Sqf04jazyZ6EC$0&?9S??!Lsx5T$owx zktRSc!&9#BU4gN#!K?F1UF`;r6}`7trGaZ=qlh;~TNA=f$LL7H%9Eco)w5%8G0d@) zqT32jJWN)PjoegU&WCS0jk3;PJHLuXZ{=#MxdoTM)z$fw3`lfd^b=(R05)2&tG`X5 zBkOW?^|Y|gH^WDP5Pm#Cb$qj*#pYcO&j*n;UN3&;z789dTp#Q?$nhYRv8^RtMz)XD zEX!QnXB-Lps1wi#vGqq7hLN$|5SBv8|Gni70kYU!V;vg0fLBk;Nahuh*cTnoE9JG# z-BN9C!ZSFYQR&F1|D}FVkFHOqs>fKbH(bNCDp@%ucRm_HAe$+kZU$eI@%UUNwbJR{ zM6ke>!Uu*M39?QpYrVtbx}(n|0vBofWF_?7RacLUvrOOYt6_`w;jK`MWcY62rj|;c za5(C_OzBgb$0x9(g+Y;Ruvn-_bF)Z4qkTcvU~eccu~7(?=>t=Xo7pbuc>-$i5ZS^^u4&bMQ_RkEDP0jN$JXNyV=P(GBEe z1Zw&y0qBi|UGNutdtF5GLGZnwN|X8tHI96yK~M7NRnk+?Mm~c}QPs|jqNK7(gktbZ z3LmIWbxA-33gf(!xSoL(o}2^4v-){H9(pjTZcmP#_8_gPH?gu%=l!R;+o2T{KUJQc0nq zt1o`>&4;p+;x!VAcF9MJ?aI`y+z0?jW)#ZEOP27*QnP2Z$OU+yFt9gg=sELSvOeYK zkQ?QwYP8Smv`m7n{Kc~I%FYEz;D$p{w&H=qI~Ph%2{*RN01u9IERszvWcY2>pN5fk z0D5mnR#jOwqh^DuNOmP(0UU({xv_1Zo1ps!b`|0}swwEZD03XPkof&W0?t*T4Hh<4 z+uB8M)`$1B{FA(R?;mpo8Xp2{ZLL@Fg%~9W&u8}&x`*(l*yQ4)%?RW415Wp2p@_VY z=Jb3i)!Iw@>sp|Be!t{7%R%ZbFNWodJ&NIzMh=o`gFn@;!@ONdXZN|2Zxvn(_YFvq z8vHbzI#)F~Kd4;UmP&9m-l9KZG||8_6Krb7@udGYwl1gj^(pxLX~nHnSKCz)T82Ys zKCwq&KN79_3UdMi66k{LnOiISC{aY2bI}Uv*}U!M#;zpSN%2tqc2N9IY{1t@ulXKR zyErpn7XT*23Pr)mwMv}eEP2ikjb_WEPW;bZ?gSt!z^(SQ2|n_Q@zCBzF$J4vP`gh7x6H_Dovwr0XkMq$3w|{3p%H*@lALFPsWFf?DeyT+OJQ3!=ARhU2~tP3m4`lrs@yV_Z*+`r5$6O&?r}#B=I{%%oXeEJ-3}_ zI*kpal#54Vx(M^h5CUt!`=rF<^pTwRZei9iznG_GhF%Jkq5H)0qflVW@xgm}&JOx1 zal!~q?U~8Gn6Cn@&j~&i#yPZ3UFhb#E!_H7Lr=>o5C(XIerB$zf}Mgr!#gm%gIl!; zUphS1YPa~~RBXOSGn=kd91#P`Yv)=r0G)4;Dih^XHnf|rVB`?9%{GT{Sk@QBkB$or zDfE|$l|&mG_s@U>GeXbx*_o)3=D)E`7D$<_#DHgC{q%oJj&VJu#tnpFMdxN>FhyB3 zo4jl{PXd_n^t@4+?mjpe+#r{a8ruk>j+}l@9MIvD|G`|$K11GO727)H^1-xJ25P0D z_)u>&A8y`#-vqhwJK5<^@B!t47zRJ){%TkNLE$do)rii(ioZ3g`%OjZPZHC<9v+-j z2C2DNdT^{=W9jh=j=Tl|vV`3DiP7G@LVfv7#9ZDVGVD2*bkaQeWusn6q)){ZQ4kHy z86*0iA3^D7mJO3IN$J;$i9}}N{`9?d=6Zyf6!xk%6 zFxpGc>;kGQD7k>ar}2EA<-V*JSxHC;^#C_z!2u6uL%X8H0i_scJJI*IhbMj zuK?~9!j+4PAk8EMx}B$4t?`}k!+O0`tzof$gr4G|##prvkL0;bb`P_y^P{i_V%b$Y z5x^+?UAs~)NL6>o10528itP#$xFMd&ORrj(kt#ZA`dXltWj#5t33}`0OBQu%E&sk< zp63IuUHxyeX@dpVUdr`5+glLpU`SztV2w~0t*}UV3X$eizmb?x6ZG4VN%?}&H}LuK zVov(njE?@BSa)_LFL0QJ}pchz0$0dA6bF*<;CMtFea*K zF3?+ClVuom$DFQ``_AN+52bh@$ znpO(;RtuYie~=05sqGOAoAy@>t@65|GW)>`vBmN~$@>E;Yx75=DEpKF{{&`QJT&qUUl<5CP~4B79_#+yXA|rH2pvbr9Mg?<-#(|G24(jM0v7xF)Hq7sF7f2j6uja0cs4)Zm+esUtShZ8mk*52eP$`D{gcgB)x{`~BRS0z>?o83{-^{bP3Ps0Xq zugNw#Vpl3t-{5168yCG|{8(sJp055ylEtQt({ZVp=;E|%do)QKU2bXE!2e-I|C`s} zMn+ss^zU}hLq@wu2}7#QDRC-MG+P@qS#T0;p~SEKo1F*Qmz#^860MMcxxCzJC`oxw 
za3xNtbU~^n*&8Hc!Nq)Iv9DaXP~OFgbHXpdNF< zPkzd3UO=;-8zn3_>`L)VApx0-Hb{lltZV;?(9_fhbWT6*fMcv~E@!7^gPnjqkS=l} z@p{vVz$!AWI--q6$LryzzZ66zMP&~bhBMnc6kx?EYWK?tg2#|lVm@OyUuNbd@*_iE zi=a%IBKM`tsgkWTb@F^CFInbys=@cXP~p24C(lGVm;zzgnkGF6wZ`9?YR+Dd`_$Cu zXlPnTE-aT2Phj0&ote< zG<5bAbJWKa7-WE+)(iN_S`A3ahF*+jm=ON_Q#*YA{6v8DjFRow)aCFbCx5v<_e) zUN{dpSaXushe1+5XJlRUFp-&Hu|u8MU1@~>5qc5@Q!*%7>f5 zwR`>Jc5hN6mvAclwE8_d4goAr>I*v%KZOj1%wWP6IL*$b4C}~tnF`2$0JT#P%; z5h@tj^yx_+uQ0rm-}s|k-aP*Yeg5ui?sB1GivBzA?T{g?6e<^v)ug$P?r%aWG!0Ex zudBaLp<)22?JGC74!pd=qCTJc4A?qFj--fqt#5Y~Yxp*?->|9;GQ@tqcRM)b3Gj_2 z5ve-jr+Klx*@$FQ@4tZ9swOC*Y1Tr@r`l43gF_N_sXfZh?sH~eT5@W9KmEZ5km4(S z_SZQOdSGHM%7~o>Ar^^kAv*LZQWq)IEr%N~PbLJ()McqL@nW!uL>qc^wB+D!zwpoK z1WHRJeVSBDP)ZPiNzP^pDWfTyBa%`5z&(;!#mcc}Xx!GOL=v zPibGgouX1EWTNXQc8=BHcd?!T9x{n}bPQ*iYB~=b241V?!IR{goWZb9$=ZMSH_nI& z%&v~P5=jRH$Zg8l$ICZsZLtljXc_(O)AD|}-gW+ZG$IF^3zx}>JJ|&_wkdy`i+<-j zuX)|V)J^~(LGb@-AjHH%MH?K9GkCW&s$=pdTf0R1SHS!$Pk9QDHHt~{hW+S#YY>#2 zKpvLN>yH?Sh^)Wintl(#HNg78<0dj*nxV~Wz8mS~VE0*n!Z5L_x`^D!LEt5rU*q&q zfKI#Ed&x{QBv0<(=V!(*gz<^D7{1&rn;$X*!o`?8D!7@&QfvF(z@(7)`KLm;Zl%@c zk%kfmi%3+tb3OeSIj*g6Y9-e=b1O`%50!`>=FCQW?y;6{cODw;sa%{Mt>~q9P=FxR z`T&Y4BNtHV$1B$&7hQAJJHErUl%SLNB~+fVM1$R*VHm@fZX-`A!Dwc&P%`vu#k6uJ z6Z3ZYTQs06Igd-G0=0`MMl7EkM>Y!)qxWU&KSGBQpx?|R4f@CnLq>8D8+Za35zD|i z=++yRu%lF(5=eG6y!Ti#`Tn^2`wS2S(8SbRk?R3A@Clmhv3#vbMSFa$)Cu^e{zHM? zgUgb54C_N%<$GUpO|TlfiP3SHxURU@yHoj7(F;$*t~y()nNL)@&lCkUsL1%mNgIp> zis*w<*Lqf(W>?CZo}9v z1^44A05$Mc5yWgYn?w>$9@ks5$~fp(orX^SAseOi72%sVC7e7Am4<$*zpu=>OEq zmGoNwtD$0eS5b*P3!?Jt+Ee(CH54mRwSY(*VJWCvZBNd>TLo?`qr5~({$a0YO>WNC z)p=OEo{`B(_VU$Rqc6}(1=P`iAypgqmFB|w1iguV@*yLVu{S<#RZxq!r-g0}`DBkb z)+&AsIrsaLI%aIPsC~_=O880P)^p0%7VZ|K;>Eb`xRYK#@Dxnp+oQgH6ccXWzy31D zrs=w^yMK6m^(=nWieuiEt&aD#nw~)(5$`UMczWQhd`eT6jAlCx?}!eYp9`wiSNUIv z)Q-~yV2G}Zg?>WFE5~JFXlg7f_sT(hTRr9E+$OI78yyv9o}{{Wf|L7JE*UnvIqzQz zthV*M)L75njQP;7_0od1Ic-@xS&Yl#N+jA{5R42t9qVlCqAfI8pPM4US^k%NB!KVtFfh z>+{^{tcnV-MhqT3mvY({0f9i`w%uTVK^`@h@dG_VLk4JNK)S-zu;cHXgInZEmCQH(0dIs4st$ z-1-O9Qplm-A(qd07=gX$5#HQr2q)!(x%-A!WFa7&iJfw$9c1xIQ+90dbmClw$I~sV za$tK(i~323DuQ&hgR(NO{{%fO3iDK4iQSN>Lbf)~^2xLHoI&A*&vAAM%|*j;!~9zp z3kOy+#M|pLa$=@RU@ngD@W89Fs`)AyQ81mYr2>)dh6M&4CRP^{B|f^1dg#vj`P_*~Ajf z>+<~aTxp9UQ6{lo4!O#haab?nLq{RIgyg41);tk@M6%kI7J@Ys zp_76{L5WB!>ypOSLOAPOeF>Y>&G8Xq;$P7?^;(&rK=927W;qEpZK98P@o5^HEQ9pm zE}qQk8-S}d-@$tke=2zw%SN+f3>VcG;&hB3w^pM^lhtqOR|WaK_4<+*CDo0mj{Xp0 zSCwAMXDXYP*bUSaJU;%-&1F4$mQ`)%XTFftZX6_=(RcQy@{7~f#}6;?thFbZnXGO- zw03&AA!6EoNilPo`KCm?ScxwT1%G%Bbog90z0t4OPRlOjlim;9Igag16O=Uz~e zJv+L2Sk0KsZV3;Saz*5lM1(S7n4@vLapMirwfyp>EZg?YRO^h`7xjV{t(8sH?>5|u zX$^j;WM~awIz@dJ+R0$~t)+m1A@pL&>Pl9_>}Rca@}@gvP%#C^sL`|GO=&fE=UVIT}^LBd@K*QUkM+SG^1Z`R_gk^ZBi8bLoPSXLNr$_ zY?dD9ak{}x9}^rhDShz^#&JwdVpe}-avD_!(;oh`_OFJXoUu^ZCLB1a^bOJ4IIJyG zz(jfU6Qg{+@4&F4)T)t`*kz`6lG4GvN9V=z+6Wbb$VMg2+6zlIo09*9kA2oXAO&>$(ZBG^BiVV^&Q*K~Mn9-Cl28u0!HpVlu zL>*!8^c?k@RE1A9O=Hf>FQG3dm5t3PYO$UCmXbhsq_)sLm1 z`oZybp~K_XzBW24BmuKE9V6)A8do?1x>d}vYGLJUOmnYS6d1eTvRLl_t@cFFv4UFN z3fE=s=Q80UfgV+gQ07?O#U9!1Jte12Yh;C(b_O#Ji!!r%ubE?7(+`XB>xReEwXcsr z2@h}}N^rasN@t9IFx{XbwrLU=^g&DAfFn|oVhMS|YJ0#cDf#`Dt)(L$q7*titEYq8 zP7ECug7rzfRN^%6aM2UM*mTFPD(R_u`)Vm*!;NA{{~w{JZu<`{d|cHb*ThOh)cha; z4Cv&H6{z0~ASk>EuZBMsc%>BB7Pq@EoGX?*bx1hktUh0)fW>bYxfO-TaoNMTgKZ6d zM<*;#oEl8F!+A%u>EAJ!-`JGV4E-30FvpPx7tiiBL+H1s_r^_>tEWzb&E1L+ytgyo zm7V1;=;?yUXD~uI^T)N`4F9clYW?t1T{+&SCY9R1)q(iPoXxF_WB{A0K|WgR?2wuB zZq{Nh`;SkkkEX(I!kyB4p`Uot+STKV_G!Tk4&-%WZM0*GKc3qvTjgchAL{p>nSIx~ z^qrAEeh|C4p&d5fF*TesqX9&)CA)u_f=8HO`44cin6skxZ|IBMb?S3HoLF2%y{U;X 
z;iCQ1j=)Jg_qxv}kdJRR;NZ$YDbLon-Bu;&tFjiv!TOjE4P#lexol>Tkw4Pe(txtk zKCbF^6`@1uOpDYO14-D*i*&qMb7ft`a3UKI9&DErADtXi)<^AhJJu-$3 zUEGNd>@`?CIvq^Em+xXpOskDL+d@;&1}m)CHg9`o3)DMQ+KiBY6G%i(3)&C8RT|!W zW3)G|3{zws9tN;|?=u@w7^E}vf|0moAxi-edTAD3msQUItH5G}dH;YtU3en^B& zaVMCk)P&zO@x_Jg2@YxYZ>?I&5sc4alq+p`bwKwoK_MEB+rmvs`AuBM(nr3E!=GLC zOM4LJ1GzXxw!VP{fq6ru+Bt4zpsJJy-x{Pqn{PY*<-btyCp8ud6U)drJLQH+I4gm5 zX)czFzgS6~^`>gl{`F04BQb^?0M!h1m7WDi2cj-6I#dSAf864q8#_K$^+1ei~Xt z!1>&_992zf0ir_j5Mku#88XV5Mp!0j+=)XwBIWgwKX>1@_SGVT%zH!m@Mzp$L{I7 zbtc$~h8YAnJ*;kA=1C6?fLmN~zS!!YT$t2ErnOlw#uo4W6Et21qVPP`kcLB#P57_h z)+qJZg|pDfN)73-B+J15qup;%DhsX*1jB{8JR;5=##?vpz=TngEATbRe zCN6LNP8zz8=9?=TgVLl4Sz(d=s%&Iw44!f?IX%Vk7SuCo-&+w!#+vNFx#>iPiug4Y zc8YTp=bU=GJZ{PC*iTI}wl`;MK>iH%Z`|iD;7HqHQ9gB56(S<1+jiC{HedkvxHWYnI2CdATDu%%lFLfu&SR$?VnVX#p%2mwU1Y`vG%@ zoVI9_BL!Aq{r)H-448ry9=d}eDg6i5mw#4Dt-pNn8|p|)>PY(7TyF=?*jZd{YaV9D`#DYcd1?7DxsrSH4?QL?S- z?t^|SWk+n{yq5`W9h^W2BKnMoW#NN9FdKiJXRAWW+WOy1?ny59?B6lu*JCL28{2$e z6+D&0s3z_#9U2QOORAb8i>QtAzC%x;JzcdXvRJ2(l2O2<{)&YTWW{G%HH}-c!Ca_c z5$$bk-C(r53{N!}x@YDIhwOL9Eyv7HF&SwRKGW`d79CvMlkCj*u9gMl0ggZ!Ft2ZyYRTvOY7(u;i>OOkFrNckq2IumPP|?EBfic?1;4PA-iC9~fb9VC8 zg`@gI-NVEMSB_)t6ntJx0%>$Emjp(JtId$4YMqfU#q`Ggox8`5 z17_)U;M(K%^pctKt^-hwMRT*{#g6#pasFX1N$;y6LcG9HQZ5UjpFQ)Y3Mx8{IfEUS z0p+;Q8R~stb}+1D@+o7b_PkiP=8c{kvEj0+$SnQuQ0)uT>vmhwXWQqfKsBt;g<>)q7FS8+DJbw--ZdUOesZ8N_LrT>O-x4eFnCh^!U(F_*%_ zLTdPGxBXVKw6a+`0mb?IKqmD5nhU+g~2LeZSHuBgpg`2d`~vcT$G3@tfcR zl`IQ=P=?0~o8Rtx>l#PauA!w2&^qLNM6w%l8$X`V$^M)gmx%twUwlkKY)W4>%^}gk zHRW`VEkybE2M<6Xh#mw**+?j#W$`9A|NBDYZRm_BAsF*GYN^K9(Az+Uqb{bGBbMRa zTu7U!AGcrIsENA(?+F)|MWgXGDH%>ChwbNW#H!j$rK9fod42fmo72@UZvy|ob`xa7pow2DVfUf!42rGgn7 z!;SB&X32^+3M8vyU&(IcCQ&7ZX7x?;5UnkYfLD^S1-v!M?gR=urM=Hd)&*Sd#O_?> zaQxgfY%zd95WNx%G@9U;ro|IUqsUlaCI6=s7!rYq$a->D=g|{aFoBbR(UG*I?c416yKhNJ{oisZ zI@MX-NyI#U1_6aP)Ov%kLSb^cD+LI-5PVUb59Zu!76?OfD~|AR?-vm+Vkzy@;=n!% zn_c^lx;hqJ>t8cOG~P5l-`)m{eu7*4tOW74boRi@6}eO1C@u0lnA+z|m2Z!8>*)Wg z2Wv{}5rLg~CPt#3;h$2`gO?-Y0t2t`BY~S%vPzZ~gUGqI=hkZFcri9@X=G}$F5V2a z$3`BuK;bP9_E=HC z*b}A8l$l1`=j{?RL_H8ws66K)!M z3;>*T3$KIM@6SDa&SrV6=iBT-<7G*J3rj-k%P|sC%Yz?$tB!SiCp0KjmFimRp6r0W z?e)&ayVlESS#Pz|;1n`&EP}6|(UApe9_RHJNAbAXNe)H#0mXYh&$<31$kLw zYcX_WZ!I>*`mBOK>RYLf+HSz$QrxnmAB!(tGLoohZO;=Gnmg_V@&OS5=tyoLz1)~0 zDZjZIti|5RF^xzVGf^NP#`)ZqqqgV0aZ?L#(;O^BZA3n-U=76^ z$;A_vOo!&HohjpCQO*Ox0z}uTlTsxfSKO1!Dm}Sw{YMD&gYoGcBEPU;{2eR7;DenN z!k}8~AZ_rm&>%E+gy>WXLn@H&$vad0_Bz5SrGsScDwxH7+wvIyTW}Wr?mihYAv&_d zz@D7NO!UN$&_4-(cZR;w-0sXn64z?N0>_GOZzXrnEO(@$)0rhXfmlOo%5s|Iq!3*9 zs`#@e&^JC6U5X)4m_6RfS^yGKyN8jCPqYFoZO*CW7+4px^Q=rzGF4tDk@$qUcYfSb z))dZ#u2$BYKyQI3U^4EVKuZ+hiayil!7iJ1U%Z!^Bc0eRtVE*5I`_v!z>h5THxJ;c zXaICRgQD@^>^a-lnpn~suj#QVf+n#st@MRt>0iWWyxIPf*vl|BHA0X_(M>6j7p%GfM6-t0VJjIku9mF7zH0iolb=}uWd;gXj*Rm@h1$>~=6T5GNGKE+$L07rXtk=wybJW6dg>nc)0*e7SqY zx8zudYRV^m76QQ`NhQNUN}2${m2DC!?vV1ywW3YyU$$|xvQ`V35YO-?MM{l6Rj&@OCf zah3D5?t!wA@l~+cugR4u*)BLpszpcGkrJFm<2EUIV7MEW<}C=szxl`qkWHMZN}I`` zO%j_729$@4K^I|9M`BS_wD>m)Sgs3<9_QI$Msi_DqEDHejvb?sV;1=IPjq(`0QGG< z0&mI1OnX`|-1zVFv2j8(c5LpXekVJ3Rg1q_Rs0Ig;HsTaUhO-(J>{+4T5y~rlz^HKNyIT-|`<8VnC%J#^k9P(ij6kj8b5MjSN0k5GA>al0N*T>6!T- zBW4p#;eJ;8PDa+SflV{B5cxDZ6da~SQJx<_Xaup^yMk9f1C2%u5jVR@W1{)#l*vcx zPdQ6>(m;wqI7-~aK@qO&3YV8+%zZ~ULjV( z`by4@)g7)~=sqhqGh|4-IK!AN_;T%G_|N;dEuL#J;Lu(~DWcb=%5V~A-rnj~o$G1G zK;F~5rr6XQgAM`?WDgzYkZMjJ_Yi-relRBXYfLbX<2Ws+ z#IxR{6wBR`SRT4`8ASYXtwAR#x|}pyDX86`Uq^R3bis89L;=5g)tsk3K#l9Kyp1D6 z)bjP)+pGq&A8G8nL5n&lfn9wh_FwrG-(2iUq@EBDN?$7hAttOe`tl_!;r{@dyKf0x&?QCb9D; zjaA6LXdtrZWwUec(0`{gtii+mwz1Tx%Xe%`3MAPRPAbm^m%mWPKt~S=RY5y3N|DRT 
z)bfuqDW)csiF?f#;p$XUN?Ge14$3oI$S(bcrJ3Wd z4;LXJ{?g~Va6)L#^4B7#2=jKU-M&R)3@Yh9O!G7Dpq?l;vp#9KT3MZqnHxv&0Cvz2 zF8%lz%TUQdNz~oVp1}KU+8Pp69_c-UHL1vdak*Acy<8pbqywgmzP5z{x%r+j_C+$C zTJ7szXB^ATaGa?q(4mPn7km);tD~0xRM{vZCM|RqJe%GR&dXbCq_J2w1f%Q#jwu)c zp!}UFO%4LMQ;kWXNox3D+NL7eGZa+$DYG$;+!CfAM3zYOJ z5=SM;gakhvxYMZr776)Ow0W38h6{Cp4qo>G#X)l?K^@O(eqZMEsWF#C{1@ zgwl{v*P{p>bK3-t&<-v^779Ayy*Ec$s~%<7{zo{PK^OpB8RIC`QK2SG%LaVg#1?-C$5*Q!emTQuekD}e*?Pu~KY@+I=9XAz z@k2y@icQMJRyPctP=IAF+x|m+znfZdUWukLdv`YPwD)bR|7f)F>Y{ok#ma11M6xDI zY&AVMIx@FTO8L%9=fSQ+V-*iCo)|BqPIq=6nb4hHGZ4;Ize^mVqapQGoyg)#^yd(y zzOev;$@W-^`C~N;T-C}VaxH8_+UqFKcP?L4agI>_z1h*hEc?1&HNXrDi9P}bqZ4CV zB%LbNv!c@KFuPH?a27kk(!&wi+$>e?gM61TQ{4_UBzDDz=yk2qS40;W5l>z*tdJfL zUe{x{Ivr<=78kSJACqvwdUE-za)y1i(TFJ=B;u#tSgR8XQ{M&dT1Y}nK*&cXNGcSgdh3=!BoMOG>+pLFNjuB@H!7{E5a%o zc)Hq1S>F#h-%=jOc{HM60HFY2(=)PHp*{ZyT?P`I(JGF6!=)@__3x}hJi!2e8nvfh z&J)LSx0rlMzZ@_w2kJ9aDPZ-WsPuoSOy6XUP$8ve|J;5th2DnW&s2CdwmUⅈphb zo4d(CPY0S?Y~M@Q)MYnYEp(8WNS-dT)`j7-b(`TgoAP8zMv377WH8~dK;UpV0nul) zR1+Ngm10#vgwx))Qr3AB=fCZONU52O7LIxWg5Cs-PX@~6y(pzV$}%zcPQ&G$Uj+-7 zsU~oPu`$tlf8xlLBpEb#jf=0xeZ8H>GXV=;Z?2P?Om=97niD1HOdPqmdng|Z*Siz@ zEL!%Sg-{h;bLnk&>nvdD@r9jS6c{f-P`3%OvSAqX;$Zwa(F*n^A>QYG7Dx8PD$-je*+>1Pf?X5y#MP(#zcKgbUiMB!d-lNg>%Jec zNa%_E?LQls?0B{xR`sZ;^81UHQuq4Do1gz9G)k!0-~%@^Ly5TkN64j!R*mOPZ)Sq! zVQMVTeSkrBP&L|Gcg<~gE_CG>QF^vZUeW4dGn~8U_GXF#RTQc zH-xpF4-vrM0)7lMt(&xa2YX?A>U7}BU^#f6IJ;2mt@90F1alV%XJ4=I48P(u3-A} zY7Mt?QNSoEUfwgsy4-2rSiX>6SF_q_f{z+M=f?<$-s*5vB(O0otCX9sEH!w}G>NFS zCtJ%>Kz4OeSd3ji7c2QfXk`j26XN-Er+zUCV9~2t4o)I2!J@*Vr%^w-BpCD}f(X}E*FnAJqOa>eJ~}PkO#@`{CZgQ{ z=%i(|Nz_Yvuk9%fs>~`2-(d;wVTkvF@AG-r(-7 zn8KRIV#@bX`B*aCgQoeNjH=S)Iki##5qj$7jK~$Dz0E9M!IsJY5z>Fn5Gy_??Lh@& z499dCo`{NJ3I}eMu1x-XQl{LwLy64L$}~~oT|>dGW9dt90zHO_I=aJaFZq>^tBkLt zc;z%|YzpG>k&~(2`LW3q<~<`km{oB&i86Wl)=3CV{7rX07u52|JZ9}M77fF!YyarE z-GamGl1f~9oCSmC$75=sx3&s{a1<;E2YQlkafCV*VmsZVtS&UpdGhJYUw`A}?~5|| z12iJ>ZiC^=dk(e8+R_+(NpyN~IK8H1ElaG<)@&f;qFqPl1Mlm^#{C*a$7qiPe6+eJxZ?^yU)b_E;g-j-XQ}1!tV1yCJn2aDvUcob zND2e{JDS3by!aMbhQFWZkln=a2) zq8DEbN{6e&O#M37>Mcf$B6l5lLrDO!%bdOF`K;|{5O5IGw^6SBBoIUi0*o)B4<+E{ zstTLK%?xsXR5MVqk=mDC*L0Kr`A6s}IfaQ$^b>_sZ(GBzl#;yFSN@GqTXIk z!O`EZwLr&wpvom0BdRTRv9jSH$nEcGhZUCz^TkK;{f&{!fo+c30SgCJD{EVnN$i|% z5W1;$?7TT(m&gP<23cJ;*Dum_D9u=lF1nMh z&uq&BDs~L%Ex7A0zR{~K0g3QKHIAD7B@MBRvcIR5Kro0$p!=gfpifqsYQ%?2g+bHq z*@P9HTk*S2YSqiC2pL{Jm*4mTO&IcQbH*byE^x{@pR70)uGLGuWoUrliwrbM3ALby^Q zfT18cZEqU=y!ydVzP*g(Qm()DKj!FX8Q%CRFYKWPJj#=*2841b`X=au0HFXF<-Blx!kQ$9(io#rB>r`M8dGpAgU6T+3=tC(+M#VZ!1(jZh0^>E2i2TEtQD0AaSSk@r= z_4q3;o4)%q8qr^8eT-S1aU-u992Pu23;N{Xg(Z;F#V`Ds%#+)LBO0!F>w1yL8ej5< zWUIp#f|?{8x!gV+Ek!R|YR+LWrhC+QP-+SSzzKoE$#Oq2K{Qys2ATQwRHR|iFOYvk zyGDDVwL}PrT;o^#%B(|%Bi4#9vSBS$5G=Z2i?5%dLB)xcCqQhdr7>H54X9O;x(cUI zMKQhfr6I?q0ys~*VkO6rbN|0{=t;H>piu1B;Mp+fOUJa{6T(iDrFHt;FC9y4xtoHM z8wz3t(pHh}WR8gJ4D*!@e#_(2m^&E`;h8k5WZL0`U~xdc3@33yucqweBh=C4W79~< zm;qtKOJ*KrID!Q^lmvP=O{=C20p+s-$REI-a?l{E#Y9STb%nq1S93@xuKW*SaWFBt zPVbcVU}>aLvHC!70C|sDD4s%@_$c~<`YRU-I1nmZ-DYbQ=(4iL7Y=eKie}-00f-|M zn|gW~tkY1aCX11-$A$@^RiO!?cCNGJo?QI=6bJ3h`vy6s8_7YLp3-36{FFwgy!JLX zT|_M^8gGOp9ZXVmdabA0Swrrd#r)s-Za7UFbKQ!|XteytG66op$7ZS#e7-Y6+Rv(R z7+3C-q5bfeL|-nlD7ACfBp$yTzP&T*y94@eo3Pcm9M#odUpFst|5jVq$`0GZ# z^23Ca3xsg8P@fI=^ICWR@yWK3N0M3M^3e1Xk%Kn6qI#NBtM8c3hDe5F*y-#KY{BeI zSEqVd_f1C!+xs=~Ur(75|IOvjfeb*};eI7bDU)FRU)FoV;6PWZNtk1Aa8f~XF%FNK z2j`iX(fjtUKPUI!yB`+?&k)2cXx8|=%Agg_#6QEPxRpd715Rk8C1aRns~E(Y^xb48 z9lo%+VObf`2v(Rl8%3P{Vqs2?7{xfvPCf~lN~xN-@3h^~2>5>f`t@r24~bU6f-?BK zGqxM*obm-~*^$WW+R;XjMrk2;1V^mca$ig1&ByYIS_0B^+~xVYI;FOc8J(83h_@W% zFQ(mI7$09K2? 
zXsRpCgwLDUQD2N$C?(P*hQ+tT(m8#I)-3#d=bOSJv9)sl-|_rqDq& zHKFxVfo7i^Qn&!-?B%Lawm(SRQ?BziU?!-^ByleXy2cXv>c85%F6NtZR2y9vjB_}@ z?z*+Xrhx}$4Uvo5ebEl%(yxq&QJ~jiCYL02&{j?-jUu!d)>q+nz#fpm+iG(B7L5$% z#iD92aj?hKiKGk(>MDItZCW+$E20U;0;88YqkW7ph?)vfiV0Fpj)hIfZJ8JkK+dJv z{+dTXgHXXNf|xofo350G)GP*<(mvxeCK{l%5=mD|{r%=S!j-TiJK=;hBHbnueU}I# zSUfB<#SyYLJxE2KNoc0tfd`9)%Qr_BtSX(A?I^S~5QP`gKx3%TM?5>OlcSjkAzANG z8mm;Boe&G!9DC8gX-!?TRvxLQucThWw`tml4NNu!8=n$?II0h)8dNucw#j)}jlhU8 z;b_|qX0y(aR1-N%u8Bb5t1plN{$z&ng-!$WXIs zT#BMpN9-&HOPVmC;s)<}~R=mh7f} zV~o&=M@dtv#!#uCk~S4m2L{75IgnUtNYth*t>&t)|Fxw_#fL#wamID|YfP1d-sLyZ z6oM*aU99;}R!k4uyofo%v5mW9F7$qXSOW%th5|r({lPk?gAFF~7q0vw%)A}iR|!~y z#pKEnvfl#U8abKS>cxTvJq~^x!=vqeIK=`FCdNsgrz}R506jzZFIEjI%dCB-UYU%X7O)at{7pR+A zIjAs#csi#cgupTHuA^TAT0%<8a1J1?vmJY(NBcDVoR4!tK_}mzpq0flZewWQjnBTKEG>7pwl-E=%>2ob-FlG z(r(Je*oCG~8)_ha1o3mDIYsiE^l8+LOR_bS(i#pVgh(!1DJ7MN`dc;z4QTW2TLppFKr_6}YX@rQ4b}|;pr7m9@qFU#TNub*% zTzai92YogshM||pHhPhSG=>H!hGIGpLNXh*&Vu(V&e%|f!v9j_MbY`3mSCV+m1Y=+ zzs^2F(%#CqFN|9dNgAKbLHIpF#Q>rb3kGX4?i?k*YO1*=P72k>TO?7*G#a-cPT`JTU^_h^uYAPfAObNGc zF&oH^70kbJUA1IF`vV;;tj#XXY7!KvV@Kfv?ORJf1+(Nff(R7II!tXQgM*gj1!eOp+y7=QezouUPdfg`x1e(XstxbWyk+obM!#XOi*M z@n}>yjd%&nuy}d!LRUwidgDycjU}h_c_ac%!j~aX5CHZQRoB_JP;pRh_$iJcj$@06 z&TGHoi(T)cZ1ON{2*xfx)g%HJ=wkWG#7&kdk{V(`#pze4s75>}K{Oq&Jbsm~IWj^_ z1)7YlBZl!s6$pm^#^sIzf%8;yb~O0HIzSr(fpBj4NP%yN0)c}WhzCXGB-)76Rq)X* zc9`uWG9+LAPOJd1ZAI=}5}Iv^Z|Gp^jTVI z6zkUh(F!f(PVcC{sQmcSxvAN)w26}wbH(%RyZ$cazX8<^vAgWfrr3q@-&RShuYZgp zgOvhsm7!_HAk8b?>U2g3Vf}TDyFHwI@pSWhb-LTa z?{{j(fgZ!+7SGn+G7B!FeBM^>23Wf2jLo@rA%!LDGRTG1>LTbHn%t;OyH7&>IkR63 zn%wT)fjq;enXQ_W(y>o0!i&ebngKQnicN;fit!Wbl-}Fy$_X&&L?6JX$5Vey? zu;78_8-D_{nhT1*B=lcgE*ANinIwLrv6%LwO}Hcif5ca5+hWizwY2H$kfU6>wA%pf z$XN)J*xCk?jDzK}s{S_V=w6HshZuF{hZOdX2A`C)5MtAP*e#j9r;@{}w=QZVr zB_SxGZV4I0g?ETH- zMnz5{J=9*f9?8#6pN2qrsJ}rw+TE;*0tPyGQ8T+x_5i5q_FGemGC7vt7~INk71~~( z2;>*zXwo@X*zaKQvwYJ`UKtdv(HL6^KPsBo$=JTOKEsd{Iq3(y??Oq-^xJi5B+O6S zmRO4%C=8v=QY;Mg+Rdx{z$btJAQ}t=n3)x|^Vg+VNa8>wTMA1S!UaL5ikO>mKx`l% z4VIHhh4@E$uUX>6RF;{IMM#K#dM2yAyeYceFr9~|oU~%?X3U{<8V(JXz9=GlRGn4} zbFY1pu1=eDO!xmbA5H+ln^e-1G?Z&GM?=f}xP##e9- z^YXD3B}oyRS8vHYnOx@As17?u~`TqUEph?j%X%od-My+wrjw$4w~$%om*lbhe& zskOjUcz*P2f#2ZE_z7r$=rC zpZ_ldtgMkL3I*4pVf|rkBqcdQb)Yc-3t(0`2EnSm|BnS$s*zfjCTMTmWnRS=_B52! 
z1?!j++YbIa3QXf5c7})({1_IDpsKOXIYMyf0co(g$*mR+DFLAPS9 z&?zd{i^bOdB#NS_XaX)CLcg+*h})z~xWn(Uf$SD+CdA%eU`IrA%(qnAu3qkTiQ^sKmXLGGW!>{>RuwN5^0Op#ij z)`|)PjzCadxAEU-sfXN)IC6gR2AyfmG?mxpwgF5y*lHs|aZqHGgh3dDbqx#Hy(U5ud0o4BpeQggB;z^0gvlbebbiSB`@Wad7kT5ikf*GS8mZc#6kCZQ`>_57(hE4Qh)T7ng246d(z2&#qQGcx+qebSN*LGB%!2rM?M< zP~vtmWgF0{J=LU85>w?6dChAb>vrvGGYeR(ns87>!UG+MC^1V^60%t6S)%mdiCyI2 zP!|u7A;G0YU#O%h%2iWlN2bQ1vHRBnZ*>G6n)-%_QCXncIa@<7?Pj7d0*U`jNbEHl z*O4!Z$orpu*%OtQa{a83tk$!<*Lq!sBUH63Q>Z5Y6+9>gdzRrynpFt&Piy|GJZw|g z)x~xw1P@afcowT=+{u}Z@P?>mr#;nVbuB$Z9v|g^K@ePcC%dL`%D)fmzVLK z%OCA?zHpQMlSZA3?fbtmg)g&hns1|YHu%gMAyH5!l#&P-7T*v}NP_GU4wf@AD@7&p zw~gtf>LxDZk^@!Ui0c<*i>xe|N{=qo$(_)S{#pcE5jVEivCdi+H=`!T+-y#;(yCFk zo0c}nC@8Tcs+TB>rx2I}i^%b$9Zr>7_3GqYnHt8HmU*5YsA^6Ckz)WVh&SZ1a6#SH zRrf_)0W|u4TO4}KJV8{`*od%BY~t^-69U1~`mNOZp5*LYb+cl8a$anEela3qN$y@j z(P6oX-2UYE-16p=l_-Qf>2i^jVeF`ujyX6U14v!w*=;izSPFO&nT@c_0N@Y|@F9nU zOiKL-9VOXFgId0gNJ*KR7|4hx=`KxW3Nx=YlaC8 zBqR?Ht+eBFT6uks3JgLSJ|zuqyvZV#R3lKe*-8?a3iM$78KY~#PFVS$hQ8WE_>|IXmUS9SqM+SaLwwZDe%(EBq$FKWf4G=Oy#b?L(s z&XLD=68JC$Jb4mgIO4=Q;}3BnnA&C@Oo4e9*c34@EHw01bRvWtBVS0B$kfm()GJxU z{4qfqOa&!e6`eXGOwk}H6m-?b3k+gH8qYs2`|Bl)nnyug>sSUYb?!S#aP1@35~d_J z-L(Lb+cJo%r9a)SqS8%_BWTfN6^s>xJbVs{F&dxsdpSs1%y$xl_np@I8V5#M&EF6m zFv|}^*+{}&#L>ky^goVva`dP!Jy!xV!M+2?jVnFJ)2A2eRImjwwwBUNq8jK7%-*K+ zNs7Kbi807Mk$f*4wvk6X z>Z6O=Zh4TYEhNG8);6r*R5Rm!hWP=T*U<6`QY{S!4K+39l!nzp9*XEV7|sCVgwYv6 zKfLW>Bds|T=FtC-GYac~(jtRuV)9L*Cu0Vb`(Q?MVyWrwz~4n+GW;J|=BE0@q1XZS z%Jsh{{5h;{3>^I4gW5RS?6@182qYBg?KX+t<9dFqgy()cv~JFI!cE+P+~_F*&}7JqG#ozK~7OBX~2@u*xvkF#sPVMAY$(#b`+v$c z@6W&sqmGZmZyXqn$4n;{M(|JCUbroB?+HxoIFcM%^({1^u{5}(rZnLWzh6SUgc3gbJk^m?d$FAVF`iT= z$nkb*3wn@T_C&niPrACF zfB^;oFfs$IXrz#1R$1QMsotk;%>O(RGbsIxHA(PT{-miISqjs$<82C04Y-pt3%tdv z%KtJ6zTRC98YZ5YC-?DX*&B(G4L2eZq}W~IV7&ADuGV#)7Vm1LQO|a;?mA78kI&!e zQ?qr7PlYJ9*Bn~!I-lNq`JaZG0N@>rv*$G8$!-)I3MNNJsO;x#3O_vu`|+$fB8t8< zQlaF;C8z7A{#uuaxWoSD{ht#QfLUH)5d|<{FO7bx42N#t{^cKOok|lH`JzXvgKx@+ z87^S=GYk>ZLHF3!uzPi@?vA4$Z}C_NAM|#9?Cav9mjhCg{WdZ}HWWCwMPKK?l;Cne zrq0HXp$1s@u!ie;gJy|TgJM8bXZiba_VspHQRC-P@OtYt=>PzU#9)4A=I*77NvnOX zRE9m_5;$nPT7%4b3wu`L^2bLVl;|XI%~w5JEs`V3OA)lJibwhQD-KHT)uGk%HYM>l z!#8$kXe`0 zjXKBd0l=4CyQ5U1Bp^&q$UhJ^da~D&rixh_=E_YaTa6cwa$Ezv&et|h7Ko*jq3B^j zL3FjSgOZWcy0o7aN<94mPOy3+a005T)DQcB5i0XERO(DJp(>xe-janzlWFx~&x@Iy0H}+h@ zWDca}MhY`a$t|Qu`l)eh;c6}3K{-BG3>PR!%UZktavQmxlFq*3472-CF{hN-OE^!Q zhF@l3vrv^}aIuZ6dj3HjV^)5p%xI)(X&bMlS($=H&ZOdOsq@ut6rnXpkH(2dB|?V4 z)S)o9rnbXgTiObsQ3(02HH6pH$v7#6%aY2TrCsF`@WWeX)$UsbZM=|}=RxeA``;y*&%k>VmM(`r7pHwm^`-460=@N4J13iOX6RwdTkDbcp8 z1+R$)Y@SuSc6kCA6A7?~7^ND9qqBhkVHgssGfUa%LD-~C7VXS`ieD9MjGL@bzZ70` z(kUQ5v`E=uvqRIcSnH0B6@$99_L*g%a@Ix@Lx-tnFL`t8#@UWh+}2x3srig#om#9Z zvDEUqk1SC&6N~LYlCRB)Y+TQYFFKP_ZqI`{T->pY*`(lz`fM+PeVGM}<@<*1{9kNx zK}LQr=_D#8+9JuHB#@Apko*h5nj^+#7C~qG-U7uGlPF~t)AhJjN~V=toOLNOb>`CI zI==G#MQshYJkV||cZKO9?u&&W-L){{`9q2THb8KUgtO3#G zETjSz-X7n)QH9Du4~lpFj|R^sn3-}R#ECjnK2X&YU zb-Qs8aOIMch2~7${~>^p)CR|@bjs5TVgP~eUv(BbxSt$ytEP%?>e^`o!Ai|~NWV9O zYli(LB`>r|k3XdW$AXiiMVR|fLx`b3kg5tFlK_e#mFKb4>i8=J6d z)l5D9L$q4=Q(lI!v-L|p*tn(u8t@pnOF!`!x*{0HGr9${{D*)+D$w?$KtVJNNW=4@ z>if#va?Xbkh%Ki^&o)KToeVD)QXj~mAN@o!3W0OtR!D&*|&SqHmADMFGog6WBx+{ePv~N zc(8M*1=;Cnaq`8a3`dWDzJu`&`RS%N{9kRoV!L|sG{-XwX-^!w2%5U0Qq?`&tcfMR zf0j!jy<$W^Pm9m;XR6bIG3}L$%J!=)cYhe_UNd)JreCuzokn#SgPJ5TI@^Kfx25UQ zzo-)d02Eb4VYF=0Uu{?uBNp`2{iU(!MQ__W`=h6m>5vE9zqk=%Z$mWahuyB%fr;%t1BdUbV^KW8+(*y0|d!hpmS zM-66w%^mi#N~~bWCMgJSV|3g9G=u;UqvBm$gr{%SqSDnhi3msVK39uqeP*%2v!Mwm zl0LZa-2|W9bg_9bCqkxULy002ChYxU74MMvqj7kj4b(Pe|n-hG99X 
zUA1jc(2=5n3TJl97eFxa#1=pPv70>QH-*DiG0*4Qh^78xI*atYcZK2208yg%$A}K~ zu;xEJDB0q(w9`No7yxx+C%*(5+9e9g0v-=yXch#RM?X|lVK?Q&cvcdx+NSSKdTkRP ztztmHuABgA!rsrm{wz1R?Cx2hY~J}2P~xH-OWaLC%HpFJMCf4JzGJF}-knh6?yMiS zoN{EPQUU?P&>^IOmAIwHW+9fnXJLowUny>N=@?dqK!@$*Ua!kxoS_WaZ|IV zcEdY$V&C}3t{+Y)4WUF?uvSNls(|iy3(#D|{kxNFkaN^MR< z(rss%u2&`dXZm#YcE*;$w%?C!iYh*k98z|$Yr{Gui^Sn86n<(2yf_8S_X{NLOg@OA zJtwr;Dr$5s;54l;J~cfwDm2*mwxR&2DG$qEfIbC)zvY*F%N_&m27 zv@TkEAoiin!Y?&=$x#46370WH%4!WH%A|Aw`anO48siaq#IxDpw&_UHUe}_766!*W z*WN7*_^j33Xa`Z_Y?rHe=Q_=&A{pfO(R;71t9ZX;kG|kGgDNJqJ*B6+0H=qJLF5CWuwtu&n*4Rcv>9391V_AR%2lpfcA#0 zBXf#qUJDGIY+`Hyq_l7dhh*rl|DAGyzz;#S(@QKEqf>Z7fr`PHK?)72)<>7v(i-c> z^ce+al-wo&u`cc8ip9?6R%LaF_#0%CqNRAg_2HkD_r{$#v6~U49;23%KL`bjpl_g> zAE-~bz0_k(3VsiC{#lgc)K>B~&0{jHIW7y4>XmWgI!HSB#l5LDM;lmM2^R%Sa5=olM{lAUyymP{%nh0;tt zJVFNbtj!IwO9;NO2a<>vt6{MYNxxnYSrU`MS`LUOg6EF#W!=QzL$MAWrL6uiB3ZE< zc2QcXmSKg*8`3^kSM@k)NDJ^_v3RolX%F!vdDrv3(s8#dmG?uY^`qc1Ao2TpbYBil zaer1)k_>q+`KfSd5-{J$T^Y^oPgiwHVQr8o_qG9U`0e?4J`4*dCPJQ6Gxmha} zyuW?L_*}*n*uhmzGk3|;#2_aKCcyyTR&#m{@xI%$Ar=|ctr4awM>mFidoea!p-x;E zQZpxM;#+xOzA%{uzl))KeuOT4-+zP<0KgsTb(;&`TtgeYOM!@pnA}B^TJ{E)>{7dv zhrG{1O)3*JFqV18f8_)s>Qn>_Iw5}Aa6Sc2d>a(=Le;?sPihDPQw&!P7$Jui1T30o zqeKB?SN9bS|E`^3O(lwB)Sh~WlY}XgY{>WYD;P_Z2kBWRU809vNa}2PY3!$Ogt-{p zM8b$P6-k}IrJo$iNOqZyz!_G1QduKSS+7WWvFm(!D0f?J9xseCcQXrOyt+X-C)z_} zW%whS0bW^hz-QXkFA z&TBmIZkVILy}?OTIV8s5WV~}5BLSsO9}93J%^uy`PuAsY z)Y$kAi#B^!?B%7dp zyY(I{ksachPZWKinfxmcA-6c3f%kue5P+}jvnfja!uk=LgaWj7G1e8r@-{XiHk4=P z4{(l~u~_zP(?PZzBNJA)zp9D5Ldzx9Fx`(hb8Kp^RaSlKbA$Z98KJG@)Rc3Z9M3&L zn{S&gTT-dD zJie-vc`SqVZ%g9v`h=C{h!3waA)eZ#1Q=VDA@=6O5VlwQhDhK}`JE;coJmwA6!l5GaE@0n9_5bDWfaLWekHU zvnW{|B8M?^^*6}vC8o718O_NeyZ;EGeJ$(Lte>9IZE3;MAqrU8N1s$lUI?g)SYcOL zKjdrs%Eob5Ln}BG(4wdih@<=#R8HF|NF*ws@J;!LKWlFpD>`xzI3jg zvg;1$N$O!;TUUB+xSlIHf5me;*loCfad@F)Y|gxs^_^mH27uu*S%qVqzm=MS=jK&; zj}7UwdQXRY>%Y)J1ehJ~HJ!NYp*Q?A^h&aNt6N6@#*|IsL=^q$b|+z~3IQcYv@QBT z`0A0se*N~Y*07i~t9wYLwh5940s!F$0m-k>tSYevC=!i(tTHTPWqS0%2vLmk3W0*@ zL5Y^@=r_7F>Gxf@@pY`{Q9&G^31>dR8yKEf*1{PNKmWM(b`2wWwg{o$8s<;gL(#F# zbV%3xcno}907(-d&ykRb=1zY{m$|AR^@MX*68Nw#3aj6Zvl&kZGv|b;&&!WJ=AE&j z&Vq$Ej7psw-Q=3~nf^zJ6aehvnO~&QK+%%e@D;F1j#iP;kjmAZ*CM2j zy~{w!;vOag<$$z)fM|||G~+m(_hWm9S5pvw|UBp?8DDIr)hSu>)ScS+XDHmzgE1FQ2kQZ4+{k>$xk1Pai1F~02wNh$51esWnushx-jK* zi}VOWss5iSKE*+s_yX|k3322N>8`)DLDGx4xk%T!o@nU~M_}WV4Lx6h0$40ufg$x7 z6((D=OVCbl_n$nkLM5PWkZ9csDFf$$!vO!f(*63HG-PcGNpxuU%gUd*j9=GguES#q ztq4}sJYYrbOX9d>_J~%TPCeM_R*n-ea!O|=bTK=nQqCjTftdsXV+0=m<21{!C+#} zjI|ksS9>q71H;g>eiI_28ed^;{)q< z`_1P3o$XV6m*Jtv|GI2m1>y3l|E}Uwu>W2oNt1+v`XFAPBoFKePaqfi@ZopOSQK;` z=-IIIr{ce&A?#tRnqlYdl^}SJ7u9K&G~X}|qeY7cE@guLyH|hl-3L8Srn8;We@uq+ z72B1~h+418C=%0eqjZ>hT5)u44n!6XQ6=-0(QZW6s8T)h zjFD=SkpKza@AZ`iKZq_rKh3;<&i=QoFD?fF7O3djMAN;iQ*4NtM2Ev|OKDNn-m(al z(4IWN+3srFO3u8)ylM&AyYsE`IDt;9VSt$%;J5r}VyCs_WObxH25A2V0_44zlsJu{ z+=cxaKZ0xtHpg67c#S0hapTnBjtx!^nl&xo*+63%lIiUO?A;7{nE{_ip4$kHRTmr0 zYU=0x4@~62g%muDWn#?E_zq^7H=IhiN)$?G(vu-vOtEK*7ME2=lmd{uDq>b(PoM^B zP*8Qw2i=tVTtmZLuTagoF35H^m_jTPc83;{lA1TZplEprkAi*w>q{S^q-AZDLV<}= zI-VL>@E$JP!`5Hli;Gy`P;7E$%@uKE(M(WoTeAYnU~mXFiFl0lb#lF)4f9OzJ3SG6 zV+h!0sxT95c+&KBYv(KPshqs&LxF80UwW-#lpUnf3Qvh|e3KTA#S zr`elH*d{O5e4NC*R!aDyWWEu!uR#~${?M?|BY85MmWgy$WRAP{dS!bV`gX<0RTa|ks6{i0~Gw9dH zxdc84@k4c_OLuh{`*T}CR+?_tD+PeEl=N*s5+;K>b8uTHEW}X;?|5OyD#jNPiOM?N zo5ntSYpKp4_2SM)TTyk54Lbp&Z_OJc`BOtR%$nI84H!M@YmV_vx% zUpg%&GZp^Hx0vgwLL1eG5`U_h)he3i|WO)K$! 
z{^(h>LBs$61%!HFKUU(GNi%utod9~`3oCBS*m^a36lNov#!9FBm`TR z=93NbD?sSuSAN1Er%}=7XZ>`w+O9?S9w!yHZEgwXT$z~*%ddda^0}E_@d*FVIbmrY ztP2|C9giEtoM;f~xhIh~Su+jr8B6^?LUyshEhbf-OO93(`08E(3BTytUSWH74KUZr zv(m=`D4Z}>TxUF{TsDB7jjAK2apg&jOYOC=RMbCP={1g=c8~Yzd%C+4)Jq4B73Q5g zW22tcNtYjT$3md_UjQ;{u23t|GY;$s5&8FLu~{I(Q!z>}>iy7ldwTxHr*F?aM{;MF zVVNk4beVCpQeJ@CzqV)SfY%47fU8I!^Y@MTHl()tynD7tg^g&}ZQ!0&Q`Z0X zk1Oo;qyJ3nZ0U11>%r@7{v<>iP?p4M&VOsgkpgtLA%duSzJ+-W z8YK7W$ik|$nzBS)?rYV6%%w66AC~?44<(Ab0f28R3u=fAmD*K&hytP=G2>ElQmvYM zw)y8Lk2Ic?$Qle|fu+mk4=dzlWD|_Fdylftt{lDhIz1i1p!xoL|BW`EkI(WqCN8HX zrd`TmEg?q6jMelpqw5QBsy`Byx+a30l|j7x{XEa$$OXuag0kBf}^rUX6W#y1Hr8t^xZS`k9Ln$ zt6ZJ&TvZ4}KH-9_lM^>HQ46#=HfcGna=l^Nes5K$KlOhrEsdfIKJRVVnhWLWxARn0 zqV>yK?CJw(W-F0NKe7l6wQ`qxmdP`LNgWKww)Xu*3SmpP8oB)Y7-eRQB7D@JVIrey zos`G*uNMqYM&t1LcHK;QI@_X^Na$xo|4GOP031=(R*6&FGL_6ha~<&jd5s?;v> zucBxFl2CjDHj?_P`i=dMuat_Dggviq^H9@!Ar&+0Yr5((KA*P_rt)r)W`UK?TA9RD z1zVqkR4MRF*ddcH?XmifS-gn+3;- zo&3OWpsXLh8mVNj1|M1V8>u^epGz;(7w2N~>jg*f=PG_9dOC5mVe())vLowoROtq= zXSIV|B_xo;_v&QptzulIWGLh9r+t1&hW2w8uG8iwCQ+1N@NRO?W6D?Yz4~z1eifC& zS8pO*TKAx{kBQ%2wdHJtFNA6*Q8hv}A#^#jMr}thxA8PH~>S&pSih={?(La8QUv*uh?B0^5mYU18r_Hu(&Dky$QL?8AN?WvnLSnu4%`RH@$>gEi3!iTle5&ptBys5$hSEE~ ze~Oj`HxbcB)|el1$N=GBOZkF59eiWH|3}Cd0DM$YmpQ}yX#~eREl@lfn}z+A+J0ve zTY1_RQT07vMRi8~J^EXU8iVw59umRYXbUwHS67-A!OF@NHPcQ`72Be(%c!pzbB$xY z?E9CqK3x1L(RrPs=2CPKDgN()h4Me7kyNd6NE|FwD>4^m7L$TbOl2*i2u%?ylWr0Q z;xaMl1)l`F2S6QNI@dsO!;8Ai>C6338^>MCB(%}EG= zwDj>dc#|4fxT3()B17e@C!v*Qu`0Lpf91VG^jj@&=l*l0FxO`273kk>f=vqOjB@96Z66}YRCB(M zXDk(ndwyKS=?U(XpMyT{`mOx)#~;rG%J5gm2TB1*Q6@|{r)FmdAPdzfN>A)zyvr)( zE{gQ3ur?CZY7?|zru^5)_sVZdw%l*j+W!&q1OV40mSoNa=#6P*4NM#&5NJFUs9Nt> ze5uc=AEQ2_&slY)$?O0JXMRFyX1zbJ`>I{s%lfc!lwP`aC%9IYDAQQ%1ZzPc$*L;R zYYnt5>5$S5=}rL$l6NsyK2^F@R&p1@IF=-6+88v~BI1SMOm6JX^w`#2X(!!eY@q4n z=%Tykq*v*9$r;|m!&SLq$vy0?9&X+-PA|0)2o&;)$|0$PwhgtLe5&^|VG#(45$!QS zWD#V`5U1Al<&)V1zN}`IoR4Vg1mNj#LhK|rHXY7cLnC9ue$`O0wX0vPVYehnNcebq z%@w9ne;IojD7*Axp|dRe;k3Kdul_vzd46S-rFdwT@51A4!#a&SaZiUd@Hqewk?G8r z?4Y0nlEbkVdOLSdzXK3o2@;ERb3~%`q%36VD@-ld>V4(WV9pSbB}>ZA*hi?I9+=cC zB!E(jcN18=8GKcg0h*qoSLH4JwZC^g+P$u*81s42UD0G-trm{_VMwD@)^Yqx^nL?3 z$Y*3wnlP=Bw8KSVUnMCBY+=3ZMMEhl|0CoT3N9iu&xfqGVkSdNWl@nH3o9i%+NQ!2 zTt#aUmiI&3gHx$c`)cS!I&RzDXkdQgqs#&ljQ3VMdl=YuId>n;cH-b0dJn ziA6@XYGxubg7e=kPm_^)B!EG7CqJ3uan5YK^FS7#2&1GX8x5r#?uM&X6!GN_Y}g-3 zdP>%tp`(^GkhJ5(=x;`5e6J}ssD!b8TSCamtS=z`!qZf!yIWq|P40~#KHmHnqfeX# zZAwiNk0;&hav=sWfZKj!hX-dO>qZs}`4_JB1gca9!$r+WtPOnjp+?m^8sD)dRr)q4 zbk=q`IH~`qEQS{gA5?tor2BD-q8h@o=+#R&SsJ|$ZF9CQpFFsjn9#OQkL$OF)ru1X zp;b|404k+Mt8`>j4;!5MU!~5}W@52AL%%(pHok8d{fg;cw__n?9+CIg(-b&*f>DOm`39-?{;sGlA5l-)v?KL$W2bU705K|FyiKR% zih3nC6y7g1O7l79hc?n0a4sM9wHMK*-v0Fah34S9A79*jF6Tlz+?ic6?@ep%?qPn> zfCAokPdXG3KH$&9^DCG@#Q+RA*UyCe!_ay@;$^jwJqfF8DS_*0Qf8Ll(P@Mu0oo)O zJ?hVf^f)B~ap3GLU(p5QgMDclTpMjhYqDOTlDPmJ&t9G3XRl-5k>DJKWEGT4>BCaV z(g)i;X_I(W;7=7Un0)B3Tw1d$sUfoYkH?>+ZU_*{9_Naz={8o{NYN6_LMuG^{p{RDPwD_S)7dfbNI z(qX8^;U()j!b__&-~16_V|GhhJU&s$FYS?`1Z8W^?2E%I?Z$X;rOT5gNu#*_H!7FD zXgqWd_G8}5O-|6$<%`q<&v<@`o%tMM5ZY9a+GcSXnu2d6Gz35&;Y3PBzfLrF7Y46* z0#eWW)BQeug}A8X!)*`zHd6E^`bV$y|5~hHSGhl5|Dj9%KD}`{LV@a(*d!?xszFVc zAkx#Lh;}S-+pqF1t79zcA^FjiS5jcsga=ymZ|vP&aH`yN7J|31HX%|=+c@CTF6gtM zX`+S6O#qGsuTYq^1hthVFE}72&QOcjca0ORJ)CK=*Ex_*@7HSLX(?jW#+YME+*lZtpQFfaem~u@KETHPlrNwPp>;Te4qV%#m`)#*47hO z!z!5}ZLFkb z-)eNf*fA|Na|{%xGsF-0=9~-Jnj(No@}(hjw?J?cmDST*{w*ad-Dm-Y^0+h!Dfxk0 zR1QL`u%!F}DFuRXuAP)Ds1^VICH8Q>)E~xBmR7H53S%A2zml5G7raA5s+ucpe?o>M ziLpO^uHAhMYeM zog+QvKZgM<#7*BtY6w;WK0!vimEbwo9-AN{rwR?yE38H3&6VgT<6z+&Nge<2JX3z7#&Hyq!bup7>yaa6-fM=NX)e&K@6tMAq|lYZ 
zC`eyyfB(CXAzB@`WAe1@py-@U=BTMUKI(%X$!mBTqpwp9(qy?_L1|ki`|?;Hv6Re2 z_|Uk-mQEl0i$bcHO0zqSnRkehPp-;I`!E&?Kp`|)*+A@6ETJSbARa5}TxJH`?{X@o7ddtdx=sZ1`qvowoDdo~Rw1rs;VhQO+mY%VR0j0&{@zAu;vq#I+$ zo8r;t~qymm~-Q zAbg+JR|$QyLv&`((6W`IiZfQW>6js+bW;YK;N}hH4>kI7#m=XF>V~gX9*}RAB%!;{Gy0u%X0Y+k`H4twab{TkzZONzwzP?)==I*8|HKDb1MY! zR6lgH#6Hl6z0es3Gh${ElIcF(Nv>i>Pfa#IBq>Io>n!BeNmO7nfOHe)lUT}@70%m+ z98Vvdn!sQU0Dz7Oxi^J!#@NXtl$H@DW3eNCx>^(4kxUI)T(GFEtmJ4>S@&qV4!e(nG?Zc39JvgDXQ5p{emLh~_VuLUXz z-2+L8EV&Y6V#D=t*U>cNeB(CT0)aw(NLqqGsTHQTnKI?jz=M!@Ud zwCs)I(qQ=;&;us|)QZ1gy9(-usvQa>vo^2C7XD$cjVA51NmAecd1P0Sa@3L^C?`^L zblv&o{F?PVU83UTAcEuj(e4ObW0I=UtD>xnA1$OimS9sTlUk~CMn|k!F zjg$iF#O)g9{Rh(&1U=lP?~ki_>p7m-`YW0Z6W+H#Yx{|Iqf4;wV2)V^lutGxtTj&A}T6B!laHDyxcjlI=n~nG<6NRP%K&OZW zQdkk@7cF+Xd0)jHb_pij?U#HQ?4@ik*oz21MNo7_AhfQSutI*|M07#uL?BT@m#^~e zp4)914T-_bGKd~@IMx;$z#k8MX)u+4IJ&}Y*?)&WpUy#IUYyoPAzf0mH#JZ53RvWN zi9%Zp&XKQ%(?8^X(f|H@CP*~%5!KUB@N8r0xQjA=ornc12L{L!C9w}bYNF|@HaEi< zSk8=Bm_WBTynb&AX(uL_*H{VosW$$t`d51LHQayw>lzH3!84^!M+SBrJKZv&F2o!D zBveV^5x!VJhxFmU@eC@mddjd3J1O|maaljq^h`5J#M%y(Ny*mlq=zfrcyl7Kw-qBq z5z&eF*}vg^d9ri#t-o|JR~DOr4ORYA4kI4rzmy*mcTIqg?m@gdWLd>ruprIXlTw2`ewAFA_gK z1$2JL8x^<47smy;s~Pf$4Bx3FT$TEXR4B}d$o-F_t88ndQKA8YTX3hiLkSLr7I$|q z?ydz&ad+3^?ylv<-QBgg7cH8b-Vb^51NQ99?97q7n8>$9C%1Gyc;687%#dOES@4L} zZW5*`P$du*v99H^sF%r?^lO{*QJDt5FlGzrSuZDkPOcckdm;Lug8uR&LkUN}QU;gJ z#sF+`fC!V71ajYDc!GEl*W}Fws;snjoPDP)t;{(}zcd%_D4=3_<2vGexJclxfuK&D zh=EY&p?nW70K`5pQY0xyy)U$Vu`hH7$N|7`wIJ19xqW4n*Ke^DJOf)agfW_Ohny}% z)+1)gL&R8M##2TOb~KR@*2avXF6gqP^gqB3IiZ=}%~}OTgD_F#2o8v7yW7Z=dbG5eJ&H8!JrlN~gpsaX^{j6#4;c>p}<^BGAR*d8*&ojF|%CY3K| z8$&oygDowPC|M|QA<1%CflZnm;$+@>kfdSz7VwHM^bbELfiz(Csc~d7fGZ-hlR0$> zZVd^TC0%>8E{U!=ET26Z85EQz*i6x671=<<&+(8&j7CFdqpaT(+0rGoMPU?gQ0W>J zSJoF(5Qwqy`EnyyBGY#b@>LevF8&?F&{eoOnzrSIqbzhW-P4+F+XS&ZtQJNr!kngl zAQhy4yC8BhJfCKoQQD=^n=Oks5-t-*ViN_#km_2Pj~AQ%PNw3<*?(^w#xnHLn&X`A zt{BN+&B(8|5E5o#i30d(e}{Bz~Pl|oZp$+tGOQ{}Oat8Ep3gaHM;-B4y|L`Qn} zL)Fe?0jiWhsUDC{gH0A&!c9PiD zE{jkl6`99Q-NYWvV6mdalK$o<#?b!9!cy5H74;foR?}1u#!PnB_a`nlaR!QmE zvGqy$JVWoi-S_LdJJaSDd#S?LNQf~Ov_Sxb8CUK;w^~rMUcke7f{k?(r?mTG1OFHm z_?O;Wr^gTMx*{;UH^L@YX&#Q)dLb6$l6)dDn+mG=^#A^a08EgUht{9EaJmQ&^UxwJ z`&qDjk!J4!6hCR1AaL<|*K2p%IF&I55(a%RP}andRwaMMx}bL^sMIiG< zU}GSQ*m^L$d&;yUL`>6@k?6w}-2X8IqEO|)6AJ@!K%&h;`3Ufx2Vznq1(D(Trib-& z<_^V=g8?nWe~c(^D%ocg`O3I3*by`v#VCmhb#?=F#v~Z9x0UWPc^O*J?Phdx+RE^Y z6=D*Lh%M6aF(Xxe88GD&97q;u*>5lEOtbuD)%Gmj&nq00;wOw1l-&($UgpG(wP7!4 zXp5UUUW%0s7;B3*dfTmc{;jUC*ssa(Cu>VN5!4j2Vt~e$+1hzg!!8T}*t%`D3Ix>j zuPYz`DAv>eYUhS3+gT|$GZRnn{rxI)_gu-#V#dY6yG7Fx9CADpR+u}LtNd&;e!mL5L2{=4kBQFfZ6b%HX+pW;rZBxkSG zqTO1CGHRu1QIhi(h4EmDoWgtPxuIln;To)R{gvis~&jx?PkM!VTkpN9jhwh>A zU(s>AcONmyHhX^0I~}n|1uqCQ-nHvb;nTu(ja$jf@2l@7iiVb5mSuQMrR^(QbQTp> z=J-802+a<*wWeMN5Eup4kgh-=PYOq2{t>U^WmXdtoq#N~^&iBC&{5l4c?fhNa*Rt0$V ze8SCynt1)|8_9c&BjmtOlTmgXBGvsQ;CI*he6{~0Q?wzE0)XtK@OW|Wi~AostrE4c zE?v`zXJYT7+4D5I-6tA;Y@fF=MI<3N>^$Y|bHr)7bYZ(LKPBPQbXuqaFWA!bTTVGtW7 zM1}`_=!%<5p6wi>*GT<#>)ILPo4u_y5oef_l(M5$+dl)KNM;=-2#ekw+{l`k79(_( z4G*3n6z;JQF`DZweUoiBqp%Xk&8n{T=*Pm0r3fh{|A(Jve~E^0g?UD@0%t{;G^4%- zf}aOmlQfjL)fi>GdsxviJns)t&CFy8aD<{C=c2BHWgs+c+ifM>HI=wfrZhQkGKJW-v5*XnD2Vn``(XQi=# zN1U263q@vf|L{of-=2LrRhHGU49!=3|^y zO-0^pw>Y9mSci+4wHHRLYsxH~v|x*>QKP4=>?eQ~Bz*Y@SYK8vLXJrp(c( zFg9X*@iN8y_o_i5p7q`#J2EB6c@jRWm^y*Cc>a7d8{cetU3{;BC|QWbvP%S@0*D3x z3Db^>lYK~Q?pR#V=o3ESny~N9Ahh!HlD(Dt-+- zCH?{7@wCD0+-$les^#GP)B(pQJ$^6V$T)wT)H!JB0Ga%_(b#`Ik;k4Ms3XN?e^Wi7 zhE0Z*#KP4Fq8-fpc6iL-EJCnT!3M!aViy4r@3u3Sz5U1Cp}r~{5B>rEkgAX(uX-M* zK^zOjyK_Udvy}fvap~e1kVmj3PeK+z5rl|Zzpqxp~SQT@OY 
z%H_m!P+yg&!VXPQ6s)U-PUg$j(cJsv+VUvQr)<};Z}>Q-NRz{%D~nzBK5RcSW?Ri> zwc?*}**GE3eE==_RHKp#vo$G3DNcGmpik779~Oe@RICzNIAmMk`BqBkYUmO zfVpx`FIyJfVXGkcX$Z@rZ4?VO^=PCyFjk@OppPjZD^y(r|K z=T=qOr#f+pb8gGpFaQ7uMN!aUZqIio0z*QkU%;%^7A<-uF&;_rwYalD!CJ$efSH^m zyQZR%(~N@uLXBNJInNzoNg{Og$cH4{xEC(8=5OtYWrv1iSrq)pkH0$_jga zs?ErzURq>f2&0`@G7XS(#D*9TjvV5X6&wm5VEi!DRbpW}cevB7sv;vf&k#`$|I0)n z`gs=995->7)|0$4O6Nar(D~Gw$%wSiZz7k^hZG99;V}prZY}$<+G+9l0*HvRGys>n zgrUqb8&F~B4eB%L0>*0fdv>Qd?GVr(KxEc*_suxz;{u9?4^)?dTvmM-j}y-z%|^af z*Aql|CwPL7AQiudsd0Eh5)Tzsgl%`MO6esHdoc`$98O8mBzW}w!_N)WbdZ0X*-^aC z|5&HOoVoyO<%&h2&LhyFm{z6*W9CPB2aAD$HQJvDo7?0hs*{KP{=5;ZWN12sG(C6(HHXmEpzi;riP0@;KZYNA3wFDCcX zRR^+851l(yO}xbrCM1j&;zXFfN|OjyIBy|Hdt}#rq;l%Ue%x%x`YmG|Zf~%bEOJZ} zd}^Y_RG=`b${mS0#aF|!wH3-$8lUgCY&1Te#esi0B^VUL=P%$KE3l74AaKh9`oO~C z+XJu%ni21r#)V)`p+6Kwoz;Er?*8~-$%i$(`JVY<1|STe0460uZK>4wnn$e;Xe8)m z^FqejnMwJ`H5k9cjl=fM1jSiA99vsCOUAp>y1igGt@5fc$cPc4nkuUpQ%=&FWcHCT z@h25f^5#GnV3Q5IL}U=UF=UFjlEA=l_kA=p={Rn!Kkf|oE@+4E7drIt#vG`Sby;D6 ze!dmj20I+Q7#I&<$)6*iI`y82wZUV;I5_o8Iwd&6E2a4%k<}eoix?&SCTw@lLUu|= zN%Vc=XibZDRa7Gpcgu+9?t}{=!F77`pRDJWFY*`*pe~sV!dN8`U??jli9M{}ueFr$#}Q?jGK(MUa-nwQ5SG}l zKU$O1SZSp4Dcya$iP*ulRQ{l8GKiomy;PhUF!F&|OYJgU*F`9=4**Bl74(p9DNAZl zPX;uK$mdshsxbeQ>Y#W{DY3R9wY~NwT;v7P`nQ{7&#?-#P+oof{>G)tFHkB8vu*p8 z8kus3%nuniKNSxM0Pw<$T9H`(Ec(Shq0AN2agzE#TxvGkP@e5cvUfAA$xB~BYPJ94 z)54!27|#rxM2d1&=^5VId{Lm(T2_*{zp~SX=!$|=991e7MWYsoArP)-1)j8SDjg|> zPiVF>HozUh86`X{6^D)SSW#U2+tbr3e-@jxT=2(>ntsk`7BXOol-ox`hX=9}-K%d( z!Ns+Qer#V4t=3WXe^uE8=-%sx=0WFj0E}~*^ext20s`YiUVzA@J#(xP3n$rL1gcX2 z)&`$!&zA3}SHjQ{X4EtHH+2*1Jqq%`l~VgdGlpcg!jO=AR)HTBcwn zPGmAF-6F3EA%p^~c0a)?8n*;TEPAuMSpPzAfGB{3*uAX%mjpq6yr8h+mtu>t6n{zO zo?!_wb1#_np|zCiKc3R!B`e+b@>0~{e>nqX#TzOFln)2|q0^--dwkuuzY{vs`MoX6 zy82Ecl@E|vi>t2?=#*w-93V^qb+YUv_1p0JWOp&J;}I)?eZg;#S?a$j7aVHq+OX9) zBH*IurLx?fV`^21)6g?EuK*2j*v2B&jb0U}-lrY3sxeyDW1;ZnzGxXcJJM!;D{SVG z8t}H#lw`P3e9cMK^4-4Ax`d?p_|}u~8q}bZJjK^`KlH7Su=(jT5g=wobYCt*PJloo zC|3~o%#=&b`4hUlGGS0~q+&&9N_c$G0Vap$UOIjF{ouHTF1mKsW8L=Oi@~!?gktp?}O0g7RtxC<_v2c92jg-ZV+xli2bs|tM) zYM4x{wCTtRfuWi;NA1MP`0uA^CB@I;x-78S9aV?dj5sQAyoy_>^3Hc8%RRvVbrA*Z zP3=)WR{A&C!dt?0%IW5zpQu%5diV7xfeNy9+X-PN$7M>@94mgR{59`!=O{DI zSa!=)3DSXOba`GNIhhSWe}gD011=%W!r%(>p-Y7USZYdwIipm=`FpGh@57$XHwq6| zXBauHusq_u{*2S!Wob)vb&FYvG{!DmdEM#6KHuEZG%9x!$yJEoV%%Ta4_8s8_y1*W zJ)mFc{r&V6;>TSJxpr#V;Mm3m5Nx2xSnbICWES(^8wlnoIRi|{5oV6+D?Xuw6+2k0 zqkNq^)rI>+rT=BI#WxgXR!xEqRu>&EeP>Z;g8fJm`~&ZO1FG8;%L3 z5=(VQZk)Ygf3rpsPAE0ZPMN0z9G zYB-OUG7el$BzC*wHr=Y1NI1ZMLPxl4vJL4<_cuuU#R2ZI6=(_&IQDfW*5Fel9uaP}Su zwEbcD#*adUN+M!QrSBjwd{&>hf@{NF_Vhuc3}Z08TEVlLpBCC|0BqxSZL%wKtKQZe z6up>z#N(3f+^{T6+D3KaGheGsU&9w%k&u{~aNh2$*6qH;pUd$NOc6WZxKPRuQI18` znqCeb8p`BP5a2CNP4SZxo*wAD$M%N3m%phV?Y>`r$=uAfv^nZNy1b}9H%uA6f;??a zl1)F71+A=?Lx(KqqJ?iH6MK7DM5p3dAF$}T#+$5cC(7Ym2%Gu~y@($h;m0+Q+Jh;kW=@D5qnu?WYyk$+gnm0tMQlAbjv<3Slc~*R*E|LtCe;oUG^5NuB zV#T?{@qD=9e4WLQ>x!AC(=&hDzWcWO?TU3<9$?g%IzAsF!!0$qK#3+;blT`gD7P+t z-t4c2DDfpMXa7u0o^~;i5>1|3_c1d-fA;UyI6S3nlMVXQ->fgo>5mxBLH+%bjHpPW zR=ox=>ASt$z|LcMx`4p7GNfw?;XPH9Uo6Ua0;EhRFcG<2umtugK~!SKPH-6iEr>{T z>5*_Tbc7MQK=cZ;OY`vcY^D|y$1;MqICrtd2c|xsj?hMA3S?_Ql}&GaK8BCymFB?j;SnZ|$zx-NRhKEFT)P%Q3YelsBcDUD>m2Mfn`a5R1hlbfhDL%M zc*=Z-mE$L;?G+c~qtYr(DpU=U_A2ouq{L{jSga3PExWnGAVtQ;Dhy87$jScw&Lt)& zITIsVI$&diL02ZFiUb3=mqc?obtC}aY5R|AF=d=Ob#8csWOD^7v33}S-I9-L&gKmj zUQN0AzUT8qfggMeMzBeU_366m(9JNgUKw{N2 z8Ef9ELlPM^=&k8G2{C}x2Pe97WFH@f9POL3B;mKSG2=mX!#x+$)DoJ9^ElxJY!p zj)snn0k4Z5mI@A*4%6eHF%gBALrBe6PY_GM(+gYz$yQz}bf-tg0w^8E3ywLLGRW=3 zXbCb#=?Xr6_Am`m!J#AcT@HEHd6XpdEody|?dMLLcB&0Iu=Jv%6~jnY5y#_z9LN-e 
z{p(-%p=P-PWBu0L@qs0b?$$v?iSfT#(Th9+{-)B=RuQqY;}YA?Z&qurTui^CTeiA7 z39-Sb%^?_L7r2L^B4J|*P;+SfDOl+8deEH|F7@;8%PJy_!Nu%yRuu*`PA>Y<!U`$22p*NyEKd$uX=PF}5->-2f@END~o9WRpWIPt0&%PIx+jrbV-kOsme)@Q+z$ zf&WN5?7thU+be1fS^MQ0t~R!OAwP|J?8Rg<@zzt~2rVzCTG&*mbgQUDhw+Djg<%~f zjuQq)N)8%HN*$k@g?is(7q%yYuX(arxpeZ|Ku(6Cx}oA!7!+&fVNg+inzGTs%1=@k zrBO8T;=@J3@*o;Sr2-*z8=Q1@&3nOcTli=e>l3wFx>3t}YA=(HX3CDp6k;Ac|AG8uGnR`u!2N_~@Pm*U5e^3S|YBVZ+8)z#?_+S|T!ysMEJgMc= z*mT~?+TZONsWtTcC0%nrnWPmZvKHer&u!&@M1DhM}Y4bZQQTb3@xR~+yb$myr$lLUJ`pnWy?>;Ehymt!2*1j z>OLAh|L}7e3hYrCgR{UFcBPe>H2$rNV?54sQRflSnPkZ@LD6=^11yN>pHF-AoOg1G z(_r$2qe6g5bo#Yfi7l9ORT`qNA3n>z7B#Z;n?N>KC|9v^)Qp=Ya*ZAU(=?)?o{Ryj zqr{|0NX6V|n;4;6++s}{&gjPOoD3VO-Sp)nEr=P`4x?ff*H?xJ?yFv-3%@*93)!kz zRa4t{f!p7}$T-#ZP~4m0O&sroWU|JkSokv3b(Mg{4@Q=-L8xLJEErR{A&VSC!WU?x zNQanECBt1;BacHl1amcMxk}FUX0&l4xkDTAy5W&7G-RD$>nAf(B&%5O=XBa&s3QWf zlx8zcmC@kXa#s}vfe~xCo1zTDF!$@fP3$gAhRa=N!HX{HM9` zhyF6B&X?YeA;-7Fj~AD2H>{|T$z}M@ zq!eEtA%zg0A0jln=YFU6ROC>h94)7db(H%@bw-y4$^qTc?s3|W8rtLGF=t5QBZdsW zi8SDXi>TP|MniDXUWbdg9CCg$YIdR2VC!ET4A^IUl>2Xf1P9PgxDR#{=Dqw{qNY&MiFwvOULpi zEG>{M2VITm|#h6+7}W|G)uVv?o#0x;EB*dD}t9 zTr!_f8Rzf%ho2jO1dN+(k{wx|FEFFRJg7kBX_{01mz&H$EZu!5u|MK}WqdsGyEyR7 z==oO!!$Ji?eWiVt*vdT~_4_)`1*bkb_3EsQKgMDfRcvq@vb0Jb$eebQ{L7aAeQ5&_ zXvMPEhw&TVb9z-NLlg$2F61l=l~0q4QO=0W5Ny$f6A%BBH}BL+BXqNhBGyw)|Ias^ z;dk^3eaUTkNK`f5AZwAQ7{2dfWN~vM&B}euN3YO6JCE=yHnUAHhsfE9A!=F;B@It& zd0D*wqy*#`?>&6+UrtxM3#om)Pq!aT{YtiPP9x78z^ui}8;Y#Gap@Q&w-e z1-CTJJxsI<)vVchFLn`La8|V5+>JeYPOf);glN>#Klr`5)#P}He!9bdha4AU+rk7C z66|RriI>5?qK2^QI87NWU2e(!mR<Er5)ZuNZp2=;zzjPM-8 z2rkYwA4|H%0`}%*fcu8~jj7FR*PIj0DwWOMOlXM*%yZBwPfg+Q(G~q!RoJ1+F96qy zMUFN;E6EJj2nOAqZ?6j(a)T^xNJ+R&K(c4CQZ#67U9C=*!4T-Y~pe-MC{F5{mh@hQHsbr=|%geBo#F(}Y1iM)S2D2D34inoaM?Qc@r zZjtrDOK%-I*P+dtKD4DK6Db={XK!q`#p73V4Qf=Q5Xgm=D^uPcwS_n@+ky{9XzYE( zKgthLVuSIcLyUco!u?%Dk5XFTb>k|@XJTXHV9OrW;~y9-2&L#_x?zRE8b0Pq2i#mo zI-R+sg~$|AT=3)}DVhOy$yDA=DsXlUOC53c%{$4~%X#c7X5|<^sVB-r8Q@9dgMx;& z^LIr<{Nucwu7^;Sq9}_Liq7EWVI6;KmsZ!Bv$Pr{fiP8|#xFqTcLO}HpKVR^J+bj% zJIc(D5ekXr_?EH(vLuUM%=PX1y|(spw49rXf3@Jg{_&W{VJ(hfW^dRic`9F%k1Lr! 
zP0y!QP4Nnqjn=2Fhs4KG&(3EKv54>k0wpC_OGhF@n)DeLkB~JsSXm|d( zykW)&n_oSATftc1i?&6@-V3h@BQMH0L{B>?ZZ~V+jrp_%sEgMd2*GBIOi9>J*V7FN zNQxMm2bO-+zkU@RFqK{~9w^RHj4nXP-Ro$~nUE}<)TO7{P+OFJ8Td2GxjtQg(Mr}D zwnmKrSLHH26`GtGbVWg|!z8LxFuHJUkI&7tut|fbou{jVABPqQ0svKO(O`7^>yPX~ zh_VKzT-6?kzlg6#2W02?S#|Ft^a<#L+?GPM3!1Kt*BFNJS`vd2BL+RtQe87DF{XA& z`WwZXuoG0jkMsP0{<%gFH{+E1mw#$MXkBg`RDeTzKvNCofrTMII7z}7dDoR4CjI-| ztw!5Ej*Vp$)Ko3`D*;QntnQk@jjL&is|>4PUsj03Skp)SUZ6AuNM;1X4Xr#OC?dr> zJ(`J|N&4b^qBKuaTH(^}SwSl=C2L>0cb@JGF11gzh^Q%+%Dx#k5b6GvM%==}6dv(k zlgbNStYu680Ao|wxpWqHkF}W1X_32FVwEM{e(I*qE^l{?BZ~k^RwtPKcKzt?kGv-@ z$<>pxFFG6)POklQot{kwOz0s&)mpc1;V;d0ncei}UP`d108v8AAv`VF!|c6kF~m8Z zPR)U!H>^Y*8ZJK!D-*s>?AXvcYSTO+Wrc=wrY2P!Ei?vJA8K>@kP8l-^lSnD`hwFh zD0AZ7>j*l%c%W!o`>&D=W#{8`;}A4vEJsha4H~X7E$v^d{0@FG z3yVc59p85fxG)DmU+-Fw53@@&!DI*DpPMpb&5JTBeMc|5(%zC3<8qnZ#MCa!CUwcM zNqbVYR900*;H&Qg^bc-MBnw7g8;4C=Zl1n(sIH&@Wb;#hDuB=5SoWMV_$VTTk4?Z7 z+5IW0qRW0obTXy?>dzglcoy#{)IX}o4?ZHs{5TJ%c#*Z5MhhA8lXkTzcIKc*jJ(!t zl8NH`^1|C-hK(n~#=Z+86Tc1Dr6Y()YCP1muq4Cyt{qVJQHH-s(06_aHO`LD{ERuR zqA!_S5zTC9({EtP9C{Fgt%)Bm|2$sOTcXRZO-M*CBTfSvuIvUCC~MAs5DumDSI=(V zRyWW;I~endKG~r{_Wp0ac7I4URTP1|cN3dJ315}j z4^8`V+#}@6^0VLZ53`+oqF}oo78@V{n3Xv6!AAwjduu%XuRn`E(p@0abFZR3ixD_+ ztto=7cWsh9DM8%Qx@sw0Sc3V+3P~!*J6_B7+!0<&U8-91LJlD4i(J&wl6zRioC z+wuteBD;a|*vVK<>@E$!NM@Z`U#7j`pE5Cl4?W5$xM{-M=MuZRJb&0gDuh>}Y zdCJTzQ~sH8k|8zg$aMUMW}&IcEUA{9#=+4oF5lc}@WB`BT(ecYqq26RyJb7A$^Cg=}hZkZ4C2DG_TsxH}hrx=teDArXz7ksAm zu}C;uaE47DEVCss&N>^ijg7r2ki@Q0Cvmd*AiSfJD11$}fQ5|djtb7+hWz@nfV*Q9 zGiG_vhw0P3?GKCHdAg^Trr&}*^}=vd$g@MB%SXwe&UajJSbbKIk5{X(be!<(`sE_@gllX0A6gCDKMm&5gIC!cU!* z=#k?}-%js&$o(!lXFUEb=0E)0A|W8@jKbORQ1~D6dsrVA;hhX}vqI}nCe`sj|DRo) zEy1e~XXaX0qY1(@0JxY*wiKnc!f?LVP#&i6xsANIIb~_PW(~GVHSckZxzbm%T$klv z?q>02>DrXim21Och(ZqnOgT}_|8cb=6&%4a-Od*fKE9vtr||fBLiv#(@KSHd2Z+)i zw;3J7Rm@kYNoxW0c*NKaGdCf5pwh;cxzeJE-1tkCdu&tlSGzjN`Gw2tD+ysr8K<8s zvub+vvu$1GUnaIo#Ur{Tn6*GhG*ydSBolX6mSy``EpBQBmBgnqm4W(cgKEvyq&U9{ z<;>FA0pygP>F}~KmO|a(YBy?z$exX^UY=5k6$x&^k`n0;k<%wGGj|;0;q0*1`U_r? zX(6OU>N<$z6kjcT2LRmAdjt&f?bN_|M~?am;|PboG`SpD@sFuMp9(ek1gO;At7p=a zIa5n%)jHy!2T>ZyUNQEBq1a2mO2YI~{>Q7L#J2+AlxIl&aN% zn?u#2q#KM000RW&mdZvhyDRpou@372s(zL%#vzF`2o|E{ZO7@=ZT==&L!1;bb{

    0?IC@x8=Y{X%by3f>n68quzwBOP3L5-UA9be;$Z|h}=w@A1*x$Ba! zQ>-p3LbvU|VSHlW&1eIYvWh`=Sd18p`y8YuNxR$4j(AvM0E!6i@9?SW*>P~h=NSg| z=&e);14nwp-_n@V^>WSqfB0Dhz{6QhpCOR-n1dNNjRS*0j3i&6`MEHu1V`>cC^K*O zPqrF27m91&t!h_8mm+GJxi==-A@v~v<2Ln(d>K*)zS0mk;eJ{SiIb3@{g4yL`G=Q- zE#V_uF26}3ghfc$DG-1r6aVFcf6jKc1vY6&HUJB6(}L!*B>nmbPh?0~)|}m@Y~6kX6B_(IP`J?-Wlql%uuv>0R>Xko%oqH_ef?HbJBF z!M;_8_A^#bVzTX>#+LKagNK#o6z6}APa7k z64IPkDN(*@3x`E&$xGG!)nezD9sxnMu6a{4$&|8~q~zj0NTRsr%NYt2sfQ+wJRT>} zv~Hx-i+GDM25C^>lQ}e(uj|_r{(Z}f%*Nh$`ha6X~J zOpF|@OC(q^51|mX8ot}E{@f~Hc^BV=W$uejDoNN1O-)8xGoyCE@rW*!ak7I$BVDHw zH2*U^mcd3Yl`a_@F5N_3s2&YPHjPdVBdd=+D209DB?J3#()imltNJ=qO^#)-XLc)} zTQJSiZyc}n^jVz;T4dwEf$j>sGWROk2!0smn#%9@$N?yM)$4BGq9b&cV`yj)T5@#? zx#h&f4I9K7k8k870anLVLjUwTX6JG6P6l$7^4y}%CM7r#jBMyMO9?RvM`nK_ zM*{RanT__>Ikn#{xqJI7zZ!4R;IVi=@F}UCnxgV2H4`+)6)~BeB4ZmlQ58<}7`r4> zI$)ds{x9rd_3^j>2qO(0TY=?_$-+)|`@CQp+fqc#4vdw$m;08qZ~(y`7S->ir(P|c z@JKNA0gm99*q4tTZLWC@{08OEGfGZ9YIH8QrL~Ihk96Ty_GHLn%p?h95gJm3tF!wz zI{N!#p`I>mWtP9X$UcD`=eMdHd8gf;Wz$$KU738dJZ~HSn~vhSDg*2Q03^KR0r17N zw!I(h)9=wMgXECvW*6_e{*u60NRb5^|HVvrKnZzmmGuGD`NQf8D=r(6(LaSzB*XGF zcwt4!6m#uV_SfgO3vlG*xa26apQ;Kw@j=731lKN@gh%Qc`jot*xDyqSLg~#&&su2R z;X&Y|5P%q#WstH%BjV@MXXtm&pk?TG)lT|Z+j`fL!8on?-JA^EYp%^-38YobuZ`TB z1Q(rSWYp@y)pC^xUmDYg^U^yQr?nD&0npvL7#{>WyijnwpBToPSI^0nyl^^^_1U(LQA6{~kc6`~G|DR8H#riPV25qC^75 zk`VardIl`(RMRzYANQRzV@~hu>L}g(R=KI+%)5_)IBuB5pZ@t`Tv!}OKo4CzGe55y zWGs+!h?iFJNnv}E{m-mVH5f5(*c^%<@hjMUHa7)ZI=sB#P~-?sNj9t}EcOc;k3cGZ z3-lzE74s8w!arHyov;gVvqAdAaIqT4kwuMqOKp3LhFpu8y;e=w?6|DA$MT7Ni<)A@ z?D{9G=!Y{&*KEdNWKrWWA)GjktqNEuhB%N~_w8l$wwuNjJjSs2Xy|HaYhGW(ncA7iiE29RIM$OSk#TOan|FO)JpoX__WnM6`7NBY2FoB)@YE zB0)?Y70aH{{7(HxRDlqa@s?_~uXXy}TtvRrt?NzhQep zC*c@}nch}rh0c#*q?H!Hyw2O~ZlW*%Fo)2wCNyPMGfv`%?lQZh>Xm+}j;oA)h)mD+ zY(-CW;z~>to~ykpQEBYmgVX{^gM^0u5w><)4hjs1cz&IGd!V#{@(A>axk!I3hy;cZ zOH3khr^mVxYIl+}{2jKfsCjTb;hBBaP`Ghj=Ce?50ptN-XiD};2n?BDNs&eEUEUG2 z75hI)Bqsw?m+}MV-RZ{X9+=p|+e zw>*~TER#=GJk-9Ft1^;4QrZ>{Qv?9__5kgzL;*SLE}Xvr}7aUgko<_8UJC z*_-Ekka2VLM zPb@h|CH5cs%2Ultq1rFGK|zJlOob2*%T#>{$FoSva^3Sh2-lm+SghQo09X7#W@+QA z==>Rzr?mDApjTkmqQU(u^876#?Rb{=V-&mQiphr{(r=@v%BpmEbE{R@>!q(qmqerG z9IyW=`)q%V6*qvTxUew$K@nFfHVPaG^&vSMVdmW{Q`96RYH0cueb@cb_ye$BoK04`534z$WA#%fspxD;txc!t z7Vq(MhhwL?A%K$+1Jd;#~-%15pAVeXi(kkvmqajTGT`LOB{ zUCWsp7GZpB?cdVwKIK{*QaY>jja=PC4%7??eJIRKDWcsbhG{@jC(jC zc|tdlT-;KI2j~!&i42IOXGq6`V_&M?B!IX){OFFTyFDVEGg zlxv4(lf|{hsf1!}tMFBy1foocT||en0m8}lnGxHKbXFA_LRfwlM(wcYuzUW}v#_?> zMS=*iH>IfXP6@El>o5WIp6Tb_awFK?>rM)%ua7uQ;Xyp`N%O#O<=HBl6M zYN21cCVN)@x+xMH1Aws5wPk%yc5lnN_(f%7xmWcr;rj;sT7VK)C^GT$D0RII4W@PI?F?g?=hCN?C>p0S<@U>bQ6ig0w|K|J?315(6$U=&{1&HzR+H zJD6o8^r$Bxwxt;bp3hQxcKKykNM6NkP)6{~!t2n`ZTHr(Y7jF@*W&^$9CBzd%{ns8 zlcv%`Td-b3br~B^)?lLzk@RFA+1ah+niPy>k&i4;r(#*0^OzHLWRpL?whTth(aCFa zLH)EoPB#9ny5&1gwBsCZ*Y@VUV6yJPI%KWWaq=I2UIU9kE>QnfGIlo!>kc960-T8H zkE}Fq0fm_*GGQ4E!_`~1$P(C%oC=Ygfy17TjXHj_P3t)hpP3nkAhOtb^Wqx+jO#er z!0hZA8qh|?7Qqx_iwi5F;I%2`M!|uE%KY^5Z3`%>Y~jF%MAI(@r(a9$zt83skH$`4 zznk2XJd+U`nUu_y@eHd)#(;)HIbN8Y@ZqxEalc=>EXR>saLmzFPF4pnh-`FPXxyzv=hJ~Zb!#0=71y*^LmxnWiYivh zpun@|Od_xE6c2=mQE^ppor6y$A)8N-A-Ut34c$?YvAV!c2`8OX_rFFhymw;~HvZER~uUa@uZ|Efm$BooS`l;q)^#!|LY^>B_G+p~J7P zTX6V^hY1vLT*CqVSP*<-0CeVoYi_+%@Y*d+n~_#BLJEylF9@ZY{!}F(Y@mZ%;8Jb5 zHW<&8Kh}3ZaBRI}{n-jLuh`fw>-)#x^Z)LqX)tmrgsWHG^w?ibW3jUvmsBvS!ZzCc zxoDz-cG#(U@-0y*=CVsK^NqveW&1Jy8?)Tzla}3RAQ}zHP&k<@6y1wfd)w`1o{YubuW&((uxGZ6i|`Il}}^jIh3n!D_(M!?_gf<+V@q- zgv5EL(aaV^$bwHykGcUtJf%rc2GF2bjpC)}XJ@qk!w)1BzSU*?HzQe31OicnIb9t- ztsKWByC+O$A$@aLD(GW+Oi(f_5*uOKn9XD5xq;d}+v2aqW16it!{o}Klr$ADl(7V` 
zgjQyO^aRcF_|*h1*MKHv@Bm3+Iccts`hvJv%35dl$=#nAyxFg{kQ48YKhjwpSFZ)( z|DGbCQ(mt=3g=3L1(A&_#Btx%(nz*=3dN(+i)7YBMS%5KZ2Girud1#0`WrO`~hv& z^y(4LnU4I>v)I#D=MSr#&~U~E0O4p^1wUWM$c1?-*46C`e~jvE@!W#j9Mu=~GCt>U zW^%s1spu={TIVbD_g=l0N%mpEPoKmN^Mun#--jW53Ht9k#sPe~!Ur6!SMN$$H_8Z|YT{rf~ z3%^6h{aU?5Tq#xqhAN9~dcjcV2Ua!XNxWVb9D#FzW`5cVhQBGDS^K57hla_E*hm{( zr*Xh|^?8XtY9K6pe<8NE${E*mfiSG7@qV%XI88;Dk|YPfUqpftw)a>N1Z=?i$RG*q z+;6E-`|UmMOjXp1el*T|3_6hL{4vR<%uxm23>~HVho9Y0puVt@DOA5~VzTwEtnKO+;(204ZFF65-@z#5xWkij)gx~Fm|B$$_v zWOdp>X#Tqponml$Z$17nR}WW~Dh~CbAJW7d$_yeI6%x=QJh4BUUyE1!{@PQl{#b5N zYFKVsj5Z)&aYy0!7-E}RIV878{OpZ6RlTf=^^<3gl8*J6GtEzIz?SeA*=v$iRvPBYl9fB*p*^2JKMK64@@XP$z=#YS!sMVZZ0W_V#cCem z=VRA*+(~Y{Ptg4BDO@>G;dWZCzbX{;ZKm0x!pQ2bcCc)0Gj^ezJnzjX0<@B62LO-C zA^E9$NpBK7wy9qRMskLu(kslwcmz1LQ(#D`Db>caUZ0(AE>^JT6&MHB1 zU+Vqc7BPuhgwoPS>BH%zgP&%ogcHdnDlH_|f!iA5}=>|YfD3+vZ=y#k`Q+!fsb0A^&tVYI1zBu=#G@}scjP_QPw$fJ6=S^!!Y z*vB&|4(v$ZrmzZ{sNA@Il2sOwKu;Pg)^TXrVDVxZ*uisMRFSGUG%uPSF;JCZ61A|V+DU*T z*B4lh>J`@o)+XE8r2B3x#j7=Y<}`xD`FMyRNxG+(&XQ+udvkine-dhlRG8LzBJB`> zsw#O+VrxoI3NW>g-Pd*ClY{neDLt*+9+M?!casJH5a0rIh5`@{USpzF68P}Y=z9!oZZp8#8 zICOTu&n;J7Q)iJ=|3wUF3`=fH9D7s*DZ4H|NEF^^nyg` zNXnB;HIQxP>8d=#uomSPD9ybziM1vs34;pI@9o@lC6%W+lCVS3II|C*n#G=;dz2LM zChtYbmcf>XG0Mg2%Ch-rG`7g<$*EEIPerrtrbYDNs4RPzezUv!&wuLmI)aJ-MnZ~> zQXrGzaR?TfEnv{n(<6#X7u=XIu##b77l$khI#?BIa#gUt5BxqHn20pJ%ZWp6 ziOEIuPfXZx%3+WjX&!N&HjNabk(m+|O|VdL0SKr@JPDDaTu>u!OcEai6cjASBRCjP zxgi9pj-2|AW()!+&zUgJV3JK>Y=4b*W@K(9x_uS&&_vv5$EC3+D5DMVf`$Bymsbspe4jlHr+QTms&nwPEute&n zbR>Me7maZ!8MQ5$YPC$7D-YXg{(<#2f{w&kZ>-$T%VuBZLG|uU1BYARu|NEF^ z>VhTqNy`gKHXvUmDX3xQOcI>|JB%=N$%>UGX_gv@v@ot>+U50iTA-#0l+`}RYo0=k z2ih$-3QOP zM${tHrxd!h2W~AotmI9-(u^kMFzpA5ESBmBzJiOh>g?9;?EgaeO(YEx zTD#)R$+0f5Y{L9;iaqQRnWsWHh#`U=Ft-vIk6NJ378Y&mPF5M3yK}d$JU$AoEEL=+Ci|rdO9UUL*8BsPES)(fYH};m zi#9S_H4FY}2AbSQRF2ecyOpH_z}k-~=MI@HdJ=M(5hX)cScOehiJy)l@Tsz(wQ2|o zxQaVI9RMlB9;SxGqIIOPhtyUAT1?f{vi(QpDor70_RSS*uYEKaFiI7SzSsDKU;-GG z|NEF^?So{8MNUH|9wcqwnqgQ#ik5zj{CG$@h-j6H{Fk^ra# zr(ewoQm^(912MKJ@sTWIQ95P66*Ui(EsD^{U#InvCUA+trdKMKMItj*oK}iv<%ujdi8C>{9sl6WTM=XernwHhe&l94Hxr`d5XYVOMt_WptG^_sN%83t7Q- zYh2szPGcU-3Q3L~b4)pc=nJW51qwFzy&Co?Xr;{}DuMJ>sT!ykB(IEMaDoDW0097$ z<)?xuqtOeMD$<1IEakA|2>VmX-=bFm$1a?cQ7u*w{~j^13pUiU5l&T~xy0CRfs4pSEI!)6?9e{nOIihI;v=OxHFwHR50n85PHHVrIY6D`V!HM(0xe`yf z+l7Jm6+YpLTv6xd-i44=W)dYzK&FF&n=lqMeX=Q2wlNG%pSAR=OhHWm6}WA%WbDDP z`{52g=h%&M?Yk3B`-szpc0I^%r4?+_%4^0BMPGa?VH}cWGfIwCtg6K_bcB^wZLg+T z5j51cHk2xeXCf+s43IaWm#ef&Kw^3kM3DyMue5&tCh0AaT_cxQHpAiKf(Fg?T*gfS!p27sM&GIf;(iF&-p@x!gJwM zrBkx8as}G5rrIj!vF^55Qj{uYJ+>@dd8h=5WMqN636sw|p#3J^TQ=~v z&qF{cB!DQG8gY$sc6xMx2$TQ{ZnMTj*HLwI!SMWA7%;&~%jEwt1#-{gF=81LW;qx* zaKft61~H?oT*|A7X6s#G*s?@JG}Qrs=Q1rIGcG>B0Dyo>XiR8GHKGE*5yHWt0tH1Z z0EG%EpqNah|NEFk@`@xgL`;(@9gueA`Ke_iq!8SnGVDE&LH3a5^@a`*3{7HE2{?xD zeoK%&LX^ejS^jjS;vP9nrT|jB(xU|lF@PF)1Q5}t#9>67t03cn2~{Iy0ue@3 zhARePOd6AK5?4;P2mvZxau7hQ#m;L_q($Cc4`cMNhKDq_0v)#l$ykeZZY-{L0ETI6 z%2`_QuIa;YEIWN5(6DhTA~U-*DHD{JkIVj4Gh#N0LPDNukH-s%(mKM@-Rx}fgLNuC z`ieJhiDHPEGfcI)*P3@I&E9_ZM{Q~;?r+cnrNp2M4JpKWIvM<|WcpV-2KC~dG;=x3 z!@g_UWX0(`@ZtUA8=rO~4%cAnI2j|OmH)~sJHcYkeSrV~0+lO{D7!6wu$Dfi)Hg|N zz5R(xEyhJ&h4N_?ijkYPATVnsQ0>Ua*utXbZz*#rHNcrnDn&`Xtj#8KP@^S*tD!nM z;Mj80hlhJ-@l>rM?q33`Yco;s`{0LQI`-J7hC>>=JrvdWfB#3Q;eT}i01f~E8Fm+y zZ?{(6yF8a&Q^h9Pr@PN<#Rk@>o8o6p#b~GE^pzXBT{p5?l+&ok;FQN$T*=YtIdb&Z zti5$^NevUr?ZtO18+lw}&qT&uV=f^>cw9%#y*UQw*}_p>1u{A1FtOm*xFfj^+R>kgF=Z~gQ>*nFtoe#Y(@|m1_%LpK>(nLb)jy- zRn9?S?&b$;G#E2R|NEF^|AGeLhsp~~9=38v35H<9vJ$luHH@%z0YQwXu$P(8wWb}Q z+6O|Z^8eUyH)oBBsKTtu!Lh3zzyll(0ZKKAYMr;GnGJtW|3z*k|EPCLfB&a#vegM7 
z0004#JShTsC~~(xF_?8UkK@)AK;rb#*L9-82umX`H>X$Ow?}f3tPCI(~a?Y&-!>hg~GY$>Zw&W~C1bCQ@b=o9)6qUIurdlr-d$XX4#obS|1VsP< z00}PxHX+7nDw*FRz!@(9uftBBs{2E2vbL(yY93?6fcdo~1(xoI*D4{rO*u~b zj_IuJifSSv=tB!0Doj|B$6Ij+lQNkLvSN&KPL35(Cx1uu`>`kKy@=_5^#TI|88b5C zWI!kj-lB|U+)h(PJ|PM~Gw|cXNmIt~btL0MROVRHp9?4`{YOk>#CU==VvX&S#A?fk zh#yx%o>`a3uCx8eacIzph9V(=3MmzXtGzYBB2h#Q5THRi53Y*EMv>wpgc3mx7Ji~y zM#+{wh0n&M%+UDKhGF8Ym?OP!+@Z5$#%xMf)N^;vIilknKtzmL6jI7=jvSBt&)e&K zkiU_F!nyNg*4fagnU8pU&VDFs)I-z*N3sAw026vfm8yPBtKo`h^P*zBbp#?(U4#2G z4o<1q@br0Zl7hxJOWz5x|NEF^^9BUYLd;7nH6UeVxvV`Sq!&4vD6OP<5$8B&^^uCu zV;W6I^#D%yidZ1ZvCDW?3#8^jda6G=R$pAG$hGY5ccr`iJ6B{2hzJw8SI<>9N=UZ9 zf9zLk{X!?TpukO$_BRm3N{8g`nWgUmn(SAX@;2|Avi?Lj{(pZw>4HYe z2zHvy%6N^5;}jI3JhHww?bnq`OX1ts*M+Z78Orw3PPH@HN<_f49oEuZj*l6x3$R znbFi3N6}^J`jhE)s~DoHR+y3&|NEF^=Yj-TI80+nHGpuaiOgYxpb`!NH?0^~0s*=v z^_B{Vsgf*|n|CSKUC|ruyV3tY=so$F$e(qbsL|goR!nj&|NHg5?bSTCd0k4U(OR;; z%#2nEIaCv&3;I64X}Sgg0IvC|V*7y@6Y%@R*s--{;wgIsbtmM_d6x>PIf-X%L@ZLn zZ+jX7ViGk)-^!A9dAbJ$kjwR45TtN4ftKK`G;K*`XTND(0zu`DqmkB+^D{e|6SdSu zXA){(6Qthj%xI~PnNR_t=hDUo@`J~eU{T5fI8X@wOPe0y^P?r_(QTV(djN&OD?i?_ zkP!et1dvLZ6-Z)G90e$^S0O-BP4Aq!V+&H41dSQn_Au@KKZk7aRDHGrA$F!dxYy3Rvu=;Y2YSWahPHtgqdd-b-muGj=Jus{_`6~`cbp( zJKC~#qG;FKr(FM7Gvo|0iD#!3t(ri!+KkW1d>Ju*SiK7ilU@f%D9I%n7=G_)4Eym1B>vo3Dh zRYxI!lHR19ZklR;7~Jli)xkr6Arm%;0O7}r7kP!G^2{L5L0YC;?rhxgg5t@bFv0{9 zH39l33zAoM%3T)Z0C)rx*r1?*5_uQpCgChVLfPtFQ0tPFNaBvnSYXi{RywpG=I-4T z?5r2zoHmtQ_6oQY?J|NEFk?*asDJIpgoMUZ~Z8SP|Zw-P|vGEAh_ zf@~$_#ikCRd|>dlslo(DZ5&ml@N?^vBx{QP>f+TU$6Q3=QoD;g?Q1hH+mTYyYQ|6( zFha^pc-x#kb9Z8$hn+m#W(lHutZ@SD7kdSSuH+6qhRdihqb_WVOVtnful4(?0t^5E z0n~VPFDn;^Rl}<-hR$zcuO!=Jj!Po#2$w=hfg_W(=`yGA7GQYHF#AGtNUbRVN|6w3`2%qo)mTZ0U|O{~yJE!(ZVE5}02Han zK}yqQ5_pX4Y~m%WCb_G7jLszZ`>C(zz-)rr0qktzeckKViSO;8B|;<7NudD?FQwOf zzuQpi9AHjRWIo6M>d?D}EpMbv#9G}^n@FIE9=AOkS!F-}+x*u+0KfoU3WBzDh^+xi zC2>P@9x1$^DXCPA?jhhkb)>qpoYh-H)@W0hTf$wHSkzCQ7)c5c;?@URRp>}8Iy1VA zC5kwx;DVGV#aT$5+Tn+x8>_CWe&YFRj6@EStnWIj?FS;9Hl}aL=2c?4-HKFAb)x-S zYBRNOSEh`bnH78gb<=8VeFVg5UZG^!+<-xj7wxM(cH&YwOSF1>Dsrc(@=clF3^$&*gIrx)lkmV7P>0o;VdD38yxuUwCk;r!xk;Y`?t_0O?<*D3a@|)q* zxBRO~cj;np8se$w>>_f*1pBRxC6l5OD9jEviY?i4|A#kjyEi!9-b-EeS>{W;bd_Ch zR=$yE^M$WcnJV&g`1Q5fuUcE>qqtaC{hOH4@A{}Urr#m4zTZhlOBhL63N&&fJj*$m z*FDJ0MGES!b$*h^5Jkq@S*e3x*b%_2en3JpAOJ!IH86BD2Pz>-JHc?o6#h$P zpw8Yg-G)id0vjiilUMBBdUMDv_(ANO|kJWYwr<(f|NEF^^#TQ3gG|#V9pYxhN!?-N zOcCM_F^m{kf-4|pp_iHwJaemT^x-`vtn$-p%q1VZ8!e|ba_yH`?OpOUi=qDlP^>;e z2p(25Af3%4pJn7;~H zZej6|UR-5d0L-&mc!YYLko79n0#oJ33UbVh|xvbc6>H?-yo*WrOxf4qT=@k+w z_v;oz>%35o!NEn(bn>2(9TM=9)5B@>lVKHaAjC}{}} zJDQxA@;K?Ah)mUGAzbuzY(Kdek`)!A({mkq(0V6#yGokn~1ZlT*#E1T$q1hDS+jA+)ItZ-D z1mJ^{I!>g#;=WTBSoNhv z#z4U$4ld9j#G)#li+d2-$3t)}h;$9R7eY{hkwA0^Cu|B29}7P98KhYz&t@#BY5*ao zmPApcXdAJi3}G?j^V7@P@zx7`mQUI#<}8fW$cwmH8KQN5ke-vmzkm zdOK(gOu{b+Cm2if{5u9Xh6*qq<|fgh7*Tg1aH)O<)sTpy2dv-_!vQq zHGco=qZrfBVFYcK!V-OFd)o~sQD002p9^@Od5uP2emiA{xW1R~n!|NEF^=>i1;KFdpK z9gu9LY7ISOf)&|CFwG=)30e{-afhKOR5MdhT~22hI^Ly?3@9*X&gSlefUJKstw^!| zH zk17opdRJ?xuq;r)$v0pic!;Hz_oRSVBK~O#7@!2*TC3Sm){oG&PxO9cOsWd)Bbh=zn@+AeN z@G|XrMEsfh56hD3H)3mQ#GsZVZ-Ptt7@@`RmI`%dMtoB2e4_R%jlm1<8L z#A``H<)j7!w89L=>k>^t zR@l39PN)ET1Hv_SuhWcjcs*b`5&FzW8Ik6pm}=wW+mlT{yG#y;bWf+AZ3*U}OQzqCX!s#qp6sr7*6gcYQU$cLNx~uvZ2p=A@L2Nq44C z0vJXPL80oUPCZhUJh}0UIBwa`?Jg5}13&G{&ip=XL`7$%%Dz#xZ2y-Iqq;g6P zZOrAyRAi|ut{>8lu9rOoFaYdC5W+K~BlL`6{$g%e6GN7A1Ops+ba(Nt4Mn{rVI=MdQRfQqx45 z$^b=xEG|-U>mqbVM13!Ayqf7syZUKeXa9Hq`o7<-fDixy1>i;^>Zx;cg;ikVfgjSO zTh8y=aOt7NH82qd9IqfLMdrx7%u)rr5rXf=U#Rq^=)npS{XurKas`BdiS_D2R*siI zM9AYU$uN?oC6yIRsLgg{(bjt-1ie2sYmwGLB(aU9(pE+z^HNG*b}`EYON^n0${j;$ 
zewAP~xV%nIQ2C{ENMVn2S%kVE-akHn5OIWh(*E#Y|pg&3|l28FlhI&X(sR8@ITx2 zK!5+mut)(iLn;RS3KHW--Pjy+K{4B^xuTUl?KvueC>&q{5mMLX*@t12O*58dDNznq zjY_hb5D3d)sROMVo{U99J2IU_~7#+g5|~JDcc^Mi$l5w-$KKY$eLV1|NEF^ z@&pC7dd$ly9l~X#IsH9@J`*7KGYq_TL7kgtWb+Ep&L$EmR1YnhZ0EX(%7W7JQ)a>s zI$}m*FZ5zAidLQWY!izvg^hEo*5NQ3D)xU*SJ_=|e*ZoEiU0rz0T2rYkwyb5;YDK2 z&YX8(WfEb`A&E-n7&5jNXdZYSIia=7u&0V(XEA~ZG93g`hUBiU=vAT>Hq8D<`=*tm z6K!F{wPsf%qj(to4pG@n%1@6QA~6O|p+FBdb|fV0K)GvOStE*U1B&qy-zu~PK@b+^nnG8sg^rc197C^|*p~>~R(NHkM z%VDVhEXB(!c9prp3_u7SvbiqlMN5bw=uJdOY6Mum%e5aYe^JA6{NkxHK!z5D;P%t# znTv!Kmsr>8sDuJ&h%zmu6%y?9Ae<} zwWxT}gN)=6Cvc3ANpv^%X-#$pK&3Wu{vM=H(JAzaR-EP2X^mX99^1h$TxUPiVMhi3}KhN&6I;H_OR z5!%$H#hpt`kXYuD5|9zpH(e>2G(; z_ga>6z<$E@dKDn(2d5(sDh z|NEF^@qz`ZSxUo69-wbYs(&%1YZN)hEsZc%fuxVHt=d%4rx~U_d9W&`iunBfHggZA_SE5XV`pH3dI^-7hf29&D|2O} zZ$QxFl}-!dKQoENcxQr;RYVimq=C7sQQMFEDMKx)i{lGf+m)-lK0I2r|g9<|oiB4ey^TStyvDTm<5NUBYs zFJEAKPDLXGfGbivRL4`kfh+~&6y0IFe#CqhlDIg;)o7sC$SWlX}0#l??jLo>L9PPMqt99dW?3 zlD%tT|NEF^=Y$1JMaqj$O_XP(nfyG%vk@q(J4`&(L9q}g^vA6DP+5pNnBAvD##~$z z?Lr0#%8^jBbv?O8J8{LCS*IW}G$T_l**pHaP9tFDEl$xie=fks{`lwI!3ecfM2F=uFYn1ebq5ypojqY!3n zjE`n^vZ+J8#3@bv2u3e$uXu7q87!jByA2wBbTRPECeLMtwI}~jyvs$kY$*|aegJShmVMIwHTXyW?AF)b@oQH zg$UqtK_tmw>4=RJ1s4U8u*nG=h7&cBmKIo;(3rbKvA+ncnNoQocA0Cg!W9d`;@mdw z?iCm(wEwj})o`y0;SE(|f(obE)y`6AL?N*L9N5&Y0(dw~)huG16U083j{*!J))in9ppCgyZ`y(oHCWIZygb5EGSPPfQ#2M9 zsqeBW+goQ@VR0ex7`p<9)o#W5$ukd5RNgE5Kl+Vx503VC+$k^h>Z9FN3L*fQ0000y zyif`g7&moEL2yA2K|vG_O)rW+4d!J*P4(B4^IC2hkF~<5NuHH%-36I{UjRu(1y9p` zBy!d0UUIwAx=e{k3@e--OekoZh&1ep=sr!y$$tgz~}OXY z89t$8clI#`-1iS5U}_wyy$fMMl`ioMMuz9sE3QmRuyAPQD?VqaQd#MBcepG_07@vO zs78j<*zJ9(bCFzU(q} z&b8qc4ydACSuZX@Ot&hop@tNAfa-K)Js<4o^N9b`NDhj?Adre~YK|2u@@G=^y6q6T z1=b@Zrn>!F;bAJqtu}XsNnOO|B3!nP@7Ijn8R6<1K6iLmD&S(7zot+SYl^XO zu_<@DUGnH>oyz7{XH>q@sTj;kRBohZq7QLauoN)YreUo;aX7osR)dsbR~kKP|M5_6 zES2mb?y6b&YrGbLW3D*e>jRSclZQ*SDn3ZDV}Gb(RCzANjRN72@}<7UxLA2%K4A_d zPCcn3n{^4Ktko!)|NEF^*?<9@?vE6ckgzzJ}8=ZR`KZ1%cJNTv4T2_Pd=zXafKM0Z=0Y6EGkE zx*G9q(G~OoiNB9X*GD)+Icevsn-^ zmdPqrWNP#k2%#-$KtuqlP&}RIhGU61w_>n6@f(MCJj@WRY=-ID4_K&39~_}bBt%Cf zB~>*<>JmRmb!Md5IV6hdJCU)ilHn?>!l5X59FowJOiwMFf~@LSai~jW0Ac5-uF$b* zlq(F-x@1(s`QnGsdsRxA#jX>tM^2>lKP7AUkeRb)u)q8uhRS5n3jhFsN>Um}nAQQ-YEFXf|9pdR~s=rY1gIw_e^OEe_MEpH_JZfpS{DHJ#gkEACW%xLc1bRYiO2 zOL+BGUQSePz`Mysy9V)`cr!|FBtvkb7VTOtAwb%G%ngeW+?gTxk( za=-w@PEbJW6!n{9_iz9I^S{qK9%GS{k?N{j*ph}DMyI*(ZXK@A0TlRDh{?%w zuh5CekU?^4MH}<{|35Utm?i-UqycMBpUZQuQI?9FYQX_QUA;it>HrbI000Qm`C3ox zB9g98OQ;6q$tekT>1Lx&+c^;=4;}`loyTiuLF5BrNh`>?ad@g}H8Sw4uymahlR{?a zsLk~$M)|^P?{E2z5QyQi0%CGzi1jX5K_`EGPWG1m%GK+C^%xt!?XConKmY`%7sLg~ zMcBMkkuf8c?M!agr>iMtR`%r%!TvfAGR`)6HwIQ2-PActwVr$4|NEF^=YS<(UQIJe9x!Oc*{o&egb;kUKTMc*No$j5 zA%%{JLYZj0PV|&m8EiHdENo14;Ya$@M202nb@JqB`IVB-zaRQ6V z^K&tVNz9yj^^I29a8ExHU`9D-wsP|}QFBqRY@Giz*y<2OH4y3#K-ZDPh$wVHy^i|rf22SKJ>fu|2vnET;=B7jnxHI4nDxn zv4W&X$x`GNgrBVKNvz;8fC@!9HxZ_PW$)xJakvoNT_~sLAGxRWfD!Q_KUTHbyKwkm zB?;7@<(%L;INLs@>3gD*NI_YTrenE2hGy+dYq!mAz)Z}7;#7EQ^d2l4$s9$uwv?qD zMDU`CvMR<53I-cCM2xbtpsMImyT~+3fVj#~&Rt{($*Vwx<(}{f5CDiEl2eadP4r|m z6R|o~vP`FO^SjHnk$KtZtM8L@8kM%Ys%nQSTG9J-xl7JiY3j(T09sl{H#B~DWc49| zQbV0z9vMpR-^YBnY z@35khF&$fV8fZ_BCiSZRovaTg*w&Q6jH`_EO01OhcBH|&Zw!g3dl9F-|??z~z z2(-l8JRYi|q`!@tUZ}hTM0-~--N^LMmibOE^R;!3eN+0yU)tE{{u#bVIvz<%!cGs8;dg;ksaL$b=I@hmd&G)8t!(j7MA?K z?Qj40`Kp?tSrYo^bWCN!P)Hd7001xhhRjjP{cf4VM5YZ4K;kKJDxc(Hm9-EfG+?Oe zQuef_SmTf(Z)|6eY&QvOviw=LAodTGqd{*5Fx0W3(B~P5rLb z*w;(Bb(Z6}*5mz2WkgdgXZ_XoOO?L%%=G%TKa=D`r`+2j5UR(ubk>XTL|g}M=p|>! 
zyqPVE9dHQ-L;@=1u1`uH8MOBmJuradg#&S?TJ@_kXGt1@hm8smNvPC?%XOBBkVH98 zV^lS#@C1(4F$IM1Intk=G7C6YE^b?6S|^)kgeq|i)}o=Q?Tk$`zc4bOf{qacoUxPH zA|~1wP2 zt-m4z(V~}nk|T^GY+y9X$xFdK9C>MM(_%kzSKD=>|NEF^>;^=HbW7_@6{3YsX^d$C zxKS+OEKRW%0mzc(^_Q9u#`bBpf(N&M_nbJ&%B3n33bE}xt@DU#)YdX}*F43pG?1n* zAyqxX@>Zxy%GSXF+jYJtTg5;D>uLZ1WmgsrO=gI<9X7&E7nC#O`$T0!%Z;w z6*r5{EMiL{Q&OU#gfvk|p~J7AxlmV01Q@(^{;7Zm#E`oLz#xM`gp(d{SleK618A69 z^oIp1S)3}f%rbxg`}7oH_D(Ke)&OPKbcm1u0=4^dqA>JsHz#-o3~iE^%X8cQ`_o7P z0V0horCHkMoDabHEw%Y>6EdY*u^vSO%)aM#=|VzH4km*KCq-e?H0+4^e37-tqR&+0 zZEw@&Q}6zt{wSy=tF(HYr5Poq##X z01=7soPZP^Q5kF~ShaBwC`h25Vr_>f!jOkqGhC0wS_L2q3oqpS<*CG{UAjgPc?ZBme*@mt?L~|NEF^@dE_;Jxdd59uRMc zxv61;h8ac=F08P3L5h;+X@rgtRwpm5t4S#{t@hDQCaxM*rp2wZA!^)*0QJ~z}nQb6C}ZSlNAc07h_TX|G(Y;{X%Cz zCwh=5X%#X60QVl9Qv|92$@Jd0f06&Y?wO&VU;qDK*Q|gC76L3if9eNwOYXWr&6?bbo>1RyrJYJdkX@KQgJ5PS9g`U?zIS0gQOQyYI#_L|S1f7S zf(U`Zu$1VMH)q)s7;d4s#fY*GH)<@agraj##+t31X}@JN!fbARuRG|kz5m|E*6wH4 zzwzYV0K=x5U|iz_0>q8c9C)!>jw^Moimi@!6*OoXc;}TVSEvBs00aiS<{@IXzc5?+ zwgQ4yw}s$?p{ZgsloiKViK{HPlE@k)B1+*UQ{{To_KT$m1jZU+|NEF^;Q|D;ch9p1 z9iUMoY7ISwk`#eQHmxw!k)@YsC4`QMjUj`P|BMMxI06|u9>Y(&iyE;(bV1ch(z6&d zf(zQw3l_;kZj z*^CgY-C3B438CjUC~l`(-KI-N6ht08;=(YfcT0C0D-_a?m3eB|);Itl2+w9~77DAjFnV*o+&z##lx$^iq%P(ASN?4&uFk;l5U_va4YHJNlF^2r_8@ApF&+=bjQ7J&! zp;y5sXe!j2ETO@{ts)#kQe`hyFS&vznHCeWnIFZ13siT#4F<#P_zjJ4)*ow|YaSG?%hX-%mzK`c`G=2JmIO`+|NEF^^aKRc zM@h(QuvVH8c9aLGOj~65*E>S|1XtS!+D7^qM00D**M7+cRMBz|od<-X;_zD35K`c%D zhM5>%24r9=69PksshE>2 zrjgQw1pSrYUYfl0L(yq>L;V8(0z*(yH-95(alNjY$%D00(qpFgy9E7EkIs^udypFA}K|juL z98%_oU~?A}p~nEkfCum?6s(SE6q^|4X2xllW{>!}COw8sRHv#XSFfc|h)N*P08)wg z%78+sO&tJD$RL2cIB;l@2StFNm|={NR-yZ3FyIscLL`yb7@)PbY*0EJ@{^&>ieZz* z55(EjT5y8{k}z38-eGUDOLo zUgS{eCtuo{11&LsaXUq>-&*7sJ;d6Ik8@~RK?ytU5OUAokr5OR`O^XJrDq>gH;+HLk9A0U{E~0s?P7YlauXi zVx&1ev}WTsal>Y2F0w|a3SjOA_QAB0am>Hx>J7*>FbOjP6)~X z2toJ08XW18OViR6bjv4J*OxgzJhW5X(-gCz4DDBASx-+&ztim7N>9(*_1{m~K2$>_ zF!wz{P=-0~a+?2Re)T@p02lxRqk6{xk!Xuf!on6MmeepRB&kGWQ4+2(E$o4uFjz%> zKg=?_3Q}nh{8N=mt>0~N@tGhDG|RPJ(rRSmC>VhNbPLpE-UY6fuGrXZ!fT@NsdFBo z@{ZS>Ta}S6pon4OhfRhMJ;|~LVkL-1xFqPsoflZRU^oL?&%EHu*KB>=O7^Z0KtTWi z0I6m1FG8!XN@-CbFd#DTRd`~&p%Hx>2K5agWU|Kt5(VVUdrDK8%ZNeV;Wi*8?%q_R z5a=eEhmWhJV$lkCYIT*eoBhpPA@BT{U)++T|EW}6R`=?OOBlu#ED?(j z6+1yJ3QC`dwW)>odhKYD-hN2IA(KAgv(n0x}rsU;t=Tax5HrAE)6Z zfS2&k7*JFsbuIX?CTm27vS=#{27t-}#enNLL*YUC6g3xtmoX&Ujw+!LG1Za8-}l<3A3Ul1WBi9(;&E-< z`aHdRf9kaq1&{y~X(Zd3-gTG#rpu0$c?%HaX#+XdC~V#dw1AuPHi55WZOe$AFoLN) zsE|sBYB9q8er~NfVDLypDlVsv76DfgAp!ubST$=B=0)eloIDynEHp(tL3!g^_}{Mg zUm$oLQaap(rA&*0N8YGGwOR%P1{u;#E#Ai*0)sH{6u_ckAPlnyZfwAEQ0fq%n8HCa zVq#dpjkw6<6v<|5WvnR$MG_ehqAa>ugr&Q)MS&s&yK+6jQY+0VapMMrjjSc6MtPY3 zP`u&v8lRnTwbrrpF*`J?5m(&FK1l;JJ00KdnXffObNSY`*DpctA}BCnPUVywwb0J@ zC_=^z%w&Wl-aIp>`FhVR`B+ld_AjZ^0px}%m~Sd;^K2Z4CWB2B4uAkoryFi|wv({s ze6PUvPzO*Z#Q*!4Wc>yO{9;K1i5{|W1FA1!=2;eb#VF1&c*&v?DE#&gjJNkbV|4;H zx7kvfziNY>ln!Rjr9LMM5SFGu02}u5r=>DB@w$#Mf{#pq9g?|^qc{c!8Ra>t zhXsfQkqK0T7Mza7VOI1kUQYASWDd};OX9C9j{~EsOv?2Vh)b}x)#$S`3Uvv` zk~YH;s6jSom#pHMt;Gv)p5!4aUR1E4V0s18pr}?*9u*!g4xc*dA$MNTqH;-1CKT;# zdo#{!eZ^>a&ZqoBjSDiHQM8Co_7dA}i%ry$0bHQz2Noj&u=TbKh_ryH3UZQIZVFET zm?iUFdpQ_!0|iz>vqNI`j$JBc@cU^+S!Vmh^+f`JfN(Gv8eXWWGi4ENi=tAFW6l5n z>~<>MKAU>Is{pwECP9t@(TPDj$=r2_o1iP=5e`exU_js%C}L(EjyC^Rr>x#1odNQ#vY#y4;9~u%giS-cEpRT>xZ(H`4Rta9p%*n*J z_{`VZZLamce5cKIBVo(uGTXJ^zQ6l4Ck{EIo~I4}f2r`rX>H_G*){ceiMwH*uH?1z z<6!^*009j-BZGyO;LN|T5a6G1ZJ0GZ_Qda?flg`{LPs_cHfW|Wndh#FK;#irZfxtW zCJyveVs}Ow)}fBhtpEF%WaS0~hc3%2c}u`$B#ND3>06SF+b(S+)=B1|rm2Pwh+Tjb zA#T+Emo`E>nU1{qn$k1mW%oTw6@0J%{#2|kj-dnq0HPHhAHMY#_H6dorbne}vEtXb 
z$>GaPK>#2C0@IS?J~>_7;fHcS@dV0l9ZbSPDToRJ&~2$eFmcMJBN{YlT4$%+>?Vr| zVRj~3hKi-d3F1$IAC+uyaJ!W2`j?#1SEYj~NL!X<^OE+p0J9L%$`Tej=}X5gCPIWxai;oYRf>)u8E%A_1aD z4+UIEu=a((N)ZNB4Y}6Th8AHiOz|YeN9tYnrZSW3sh+ggW6N1AE*Jm+Wrc4AiZu}d zWMT~+NM@)KgF|(!z`^FgrISr*4OtSrIv8Pn6Z+$?cis3MFEG$yNR8PDfQz#rjlXa? zEUuEvp(&{)kxVG!sSb_m_1RvtDy8e?ewiLO$9X)}=}l(Tm4g-@7js3@5)vRfJ?%1* z>k>R%HN7udA*VEA5x`zrmyWMp4JQzX++8TBRy3F(5}cO0k6K|g$A;=13;xYkOg{lv zX++~dwF!eJmHwU?b(#9n&Xq?)Zwqx|Qe8y*!q-#z?*W=>>E|RJt5t>Gmgs9994!Ac zQ;stgBD;BcmD5n52c9}zlK=adMDPSdv^37q=^X%mgo%P>>~<0qxhhO)i$UEUXB~u& zh*beC8yJy0TtMKF! zwZh=xj|QYZCBYd3gaR8^tK z9kAIr3_-J@2a~x8ITHwyTViP7IxyhF8Xu?3l%mTu(zUlK{G9h*lKD5QpFGNUZ&}!k zX%*PbDo)eLVtfrPCjgeZ+tDc4G-XHNd%&ckBv_Ie-g%=uvnfpD`)99a@&p6+hV zzu8+ZqN+@sZRki401yIBX1bk#V&o`n?u?o*@+~u#FDU?W=DeU~!bi9Rsocxbj_OT>$q}-K!%^`bF%O(HHf_4jwC~&Gz z2tt8^VL))%9j%xZERv^{fG{;>Rnl2-BD0RbZ-rQBv8o3nfpT)-}GmzIpfd5ITRw#2;2~*Z=*Fce?*S@d000697*Qbh1*CMA5E<0m3O7Rx_yzFN8cBkI9|(DarWtS` z;Bvu}0v2@J6B$bTb=N!nK7kd~{wcF9maxD=Gk1)}UwDIaX3O5L5X8Brs*5+CoWlL4 zz|H3O0>vwe387|Eya`ex3X_luUjpUcp_10KTeUpMBcxE+Y|(f_B$lkiz0cCxanLH7 z1vtY;Mz~0)GKal853B~jYGp)Y(`tB&^avbpQL9WbS}OWiw672{oE_=NZajBZL*C_av=t zE79H@rm2UG;P*oT?Yc{QUGAB-{%Ua86N-&8t)Wz`TK5}!&D%52GVW85?2yZpCW&PY zEcOsel5uQ4Q0d5Y0$>h>NBWSvTyM2Q7Z>hn6)=Ids(=6h1cJh$h7u~JXvo$K0Tg^N zqE!c*qL!@z3Tc|70$dl+bY%orqmUk6zRd7AHDq0ikj|Oik*%0hU6vT?&q`VAEya+v z?4xrNx2m!TwP`F)T>&wSsX0yBl%#JjK5;3#d66MBGCr(%Ra8{RS1DH}Yg}io*1l_1 znp&{-^{oZSA2Lnvk6W+4if|t9`+q<0zOvkv&d>L{f|smYzWx0+HS&mBwq=AMAQDn> z#$vMBgj!P>k_Akr>0H+3c{~Hf4~X&-M>9Bnr31LjKN%DQ>7I4vjPowkg4qCYdjs}f zx=VtC6wK7{+x!SJoJu2YdqrI*nNS>=)t_VPi8ifau>!zu3k5@tCa`V(@leO!Vrv(2 zi$tv0*pUd{bXjKsU?38knOI~uI?vQVBt&^sbRl>!H<$<}ae_dBVug@EV|Nk+6$B!l zZRO-a?A`)}6$jb_J!r3Wc*s}^mfZ8XEqNchv-#D=<06$@Y9%L$WfbKdqEjlcE)!&z zL$f0)Bb|=WN4Abw=T{B8DiqfJotfyDYAQcYY0X5iXuF;(k}4Wn{wRdC#9S~g`SbLT znR0A5SV6ZEX!Q0J{Bl|k&xeK2mpnbIBh$312Ibg_5){+w&j~zK>WyKpMGoiq0)+t!v!c$-GS0d|9v^4u9$q+yl?G6_t5k&z30008y zw(Sy@MJ>|YQA7zh7+ZlF+NdCDRFb&!V}Vv2Pt=H9M~kq7Fxj!*a*NlbCrkz)-y)Izjpj6QJ%}Lu9KMI06ZPjetgmi2&@1k*K+4oO2v7XvlZ-I23DVl4uj>E+WE*hWD_?+M5*NkSF(T1P zv$_hx1sr+Cn0El7Wu69#Nu)##I_^U#JkTL!)H#7)*kOG(%9iq!$Y@cg#YxCHa?2Jx zu_kuOA&I^W*aXjud5JwN$2JsL@Jv`>3FOlE0fn|erYF|Z`I<6FID|Pn*7|j5l5V9I zmUa_u&ZKA^3=F(Y|LQ;i00gGqN|X{3i%#7jlB0wRkr*Qp4O*OJMp4dMi;XJ#wRp9} zf3Q9o&@Ti9s}hP$vs*lgbrG;#>uq)_%TR;5k&PW7OIF0)k;tA~6Zl{jupq3@M;SXKG4< zl2@-sPE10NW_@N^v-dXPxKKMF)xZ%$$U?pqriGSv=zk9H-@g5su0~X)IO$J!FV5so zQ!JxFY(ln5CYqc z%9Cd=F`^&mkPCsnaai^|PS>YVUR?wr)=3hzVzF~RN^DG8)@LPouB#QhZ}%p$+IzHV zY`j;`Opaz}O!%OI;xZPvY^#!K4-E>)p&r^UeP-$Xlq|Q>Y4OW|Uv z69zaew2?R}`JYN+0E)H-1u)JGa6<-s+bDaC5CO>r@fth`1_3Mv3JgwBoD3pm`2><3 zgAr4=>H%x$I)K*pAfu5u8UbmJ6w~)&VyphFaJNbZf?>OIx%RhKI93}Oq?L%3HtIyh zle4wRjMsA2r{|V&lrbTM=+4jWgoYLvA-@@ehbxnu9yYMUPn0^oh)W$`(99WE??zHV z6SVo9ozoXh9*YBaB7RVv7pCsw3Cnm#p&{sJ)0GN*5M2M~{r~%zWb6b*R(DG~OdXPD zB=-4f(odBn@pm|r)O)j3+i^Y~n z+tWh+xspPq0SJrVLD9#nv-YFa6;wI?|Nrv8|D7xaI#mFG5K>b?l_Tb{7lGZKpm7mhAqP-4{)m+M{cctHz zukG~04dmB_rI(6)1gJB+TPQIH z_>yB{jgXzb+uumieE%b+Vx?@me=J7~#x#VekH^oeLBDsat6p?E;2DVnkvMQR?=<5RE!=9B9kpe{T}`)_Dn^o+Kc`Fe_ZYO@8Klu>*_%T5%S)vyjAq`< znCH0TQLq+xp(hGCk4hUe&9!c!_1cio9QN@L6vGix2rWBcmzBcKY$Gu6(UfH@Y5O! 
z1zO)SLbymSB4a>NJIErPLf)>HSNnhe`bZEr3+y@yqR> z!-ImsN=+++x5tQJ5J+3{LbQ=eQ%t`>!GTjoeF9ndxp~?gw%Yh(l12tD82EAkWUGlR zrAEtcr_Wz;Y%v~-ri4i^1PcyzEpoC& z&Jl#|pW{3eJvX;+Fa~deM6#zd8qzbo(0FGs?Kt1Nk+yge)ac(k&z$K)Ixn5uR#Hm0@1{wkSPA$dvNef;x-NZoU$_ zi4>y}sa*IzvW~EN1Ri0hq|*HKtTuJwVbW#*d({#~ff_Tv(=a>2sQKG3T%RRL@Q)@t9DuCfJI=$bim)6 zo5U0c7y_gydjm)t)GZP=JQbn8?t}I)C5px))NW=`xZLcvjv+tVh3GmkCTB<|JJqq# zl4|f_7=$vYm2amhOI7QjFf;pJ+_NaNX&FcMQeuQga*%gXT0UI`r$0iH{V7|eXFT=u z8Ck)oe~Sv5WJkZMaI;jwZyn0R_(0}mKmUa%o0fn7{{&L~5C8y5l05BB6!W(MEhEuD z-=?rjSWBYGij*6ZASlzLDQ2nB3It0t<=Fun)1K1FvSP(AYIxrI53hmYz)46st^fO& zWaIz?r5sP|3^f2_V_F?OCX*HB7c5N}c!}4QCRvA$7yyWd)5wl793cu+v8rqi!}1P3 zId3S|0!fc(tmw1ihF)E=y$Ed|i@&V~MpU)84czLG!H5C?00!)})n;9)WMe85VlNW* zRd!rCT?*74xdHioJZG{Fkh!+R?GE|_h0+o^Z zG&q20eG1l5lJRe5jtKRn45tXMX+s-gh-+bhlU2oUeaG)4Bgk{;Am%=u@yD#pyO02(^&^AhSq7001>V z1Hcm5gg_o!c3J65UpK>z7M{Nnxt3;6+?>{(U_jI$#+%Y_a<~JH{7Z^M^ zZ(NuUR$=Tn#hT`O*s|9Am;xs-5Cl=9*I@jHa;miSsyK0_cOq{`S6i6ob*7TFDOc%w zH3gZ3hjl9O8iff9D)UUOA{B_430+LuoB^T%0kor=sZhhE^^Zigl|u@r$*)C}l5P1X z(Et0GWb=XrNOsIKNfp3pW|^j8<8~JPlQ9i2w*fSdD75nl(Ap$w-N~RGwVe~OhIeZ^ zWm)zV5K*Ri@X83~Iut4>CJa_l)U~hw_M`oMpZ~va#8pf|O+{5zRRGWc0b+{&8)K5- zEeGU;5r-%YCc~8o8{AE!DQX4M`KY-7` zU;uy;i#fiRG^vsi2@SXy(2Y;q(q&_#AeS*kq#|sJqZaHB9A^Bs?>e>8PJYrK3p9!| z0GjQzQLf2jS>25(`BGkTtM5cK43<0@y#k?;a2OSYO&dIs4df7YvahA6T+@q*fgyK@ zfxx2xPa<8#l3uWzR~c(cQhEwZY2^?(Ay68U^-VjjS+C&=@fGE7OD z!PXSUG13sWcd2}~s0wyB1(ZX1=JbArfP(3a9PYlOf{fjBuUCV!=e#DlQ~Qz)B&OtX2|mnkeup)#So8}I?3f? zCa{#7b+Ak;BYO(8cUa&3pK2}&-3Kgq*v zEzNL4w-zGN`Pe<02aPjRl(^=ynaVWM9Jws>wa#bf>O9b$6q|sG8xs-YN^7rRqoik+ zcRiSL%BmR_Mdg-xjKNn3S{hO7i0t2zJg|Va!w&#lHRK8d#LNy01U7bJjkGmu>Z~tp zE{q_14uv`V+*<`IKz@pHJbNy}Eqz%z%KS|rWes|%CUYxH5{*&^x=I>yZ2S_)DW=NF zXaC!>|(7GcIhfc7eE*=E(G#h#P&z-$+r9 zJas~kxWF9aX%{;jeR!bbwyEcuZ-&@49?weWMcT#sd%aCD00E_CQYjTLnI$3BD6GGh zG!Fn185|J3L*9CDNMevpA=R2^ir8Zaz*43M1hm#3{M?{g9`ri1y`G`lp^d#|lobdd zXJd{nR^~*-YzFnbizZi&ibID@ii8OZo?}qJIH^XMO3%HFdX}d+x~BuGQxa0Q@p%bI z)(vyy1aPSr`$0^2B$Aw@^~_Z>qE*J2*0GheG;fm9h+R(6Tkusg+AC6hc>_<%O0GnogfH zNcbWb3&NR#C&~i%6R-I(lI}xb2hpJ|3_o#zr1SMNLPT<6s>dk+$OaerN(hd0S2|o3AvmZQZ61?T+ZRMU*FhWO8fgKZ{xfPghjx^)PlZ~;LWr8~d zC6?CMi61jWCva>O(wsmbDGej!X^>QJ1^l$2MhQX(VvcGsi*)ghd*Rd@*0e!ynLNJ{cxSXZqSY@hL#r$Nx(k`9V#tM?ty#%J@w>}m z;OP?{?$xtIWPtcuR_GKF!2kQ0WcdI_>3hnHOddjF1Zm1)<35owcSlF0a;Z6rXBd~5 zxV2PW!U4c%hJ_f2KDG(M>qy3^*dqRMNi|nOs{jA@djJ35@9UI6P^i&P*Ka2BtUBeT z^VuYDz0ffo&x^Eoche}E)+3o<(iHq}Ukeik{^Vw7PIBfOXO=TwV?tQV(YW!7;t^j2 zmC;Y?E3tLeiNwplGhl?n&_RQ=L8h?Q##$ztSZA8PQB#Ay-k_1pvF>f2OH?sk+4h-v zY+T7oITl@F4I-d|0v!x3Ii*BmE$1s%UvFhEYHWAvgdgO`mH8$W{$E)B^gRFo2uW>X zRKydo9f-sOxmF{nV&ElliMnLCmFxzV;jb9X%5ubrM*y_Q>JsVk=%BXon?#pYzE|9q zsG#A_Ic){BwM%f8YBRMHe~e>arQ@Bd%_ zub=_|I=EWJyIt$SVvArv3vzYc(m)0l16+YfLAMoG3z`;1GZ?PetCI_t#NOC?1zNFc z#v}(OHBz?rfQqsuxg?|7FvU&!1}wcOS|`uq8-2vr4bZePF#`ty3PXDC$D4t?C533-(~!o(A?-7?KEbb+B0 zCYgtz@MKgl6A3)Qqif6bOQ_^Dh#OJb%5FiL94AMT+#$6F(W(b z;gkTRu}@9Gs`BzBM+IOY10^{=m6}(atU)V01knkM{BO!)y*}ZOgU)0ZmXcJ;uE1LA zXIQg8x3iz0{T|}itCF(1+XbS`oUI}%yO~w`-o0WO+=u)7yYHu3YU@*)H7O&YcBPMG zvfXw46og~-FZ(SXyU?wD|Nlb@eqe*+5C#EB#mMI*#2F5S%K=2Q*+S-z4vcRMmD1(2 zc?4n)8s&%wd9HxwXEabIpo6wJ_|AwUwl<%tGqoDWNDX;Sar9nLvPcg*1#YQG zuH3x8-1jO$F;rnBH%QTy#Po}tH%oaBIRE>YWc>sMv35%nNft12F1hMGB8XAte>F_7 za>1z?C^YnrAX)I*5&hg7LTi)2qJ0PrqIwp z&KX$}YjMwX+59EgexoxGBvHYRWk*)!SPex%qf*#Dv4|elcYu=5b8LkUhA9k`xzm87 zqDi@NV~VEe>5)igu^%p%+=#FJGnnyBqnVcLNeI4j7t&f^OW2E7M(z44gn=WtoLKNi z3Wv_rP*{AvFaJmc08*4}dXf8wlSM93ErKz)1@I2~`f{Z6)f+i&m6W*lpe79DXAM2E zNz}${!LWQ9ktnJhC`k~5F`2gfA#6(JZi^&vMF(YZQ>8I^myK(%h)Tvh zh-g$SUNNGsV!@F?po)OQS28tItdVX>y$cmzmNph14GZ*u1qCHw=Y5qwa_N<>M>TjL 
zMQ5!@1p;1$8<$7kHF%&R6K7VDo7;p073g-?tFuXAbYH(M)T{*NNL*C`2zqhcX)e^J zMTm|0o@{NE%EEIj?gkzWfQ~E$Q&T=Fxit%vfgJHh8%h~ay)+JL_g`h4Pfz`cQM1PI z+eRmUR`n_X006eKT8a{!DcAwSWXzF_Pa~swlKhZJuuQZDFOk7UV9D4ol!+4-n`Ev^ zm>GI0Fy;WZjay}@L`Pt^W3#TuCgL*1s_ti?p#S@rWcLO|9c4?iCLR!gg&C7!Bcu=D z$2Y7Pb^)Fgrm=*Ui1CJz6Zbe%_(ZGgb*a9!&7n6rr~d!f^;_utH+293bb1y!7G0IL z2_(s6n>XbN>92WoK0r4_CW{vB!bFH^0JTt|Ro*gz*1PCY`(TVHv_-}Nu zp4F#UNP%3!s8fzQ0ylte$BALZy-tv5B09sgbYVK*qwMh8Kl*I6zTb+f>+mz5_4won zP!t3QBw@HB05D(zD3G2AnhtQxBtS_CgeI?fDv7kB^U8(tP?a5IAmJPPAg&to=3F>o z#C$^L*}mejG4OTo{*OOXBbZ< z*ubn=dL?Pf%1qfW9;!BmkHDje1DlV*OO*=e_cOj0bTH0~^ALn}2AIU{OzKNrtv#7} z`q=CDZdN;Ch&{fy#%X7>)|(4S<*(+%K3jIo`u7q3ef667?b{FB`#<(J?js9K!_O12 zI(O}JFbRPoJlnR-@1bCjFU&!rDAN>Zp4kwVrT_bwWdDvu2sq3WEHx5qgxS1d zgN7Qx@h(iT*vZQlCB>JT7_2yLb`u7JDCTMff$Yv`7X(8H!(}_BJuKpov?uTxEjxlq z9`OjcqLUyXoSbDD1a`7g%qfadbZI-NA_|6#gQ^dlchfA70KGNyzw~r{*)Rv||G)ax z0Fn@rNG;ao3lg<}?ipdZ6w-X2!mJTVwG%vdaZ#+XV6f_>Ew%%suc!^@&C!9lhr_Pg6*3@FzAJLam%(pY^sSc?-s_whW zm1JT4UF=e(m0P7`r}V#1MxvO2eeYiJVtmAKGRytb<2C$X1^@!6sk{_Rl_AQ6z;fA~ zD~qK<usskkGQpLl$9Nv!t5RjS!t5f5HzExi1JjsBz%FqVS>hyI~?rG zh2<1W2op%<9B0kDDhZLB@gE{yYY}!n+x^#^r)S4=Mrh=Nj(P*DB{wB4uXE~O-fkbe z7ytjyWtTP?2mQYjf6TT@{R@ErG&3PuO3|uN<{M2Zvfzpd;LKF1+fbS*&wwJR9+#yG zR#3!jQMg3ZbD3D&^7aW0q)908mo0DjR5A^+d^$n+2X z-OlLT|G)XK)$;Z#0Du4i83p>O-k6-FJ@=`qUZU@-u)_NEBjQ;`~+kpro z00DIbMo$1r;JAbf9EYi$rF#%*@d?XudV67)d9Oy;y#;lXDx{9&X`oeg-1V3v@VdmT zX4t4Jjdl6FGwDGGPKC)m$CfmSghyY@ylH2t)b}wFAkrj=g5o25oyicW@)2gaG{6Ri zF)H5X>}Tjgj)*ZFX+{;sfGp95x9yfgvLhr4bN32%*&F^bYY6Fm^K%VVv|lni6MiHr^);F{GVt zYJ24rA?EYEOJpSttfn%9K(mP&3yKPs5U%l4r^t&dM*too+oI1!sa3QE7?FBIWxtZ8 zWQYJj2}pyT4k=zw8(eGgcWxPpIFj`8t9}coXiXGgoc2eGL)GBgO#l0sWcY^$3xP{R zOcl~^r1{BVBcKxP3oZ=c%aOwqX5ELH5K!*J>CDIX)il7 zjDm4y18+mjwzP4Uj*uQ&)Mcf=X8Y03XH$@)X#gTD9gHifrMptK2q0N~AB3($1cXo6 zUVR)HiHp4PP?;7WBoZBZ7@ldu<1c{HmbzsCRqBzh~L+i7F^)$f6`wj@8Evf{9WvNKSzO zh+nHcHbgEFG5<6?xq>vILRi7R86>%mXmxs6|0kpQq;N=MP#szy}=e|uiPuQslhzJLK?M(OWH8j-XPE|cJ#ni zMJ>1S#?Oo*R|}d*mX2MlRZTId)Va=iQ=O$8YjII(N^I-Nb5z^ZFuAR;dbhRlb~1bB zj1I71l2!qoa3W!c(?0adIFp|A>ECHJsscy=U;rSL>kV_B*Ae?}DdQQg``M`8Mpy630dcdT?66(ycX<+{P@IucXEKmb4j00AW@C64lP zs!A8=%bcO7J;7%w_}h>aC|f9N@XR?2V?e=Jl8!PeNK4jakpd#n0FcacFaR8%4gdR? zWaa<`eoo1QY)$}orAfVEW1Kk8oSVdATFs1&CH1wm(-~= z)O~~;aXgjtkU>$2p&_*z>$d^B>VNDVFHN{2*fq5id0vn&b`BH_Kf@F^)cPvokdQp zlq4(~V$uvMg$!m!GL(Rd&Ss6a`$eM$0-y;l z)u6=zk-99O#$d)qTEZB7^FT3SK~xi4AXa8X#$ZR0KSrD<TaAl|$P+$=(JYv~h2A`)T$EU+OJ8EFyvVcILr?%7 z2><(-Wat6~lVM2%X&x|q#EG3{hN2U&`6$h>) zz}PwzrGc%~?TUm*3WdbpI*(~#1KtxuRMi-8Y-gL7Ga?bOl9l-l0+S_BWWd7+p1L&) zVlm~x?Ttf&_P($r^htutW>;ZiysW!QCP?zwu0qP|z17F-f#nYthdQ7^yHa`gK-4aJQO z6-WrdAdJXS?`fnt?zZ5}&>aGUrXDK}vp(3AKIRl`VJ!g!YmR!|XaEoZ0#kBF;*7ho zmcl3ja>MeN<3~kSkQV!P~4b@R}7ulC!RDR2z_6 zl}7(aXOSHeqhJ9tQlugcx&tQnADs<{i4kc~lu3ie50p%t+lVAUK7}&GwG<1AEKcUV z_31+{hG`Kk$TfSY%z^8G1{w*+kJBZqkH=uAg4Vkc$tr|Q+^HG(Hwe5L}! 
zDV(ts)&KjLMBRX-RW?lPD;Y388C#Oc{_9B_WQt=s*Cj7t8%Jk{P^o?D~KQI6PbC6(}86{o5JUqOWE&2-_E7?t0 zf)jrY?+$VL|N8d|v=sz^01Tr{yfDE_p$eSLrVS9ZIEh$V!IF%aNbreJC=!;-F+%`C zDY7FCF_BAi3XLHca3lblt@m`qAgTtjpzx^rnj<7GRW}_jTUV4zsO~8D7CJ z(ln|$u^`~-LY2kj^|j9I>OyQ)+BhhwQ1>z0x+$3?vrMd5!b|lE+Gy2~O%a$*(9o3O zxR)M)h`vZ$xhN`+$cFy(G1gMJ1Z7yvm=vNdkgEIV|881r`N*8{mYlNuIT0uT2n0}4m7dbZ+!fVa zFM-x77dP)(N}<%e$LOdmoC?gMvAE%+Dg`F<)*OmvyD!V5H4umIPnQw-i9M5d4guzg z*qpF!8xCIH^U##0!kYK}yk(hoHqtFKtVItjh}%$|uDnU!n;rKcWQ9>mRqn2n5*wyp znSG__`SB;048K3*GzIxT6spXH7u{~BEPnl7ZK;DY1Q?bm0CGx66)NHU*t)EOSAkGJ z?6*UBdiTE;694;{Waokfmq1A~Y&H^UQ1rE<+aCpK8I=gCF<`I zXNQA`J7Qkh*nJYJb4RKG03ZMX%Z7#^W1!VA zKm;Qp8X5)%ocNINI3OXN2*J?ezD5KRb<<5Z(ZXy-GDtiv5@I{1ddEOyFYL>qW?G@E zjP00-DT~6^C~{>1*=VdkoV}@z&U!X+v=*;^w?zUg#>1A|ZDs`JRveai{U@VSygvbwp3(sqSFq#32T=blgNbQT2`%+U^3o7Iy}Cq(>Cs~O zZ=p@-**(NNO@c!+#KHw`dDw~-2^~B$FHNPr zuUu4I^GUhwqL=hAa=;7*QANZ(f7Yv3pLKwNMA=A{`~DOfUa>)^h-4g=4Z(%AFkD&S#pFf7_?ksV!1{CCKerGVq_2@H+?83 zi^A$~(dZz8MvPV^Bwck_Q~%o@FuF!J3>e)hNOw1kR=PnzTIufY?nXLA>F(|ZX^|8V zc=!FiyRQAcb8YvYb3V^qQavr4H@IQc0hmBx67DYw_T7Lf7EmkZzb=ZdwD#Ibu^G0o6@NW%5HlIL9sYPK+n z0PCHI$yGs?utkGZuS;pdaJ4;7Y$Y!rwD6vdFHd`=QL~XQ+t}C9`A;#d=|YifQQL$1 z*Zn#ifeZn)O{qW@;=Dw`{06x-J&M3+aW*{_VIC63Q&lQpuTNbJ$A+*4=n!>9-YIg zG8k>N*p~aol99J7hlhu)N{RizTH#GnG2QX(m*wHDySJV6;t{nEG+Jvdo%usuorf9W zZ6itMg5O)^{bAO>9LZ&Gq}UCD7y|%7DYJ2HiGJ>PnrNwJDfAEO;qWtviXl;Y{5D0O zbfo8z0_QG+}B!7v5T#7l04aS8ErrsCghiL5 zJ`kX3WY+Oe#q)53zznAd%VL_gAY-r^?o@%V*Y(3GD|G;FFBx9OFGGirz!JCk?>`LP zHT4dCQl?z-^z{G((JHD0Nd{}T6u0~!V?jjrPX>@Y1@x;>)BT%&8AmF~1~}Z#22NKK zaUW6yzN!kQ8b1z+Fc>P_HeEcbJ$-!pmHx(){8MPOe}R395iao}wUvF%;VzfgbOO4`L2 znPLpiA3MIja*li9Gv`$v624;i)4^ea%6*F{OJr{ybzgGLbJCTq2n$AsYc%Hg^T_|` zb_Hf}=X>dt;M%k|khG=Q((Gpl(hSRwfLVFF&Xjut^CJ?2kb(zD`U#y9;8_xUEc8Bj z*u1tp@P|C>M3XJ{qdyyJKZ6baGnkx-ldMvcEWjFS*A zNd_Xw;`dB}sX<4R?}u$=)$`ikg+#F4EH0+c>h+vmZqpgU%gL;K zS)%ALu?NI!#F=3$LtBg4k*Qdfn|OKe`+Ue!`mqOxBD#(cjAe#xpXFo~;X;Gw9fSA^ zzyV*-owdyh5+y;-;$EBZU*|em2ny;0kBD4Vw}>o9HRio=)v_9(xpME(kkX-!zkUUH z@v4V^_25=2Q|}q3(6WVlP05grr1}0^Ik1=QAIT@2Tx2b;Cwts}cujPCgD7fLa#L2D zgYN>4-9w@KMImfvM37RgF*J`l7M1;x07UuA7ZZSbApk`Ygt@Snaol+@thAZ)&z_?? zQ64lW>set=(x+Ny+A!C^gcTFCU=6iE2eEVC*s}}olz+i5LXa~L!eVWx;F^mKvx^aj~ zDX}8*JG#4&s8QKf{L0Esh2h+amdg$0oJu=`b!q!`lFy8J(;YNmwsp4Z4p}#k30Y~~ z#>F|6ND)j6OHp&sy4ZHpOSZ1!b zT%L^yx$&3u2>@{)5;GK^N;RaX0GLxH-+oq$#Xbm@0he8b?2)R5zbsgO>oVm*Dv(&g@B{L zg_&&i+b-rQtPAmp6U1FiZ`mEs(Ph7tKF{J>zTHlWZ5M5)ZLZ6MCj^S?w-8Rphe}>? 
zU)G1Ks%`n6c{X*gr{De%T_%VZDAVy~sZrx7k4eJwIHo`#Iy4?73tVCrHlg2ZDGfX> z4Fba64N>X1ts)qvi@I`EUJ~&A?7MVbyBdnGEIZ^~agiuT3xN{VeteUAJBHEZF4AmE zIi*e*Zy)86rm~*k#7lyT@gt}KYkW>l5&&_86`PRbjlvR{y}*S)56uWH3K#k$KMs=R zUX|oZ`?iV2r!UGVjdxDO))!38QFOjPz1vv6QdhVq*U-M<{ZtSn@+h+~q2b0?Mq49> zfHBy=8uJt@;?VsC;&%{lZ_u$vB9#AY84$g_hI3?gV|QeM_RqY#tkpO{@%OXe*N&~j@@u^k^LWjwy_Xhb;gnvC~=J~qSzQvU9(OZ z*i|5FfoiGDZs~fhLmcY9)C*V5k!&fMj6LE*)DP{&0 z4Qev@IJJEY3qu0SNu{OsNrp3p_s0kkbK+}c-&Vj2#^3H<3jMy8^eTJ2X>R)M4?9}s zs%@=}0+1&0DBFHL?^Z&yVanejbH zlBD3x2Kn%cQye)019I0JCd)6A563r@aTWC>%Eg01_1@6S0QoKPN_R+;aPc> z5OLFyNYJkb;(J#PAoY4)HbgaZYKh8VN_q@;an;uAlfg;0MwaX9reI#*WU}Id+bP(i zsTgvr=@hK=1O8KZ~j=#DeWoyDeLWf)#0J%SGo7brsF%V zG;?UiFALF#E2%`n!p3rXr)+jf%@G~2PNbIG$k4jOXh0rl>dX9x( zYs}{;yb!3Eb;1>b1ueLdHvE)6#Y^Zs@+bPkSYF9(-GauR75BuAy9GD;@69+w(k3_X z+{Zu2vR34Z*pRt3gZC@bN`I|+nqIrbLa)A<#J~Oj>x2;n5dd&U4sb}WTOT0wRbcMg zn-{GZlRG|qkA<|0jm&C-aX+)QqcQrZuN>VZPpFw%$WjHSp(H zgfq>Mwt#RuG~T^VzWtRX(0Hos= z205E2ufpwaYGCmEAyjpp5RvEyhX0ml9|w;1lFd66(sU`p!`r#YDzy?YC80A);>yso ztYzHR^&~Xvwcgv1cep9e%9Re{Qi}8A!l!af6oK$;euiftGTddVJ;LRTYjM1O<;S5s zStK+=T7X+$CK#TkGER$p;6vMhaxFXikYZX=0hj|K1fw%EfKkaXM>#h;9X2OHn8<(lISvBWNl&c9_zNpw<&-IH5#Fu> z8z!+=??mj;?G1N;B3op($6yT}Aarn)*i=9z78nt_mO&wf2{15JIx@ysQv=oevhvYL^_Ayc{g5lb-n*}J6u+=f~Tzd7v z7>!#$0j+s3Yn0-_p-Jk}dBSC`DBLdlol|5^A|>#uI9wl#crO z^AD66&rA`|U0+!FkksOI(6KH7r#ryq4x2wDhuDd{&?-lkM=|Meb9ILynldgbF-fHP za48-qSH5E7?k6lmlgxAZkk;u>=kb_q2!#{^8$YEst5ENiEKZ^t9`Hj`L+*J@RQ;J) znR&>bUzx@%?LBz(6F4v?ju9QJ8UNtCOIxx~t;>2kgn)|SeP+IaAx8Z6!HZS@yMH2k z$lXX=`=3Y$9kRUKnH_Q+y%{zJ^55_D7+1R}en!>Qau+~VZT!x47hn~!=+`GVP_O`9 zG@(sEu)2wjobVOA5Ki1@{aKqjx5c;)YyHdP1N+qf^oP>FgJ{nL}QLy}Z zP{uy@#P>;f%J*fqVQ9qeP7$FpugeBpl-aX43~RO3uUY(tolOjeuOaTgJ|qi&gp$l6 zscQY}41mSnqADy#cad&SF*{|2|*NaKI00jO)gu4|l9DC!{Fw zuacF6^QSec4jzrHmHIZyN=K?PjKB>bb;VCi7%$%!v<>YgnL|3_Ys4J3HGjp<)m++qyBL z1@AmBJyRAuQW)KtatPGlu1(yVSUQ6U`hSMQ27NZ+$Gf^ zVq-0xGe}zc2j7m)PuhDbRBMhHntGk2$AX8}r7(H)H>H3|UqL@A9Vtrj`1c9v4da65c@U%tqb@;q@t;6Oi&$aGJOI9FI43nT?T#{~4nu5=~ z%Q&U^%AHVNQ9(mdu)wCNH48>dBIv!*OvS_O0+;XG!IJoY^XD;uF2-$g9@anTFrZ&) zdSZhgXv#YQ@r27xvnseJ4Om(F5WP@%$1dOfwLf2DT9~7G^#5f`zsJwpx3|X<9Y7%L$4LuD3|KLrIFD(E&uzBHj9e}Wo7B`!j$LaQ zE@88qia%Ocb6{rCk6w;ZF^t0FQ3#oCQ&&{2%A*|~xq?PHi=!ous@%%zFtfH-6j{K< zPAoT%e=eb(0>cV|cW0-Zs{zUraBxr&AH25=2l^OT93a{<%awuw03^ zL3|`BWQCchJ+f!jwNNqX;8rxNvH}3W&;}r=lL$Qi>2XEM(iT?|)A{mVU$%PwJmds; z(bEGG(`Mpb8)IEvkb&G>>wM&$$$pSy)O_2}-Qf&vZU{ zSvAOP5W1AMOrm$rSLBM$P)ytA=+X62o{pr?tT+0<9s&YkK3P;K_uFF3cwyO*hEWi} z-->zx4b{C}%7d=yOnfT0yF~$GA)aZyyNaB9N86Sk|8sXU-ssTI{%0(e*fM5ohsw1$Xy(3our$y|0zv9*&i zaU>xeU6uf>g5(rwZ_~K;YnG1Bx!dorcnCO%Nw)7_}GmyuM=umixS(#Au+_@h;*bxMi-RR(U_IV_0;L zqL|LH^kSoH$?ze@QPE|ss>!Y2df=Xv0yz{58wx_Lc|MA4ie! 
diff --git a/audiocraft/__init__.py b/audiocraft/__init__.py
new file mode 100644
index 0000000..6b8594f
--- /dev/null
+++ b/audiocraft/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+# flake8: noqa
+from . import data, modules, models
+
+__version__ = '0.0.2a2'
diff --git a/audiocraft/data/__init__.py b/audiocraft/data/__init__.py
new file mode 100644
index 0000000..708a3dc
--- /dev/null
+++ b/audiocraft/data/__init__.py
@@ -0,0 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+# flake8: noqa
+from . import audio, audio_dataset
diff --git a/audiocraft/data/audio.py b/audiocraft/data/audio.py
new file mode 100644
index 0000000..2048df6
--- /dev/null
+++ b/audiocraft/data/audio.py
@@ -0,0 +1,215 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Audio IO methods (info, read, write) are defined in this module.
+We rely on the av library for faster reads when possible, otherwise on torchaudio.
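+
+Example (a minimal, illustrative sketch; `sample.wav` is a hypothetical local file):
+
+    from audiocraft.data.audio import audio_info, audio_read
+
+    info = audio_info('sample.wav')           # sample_rate, duration, channels
+    wav, sr = audio_read('sample.wav', seek_time=0.0, duration=5.0, pad=True)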
+""" + +from dataclasses import dataclass +from pathlib import Path +import logging +import typing as tp + +import numpy as np +import soundfile +import torch +from torch.nn import functional as F +import torchaudio as ta + +import av + +from .audio_utils import f32_pcm, i16_pcm, normalize_audio + + +_av_initialized = False + + +def _init_av(): + global _av_initialized + if _av_initialized: + return + logger = logging.getLogger('libav.mp3') + logger.setLevel(logging.ERROR) + _av_initialized = True + + +@dataclass(frozen=True) +class AudioFileInfo: + sample_rate: int + duration: float + channels: int + + +def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: + _init_av() + with av.open(str(filepath)) as af: + stream = af.streams.audio[0] + sample_rate = stream.codec_context.sample_rate + duration = float(stream.duration * stream.time_base) + channels = stream.channels + return AudioFileInfo(sample_rate, duration, channels) + + +def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: + info = soundfile.info(filepath) + return AudioFileInfo(info.samplerate, info.duration, info.channels) + + +def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo: + # torchaudio no longer returns useful duration informations for some formats like mp3s. + filepath = Path(filepath) + if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info + # ffmpeg has some weird issue with flac. + return _soundfile_info(filepath) + else: + return _av_info(filepath) + + +def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]: + """FFMPEG-based audio file reading using PyAV bindings. + Soundfile cannot read mp3 and av_read is more efficient than torchaudio. + + Args: + filepath (str or Path): Path to audio file to read. + seek_time (float): Time at which to start reading in the file. + duration (float): Duration to read from the file. If set to -1, the whole file is read. + Returns: + Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate + """ + _init_av() + with av.open(str(filepath)) as af: + stream = af.streams.audio[0] + sr = stream.codec_context.sample_rate + num_frames = int(sr * duration) if duration >= 0 else -1 + frame_offset = int(sr * seek_time) + # we need a small negative offset otherwise we get some edge artifact + # from the mp3 decoder. + af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream) + frames = [] + length = 0 + for frame in af.decode(streams=stream.index): + current_offset = int(frame.rate * frame.pts * frame.time_base) + strip = max(0, frame_offset - current_offset) + buf = torch.from_numpy(frame.to_ndarray()) + if buf.shape[0] != stream.channels: + buf = buf.view(-1, stream.channels).t() + buf = buf[:, strip:] + frames.append(buf) + length += buf.shape[1] + if num_frames > 0 and length >= num_frames: + break + assert frames + # If the above assert fails, it is likely because we seeked past the end of file point, + # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp. + # This will need proper debugging, in due time. + wav = torch.cat(frames, dim=1) + assert wav.shape[0] == stream.channels + if num_frames > 0: + wav = wav[:, :num_frames] + return f32_pcm(wav), sr + + +def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0., + duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]: + """Read audio by picking the most appropriate backend tool based on the audio format. 
+
+    Args:
+        filepath (str or Path): Path to audio file to read.
+        seek_time (float): Time at which to start reading in the file.
+        duration (float): Duration to read from the file. If set to -1, the whole file is read.
+        pad (bool): Pad output audio if not reaching expected duration.
+    Returns:
+        Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate.
+    """
+    fp = Path(filepath)
+    if fp.suffix in ['.flac', '.ogg']:  # TODO: check if we can safely use av_read for .ogg
+        # There is some bug with ffmpeg and reading flac
+        info = _soundfile_info(filepath)
+        frames = -1 if duration <= 0 else int(duration * info.sample_rate)
+        frame_offset = int(seek_time * info.sample_rate)
+        wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)
+        assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}"
+        wav = torch.from_numpy(wav).t().contiguous()
+        if len(wav.shape) == 1:
+            wav = torch.unsqueeze(wav, 0)
+    elif (
+        fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats()
+        and duration <= 0 and seek_time == 0
+    ):
+        # Torchaudio is faster if we load an entire file at once.
+        wav, sr = ta.load(fp)
+    else:
+        wav, sr = _av_read(filepath, seek_time, duration)
+    if pad and duration > 0:
+        expected_frames = int(duration * sr)
+        wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))
+    return wav, sr
+
+
+def audio_write(stem_name: tp.Union[str, Path],
+                wav: torch.Tensor, sample_rate: int,
+                format: str = 'wav', mp3_rate: int = 320, normalize: bool = True,
+                strategy: str = 'peak', peak_clip_headroom_db: float = 1,
+                rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
+                loudness_compressor: bool = False,
+                log_clipping: bool = True, make_parent_dir: bool = True,
+                add_suffix: bool = True) -> Path:
+    """Convenience function for saving audio to disk. Returns the filename the audio was written to.
+
+    Args:
+        stem_name (str or Path): Filename without extension which will be added automatically.
+        format (str): Either "wav" or "mp3".
+        mp3_rate (int): kbps when using mp3s.
+        normalize (bool): if `True` (default), normalizes according to the prescribed
+            strategy (see after). If `False`, the strategy is only used in case clipping
+            would happen.
+        strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
+            i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
+            with extra headroom to avoid clipping. 'clip' just clips.
+        peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
+        rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
+            than the `peak_clip` one to avoid further clipping.
+        loudness_headroom_db (float): Target loudness for loudness normalization.
+        loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.
+        log_clipping (bool): If True, basic logging on stderr when clipping still
+            occurs despite strategy (only for 'rms').
+        make_parent_dir (bool): Make parent directory if it doesn't exist.
+    Returns:
+        Path: Path of the saved audio.
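+
+    Example (illustrative only; assumes `wav` is a [channels, time] float tensor at 32 kHz):
+
+        out_path = audio_write('out/sample', wav, sample_rate=32000,
+                               format='wav', strategy='peak')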
+ """ + assert wav.dtype.is_floating_point, "wav is not floating point" + if wav.dim() == 1: + wav = wav[None] + elif wav.dim() > 2: + raise ValueError("Input wav should be at most 2 dimension.") + assert wav.isfinite().all() + wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db, + rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping, + sample_rate=sample_rate, stem_name=str(stem_name)) + kwargs: dict = {} + if format == 'mp3': + suffix = '.mp3' + kwargs.update({"compression": mp3_rate}) + elif format == 'wav': + wav = i16_pcm(wav) + suffix = '.wav' + kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16}) + else: + raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.") + if not add_suffix: + suffix = '' + path = Path(str(stem_name) + suffix) + if make_parent_dir: + path.parent.mkdir(exist_ok=True, parents=True) + try: + ta.save(path, wav, sample_rate, **kwargs) + except Exception: + if path.exists(): + # we do not want to leave half written files around. + path.unlink() + raise + return path diff --git a/audiocraft/data/audio_dataset.py b/audiocraft/data/audio_dataset.py new file mode 100644 index 0000000..cf21422 --- /dev/null +++ b/audiocraft/data/audio_dataset.py @@ -0,0 +1,525 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import copy +from concurrent.futures import ThreadPoolExecutor, Future +from dataclasses import dataclass, fields +from contextlib import ExitStack +import gzip +import json +import logging +import os +from pathlib import Path +import random +import sys +import typing as tp + +import torch +import torch.nn.functional as F + +from .audio import audio_read, audio_info +from .audio_utils import convert_audio +from .zip import PathInZip + +try: + import dora +except ImportError: + dora = None # type: ignore + + +@dataclass(order=True) +class BaseInfo: + + @classmethod + def _dict2fields(cls, dictionary: dict): + return { + field.name: dictionary[field.name] + for field in fields(cls) if field.name in dictionary + } + + @classmethod + def from_dict(cls, dictionary: dict): + _dictionary = cls._dict2fields(dictionary) + return cls(**_dictionary) + + def to_dict(self): + return { + field.name: self.__getattribute__(field.name) + for field in fields(self) + } + + +@dataclass(order=True) +class AudioMeta(BaseInfo): + path: str + duration: float + sample_rate: int + amplitude: tp.Optional[float] = None + weight: tp.Optional[float] = None + # info_path is used to load additional information about the audio file that is stored in zip files. 
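+    # Illustrative (hypothetical) value: PathInZip('/data/metadata.zip:track_0001.json'),
+    # i.e. an archive path plus a member path inside that archive.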
+ info_path: tp.Optional[PathInZip] = None + + @classmethod + def from_dict(cls, dictionary: dict): + base = cls._dict2fields(dictionary) + if 'info_path' in base and base['info_path'] is not None: + base['info_path'] = PathInZip(base['info_path']) + return cls(**base) + + def to_dict(self): + d = super().to_dict() + if d['info_path'] is not None: + d['info_path'] = str(d['info_path']) + return d + + +@dataclass(order=True) +class SegmentInfo(BaseInfo): + meta: AudioMeta + seek_time: float + n_frames: int # actual number of frames without padding + total_frames: int # total number of frames, padding included + sample_rate: int # actual sample rate + + +DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a'] + +logger = logging.getLogger(__name__) + + +def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta: + """AudioMeta from a path to an audio file. + + Args: + file_path (str): Resolved path of valid audio file. + minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). + Returns: + AudioMeta: Audio file path and its metadata. + """ + info = audio_info(file_path) + amplitude: tp.Optional[float] = None + if not minimal: + wav, sr = audio_read(file_path) + amplitude = wav.abs().max().item() + return AudioMeta(file_path, info.duration, info.sample_rate, amplitude) + + +def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta: + """If Dora is available as a dependency, try to resolve potential relative paths + in list of AudioMeta. This method is expected to be used when loading meta from file. + + Args: + m (AudioMeta): Audio meta to resolve. + fast (bool): If True, uses a really fast check for determining if a file is already absolute or not. + Only valid on Linux/Mac. + Returns: + AudioMeta: Audio meta with resolved path. + """ + def is_abs(m): + if fast: + return str(m)[0] == '/' + else: + os.path.isabs(str(m)) + + if not dora: + return m + + if not is_abs(m.path): + m.path = dora.git_save.to_absolute_path(m.path) + if m.info_path is not None and not is_abs(m.info_path.zip_path): + m.info_path.zip_path = dora.git_save.to_absolute_path(m.path) + return m + + +def find_audio_files(path: tp.Union[Path, str], + exts: tp.List[str] = DEFAULT_EXTS, + resolve: bool = True, + minimal: bool = True, + progress: bool = False, + workers: int = 0) -> tp.List[AudioMeta]: + """Build a list of AudioMeta from a given path, + collecting relevant audio files and fetching meta info. + + Args: + path (str or Path): Path to folder containing audio files. + exts (list of str): List of file extensions to consider for audio files. + minimal (bool): Whether to only load the minimal set of metadata (takes longer if not). + progress (bool): Whether to log progress on audio files collection. + workers (int): number of parallel workers, if 0, use only the current thread. + Returns: + List[AudioMeta]: List of audio file path and its metadata. 
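+
+    Example (a sketch; assumes audio files live under a local `dataset/` folder):
+
+        meta = find_audio_files('dataset/', minimal=True, progress=True, workers=4)
+        save_audio_meta('dataset/data.jsonl.gz', meta)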
+ """ + audio_files = [] + futures: tp.List[Future] = [] + pool: tp.Optional[ThreadPoolExecutor] = None + with ExitStack() as stack: + if workers > 0: + pool = ThreadPoolExecutor(workers) + stack.enter_context(pool) + + if progress: + print("Finding audio files...") + for root, folders, files in os.walk(path, followlinks=True): + for file in files: + full_path = Path(root) / file + if full_path.suffix.lower() in exts: + audio_files.append(full_path) + if pool is not None: + futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal)) + if progress: + print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr) + + if progress: + print("Getting audio metadata...") + meta: tp.List[AudioMeta] = [] + for idx, file_path in enumerate(audio_files): + try: + if pool is None: + m = _get_audio_meta(str(file_path), minimal) + else: + m = futures[idx].result() + if resolve: + m = _resolve_audio_meta(m) + except Exception as err: + print("Error with", str(file_path), err, file=sys.stderr) + continue + meta.append(m) + if progress: + print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr) + meta.sort() + return meta + + +def load_audio_meta(path: tp.Union[str, Path], + resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]: + """Load list of AudioMeta from an optionally compressed json file. + + Args: + path (str or Path): Path to JSON file. + resolve (bool): Whether to resolve the path from AudioMeta (default=True). + fast (bool): activates some tricks to make things faster. + Returns: + List[AudioMeta]: List of audio file path and its total duration. + """ + open_fn = gzip.open if str(path).lower().endswith('.gz') else open + with open_fn(path, 'rb') as fp: # type: ignore + lines = fp.readlines() + meta = [] + for line in lines: + d = json.loads(line) + m = AudioMeta.from_dict(d) + if resolve: + m = _resolve_audio_meta(m, fast=fast) + meta.append(m) + return meta + + +def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]): + """Save the audio metadata to the file pointer as json. + + Args: + path (str or Path): Path to JSON file. + metadata (list of BaseAudioMeta): List of audio meta to save. + """ + Path(path).parent.mkdir(exist_ok=True, parents=True) + open_fn = gzip.open if str(path).lower().endswith('.gz') else open + with open_fn(path, 'wb') as fp: # type: ignore + for m in meta: + json_str = json.dumps(m.to_dict()) + '\n' + json_bytes = json_str.encode('utf-8') + fp.write(json_bytes) + + +class AudioDataset: + """Base audio dataset. + + The dataset takes a list of AudioMeta and create a dataset composed of segments of audio + and potentially additional information, by creating random segments from the list of audio + files referenced in the metadata and applying minimal data pre-processing such as resampling, + mixing of channels, padding, etc. + + If no segment_duration value is provided, the AudioDataset will return the full wav for each + audio file. Otherwise, it will randomly sample audio files and create a segment of the specified + duration, applying padding if required. + + By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True + allows to return a tuple containing the torch Tensor and additional metadata on the segment and the + original audio meta. + + Args: + meta (tp.List[AudioMeta]): List of audio files metadata. + segment_duration (float): Optional segment duration of audio to load. + If not specified, the dataset will load the full audio segment from the file. 
+ shuffle (bool): Set to `True` to have the data reshuffled at every epoch. + sample_rate (int): Target sample rate of the loaded audio samples. + channels (int): Target number of channels of the loaded audio samples. + sample_on_duration (bool): Set to `True` to sample segments with probability + dependent on audio file duration. This is only used if `segment_duration` is provided. + sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of + `AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product + of the file duration and file weight. This is only used if `segment_duration` is provided. + min_segment_ratio (float): Minimum segment ratio to use when the audio file + is shorter than the desired segment. + max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset. + return_info (bool): Whether to return the wav only or return wav along with segment info and metadata. + min_audio_duration (tp.Optional[float], optional): Minimum audio file duration, in seconds, if provided + audio shorter than this will be filtered out. + max_audio_duration (tp.Optional[float], optional): Maximal audio file duration in seconds, if provided + audio longer than this will be filtered out. + """ + def __init__(self, + meta: tp.List[AudioMeta], + segment_duration: tp.Optional[float] = None, + shuffle: bool = True, + num_samples: int = 10_000, + sample_rate: int = 48_000, + channels: int = 2, + pad: bool = True, + sample_on_duration: bool = True, + sample_on_weight: bool = True, + min_segment_ratio: float = 0.5, + max_read_retry: int = 10, + return_info: bool = False, + min_audio_duration: tp.Optional[float] = None, + max_audio_duration: tp.Optional[float] = None + ): + assert len(meta) > 0, 'No audio meta provided to AudioDataset. Please check loading of audio meta.' + assert segment_duration is None or segment_duration > 0 + assert segment_duration is None or min_segment_ratio >= 0 + logging.debug(f'sample_on_duration: {sample_on_duration}') + logging.debug(f'sample_on_weight: {sample_on_weight}') + logging.debug(f'pad: {pad}') + logging.debug(f'min_segment_ratio: {min_segment_ratio}') + + self.segment_duration = segment_duration + self.min_segment_ratio = min_segment_ratio + self.max_audio_duration = max_audio_duration + self.min_audio_duration = min_audio_duration + if self.min_audio_duration is not None and self.max_audio_duration is not None: + assert self.min_audio_duration <= self.max_audio_duration + self.meta: tp.List[AudioMeta] = self._filter_duration(meta) + assert len(self.meta) # Fail fast if all data has been filtered. + self.total_duration = sum(d.duration for d in self.meta) + + if segment_duration is None: + num_samples = len(self.meta) + self.num_samples = num_samples + self.shuffle = shuffle + self.sample_rate = sample_rate + self.channels = channels + self.pad = pad + self.sample_on_weight = sample_on_weight + self.sample_on_duration = sample_on_duration + self.sampling_probabilities = self._get_sampling_probabilities() + self.max_read_retry = max_read_retry + self.return_info = return_info + + def __len__(self): + return self.num_samples + + def _get_sampling_probabilities(self, normalized: bool = True): + """Return the sampling probabilities for each file inside `self.meta`. + """ + scores: tp.List[float] = [] + for file_meta in self.meta: + score = 1. 
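+            # Illustrative example (not from the original code): with both flags enabled,
+            # a 30 s file with weight 2.0 gets score 2.0 * 30 = 60 before normalization.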
+ if self.sample_on_weight and file_meta.weight is not None: + score *= file_meta.weight + if self.sample_on_duration: + score *= file_meta.duration + scores.append(score) + probabilities = torch.tensor(scores) + if normalized: + probabilities /= probabilities.sum() + return probabilities + + def sample_file(self, rng: torch.Generator) -> AudioMeta: + """Sample a given file from `self.meta`. Can be overriden in subclasses. + This is only called if `segment_duration` is not None. + + You must use the provided random number generator `rng` for reproducibility. + """ + if not self.sample_on_weight and not self.sample_on_duration: + file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item()) + else: + file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item()) + + return self.meta[file_index] + + def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]: + if self.segment_duration is None: + file_meta = self.meta[index] + out, sr = audio_read(file_meta.path) + out = convert_audio(out, sr, self.sample_rate, self.channels) + n_frames = out.shape[-1] + segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames, + sample_rate=self.sample_rate) + else: + rng = torch.Generator() + if self.shuffle: + # We use index, plus extra randomness + rng.manual_seed(index + self.num_samples * random.randint(0, 2**24)) + else: + # We only use index + rng.manual_seed(index) + + for retry in range(self.max_read_retry): + file_meta = self.sample_file(rng) + # We add some variance in the file position even if audio file is smaller than segment + # without ending up with empty segments + max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio) + seek_time = torch.rand(1, generator=rng).item() * max_seek + try: + out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False) + out = convert_audio(out, sr, self.sample_rate, self.channels) + n_frames = out.shape[-1] + target_frames = int(self.segment_duration * self.sample_rate) + if self.pad: + out = F.pad(out, (0, target_frames - n_frames)) + segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames, + sample_rate=self.sample_rate) + except Exception as exc: + logger.warning("Error opening file %s: %r", file_meta.path, exc) + if retry == self.max_read_retry - 1: + raise + else: + break + + if self.return_info: + # Returns the wav and additional information on the wave segment + return out, segment_info + else: + return out + + def collater(self, samples): + """The collater function has to be provided to the dataloader + if AudioDataset has return_info=True in order to properly collate + the samples of a batch. + """ + if self.segment_duration is None and len(samples) > 1: + assert self.pad, "Must allow padding when batching examples of different durations." + + # In this case the audio reaching the collater is of variable length as segment_duration=None. 
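+        # Illustrative example: collating a 3 s and a 5 s mono wav pads the shorter one
+        # with trailing zeros up to the longer length before stacking; when return_info
+        # is set, each SegmentInfo.total_frames is updated to that padded length.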
+ to_pad = self.segment_duration is None and self.pad + if to_pad: + max_len = max([wav.shape[-1] for wav, _ in samples]) + + def _pad_wav(wav): + return F.pad(wav, (0, max_len - wav.shape[-1])) + + if self.return_info: + if len(samples) > 0: + assert len(samples[0]) == 2 + assert isinstance(samples[0][0], torch.Tensor) + assert isinstance(samples[0][1], SegmentInfo) + + wavs = [wav for wav, _ in samples] + segment_infos = [copy.deepcopy(info) for _, info in samples] + + if to_pad: + # Each wav could be of a different duration as they are not segmented. + for i in range(len(samples)): + # Determines the total legth of the signal with padding, so we update here as we pad. + segment_infos[i].total_frames = max_len + wavs[i] = _pad_wav(wavs[i]) + + wav = torch.stack(wavs) + return wav, segment_infos + else: + assert isinstance(samples[0], torch.Tensor) + if to_pad: + samples = [_pad_wav(s) for s in samples] + return torch.stack(samples) + + def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]: + """Filters out audio files with short durations. + Removes from meta files that have durations that will not allow to samples examples from them. + """ + orig_len = len(meta) + + # Filter data that is too short. + if self.min_audio_duration is not None: + meta = [m for m in meta if m.duration >= self.min_audio_duration] + + # Filter data that is too long. + if self.max_audio_duration is not None: + meta = [m for m in meta if m.duration <= self.max_audio_duration] + + filtered_len = len(meta) + removed_percentage = 100*(1-float(filtered_len)/orig_len) + msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage + if removed_percentage < 10: + logging.debug(msg) + else: + logging.warning(msg) + return meta + + @classmethod + def from_meta(cls, root: tp.Union[str, Path], **kwargs): + """Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file. + + Args: + root (str or Path): Path to root folder containing audio files. + kwargs: Additional keyword arguments for the AudioDataset. + """ + root = Path(root) + if root.is_dir(): + if (root / 'data.jsonl').exists(): + root = root / 'data.jsonl' + elif (root / 'data.jsonl.gz').exists(): + root = root / 'data.jsonl.gz' + else: + raise ValueError("Don't know where to read metadata from in the dir. " + "Expecting either a data.jsonl or data.jsonl.gz file but none found.") + meta = load_audio_meta(root) + return cls(meta, **kwargs) + + @classmethod + def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True, + exts: tp.List[str] = DEFAULT_EXTS, **kwargs): + """Instantiate AudioDataset from a path containing (possibly nested) audio files. + + Args: + root (str or Path): Path to root folder containing audio files. + minimal_meta (bool): Whether to only load minimal metadata or not. + exts (list of str): Extensions for audio files. + kwargs: Additional keyword arguments for the AudioDataset. 
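+
+        A minimal, illustrative example (the path and keyword values are hypothetical)::
+
+            dataset = AudioDataset.from_path('/data/music', minimal_meta=True,
+                                             segment_duration=30, sample_rate=32_000,
+                                             channels=1, return_info=True)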
+ """ + root = Path(root) + if root.is_file(): + meta = load_audio_meta(root, resolve=True) + else: + meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True) + return cls(meta, **kwargs) + + +def main(): + logging.basicConfig(stream=sys.stderr, level=logging.INFO) + parser = argparse.ArgumentParser( + prog='audio_dataset', + description='Generate .jsonl files by scanning a folder.') + parser.add_argument('root', help='Root folder with all the audio files') + parser.add_argument('output_meta_file', + help='Output file to store the metadata, ') + parser.add_argument('--complete', + action='store_false', dest='minimal', default=True, + help='Retrieve all metadata, even the one that are expansive ' + 'to compute (e.g. normalization).') + parser.add_argument('--resolve', + action='store_true', default=False, + help='Resolve the paths to be absolute and with no symlinks.') + parser.add_argument('--workers', + default=10, type=int, + help='Number of workers.') + args = parser.parse_args() + meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True, + resolve=args.resolve, minimal=args.minimal, workers=args.workers) + save_audio_meta(args.output_meta_file, meta) + + +if __name__ == '__main__': + main() diff --git a/audiocraft/data/audio_utils.py b/audiocraft/data/audio_utils.py new file mode 100644 index 0000000..76d4bc2 --- /dev/null +++ b/audiocraft/data/audio_utils.py @@ -0,0 +1,174 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import sys +import typing as tp + +import julius +import torch +import torchaudio + + +def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor: + """Convert audio to the given number of channels. + + Args: + wav (torch.Tensor): Audio wave of shape [B, C, T]. + channels (int): Expected number of channels as output. + Returns: + torch.Tensor: Downmixed or unchanged audio wave [B, C, T]. + """ + *shape, src_channels, length = wav.shape + if src_channels == channels: + pass + elif channels == 1: + # Case 1: + # The caller asked 1-channel audio, and the stream has multiple + # channels, downmix all channels. + wav = wav.mean(dim=-2, keepdim=True) + elif src_channels == 1: + # Case 2: + # The caller asked for multiple channels, but the input file has + # a single channel, replicate the audio over all channels. + wav = wav.expand(*shape, channels, length) + elif src_channels >= channels: + # Case 3: + # The caller asked for multiple channels, and the input file has + # more channels than requested. In that case return the first channels. + wav = wav[..., :channels, :] + else: + # Case 4: What is a reasonable choice here? + raise ValueError('The audio file has less channels than requested but is not mono.') + return wav + + +def convert_audio(wav: torch.Tensor, from_rate: float, + to_rate: float, to_channels: int) -> torch.Tensor: + """Convert audio to new sample rate and number of audio channels. + """ + wav = julius.resample_frac(wav, int(from_rate), int(to_rate)) + wav = convert_audio_channels(wav, to_channels) + return wav + + +def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14, + loudness_compressor: bool = False, energy_floor: float = 2e-3): + """Normalize an input signal to a user loudness in dB LKFS. + Audio loudness is defined according to the ITU-R BS.1770-4 recommendation. 
+ + Args: + wav (torch.Tensor): Input multichannel audio data. + sample_rate (int): Sample rate. + loudness_headroom_db (float): Target loudness of the output in dB LUFS. + loudness_compressor (bool): Uses tanh for soft clipping. + energy_floor (float): anything below that RMS level will not be rescaled. + Returns: + output (torch.Tensor): Loudness normalized output data. + """ + energy = wav.pow(2).mean().sqrt().item() + if energy < energy_floor: + return wav + transform = torchaudio.transforms.Loudness(sample_rate) + input_loudness_db = transform(wav).item() + # calculate the gain needed to scale to the desired loudness level + delta_loudness = -loudness_headroom_db - input_loudness_db + gain = 10.0 ** (delta_loudness / 20.0) + output = gain * wav + if loudness_compressor: + output = torch.tanh(output) + assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt()) + return output + + +def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None: + """Utility function to clip the audio with logging if specified.""" + max_scale = wav.abs().max() + if log_clipping and max_scale > 1: + clamp_prob = (wav.abs() > 1).float().mean().item() + print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):", + clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr) + wav.clamp_(-1, 1) + + +def normalize_audio(wav: torch.Tensor, normalize: bool = True, + strategy: str = 'peak', peak_clip_headroom_db: float = 1, + rms_headroom_db: float = 18, loudness_headroom_db: float = 14, + loudness_compressor: bool = False, log_clipping: bool = False, + sample_rate: tp.Optional[int] = None, + stem_name: tp.Optional[str] = None) -> torch.Tensor: + """Normalize the audio according to the prescribed strategy (see after). + + Args: + wav (torch.Tensor): Audio data. + normalize (bool): if `True` (default), normalizes according to the prescribed + strategy (see after). If `False`, the strategy is only used in case clipping + would happen. + strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak', + i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square + with extra headroom to avoid clipping. 'clip' just clips. + peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy. + rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger + than the `peak_clip` one to avoid further clipping. + loudness_headroom_db (float): Target loudness for loudness normalization. + loudness_compressor (bool): If True, uses tanh based soft clipping. + log_clipping (bool): If True, basic logging on stderr when clipping still + occurs despite strategy (only for 'rms'). + sample_rate (int): Sample rate for the audio data (required for loudness). + stem_name (Optional[str]): Stem name for clipping logging. + Returns: + torch.Tensor: Normalized audio. 
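+
+    A minimal, illustrative example (argument values are hypothetical)::
+
+        wav = normalize_audio(wav, strategy='loudness', sample_rate=32_000,
+                              loudness_headroom_db=14, stem_name='demo')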
+ """ + scale_peak = 10 ** (-peak_clip_headroom_db / 20) + scale_rms = 10 ** (-rms_headroom_db / 20) + if strategy == 'peak': + rescaling = (scale_peak / wav.abs().max()) + if normalize or rescaling < 1: + wav = wav * rescaling + elif strategy == 'clip': + wav = wav.clamp(-scale_peak, scale_peak) + elif strategy == 'rms': + mono = wav.mean(dim=0) + rescaling = scale_rms / mono.pow(2).mean().sqrt() + if normalize or rescaling < 1: + wav = wav * rescaling + _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name) + elif strategy == 'loudness': + assert sample_rate is not None, "Loudness normalization requires sample rate." + wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor) + _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name) + else: + assert wav.abs().max() < 1 + assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'" + return wav + + +def f32_pcm(wav: torch.Tensor) -> torch.Tensor: + """Convert audio to float 32 bits PCM format. + """ + if wav.dtype.is_floating_point: + return wav + else: + assert wav.dtype == torch.int16 + return wav.float() / 2**15 + + +def i16_pcm(wav: torch.Tensor) -> torch.Tensor: + """Convert audio to int 16 bits PCM format. + + ..Warning:: There exist many formula for doing this convertion. None are perfect + due to the asymetry of the int16 range. One either have possible clipping, DC offset, + or inconsistancies with f32_pcm. If the given wav doesn't have enough headroom, + it is possible that `i16_pcm(f32_pcm)) != Identity`. + """ + if wav.dtype.is_floating_point: + assert wav.abs().max() <= 1 + candidate = (wav * 2 ** 15).round() + if candidate.max() >= 2 ** 15: # clipping would occur + candidate = (wav * (2 ** 15 - 1)).round() + return candidate.short() + else: + assert wav.dtype == torch.int16 + return wav diff --git a/audiocraft/data/zip.py b/audiocraft/data/zip.py new file mode 100644 index 0000000..1f11542 --- /dev/null +++ b/audiocraft/data/zip.py @@ -0,0 +1,74 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import typing +import zipfile + +from dataclasses import dataclass +from functools import lru_cache +from typing_extensions import Literal + + +DEFAULT_SIZE = 32 +MODE = Literal['r', 'w', 'x', 'a'] + + +@dataclass(order=True) +class PathInZip: + """Class for holding a path of file within a zip file. + + Args: + path: The convention is : + Let's assume there is a zip file /some/location/foo.zip + and inside of it is a json file located at /data/file1.json, + Then we expect path = "/some/location/foo.zip:/data/file1.json" + """ + + INFO_PATH_SEP = ':' + zip_path: str + file_path: str + + def __init__(self, path: str) -> None: + split_path = path.split(self.INFO_PATH_SEP) + assert len(split_path) == 2 + self.zip_path, self.file_path = split_path + + @classmethod + def from_paths(cls, zip_path: str, file_path: str): + return cls(zip_path + cls.INFO_PATH_SEP + file_path) + + def __str__(self) -> str: + return self.zip_path + self.INFO_PATH_SEP + self.file_path + + +def _open_zip(path: str, mode: MODE = 'r'): + return zipfile.ZipFile(path, mode) + + +_cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip) + + +def set_zip_cache_size(max_size: int): + """Sets the maximal LRU caching for zip file opening. + + Args: + max_size: the maximal LRU cache. 
+ """ + global _cached_open_zip + _cached_open_zip = lru_cache(max_size)(_open_zip) + + +def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO: + """Opens a file stored inside a zip and returns a file-like object. + + Args: + path_in_zip: A PathInZip object representing the file to return a file-like object of. + mode: The mode in which to open the file with. + Returns: + A file-like object for PathInZip. + """ + zf = _cached_open_zip(path_in_zip.zip_path) + return zf.open(path_in_zip.file_path) diff --git a/audiocraft/models/__init__.py b/audiocraft/models/__init__.py new file mode 100644 index 0000000..92c7a48 --- /dev/null +++ b/audiocraft/models/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# flake8: noqa +from .musicgen import MusicGen +from .lm import LMModel +from .encodec import CompressionModel, EncodecModel diff --git a/audiocraft/models/builders.py b/audiocraft/models/builders.py new file mode 100644 index 0000000..77ee5f9 --- /dev/null +++ b/audiocraft/models/builders.py @@ -0,0 +1,218 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +All the functions to build the relevant models and modules +from the Hydra config. +""" + +import typing as tp +import warnings + +import audiocraft +import omegaconf +import torch + +from .encodec import CompressionModel, EncodecModel, FlattenedCompressionModel # noqa +from .lm import LMModel +from ..modules.codebooks_patterns import ( + CodebooksPatternProvider, + DelayedPatternProvider, + ParallelPatternProvider, + UnrolledPatternProvider, + VALLEPattern, + MusicLMPattern, +) +from ..modules.conditioners import ( + BaseConditioner, + ConditioningProvider, + LUTConditioner, + T5Conditioner, + ConditionFuser, + ChromaStemConditioner, +) +from .. import quantization as qt +from ..utils.utils import dict_from_config + + +def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer: + klass = { + 'no_quant': qt.DummyQuantizer, + 'rvq': qt.ResidualVectorQuantizer + }[quantizer] + kwargs = dict_from_config(getattr(cfg, quantizer)) + if quantizer != 'no_quant': + kwargs['dimension'] = dimension + return klass(**kwargs) + + +def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig): + if encoder_name == 'seanet': + kwargs = dict_from_config(getattr(cfg, 'seanet')) + encoder_override_kwargs = kwargs.pop('encoder') + decoder_override_kwargs = kwargs.pop('decoder') + encoder_kwargs = {**kwargs, **encoder_override_kwargs} + decoder_kwargs = {**kwargs, **decoder_override_kwargs} + encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs) + decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs) + return encoder, decoder + else: + raise KeyError(f'Unexpected compression model {cfg.compression_model}') + + +def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel: + """Instantiate a compression model. 
+ """ + if cfg.compression_model == 'encodec': + kwargs = dict_from_config(getattr(cfg, 'encodec')) + encoder_name = kwargs.pop('autoencoder') + quantizer_name = kwargs.pop('quantizer') + encoder, decoder = get_encodec_autoencoder(encoder_name, cfg) + quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension) + frame_rate = kwargs['sample_rate'] // encoder.hop_length + renormalize = kwargs.pop('renormalize', None) + renorm = kwargs.pop('renorm') + if renormalize is None: + renormalize = renorm is not None + warnings.warn("You are using a deprecated EnCodec model. Please migrate to new renormalization.") + return EncodecModel(encoder, decoder, quantizer, + frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device) + else: + raise KeyError(f'Unexpected compression model {cfg.compression_model}') + + +def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel: + """Instantiate a transformer LM. + """ + if cfg.lm_model == 'transformer_lm': + kwargs = dict_from_config(getattr(cfg, 'transformer_lm')) + n_q = kwargs['n_q'] + q_modeling = kwargs.pop('q_modeling', None) + codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern') + attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout')) + cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance')) + cfg_prob, cfg_coef = cls_free_guidance["training_dropout"], cls_free_guidance["inference_coef"] + fuser = get_condition_fuser(cfg) + condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device) + if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programatically + kwargs['cross_attention'] = True + if codebooks_pattern_cfg.modeling is None: + assert q_modeling is not None, \ + 'LM model should either have a codebook pattern defined or transformer_lm.q_modeling' + codebooks_pattern_cfg = omegaconf.OmegaConf.create( + {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}} + ) + pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg) + return LMModel( + pattern_provider=pattern_provider, + condition_provider=condition_provider, + fuser=fuser, + cfg_dropout=cfg_prob, + cfg_coef=cfg_coef, + attribute_dropout=attribute_dropout, + dtype=getattr(torch, cfg.dtype), + device=cfg.device, + **kwargs + ).to(cfg.device) + else: + raise KeyError(f'Unexpected LM model {cfg.lm_model}') + + +def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider: + """Instantiate a conditioning model. 
+ """ + device = cfg.device + duration = cfg.dataset.segment_duration + cfg = getattr(cfg, "conditioners") + cfg = omegaconf.OmegaConf.create({}) if cfg is None else cfg + conditioners: tp.Dict[str, BaseConditioner] = {} + with omegaconf.open_dict(cfg): + condition_provider_args = cfg.pop('args', {}) + for cond, cond_cfg in cfg.items(): + model_type = cond_cfg["model"] + model_args = cond_cfg[model_type] + if model_type == "t5": + conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args) + elif model_type == "lut": + conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args) + elif model_type == "chroma_stem": + model_args.pop('cache_path', None) + conditioners[str(cond)] = ChromaStemConditioner( + output_dim=output_dim, + duration=duration, + device=device, + **model_args + ) + else: + raise ValueError(f"unrecognized conditioning model: {model_type}") + conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args) + return conditioner + + +def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser: + """Instantiate a condition fuser object. + """ + fuser_cfg = getattr(cfg, "fuser") + fuser_methods = ["sum", "cross", "prepend", "input_interpolate"] + fuse2cond = {k: fuser_cfg[k] for k in fuser_methods} + kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods} + fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs) + return fuser + + +def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider: + """Instantiate a codebooks pattern provider object. + """ + pattern_providers = { + 'parallel': ParallelPatternProvider, + 'delay': DelayedPatternProvider, + 'unroll': UnrolledPatternProvider, + 'valle': VALLEPattern, + 'musiclm': MusicLMPattern, + } + name = cfg.modeling + kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {} + klass = pattern_providers[name] + return klass(n_q, **kwargs) + + +def get_debug_compression_model(device='cpu'): + """Instantiate a debug compression model to be used for unit tests. + """ + seanet_kwargs = { + 'n_filters': 4, + 'n_residual_layers': 1, + 'dimension': 32, + 'ratios': [10, 8, 16] # 25 Hz at 32kHz + } + encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs) + decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs) + quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4) + init_x = torch.randn(8, 32, 128) + quantizer(init_x, 1) # initialize kmeans etc. + compression_model = EncodecModel( + encoder, decoder, quantizer, + frame_rate=25, sample_rate=32000, channels=1).to(device) + return compression_model.eval() + + +def get_debug_lm_model(device='cpu'): + """Instantiate a debug LM to be used for unit tests. + """ + pattern = DelayedPatternProvider(n_q=4) + dim = 16 + providers = { + 'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"), + } + condition_provider = ConditioningProvider(providers) + fuser = ConditionFuser( + {'cross': ['description'], 'prepend': [], + 'sum': [], 'input_interpolate': []}) + lm = LMModel( + pattern, condition_provider, fuser, + n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2, + cross_attention=True, causal=True) + return lm.to(device).eval() diff --git a/audiocraft/models/encodec.py b/audiocraft/models/encodec.py new file mode 100644 index 0000000..69621a6 --- /dev/null +++ b/audiocraft/models/encodec.py @@ -0,0 +1,302 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from abc import ABC, abstractmethod +import typing as tp + +from einops import rearrange +import torch +from torch import nn + +from .. import quantization as qt + + +class CompressionModel(ABC, nn.Module): + + @abstractmethod + def forward(self, x: torch.Tensor) -> qt.QuantizedResult: + ... + + @abstractmethod + def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]: + """See `EncodecModel.encode`""" + ... + + @abstractmethod + def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None): + """See `EncodecModel.decode`""" + ... + + @property + @abstractmethod + def channels(self) -> int: + ... + + @property + @abstractmethod + def frame_rate(self) -> int: + ... + + @property + @abstractmethod + def sample_rate(self) -> int: + ... + + @property + @abstractmethod + def cardinality(self) -> int: + ... + + @property + @abstractmethod + def num_codebooks(self) -> int: + ... + + @property + @abstractmethod + def total_codebooks(self) -> int: + ... + + @abstractmethod + def set_num_codebooks(self, n: int): + """Set the active number of codebooks used by the quantizer. + """ + ... + + +class EncodecModel(CompressionModel): + """Encodec model operating on the raw waveform. + + Args: + encoder (nn.Module): Encoder network. + decoder (nn.Module): Decoder network. + quantizer (qt.BaseQuantizer): Quantizer network. + frame_rate (int): Frame rate for the latent representation. + sample_rate (int): Audio sample rate. + channels (int): Number of audio channels. + causal (bool): Whether to use a causal version of the model. + renormalize (bool): Whether to renormalize the audio before running the model. + """ + # we need assignement to override the property in the abstract class, + # I couldn't find a better way... + frame_rate: int = 0 + sample_rate: int = 0 + channels: int = 0 + + def __init__(self, + encoder: nn.Module, + decoder: nn.Module, + quantizer: qt.BaseQuantizer, + frame_rate: int, + sample_rate: int, + channels: int, + causal: bool = False, + renormalize: bool = False): + super().__init__() + self.encoder = encoder + self.decoder = decoder + self.quantizer = quantizer + self.frame_rate = frame_rate + self.sample_rate = sample_rate + self.channels = channels + self.renormalize = renormalize + self.causal = causal + if self.causal: + # we force disabling here to avoid handling linear overlap of segments + # as supported in original EnCodec codebase. + assert not self.renormalize, 'Causal model does not support renormalize' + + @property + def total_codebooks(self): + """Total number of quantizer codebooks available. + """ + return self.quantizer.total_codebooks + + @property + def num_codebooks(self): + """Active number of codebooks used by the quantizer. + """ + return self.quantizer.num_codebooks + + def set_num_codebooks(self, n: int): + """Set the active number of codebooks used by the quantizer. + """ + self.quantizer.set_num_codebooks(n) + + @property + def cardinality(self): + """Cardinality of each codebook. 
+ """ + return self.quantizer.bins + + def preprocess(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]: + scale: tp.Optional[torch.Tensor] + if self.renormalize: + mono = x.mean(dim=1, keepdim=True) + volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt() + scale = 1e-8 + volume + x = x / scale + scale = scale.view(-1, 1) + else: + scale = None + return x, scale + + def postprocess(self, + x: torch.Tensor, + scale: tp.Optional[torch.Tensor] = None) -> torch.Tensor: + if scale is not None: + assert self.renormalize + x = x * scale.view(-1, 1, 1) + return x + + def forward(self, x: torch.Tensor) -> qt.QuantizedResult: + assert x.dim() == 3 + length = x.shape[-1] + x, scale = self.preprocess(x) + + emb = self.encoder(x) + q_res = self.quantizer(emb, self.frame_rate) + out = self.decoder(q_res.x) + + # remove extra padding added by the encoder and decoder + assert out.shape[-1] >= length, (out.shape[-1], length) + out = out[..., :length] + + q_res.x = self.postprocess(out, scale) + + return q_res + + def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]: + """Encode the given input tensor to quantized representation along with scale parameter. + + Args: + x (torch.Tensor): Float tensor of shape [B, C, T] + + Returns: + codes, scale (tp.Tuple[torch.Tensor, torch.Tensor]): Tuple composed of: + codes a float tensor of shape [B, K, T] with K the number of codebooks used and T the timestep. + scale a float tensor containing the scale for audio renormalizealization. + """ + assert x.dim() == 3 + x, scale = self.preprocess(x) + emb = self.encoder(x) + codes = self.quantizer.encode(emb) + return codes, scale + + def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None): + """Decode the given codes to a reconstructed representation, using the scale to perform + audio denormalization if needed. + + Args: + codes (torch.Tensor): Int tensor of shape [B, K, T] + scale (tp.Optional[torch.Tensor]): Float tensor containing the scale value. + + Returns: + out (torch.Tensor): Float tensor of shape [B, C, T], the reconstructed audio. + """ + emb = self.quantizer.decode(codes) + out = self.decoder(emb) + out = self.postprocess(out, scale) + # out contains extra padding added by the encoder and decoder + return out + + +class FlattenedCompressionModel(CompressionModel): + """Wraps a CompressionModel and flatten its codebooks, e.g. + instead of returning [B, K, T], return [B, S, T * (K // S)] with + S the number of codebooks per step, and `K // S` the number of 'virtual steps' + for each real time step. + + Args: + model (CompressionModel): compression model to wrap. + codebooks_per_step (int): number of codebooks to keep per step, + this must divide the number of codebooks provided by the wrapped model. + extend_cardinality (bool): if True, and for instance if codebooks_per_step = 1, + if each codebook has a cardinality N, then the first codebook will + use the range [0, N - 1], and the second [N, 2 N - 1] etc. + On decoding, this can lead to potentially invalid sequences. + Any invalid entry will be silently remapped to the proper range + with a modulo. 
+ """ + def __init__(self, model: CompressionModel, codebooks_per_step: int = 1, + extend_cardinality: bool = True): + super().__init__() + self.model = model + self.codebooks_per_step = codebooks_per_step + self.extend_cardinality = extend_cardinality + + @property + def total_codebooks(self): + return self.model.total_codebooks + + @property + def num_codebooks(self): + """Active number of codebooks used by the quantizer. + + ..Warning:: this reports the number of codebooks after the flattening + of the codebooks! + """ + assert self.model.num_codebooks % self.codebooks_per_step == 0 + return self.codebooks_per_step + + def set_num_codebooks(self, n: int): + """Set the active number of codebooks used by the quantizer. + + ..Warning:: this sets the number of codebooks **before** the flattening + of the codebooks. + """ + assert n % self.codebooks_per_step == 0 + self.model.set_num_codebooks(n) + + @property + def num_virtual_steps(self) -> int: + """Return the number of virtual steps, e.g. one real step + will be split into that many steps. + """ + return self.model.num_codebooks // self.codebooks_per_step + + @property + def frame_rate(self) -> int: + return self.model.frame_rate * self.num_virtual_steps + + @property + def sample_rate(self) -> int: + return self.model.sample_rate + + @property + def channels(self) -> int: + return self.model.channels + + @property + def cardinality(self): + """Cardinality of each codebook. + """ + if self.extend_cardinality: + return self.model.cardinality * self.num_virtual_steps + else: + return self.model.cardinality + + def forward(self, x: torch.Tensor) -> qt.QuantizedResult: + raise NotImplementedError("Not supported, use encode and decode.") + + def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]: + indices, scales = self.model.encode(x) + B, K, T = indices.shape + indices = rearrange(indices, 'b (k v) t -> b k t v', k=self.codebooks_per_step) + if self.extend_cardinality: + for virtual_step in range(1, self.num_virtual_steps): + indices[..., virtual_step] += self.model.cardinality * virtual_step + indices = rearrange(indices, 'b k t v -> b k (t v)') + return (indices, scales) + + def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None): + B, K, T = codes.shape + assert T % self.num_virtual_steps == 0 + codes = rearrange(codes, 'b k (t v) -> b (k v) t', v=self.num_virtual_steps) + # We silently ignore potential errors from the LM when + # using extend_cardinality. + codes = codes % self.model.cardinality + return self.model.decode(codes, scale) diff --git a/audiocraft/models/lm.py b/audiocraft/models/lm.py new file mode 100644 index 0000000..c8aad8f --- /dev/null +++ b/audiocraft/models/lm.py @@ -0,0 +1,527 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +from dataclasses import dataclass +from functools import partial +import logging +import math +import typing as tp + +import torch +from torch import nn + +from ..utils import utils +from ..modules.streaming import StreamingModule, State +from ..modules.transformer import StreamingTransformer, create_norm_fn +from ..modules.conditioners import ( + ConditionFuser, + ClassifierFreeGuidanceDropout, + AttributeDropout, + ConditioningProvider, + ConditioningAttributes, + ConditionType, +) +from ..modules.codebooks_patterns import CodebooksPatternProvider +from ..modules.activations import get_activation_fn + + +logger = logging.getLogger(__name__) +ConditionTensors = tp.Dict[str, ConditionType] +CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]] + + +def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None): + """LM layer initialization. + Inspired from xlformers: https://github.com/fairinternal/xlformers + + Args: + method (str): Method name for init function. Valid options are: + 'gaussian', 'uniform'. + input_dim (int): Input dimension of the initialized module. + init_depth (Optional[int]): Optional init depth value used to rescale + the standard deviation if defined. + """ + # Compute std + std = 1 / math.sqrt(input_dim) + # Rescale with depth + if init_depth is not None: + std = std / math.sqrt(2 * init_depth) + + if method == 'gaussian': + return partial( + torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std + ) + elif method == 'uniform': + bound = math.sqrt(3) * std # ensure the standard deviation is `std` + return partial(torch.nn.init.uniform_, a=-bound, b=bound) + else: + raise ValueError("Unsupported layer initialization method") + + +def init_layer(m: nn.Module, + method: str, + init_depth: tp.Optional[int] = None, + zero_bias_init: bool = False): + """Wrapper around ``get_init_fn`` for proper initialization of LM modules. + + Args: + m (nn.Module): Module to initialize. + method (str): Method name for the init function. + init_depth (Optional[int]): Optional init depth value used to rescale + the standard deviation if defined. + zero_bias_init (bool): Whether to initialize the bias to 0 or not. + """ + if isinstance(m, nn.Linear): + init_fn = get_init_fn(method, m.in_features, init_depth=init_depth) + if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: + weight = m.weight.float() + init_fn(weight) + m.weight.data[:] = weight.half() + else: + init_fn(m.weight) + if zero_bias_init and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Embedding): + init_fn = get_init_fn(method, m.embedding_dim, init_depth=None) + if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: + weight = m.weight.float() + init_fn(weight) + m.weight.data[:] = weight.half() + else: + init_fn(m.weight) + + +class ScaledEmbedding(nn.Embedding): + """Boost learning rate for embeddings (with `scale`). + """ + def __init__(self, *args, lr=None, **kwargs): + super().__init__(*args, **kwargs) + self.lr = lr + + def make_optim_group(self): + group = {"params": list(self.parameters())} + if self.lr is not None: + group["lr"] = self.lr + return group + + +@dataclass +class LMOutput: + # The logits are already re-aligned with the input codes + # hence no extra shift is required, e.g. when computing CE + logits: torch.Tensor # [B, K, T, card] + mask: torch.Tensor # [B, K, T] + + +class LMModel(StreamingModule): + """Transformer-based language model on multiple streams of codes. 
+ + Args: + pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving. + condition_provider (MusicConditioningProvider): Conditioning provider from metadata. + fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input. + n_q (int): Number of parallel streams to model. + card (int): Cardinality, vocabulary size. + dim (int): Dimension of the transformer encoder. + num_heads (int): Number of heads for the transformer encoder. + hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder. + norm (str): Normalization method. + norm_first (bool): Use pre-norm instead of post-norm. + emb_lr (Optional[float]): Embedding-specific learning rate. + bias_proj (bool): Use bias for output projections. + weight_init (Optional[str]): Method for weight initialization. + depthwise_init (Optional[str]): Method for depthwise weight initialization. + zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros. + cfg_dropout (float): Classifier-free guidance dropout. + cfg_coef (float): Classifier-free guidance coefficient. + attribute_dropout (dict): Attribute dropout probabilities. + two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps. + **kwargs: Additional parameters for the transformer encoder. + """ + def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider, + fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8, + hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False, + emb_lr: tp.Optional[float] = None, bias_proj: bool = True, + weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None, + zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0, + attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False, + **kwargs): + super().__init__() + self.cfg_coef = cfg_coef + self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout) + self.att_dropout = AttributeDropout(p=attribute_dropout) + self.condition_provider = condition_provider + self.fuser = fuser + self.card = card + embed_dim = self.card + 1 + self.n_q = n_q + self.dim = dim + self.pattern_provider = pattern_provider + self.two_step_cfg = two_step_cfg + self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)]) + if 'activation' in kwargs: + kwargs['activation'] = get_activation_fn(kwargs['activation']) + self.transformer = StreamingTransformer( + d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim), + norm=norm, norm_first=norm_first, **kwargs) + self.out_norm: tp.Optional[nn.Module] = None + if norm_first: + self.out_norm = create_norm_fn(norm, dim) + self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)]) + self._init_weights(weight_init, depthwise_init, zero_bias_init) + self._fsdp: tp.Optional[nn.Module] + self.__dict__['_fsdp'] = None + + def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool): + """Initialization of the transformer module weights. + + Args: + weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options. + depthwise_init (Optional[str]): Depwthwise initialization strategy. The following options are valid: + 'current' where the depth corresponds to the current layer index or 'global' where the total number + of layer is used as depth. 
If not set, no depthwise initialization strategy is used. + zero_bias_init (bool): Whether to initalize bias to zero or not. + """ + assert depthwise_init is None or depthwise_init in ['current', 'global'] + assert depthwise_init is None or weight_init is not None, \ + "If 'depthwise_init' is defined, a 'weight_init' method should be provided." + assert not zero_bias_init or weight_init is not None, \ + "If 'zero_bias_init', a 'weight_init' method should be provided" + + if weight_init is None: + return + + for emb_layer in self.emb: + init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) + + for layer_idx, tr_layer in enumerate(self.transformer.layers): + depth = None + if depthwise_init == 'current': + depth = layer_idx + 1 + elif depthwise_init == 'global': + depth = len(self.transformer.layers) + init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init) + tr_layer.apply(init_fn) + + for linear in self.linears: + init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) + + @property + def special_token_id(self) -> int: + return self.card + + @property + def num_codebooks(self) -> int: + return self.n_q + + def forward(self, sequence: torch.Tensor, + conditions: tp.List[ConditioningAttributes], + condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor: + """Apply language model on sequence and conditions. + Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and + S the sequence steps, return the logits with shape [B, card, K, S]. + + Args: + indices (torch.Tensor): indices of the codes to model. + conditions (list[ConditioningAttributes]): conditionings to use when modeling + the given codes. Note that when evaluating multiple time with the same conditioning + you should pre-compute those and pass them as `condition_tensors`. + condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning + tensors, see `conditions`. + Returns: + torch.Tensor: Logits. + """ + B, K, S = sequence.shape + assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks' + input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)]) + if condition_tensors is None: + assert not self._is_streaming, "Conditions tensors should be precomputed when streaming." + # apply dropout modules + conditions = self.cfg_dropout(conditions) + conditions = self.att_dropout(conditions) + tokenized = self.condition_provider.tokenize(conditions) + # encode conditions and fuse, both have a streaming cache to not recompute when generating. + condition_tensors = self.condition_provider(tokenized) + else: + assert not conditions, "Shouldn't pass both conditions and condition_tensors." + + input_, cross_attention_input = self.fuser(input_, condition_tensors) + + out = self.transformer(input_, cross_attention_src=cross_attention_input) + if self.out_norm: + out = self.out_norm(out) + logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card] + + # remove the prefix from the model outputs + if len(self.fuser.fuse2cond['prepend']) > 0: + logits = logits[:, :, -S:] + + return logits # [B, K, S, card] + + def compute_predictions( + self, codes: torch.Tensor, + conditions: tp.List[ConditioningAttributes], + condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput: + """Given an input tensor of codes [B, K, T] and list of conditions, runs the model + forward using the specified codes interleaving pattern. 
+ + Args: + codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size, + K the number of codebooks and T the number of timesteps. + conditions (list[ConditioningAttributes]): conditionings to use when modeling + the given codes. Note that when evaluating multiple time with the same conditioning + you should pre-compute those and pass them as `condition_tensors`. + condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning + tensors, see `conditions`. + Returns: + LMOutput: Language model outputs + logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes, + i.e. the first item corresponds to logits to predict the first code, meaning that + no additional shifting of codes and logits is required. + mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions. + Given the specified interleaving strategies, parts of the logits and codes should + not be considered as valid predictions because of invalid context. + """ + B, K, T = codes.shape + codes = codes.contiguous() + # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens + pattern = self.pattern_provider.get_pattern(T) + sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence( + codes, self.special_token_id, keep_only_valid_steps=True + ) + # apply model on pattern sequence + model = self if self._fsdp is None else self._fsdp + logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card] + # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card] + # and provide the corresponding mask over invalid positions of tokens + logits = logits.permute(0, 3, 1, 2) # [B, card, K, S] + # note: we use nans as special token to make it obvious if we feed unexpected logits + logits, logits_indexes, logits_mask = pattern.revert_pattern_logits( + logits, float('nan'), keep_only_valid_steps=True + ) + logits = logits.permute(0, 2, 3, 1) # [B, K, T, card] + logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T] + return LMOutput(logits, logits_mask) + + def _sample_next_token(self, + sequence: torch.Tensor, + cfg_conditions: CFGConditions, + unconditional_state: State, + use_sampling: bool = False, + temp: float = 1.0, + top_k: int = 0, + top_p: float = 0.0, + cfg_coef: tp.Optional[float] = None) -> torch.Tensor: + """Sample next token from the model given a sequence and a set of conditions. The model supports + multiple sampling strategies (greedy sampling, softmax, top-k, top-p...). + + Args: + sequence (torch.Tensor): Current sequence of shape [B, K, S] + with K corresponding to the number of codebooks and S the number of sequence steps. + S = 1 in streaming mode, except for the first step that contains a bigger prompt. + condition_tensors (Dict[str, ConditionType): Set of conditions. If CFG is used, + should be twice the batch size, being the concatenation of the conditions + null conditions. + use_sampling (bool): Whether to use a sampling strategy or not. + temp (float): Sampling temperature. + top_k (int): K for "top-k" sampling. + top_p (float): P for "top-p" sampling. + cfg_coef (float): classifier free guidance coefficient + Returns: + next_token (torch.Tensor): Next token tensor of shape [B, K, 1]. 
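+
+        Note: with classifier-free guidance enabled, the logits used for sampling are
+        `uncond_logits + cfg_coef * (cond_logits - uncond_logits)`.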
+ """ + B = sequence.shape[0] + cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef + model = self if self._fsdp is None else self._fsdp + if self.two_step_cfg and cfg_conditions != {}: + assert isinstance(cfg_conditions, tuple) + condition_tensors, null_condition_tensors = cfg_conditions + cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors) + state = self.get_streaming_state() + self.set_streaming_state(unconditional_state) + uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors) + unconditional_state.update(self.get_streaming_state()) + self.set_streaming_state(state) + logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef + else: + assert isinstance(cfg_conditions, dict) + condition_tensors = cfg_conditions + if condition_tensors: + # Preparing for CFG, predicting both conditional and unconditional logits. + sequence = torch.cat([sequence, sequence], dim=0) + all_logits = model( + sequence, + conditions=[], condition_tensors=condition_tensors) + if condition_tensors: + cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card] + logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef + else: + logits = all_logits + + logits = logits.permute(0, 1, 3, 2) # [B, K, card, T] + logits = logits[..., -1] # [B x K x card] + + # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error. + if use_sampling and temp > 0.0: + probs = torch.softmax(logits / temp, dim=-1) + if top_p > 0.0: + next_token = utils.sample_top_p(probs, p=top_p) + elif top_k > 0: + next_token = utils.sample_top_k(probs, k=top_k) + else: + next_token = utils.multinomial(probs, num_samples=1) + else: + next_token = torch.argmax(logits, dim=-1, keepdim=True) + + return next_token + + @torch.no_grad() + def generate(self, + prompt: tp.Optional[torch.Tensor] = None, + conditions: tp.List[ConditioningAttributes] = [], + num_samples: tp.Optional[int] = None, + max_gen_len: int = 256, + use_sampling: bool = True, + temp: float = 1.0, + top_k: int = 250, + top_p: float = 0.0, + cfg_coef: tp.Optional[float] = None, + two_step_cfg: bool = False, + remove_prompts: bool = False, + check: bool = False, + callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor: + """Generate tokens sampling from the model given a prompt or unconditionally. Generation can + be perform in a greedy fashion or using sampling with top K and top P strategies. + + Args: + prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T]. + conditions_tensors (Dict[str, torch.Tensor]): Set of conditions or None. + num_samples (int or None): Number of samples to generate when no prompt and no conditions are given. + max_gen_len (int): Maximum generation length. + use_sampling (bool): Whether to use a sampling strategy or not. + temp (float): Sampling temperature. + top_k (int): K for "top-k" sampling. + top_p (float): P for "top-p" sampling. + remove_prompts (bool): Whether to remove prompts from generation or not. + Returns: + torch.Tensor: Generated tokens. + """ + assert not self.training, "generation shouldn't be used in training mode." + first_param = next(iter(self.parameters())) + device = first_param.device + + # Checking all input shapes are consistents. 
+ possible_num_samples = [] + if num_samples is not None: + possible_num_samples.append(num_samples) + elif prompt is not None: + possible_num_samples.append(prompt.shape[0]) + elif conditions: + possible_num_samples.append(len(conditions)) + else: + possible_num_samples.append(1) + assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsitent inputs shapes" + num_samples = possible_num_samples[0] + + # below we create set of conditions: one conditional and one unconditional + # to do that we merge the regular condition together with the null condition + # we then do 1 forward pass instead of 2. + # the reason for that is two-fold: + # 1. it is about x2 faster than doing 2 forward passes + # 2. avoid the streaming API treating the 2 passes as part of different time steps + # We also support doing two different passes, in particular to ensure that + # the padding structure is exactly the same between train anf test. + # With a batch size of 1, this can be slower though. + cfg_conditions: CFGConditions + two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg + if conditions: + null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions) + if two_step_cfg: + cfg_conditions = ( + self.condition_provider(self.condition_provider.tokenize(conditions)), + self.condition_provider(self.condition_provider.tokenize(null_conditions)), + ) + else: + conditions = conditions + null_conditions + tokenized = self.condition_provider.tokenize(conditions) + cfg_conditions = self.condition_provider(tokenized) + else: + cfg_conditions = {} + + if prompt is None: + assert num_samples > 0 + prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device) + + B, K, T = prompt.shape + start_offset = T + assert start_offset < max_gen_len + + pattern = self.pattern_provider.get_pattern(max_gen_len) + # this token is used as default value for codes that are not generated yet + unknown_token = -1 + + # we generate codes up to the max_gen_len that will be mapped to the pattern sequence + gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device) + # filling the gen_codes with the prompt if needed + gen_codes[..., :start_offset] = prompt + # create the gen_sequence with proper interleaving from the pattern: [B, K, S] + gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id) + # retrieve the start_offset in the sequence: + # it is the first sequence step that contains the `start_offset` timestep + start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset) + assert start_offset_sequence is not None + + with self.streaming(): + unconditional_state = self.get_streaming_state() + prev_offset = 0 + gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S] + for offset in range(start_offset_sequence, gen_sequence_len): + # get current sequence (note that the streaming API is providing the caching over previous offsets) + curr_sequence = gen_sequence[..., prev_offset:offset] + curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1) + if check: + # check coherence between mask and sequence + assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all() + # should never happen as gen_sequence is filled progressively + assert not (curr_sequence == unknown_token).any() + # sample next token from the model, next token shape is [B, K, 1] + next_token = self._sample_next_token( + curr_sequence, cfg_conditions, 
unconditional_state, use_sampling, temp, top_k, top_p, + cfg_coef=cfg_coef) + # ensure the tokens that should be masked are properly set to special_token_id + # as the model never output special_token_id + valid_mask = mask[..., offset:offset+1].expand(B, -1, -1) + next_token[~valid_mask] = self.special_token_id + # ensure we don't overwrite prompt tokens, we only write over unknown tokens + # (then mask tokens should be left as is as well, which is correct) + gen_sequence[..., offset:offset+1] = torch.where( + gen_sequence[..., offset:offset+1] == unknown_token, + next_token, gen_sequence[..., offset:offset+1] + ) + prev_offset = offset + if callback is not None: + callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence) + unconditional_state.clear() + + # ensure sequence has been entirely filled + assert not (gen_sequence == unknown_token).any() + # ensure gen_sequence pattern and mask are matching + # which means the gen_sequence is valid according to the pattern + assert ( + gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id) + ).all() + # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps + out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token) + + # sanity checks over the returned codes and corresponding masks + assert (out_codes[..., :max_gen_len] != unknown_token).all() + assert (out_mask[..., :max_gen_len] == 1).all() + + out_start_offset = start_offset if remove_prompts else 0 + out_codes = out_codes[..., out_start_offset:max_gen_len] + + # ensure the returned codes are all valid + assert (out_codes >= 0).all() and (out_codes <= self.card).all() + return out_codes diff --git a/audiocraft/models/loaders.py b/audiocraft/models/loaders.py new file mode 100644 index 0000000..19837d4 --- /dev/null +++ b/audiocraft/models/loaders.py @@ -0,0 +1,90 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +Utility functions to load from the checkpoints. +Each checkpoint is a torch.saved dict with the following keys: +- 'xp.cfg': the hydra config as dumped during training. This should be used + to rebuild the object using the audiocraft.models.builders functions, +- 'model_best_state': a readily loadable best state for the model, including + the conditioner. The model obtained from `xp.cfg` should be compatible + with this state dict. In the case of a LM, the encodec model would not be + bundled along but instead provided separately. + +Those functions also support loading from a remote location with the Torch Hub API. +They also support overriding some parameters, in particular the device and dtype +of the returned model. +""" + +from pathlib import Path +from huggingface_hub import hf_hub_download +import typing as tp +import os + +from omegaconf import OmegaConf +import torch + +from . 
import builders + + +HF_MODEL_CHECKPOINTS_MAP = { + "small": "facebook/musicgen-small", + "medium": "facebook/musicgen-medium", + "large": "facebook/musicgen-large", + "melody": "facebook/musicgen-melody", +} + + +def _get_state_dict( + file_or_url_or_id: tp.Union[Path, str], + filename: tp.Optional[str] = None, + device='cpu', + cache_dir: tp.Optional[str] = None, +): + # Return the state dict either from a file or url + file_or_url_or_id = str(file_or_url_or_id) + assert isinstance(file_or_url_or_id, str) + + if os.path.isfile(file_or_url_or_id): + return torch.load(file_or_url_or_id, map_location=device) + + elif file_or_url_or_id.startswith('https://'): + return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True) + + elif file_or_url_or_id in HF_MODEL_CHECKPOINTS_MAP: + assert filename is not None, "filename needs to be defined if using HF checkpoints" + + repo_id = HF_MODEL_CHECKPOINTS_MAP[file_or_url_or_id] + file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=cache_dir) + return torch.load(file, map_location=device) + + else: + raise ValueError(f"{file_or_url_or_id} is not a valid name, path or link that can be loaded.") + + +def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None): + pkg = _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir) + cfg = OmegaConf.create(pkg['xp.cfg']) + cfg.device = str(device) + model = builders.get_compression_model(cfg) + model.load_state_dict(pkg['best_state']) + model.eval() + return model + + +def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None): + pkg = _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir) + cfg = OmegaConf.create(pkg['xp.cfg']) + cfg.device = str(device) + if cfg.device == 'cpu': + cfg.dtype = 'float32' + else: + cfg.dtype = 'float16' + model = builders.get_lm_model(cfg) + model.load_state_dict(pkg['best_state']) + model.eval() + model.cfg = cfg + return model diff --git a/audiocraft/models/musicgen.py b/audiocraft/models/musicgen.py new file mode 100644 index 0000000..2870b27 --- /dev/null +++ b/audiocraft/models/musicgen.py @@ -0,0 +1,361 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +Main model for using MusicGen. This will combine all the required components +and provide easy access to the generation API. +""" + +import os +import typing as tp + +import torch + +from .encodec import CompressionModel +from .lm import LMModel +from .builders import get_debug_compression_model, get_debug_lm_model +from .loaders import load_compression_model, load_lm_model, HF_MODEL_CHECKPOINTS_MAP +from ..data.audio_utils import convert_audio +from ..modules.conditioners import ConditioningAttributes, WavCondition +from ..utils.autocast import TorchAutocast + + +MelodyList = tp.List[tp.Optional[torch.Tensor]] +MelodyType = tp.Union[torch.Tensor, MelodyList] + + +class MusicGen: + """MusicGen main model with convenient generation API. + + Args: + name (str): name of the model. + compression_model (CompressionModel): Compression model + used to map audio to invertible discrete representations. + lm (LMModel): Language model over discrete representations. 
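+        max_duration (float): Maximum duration, in seconds, that the model generates
+            in a single pass; longer requests fall back to the windowed generation
+            implemented in ``_generate_tokens``. Defaults to 30.
+
+    Example (illustrative sketch only; assumes the pretrained checkpoints can be
+    downloaded and that enough memory is available):
+
+        >>> model = MusicGen.get_pretrained('small')
+        >>> model.set_generation_params(duration=8)
+        >>> wav = model.generate(['lo-fi hip hop with a mellow piano'])  # [B, C, T]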
+ """ + def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel, + max_duration: float = 30): + self.name = name + self.compression_model = compression_model + self.lm = lm + self.max_duration = max_duration + self.device = next(iter(lm.parameters())).device + self.generation_params: dict = {} + self.set_generation_params(duration=15) # 15 seconds by default + self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None + if self.device.type == 'cpu': + self.autocast = TorchAutocast(enabled=False) + else: + self.autocast = TorchAutocast( + enabled=True, device_type=self.device.type, dtype=torch.float16) + + @property + def frame_rate(self) -> int: + """Roughly the number of AR steps per seconds.""" + return self.compression_model.frame_rate + + @property + def sample_rate(self) -> int: + """Sample rate of the generated audio.""" + return self.compression_model.sample_rate + + @property + def audio_channels(self) -> int: + """Audio channels of the generated audio.""" + return self.compression_model.channels + + @staticmethod + def get_pretrained(name: str = 'melody', device=None): + """Return pretrained model, we provide four models: + - small (300M), text to music, # see: https://huggingface.co/facebook/musicgen-small + - medium (1.5B), text to music, # see: https://huggingface.co/facebook/musicgen-medium + - melody (1.5B) text to music and text+melody to music, # see: https://huggingface.co/facebook/musicgen-melody + - large (3.3B), text to music, # see: https://huggingface.co/facebook/musicgen-large + """ + + if device is None: + if torch.cuda.device_count(): + device = 'cuda' + else: + device = 'cpu' + + if name == 'debug': + # used only for unit tests + compression_model = get_debug_compression_model(device) + lm = get_debug_lm_model(device) + return MusicGen(name, compression_model, lm) + + if name not in HF_MODEL_CHECKPOINTS_MAP: + raise ValueError( + f"{name} is not a valid checkpoint name. " + f"Choose one of {', '.join(HF_MODEL_CHECKPOINTS_MAP.keys())}" + ) + + cache_dir = os.environ.get('MUSICGEN_ROOT', None) + compression_model = load_compression_model(name, device=device, cache_dir=cache_dir) + lm = load_lm_model(name, device=device, cache_dir=cache_dir) + if name == 'melody': + lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True + + return MusicGen(name, compression_model, lm) + + def set_generation_params(self, use_sampling: bool = True, top_k: int = 250, + top_p: float = 0.0, temperature: float = 1.0, + duration: float = 30.0, cfg_coef: float = 3.0, + two_step_cfg: bool = False, extend_stride: float = 18): + """Set the generation parameters for MusicGen. + + Args: + use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True. + top_k (int, optional): top_k used for sampling. Defaults to 250. + top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0. + temperature (float, optional): Softmax temperature parameter. Defaults to 1.0. + duration (float, optional): Duration of the generated waveform. Defaults to 30.0. + cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0. + two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance, + instead of batching together the two. This has some impact on how things + are padded but seems to have little impact in practice. + extend_stride: when doing extended generation (i.e. 
more than 30 seconds), by how much + should we extend the audio each time. Larger values will mean less context is + preserved, and shorter value will require extra computations. + """ + assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration." + self.extend_stride = extend_stride + self.duration = duration + self.generation_params = { + 'use_sampling': use_sampling, + 'temp': temperature, + 'top_k': top_k, + 'top_p': top_p, + 'cfg_coef': cfg_coef, + 'two_step_cfg': two_step_cfg, + } + + def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None): + """Override the default progress callback.""" + self._progress_callback = progress_callback + + def generate_unconditional(self, num_samples: int, progress: bool = False) -> torch.Tensor: + """Generate samples in an unconditional manner. + + Args: + num_samples (int): Number of samples to be generated. + progress (bool, optional): Flag to display progress of the generation process. Defaults to False. + """ + descriptions: tp.List[tp.Optional[str]] = [None] * num_samples + attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) + return self._generate_tokens(attributes, prompt_tokens, progress) + + def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor: + """Generate samples conditioned on text. + + Args: + descriptions (tp.List[str]): A list of strings used as text conditioning. + progress (bool, optional): Flag to display progress of the generation process. Defaults to False. + """ + attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) + assert prompt_tokens is None + return self._generate_tokens(attributes, prompt_tokens, progress) + + def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType, + melody_sample_rate: int, progress: bool = False) -> torch.Tensor: + """Generate samples conditioned on text and melody. + + Args: + descriptions (tp.List[str]): A list of strings used as text conditioning. + melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as + melody conditioning. Should have shape [B, C, T] with B matching the description length, + C=1 or 2. It can be [C, T] if there is a single description. It can also be + a list of [C, T] tensors. + melody_sample_rate: (int): Sample rate of the melody waveforms. + progress (bool, optional): Flag to display progress of the generation process. Defaults to False. + """ + if isinstance(melody_wavs, torch.Tensor): + if melody_wavs.dim() == 2: + melody_wavs = melody_wavs[None] + if melody_wavs.dim() != 3: + raise ValueError("Melody wavs should have a shape [B, C, T].") + melody_wavs = list(melody_wavs) + else: + for melody in melody_wavs: + if melody is not None: + assert melody.dim() == 2, "One melody in the list has the wrong number of dims." + + melody_wavs = [ + convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels) + if wav is not None else None + for wav in melody_wavs] + attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, + melody_wavs=melody_wavs) + assert prompt_tokens is None + return self._generate_tokens(attributes, prompt_tokens, progress) + + def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int, + descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None, + progress: bool = False) -> torch.Tensor: + """Generate samples conditioned on audio prompts. 
+ + Args: + prompt (torch.Tensor): A batch of waveforms used for continuation. + Prompt should be [B, C, T], or [C, T] if only one sample is generated. + prompt_sample_rate (int): Sampling rate of the given audio waveforms. + descriptions (tp.List[str], optional): A list of strings used as text conditioning. Defaults to None. + progress (bool, optional): Flag to display progress of the generation process. Defaults to False. + """ + if prompt.dim() == 2: + prompt = prompt[None] + if prompt.dim() != 3: + raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).") + prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels) + if descriptions is None: + descriptions = [None] * len(prompt) + attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt) + assert prompt_tokens is not None + return self._generate_tokens(attributes, prompt_tokens, progress) + + @torch.no_grad() + def _prepare_tokens_and_attributes( + self, + descriptions: tp.Sequence[tp.Optional[str]], + prompt: tp.Optional[torch.Tensor], + melody_wavs: tp.Optional[MelodyList] = None, + ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]: + """Prepare model inputs. + + Args: + descriptions (tp.List[str]): A list of strings used as text conditioning. + prompt (torch.Tensor): A batch of waveforms used for continuation. + melody_wavs (tp.Optional[torch.Tensor], optional): A batch of waveforms + used as melody conditioning. Defaults to None. + """ + attributes = [ + ConditioningAttributes(text={'description': description}) + for description in descriptions] + + if melody_wavs is None: + for attr in attributes: + attr.wav['self_wav'] = WavCondition( + torch.zeros((1, 1), device=self.device), + torch.tensor([0], device=self.device), + path='null_wav') # type: ignore + else: + if self.name != "melody": + raise RuntimeError("This model doesn't support melody conditioning. " + "Use the `melody` model.") + assert len(melody_wavs) == len(descriptions), \ + f"number of melody wavs must match number of descriptions! " \ + f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}" + for attr, melody in zip(attributes, melody_wavs): + if melody is None: + attr.wav['self_wav'] = WavCondition( + torch.zeros((1, 1), device=self.device), + torch.tensor([0], device=self.device), + path='null_wav') # type: ignore + else: + attr.wav['self_wav'] = WavCondition( + melody.to(device=self.device), + torch.tensor([melody.shape[-1]], device=self.device)) + + if prompt is not None: + if descriptions is not None: + assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match" + prompt = prompt.to(self.device) + prompt_tokens, scale = self.compression_model.encode(prompt) + assert scale is None + else: + prompt_tokens = None + return attributes, prompt_tokens + + def _generate_tokens(self, attributes: tp.List[ConditioningAttributes], + prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor: + """Generate discrete audio tokens given audio prompt and/or conditions. + + Args: + attributes (tp.List[ConditioningAttributes]): Conditions used for generation (text/melody). + prompt_tokens (tp.Optional[torch.Tensor]): Audio prompt used for continuation. + progress (bool, optional): Flag to display progress of the generation process. Defaults to False. + Returns: + torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params. 
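+
+        Note:
+            For illustration, assuming a frame rate of 50 Hz with ``duration=45``,
+            ``max_duration=30`` and ``extend_stride=18``: ``total_gen_len`` is
+            45 * 50 = 2250 tokens. The first window produces 1500 tokens, the last
+            1500 - 18 * 50 = 600 of which are re-used as the prompt for the next
+            window, which then contributes the remaining 750 tokens.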
+ """ + total_gen_len = int(self.duration * self.frame_rate) + max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate) + current_gen_offset: int = 0 + + def _progress_callback(generated_tokens: int, tokens_to_generate: int): + generated_tokens += current_gen_offset + if self._progress_callback is not None: + # Note that total_gen_len might be quite wrong depending on the + # codebook pattern used, but with delay it is almost accurate. + self._progress_callback(generated_tokens, total_gen_len) + else: + print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r') + + if prompt_tokens is not None: + assert max_prompt_len >= prompt_tokens.shape[-1], \ + "Prompt is longer than audio to generate" + + callback = None + if progress: + callback = _progress_callback + + if self.duration <= self.max_duration: + # generate by sampling from LM, simple case. + with self.autocast: + gen_tokens = self.lm.generate( + prompt_tokens, attributes, + callback=callback, max_gen_len=total_gen_len, **self.generation_params) + + else: + # now this gets a bit messier, we need to handle prompts, + # melody conditioning etc. + ref_wavs = [attr.wav['self_wav'] for attr in attributes] + all_tokens = [] + if prompt_tokens is None: + prompt_length = 0 + else: + all_tokens.append(prompt_tokens) + prompt_length = prompt_tokens.shape[-1] + + stride_tokens = int(self.frame_rate * self.extend_stride) + + while current_gen_offset + prompt_length < total_gen_len: + time_offset = current_gen_offset / self.frame_rate + chunk_duration = min(self.duration - time_offset, self.max_duration) + max_gen_len = int(chunk_duration * self.frame_rate) + for attr, ref_wav in zip(attributes, ref_wavs): + wav_length = ref_wav.length.item() + if wav_length == 0: + continue + # We will extend the wav periodically if it not long enough. + # we have to do it here rather than in conditioners.py as otherwise + # we wouldn't have the full wav. + initial_position = int(time_offset * self.sample_rate) + wav_target_length = int(self.max_duration * self.sample_rate) + print(initial_position / self.sample_rate, wav_target_length / self.sample_rate) + positions = torch.arange(initial_position, + initial_position + wav_target_length, device=self.device) + attr.wav['self_wav'] = WavCondition( + ref_wav[0][:, positions % wav_length], + torch.full_like(ref_wav[1], wav_target_length)) + with self.autocast: + gen_tokens = self.lm.generate( + prompt_tokens, attributes, + callback=callback, max_gen_len=max_gen_len, **self.generation_params) + if prompt_tokens is None: + all_tokens.append(gen_tokens) + else: + all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:]) + prompt_tokens = gen_tokens[:, :, stride_tokens:] + prompt_length = prompt_tokens.shape[-1] + current_gen_offset += stride_tokens + + gen_tokens = torch.cat(all_tokens, dim=-1) + + # generate audio + assert gen_tokens.dim() == 3 + with torch.no_grad(): + gen_audio = self.compression_model.decode(gen_tokens, None) + return gen_audio diff --git a/audiocraft/modules/__init__.py b/audiocraft/modules/__init__.py new file mode 100644 index 0000000..81ba30f --- /dev/null +++ b/audiocraft/modules/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
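+
+# Convenience re-exports of the low-level building blocks used across audiocraft
+# (normalized/streamable convolutions, streamable LSTM, SEANet encoder/decoder).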
+ +# flake8: noqa +from .conv import ( + NormConv1d, + NormConv2d, + NormConvTranspose1d, + NormConvTranspose2d, + StreamableConv1d, + StreamableConvTranspose1d, + pad_for_conv1d, + pad1d, + unpad1d, +) +from .lstm import StreamableLSTM +from .seanet import SEANetEncoder, SEANetDecoder diff --git a/audiocraft/modules/activations.py b/audiocraft/modules/activations.py new file mode 100644 index 0000000..8bd6f29 --- /dev/null +++ b/audiocraft/modules/activations.py @@ -0,0 +1,96 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +from torch import Tensor +from typing import Union, Callable + + +class CustomGLU(nn.Module): + """Custom Gated Linear Unit activation. + Applies a modified gated linear unit :math:`a * f(b)` where :math:`a` is the first half + of the input matrices, :math:`b` is the second half, and :math:`f` is a provided activation + function (i.e. sigmoid, swish, etc.). + + Args: + activation (nn.Module): The custom activation to apply in the Gated Linear Unit + dim (int): the dimension on which to split the input. Default: -1 + + Shape: + - Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional + dimensions + - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2` + + Examples:: + >>> m = CustomGLU(nn.Sigmoid()) + >>> input = torch.randn(4, 2) + >>> output = m(input) + """ + def __init__(self, activation: nn.Module, dim: int = -1): + super(CustomGLU, self).__init__() + self.dim = dim + self.activation = activation + + def forward(self, x: Tensor): + assert x.shape[self.dim] % 2 == 0 # M = N / 2 + a, b = torch.chunk(x, 2, dim=self.dim) + return a * self.activation(b) + + +class SwiGLU(CustomGLU): + """SiLU Gated Linear Unit activation. + Applies SiLU Gated Linear Unit :math:`a * SiLU(b)` where :math:`a` is + the first half of the input matrices, :math:`b` is the second half. + + Args: + dim (int): the dimension on which to split the input. Default: -1 + """ + def __init__(self, dim: int = -1): + super(SwiGLU, self).__init__(nn.SiLU(), dim) + + +class GeGLU(CustomGLU): + """GeLU Gated Linear Unit activation. + Applies GeLU Gated Linear Unit :math:`a * GELU(b)` where :math:`a` is + the first half of the input matrices, :math:`b` is the second half. + + Args: + dim (int): the dimension on which to split the input. Default: -1 + """ + def __init__(self, dim: int = -1): + super(GeGLU, self).__init__(nn.GELU(), dim) + + +class ReGLU(CustomGLU): + """ReLU Gated Linear Unit activation. + Applies ReLU Gated Linear Unit :math:`a * ReLU(b)` where :math:`a` is + the first half of the input matrices, :math:`b` is the second half. + + Args: + dim (int): the dimension on which to split the input. Default: -1 + """ + def __init__(self, dim: int = -1): + super(ReGLU, self).__init__(nn.ReLU(), dim) + + +def get_activation_fn( + activation: Union[str, Callable[[Tensor], Tensor]] +) -> Union[str, Callable[[Tensor], Tensor]]: + """Helper function to map an activation string to the activation class. + If the supplied activation is not a string that is recognized, the activation is passed back. 
+ + Args: + activation (Union[str, Callable[[Tensor], Tensor]]): Activation to check + """ + if isinstance(activation, str): + if activation == "reglu": + return ReGLU() + elif activation == "geglu": + return GeGLU() + elif activation == "swiglu": + return SwiGLU() + return activation diff --git a/audiocraft/modules/codebooks_patterns.py b/audiocraft/modules/codebooks_patterns.py new file mode 100644 index 0000000..c5b35cb --- /dev/null +++ b/audiocraft/modules/codebooks_patterns.py @@ -0,0 +1,539 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from collections import namedtuple +from dataclasses import dataclass +from functools import lru_cache +import logging +import typing as tp + +from abc import ABC, abstractmethod +import torch + +LayoutCoord = namedtuple('LayoutCoord', ['t', 'q']) # (timestep, codebook index) +PatternLayout = tp.List[tp.List[LayoutCoord]] # Sequence of coordinates +logger = logging.getLogger(__name__) + + +@dataclass +class Pattern: + """Base implementation of a pattern over a sequence with multiple codebooks. + + The codebook pattern consists in a layout, defining for each sequence step + the list of coordinates of each codebook timestep in the resulting interleaved sequence. + The first item of the pattern is always an empty list in order to properly insert a special token + to start with. For convenience, we also keep track of ``n_q`` the number of codebooks used for the pattern + and ``timesteps`` the number of timesteps corresponding to the original sequence. + + The pattern provides convenient methods to build and revert interleaved sequences from it: + ``build_pattern_sequence`` maps a given a dense input tensor of multi-codebook sequence from [B, K, T] + to the interleaved sequence of shape [B, K, S] applying the pattern, with S being the batch size, + K being the number of codebooks, T the number of original timesteps and S the number of sequence steps + for the output sequence. The unfilled positions are replaced with a special token and the built sequence + is returned along with a mask indicating valid tokens. + ``revert_pattern_sequence`` maps back an interleaved sequence of shape [B, K, S] to the original alignment + of codebooks across timesteps to an output tensor of shape [B, K, T], using again a special token and a mask + to fill and specify invalid positions if needed. + See the dedicated methods for more details. + """ + # Pattern layout, for each sequence step, we have a list of coordinates + # corresponding to the original codebook timestep and position. + # The first list is always an empty list in order to properly insert + # a special token to start with. + layout: PatternLayout + timesteps: int + n_q: int + + def __post_init__(self): + assert len(self.layout) > 0 + assert self.layout[0] == [] + self._validate_layout() + self._build_reverted_sequence_scatter_indexes = lru_cache(100)(self._build_reverted_sequence_scatter_indexes) + self._build_pattern_sequence_scatter_indexes = lru_cache(100)(self._build_pattern_sequence_scatter_indexes) + logger.info("New pattern, time steps: %d, sequence steps: %d", self.timesteps, len(self.layout)) + + def _validate_layout(self): + """Runs checks on the layout to ensure a valid pattern is defined. 
+ A pattern is considered invalid if: + - Multiple timesteps for a same codebook are defined in the same sequence step + - The timesteps for a given codebook are not in ascending order as we advance in the sequence + (this would mean that we have future timesteps before past timesteps). + """ + q_timesteps = {q: 0 for q in range(self.n_q)} + for s, seq_coords in enumerate(self.layout): + if len(seq_coords) > 0: + qs = set() + for coord in seq_coords: + qs.add(coord.q) + last_q_timestep = q_timesteps[coord.q] + assert coord.t >= last_q_timestep, \ + f"Past timesteps are found in the sequence for codebook = {coord.q} at step {s}" + q_timesteps[coord.q] = coord.t + # each sequence step contains at max 1 coordinate per codebook + assert len(qs) == len(seq_coords), \ + f"Multiple entries for a same codebook are found at step {s}" + + @property + def num_sequence_steps(self): + return len(self.layout) - 1 + + @property + def max_delay(self): + max_t_in_seq_coords = 0 + for seq_coords in self.layout[1:]: + for coords in seq_coords: + max_t_in_seq_coords = max(max_t_in_seq_coords, coords.t + 1) + return max_t_in_seq_coords - self.timesteps + + @property + def valid_layout(self): + valid_step = len(self.layout) - self.max_delay + return self.layout[:valid_step] + + def get_sequence_coords_with_timestep(self, t: int, q: tp.Optional[int] = None): + """Get codebook coordinates in the layout that corresponds to the specified timestep t + and optionally to the codebook q. Coordinates are returned as a tuple with the sequence step + and the actual codebook coordinates. + """ + assert t <= self.timesteps, "provided timesteps is greater than the pattern's number of timesteps" + if q is not None: + assert q <= self.n_q, "provided number of codebooks is greater than the pattern's number of codebooks" + coords = [] + for s, seq_codes in enumerate(self.layout): + for code in seq_codes: + if code.t == t and (q is None or code.q == q): + coords.append((s, code)) + return coords + + def get_steps_with_timestep(self, t: int, q: tp.Optional[int] = None) -> tp.List[int]: + return [step for step, coords in self.get_sequence_coords_with_timestep(t, q)] + + def get_first_step_with_timesteps(self, t: int, q: tp.Optional[int] = None) -> tp.Optional[int]: + steps_with_timesteps = self.get_steps_with_timestep(t, q) + return steps_with_timesteps[0] if len(steps_with_timesteps) > 0 else None + + def _build_pattern_sequence_scatter_indexes(self, timesteps: int, n_q: int, keep_only_valid_steps: bool, + device: tp.Union[torch.device, str] = 'cpu'): + """Build scatter indexes corresponding to the pattern, up to the provided sequence_steps. + + Args: + timesteps (int): Maximum number of timesteps steps to consider. + keep_only_valid_steps (bool): Restrict the pattern layout to match only valid steps. + device (Union[torch.device, str]): Device for created tensors. + Returns: + indexes (torch.Tensor): Indexes corresponding to the sequence, of shape [K, S]. + mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes, of shape [K, S]. 
+ """ + assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}" + assert timesteps <= self.timesteps, "invalid number of timesteps used to build the sequence from the pattern" + # use the proper layout based on whether we limit ourselves to valid steps only or not, + # note that using the valid_layout will result in a truncated sequence up to the valid steps + ref_layout = self.valid_layout if keep_only_valid_steps else self.layout + # single item indexing being super slow with pytorch vs. numpy, so we use numpy here + indexes = torch.zeros(n_q, len(ref_layout), dtype=torch.long).numpy() + mask = torch.zeros(n_q, len(ref_layout), dtype=torch.bool).numpy() + # fill indexes with last sequence step value that will correspond to our special token + # the last value is n_q * timesteps as we have flattened z and append special token as the last token + # which will correspond to the index: n_q * timesteps + indexes[:] = n_q * timesteps + # iterate over the pattern and fill scattered indexes and mask + for s, sequence_coords in enumerate(ref_layout): + for coords in sequence_coords: + if coords.t < timesteps: + indexes[coords.q, s] = coords.t + coords.q * timesteps + mask[coords.q, s] = 1 + indexes = torch.from_numpy(indexes).to(device) + mask = torch.from_numpy(mask).to(device) + return indexes, mask + + def build_pattern_sequence(self, z: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False): + """Build sequence corresponding to the pattern from the input tensor z. + The sequence is built using up to sequence_steps if specified, and non-pattern + coordinates are filled with the special token. + + Args: + z (torch.Tensor): Input tensor of multi-codebooks sequence, of shape [B, K, T]. + special_token (int): Special token used to fill non-pattern coordinates in the new sequence. + keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps. + Steps that are beyond valid steps will be replaced by the special_token in that case. + Returns: + values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, S] with S + corresponding either to the sequence_steps if provided, otherwise to the length of the pattern. + indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, S]. + mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, S]. + """ + B, K, T = z.shape + indexes, mask = self._build_pattern_sequence_scatter_indexes( + T, K, keep_only_valid_steps=keep_only_valid_steps, device=str(z.device) + ) + z = z.view(B, -1) + # we append the special token as the last index of our flattened z tensor + z = torch.cat([z, torch.zeros_like(z[:, :1]) + special_token], dim=1) + values = z[:, indexes.view(-1)] + values = values.view(B, K, indexes.shape[-1]) + return values, indexes, mask + + def _build_reverted_sequence_scatter_indexes(self, sequence_steps: int, n_q: int, + keep_only_valid_steps: bool = False, + is_model_output: bool = False, + device: tp.Union[torch.device, str] = 'cpu'): + """Builds scatter indexes required to retrieve the original multi-codebook sequence + from interleaving pattern. + + Args: + sequence_steps (int): Sequence steps. + n_q (int): Number of codebooks. + keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps. + Steps that are beyond valid steps will be replaced by the special_token in that case. 
+ is_model_output (bool): Whether to keep the sequence item corresponding to initial special token or not. + device (Union[torch.device, str]): Device for created tensors. + Returns: + torch.Tensor: Indexes for reconstructing the output, of shape [K, T]. + mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T]. + """ + ref_layout = self.valid_layout if keep_only_valid_steps else self.layout + # TODO(jade): Do we want to further truncate to only valid timesteps here as well? + timesteps = self.timesteps + assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}" + assert sequence_steps <= len(ref_layout), \ + f"sequence to revert is longer than the defined pattern: {sequence_steps} > {len(ref_layout)}" + + # ensure we take the appropriate indexes to keep the model output from the first special token as well + if is_model_output: + ref_layout = ref_layout[1:] + + # single item indexing being super slow with pytorch vs. numpy, so we use numpy here + indexes = torch.zeros(n_q, timesteps, dtype=torch.long).numpy() + mask = torch.zeros(n_q, timesteps, dtype=torch.bool).numpy() + # fill indexes with last sequence step value that will correspond to our special token + indexes[:] = n_q * sequence_steps + for s, sequence_codes in enumerate(ref_layout): + if s < sequence_steps: + for code in sequence_codes: + if code.t < timesteps: + indexes[code.q, code.t] = s + code.q * sequence_steps + mask[code.q, code.t] = 1 + indexes = torch.from_numpy(indexes).to(device) + mask = torch.from_numpy(mask).to(device) + return indexes, mask + + def revert_pattern_sequence(self, s: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False): + """Revert a sequence built from the pattern back to the original multi-codebook sequence without interleaving. + The sequence is reverted using up to timesteps if specified, and non-pattern coordinates + are filled with the special token. + + Args: + s (torch.Tensor): Interleaved sequence tensor obtained from the pattern, of shape [B, K, S]. + special_token (int or float): Special token used to fill non-pattern coordinates in the new sequence. + Returns: + values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, T] with T + corresponding either to the timesteps if provided, or the total timesteps in pattern otherwise. + indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, T]. + mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T]. + """ + B, K, S = s.shape + indexes, mask = self._build_reverted_sequence_scatter_indexes( + S, K, keep_only_valid_steps, is_model_output=False, device=str(s.device) + ) + s = s.view(B, -1) + # we append the special token as the last index of our flattened z tensor + s = torch.cat([s, torch.zeros_like(s[:, :1]) + special_token], dim=1) + values = s[:, indexes.view(-1)] + values = values.view(B, K, indexes.shape[-1]) + return values, indexes, mask + + def revert_pattern_logits(self, logits: torch.Tensor, special_token: float, keep_only_valid_steps: bool = False): + """Revert model logits obtained on a sequence built from the pattern + back to a tensor matching the original sequence. + + This method is similar to ``revert_pattern_sequence`` with the following specificities: + 1. It is designed to work with the extra cardinality dimension + 2. 
We return the logits for the first sequence item that matches the special_token and + which matching target in the original sequence is the first item of the sequence, + while we skip the last logits as there is no matching target + """ + B, card, K, S = logits.shape + indexes, mask = self._build_reverted_sequence_scatter_indexes( + S, K, keep_only_valid_steps, is_model_output=True, device=logits.device + ) + logits = logits.reshape(B, card, -1) + # we append the special token as the last index of our flattened z tensor + logits = torch.cat([logits, torch.zeros_like(logits[:, :, :1]) + special_token], dim=-1) # [B, card, K x S] + values = logits[:, :, indexes.view(-1)] + values = values.view(B, card, K, indexes.shape[-1]) + return values, indexes, mask + + +class CodebooksPatternProvider(ABC): + """Abstraction around providing pattern for interleaving codebooks. + + The CodebooksPatternProvider abstraction allows to implement various strategies to + define interleaving pattern of sequences composed of multiple codebooks. For a given + number of codebooks `n_q`, the pattern provider can generate a specified pattern + corresponding to a sequence of `T` timesteps with `n_q` parallel codebooks. This pattern + can be used to construct a new sequence from the original codes respecting the specified + pattern. The pattern is defined as a list of list of code coordinates, code coordinate + being a tuple with the original timestep and codebook to build the new sequence. + Note that all patterns must start with an empty list that is then used to insert a first + sequence step of special tokens in the newly generated sequence. + + Args: + n_q (int): number of codebooks. + cached (bool): if True, patterns for a given length are cached. In general + that should be true for efficiency reason to avoid synchronization points. + """ + def __init__(self, n_q: int, cached: bool = True): + assert n_q > 0 + self.n_q = n_q + self.get_pattern = lru_cache(100)(self.get_pattern) # type: ignore + + @abstractmethod + def get_pattern(self, timesteps: int) -> Pattern: + """Builds pattern with specific interleaving between codebooks. + + Args: + timesteps (int): Total numer of timesteps. + """ + raise NotImplementedError() + + +class DelayedPatternProvider(CodebooksPatternProvider): + """Provider for delayed pattern across delayed codebooks. + Codebooks are delayed in the sequence and sequence steps will contain codebooks + from different timesteps. + + Example: + Taking timesteps=4 and n_q=3, delays=None, the multi-codebook sequence: + [[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]] + The resulting sequence obtained from the returned pattern is: + [[S, 1, 2, 3, 4], + [S, S, 1, 2, 3], + [S, S, S, 1, 2]] + (with S being a special token) + + Args: + n_q (int): Number of codebooks. + delays (Optional[List[int]]): Delay for each of the codebooks. + If delays not defined, each codebook is delayed by 1 compared to the previous one. + flatten_first (int): Flatten the first N timesteps. + empty_initial (int): Prepend with N empty list of coordinates. 
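+
+    Usage sketch (purely illustrative; the values follow from ``get_pattern`` below):
+
+        >>> provider = DelayedPatternProvider(n_q=3)
+        >>> pattern = provider.get_pattern(timesteps=4)
+        >>> len(pattern.layout)   # 1 leading empty step + timesteps + max(delays)
+        7
+        >>> pattern.layout[2]     # codebook 0 sees t=1 while codebook 1 still sees t=0
+        [LayoutCoord(t=1, q=0), LayoutCoord(t=0, q=1)]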
+ """ + def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None, + flatten_first: int = 0, empty_initial: int = 0): + super().__init__(n_q) + if delays is None: + delays = list(range(n_q)) + self.delays = delays + self.flatten_first = flatten_first + self.empty_initial = empty_initial + assert len(self.delays) == self.n_q + assert sorted(self.delays) == self.delays + + def get_pattern(self, timesteps: int) -> Pattern: + out: PatternLayout = [[]] + max_delay = max(self.delays) + if self.empty_initial: + out += [[] for _ in range(self.empty_initial)] + if self.flatten_first: + for t in range(min(timesteps, self.flatten_first)): + for q in range(self.n_q): + out.append([LayoutCoord(t, q)]) + for t in range(self.flatten_first, timesteps + max_delay): + v = [] + for q, delay in enumerate(self.delays): + t_for_q = t - delay + if t_for_q >= self.flatten_first: + v.append(LayoutCoord(t_for_q, q)) + out.append(v) + return Pattern(out, n_q=self.n_q, timesteps=timesteps) + + +class ParallelPatternProvider(DelayedPatternProvider): + """Provider for parallel pattern across codebooks. + This pattern provider is a special case of the delayed pattern with actually no delay, + hence delays=repeat(0, n_q). + + Args: + n_q (int): Number of codebooks. + """ + def __init__(self, n_q: int): + super().__init__(n_q, [0] * n_q) + + +class UnrolledPatternProvider(CodebooksPatternProvider): + """Provider for unrolling codebooks pattern. + This pattern provider enables to represent the codebook flattened completely or only to some extend + while also specifying a given delay between the flattened codebooks representation, allowing to + unroll the codebooks in the sequence. + + Example: + 1. Flattening of the codebooks. + By default, the pattern provider will fully flatten the codebooks such as flattening=range(n_q), + taking n_q = 3 and timesteps = 4: + [[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]] + will result into: + [[S, S, 1, S, S, 2, S, S, 3, S, S, 4], + [S, 1, S, S, 2, S, S, 3, S, S, 4, S], + [1, S, S, 2, S, S, 3, S, S, 4, S, S]] + 2. Partial flattening of the codebooks. The ``flattening`` parameter allows to specify the inner step + for each of the codebook, allowing to define which codebook to flatten (or keep in parallel), for example + taking n_q = 3, timesteps = 4 and flattening = [0, 1, 1]: + [[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]] + will result into: + [[S, 1, S, S, 2, S, S, 3, S, S, 4, S], + [S, 1, S, S, 2, S, S, 3, S, S, 4, S], + [1, S, S, 2, S, S, 3, S, S, 4, S, S]] + 3. Flattening with delay. The ``delay`` parameter allows to further unroll the sequence of codebooks + allowing to specify the delay per codebook. Note that the delay between codebooks flattened to the + same inner timestep should be coherent. For example, taking n_q = 3, timesteps = 4, flattening = [0, 1, 1] + and delays = [0, 3, 3]: + [[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]] + will result into: + [[S, S, S, 1, S, 2, S, 3, S, 4], + [S, S, S, 1, S, 2, S, 3, S, 4], + [1, 2, 3, S, 4, S, 5, S, 6, S]] + + Args: + n_q (int): Number of codebooks. + flattening (Optional[List[int]]): Flattening schema over the codebooks. If not defined, + the codebooks will be flattened to 1 codebook per step, meaning that the sequence will + have n_q extra steps for each timestep. + delays (Optional[List[int]]): Delay for each of the codebooks. If not defined, + no delay is added and therefore will default to [0] * ``n_q``. 
+ Note that two codebooks that will be flattened to the same inner step + should have the same delay, otherwise the pattern is considered as invalid. + """ + FlattenedCodebook = namedtuple('FlattenedCodebook', ['codebooks', 'delay']) + + def __init__(self, n_q: int, flattening: tp.Optional[tp.List[int]] = None, + delays: tp.Optional[tp.List[int]] = None): + super().__init__(n_q) + if flattening is None: + flattening = list(range(n_q)) + if delays is None: + delays = [0] * n_q + assert len(flattening) == n_q + assert len(delays) == n_q + assert sorted(flattening) == flattening + assert sorted(delays) == delays + self._flattened_codebooks = self._build_flattened_codebooks(delays, flattening) + self.max_delay = max(delays) + + def _build_flattened_codebooks(self, delays: tp.List[int], flattening: tp.List[int]): + """Build a flattened codebooks representation as a dictionary of inner step + and the actual codebook indices corresponding to the flattened codebook. For convenience, we + also store the delay associated to the flattened codebook to avoid maintaining an extra mapping. + """ + flattened_codebooks: dict = {} + for q, (inner_step, delay) in enumerate(zip(flattening, delays)): + if inner_step not in flattened_codebooks: + flat_codebook = UnrolledPatternProvider.FlattenedCodebook(codebooks=[q], delay=delay) + else: + flat_codebook = flattened_codebooks[inner_step] + assert flat_codebook.delay == delay, ( + "Delay and flattening between codebooks is inconsistent: ", + "two codebooks flattened to the same position should have the same delay." + ) + flat_codebook.codebooks.append(q) + flattened_codebooks[inner_step] = flat_codebook + return flattened_codebooks + + @property + def _num_inner_steps(self): + """Number of inner steps to unroll between timesteps in order to flatten the codebooks. + """ + return max([inner_step for inner_step in self._flattened_codebooks.keys()]) + 1 + + def num_virtual_steps(self, timesteps: int) -> int: + return timesteps * self._num_inner_steps + 1 + + def get_pattern(self, timesteps: int) -> Pattern: + """Builds pattern for delay across codebooks. + + Args: + timesteps (int): Total numer of timesteps. + """ + # the PatternLayout is built as a tuple of sequence position and list of coordinates + # so that it can be reordered properly given the required delay between codebooks of given timesteps + indexed_out: list = [(-1, [])] + max_timesteps = timesteps + self.max_delay + for t in range(max_timesteps): + # for each timestep, we unroll the flattened codebooks, + # emitting the sequence step with the corresponding delay + for step in range(self._num_inner_steps): + if step in self._flattened_codebooks: + # we have codebooks at this virtual step to emit + step_codebooks = self._flattened_codebooks[step] + t_for_q = t + step_codebooks.delay + coords = [LayoutCoord(t, q) for q in step_codebooks.codebooks] + if t_for_q < max_timesteps and t < max_timesteps: + indexed_out.append((t_for_q, coords)) + else: + # there is no codebook in this virtual step so we emit an empty list + indexed_out.append((t, [])) + out = [coords for _, coords in sorted(indexed_out)] + return Pattern(out, n_q=self.n_q, timesteps=timesteps) + + +class VALLEPattern(CodebooksPatternProvider): + """Almost VALL-E style pattern. We futher allow some delays for the + codebooks other than the first one. + + Args: + n_q (int): Number of codebooks. + delays (Optional[List[int]]): Delay for each of the codebooks. + If delays not defined, each codebook is delayed by 1 compared to the previous one. 
+ """ + def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None): + super().__init__(n_q) + if delays is None: + delays = [0] * (n_q - 1) + self.delays = delays + assert len(self.delays) == self.n_q - 1 + assert sorted(self.delays) == self.delays + + def get_pattern(self, timesteps: int) -> Pattern: + out: PatternLayout = [[]] + for t in range(timesteps): + out.append([LayoutCoord(t, 0)]) + max_delay = max(self.delays) + for t in range(timesteps + max_delay): + v = [] + for q, delay in enumerate(self.delays): + t_for_q = t - delay + if t_for_q >= 0: + v.append(LayoutCoord(t_for_q, q + 1)) + out.append(v) + return Pattern(out, n_q=self.n_q, timesteps=timesteps) + + +class MusicLMPattern(CodebooksPatternProvider): + """Almost MusicLM style pattern. This is equivalent to full flattening + but in a different order. + + Args: + n_q (int): Number of codebooks. + group_by (int): Number of codebooks to group together. + """ + def __init__(self, n_q: int, group_by: int = 2): + super().__init__(n_q) + self.group_by = group_by + + def get_pattern(self, timesteps: int) -> Pattern: + out: PatternLayout = [[]] + for offset in range(0, self.n_q, self.group_by): + for t in range(timesteps): + for q in range(offset, offset + self.group_by): + out.append([LayoutCoord(t, q)]) + return Pattern(out, n_q=self.n_q, timesteps=timesteps) diff --git a/audiocraft/modules/conditioners.py b/audiocraft/modules/conditioners.py new file mode 100644 index 0000000..8279231 --- /dev/null +++ b/audiocraft/modules/conditioners.py @@ -0,0 +1,990 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from collections import defaultdict +from copy import deepcopy +from dataclasses import dataclass, field +from itertools import chain +import logging +import math +import random +import re +import typing as tp +import warnings + +from einops import rearrange +from num2words import num2words +import spacy +from transformers import T5EncoderModel, T5Tokenizer # type: ignore +import torchaudio +import torch +from torch import nn +from torch import Tensor +import torch.nn.functional as F +from torch.nn.utils.rnn import pad_sequence + +from .streaming import StreamingModule +from .transformer import create_sin_embedding +from ..data.audio_dataset import SegmentInfo +from ..utils.autocast import TorchAutocast +from ..utils.utils import hash_trick, length_to_mask, collate + + +logger = logging.getLogger(__name__) +TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) +ConditionType = tp.Tuple[Tensor, Tensor] # condition, mask + + +class WavCondition(tp.NamedTuple): + wav: Tensor + length: Tensor + path: tp.List[tp.Optional[str]] = [] + + +def nullify_condition(condition: ConditionType, dim: int = 1): + """This function transforms an input condition to a null condition. + The way it is done by converting it to a single zero vector similarly + to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. + + Args: + condition (ConditionType): a tuple of condition and mask (tp.Tuple[Tensor, Tensor]) + dim (int): the dimension that will be truncated (should be the time dimension) + WARNING!: dim should not be the batch dimension! + Returns: + ConditionType: a tuple of null condition and mask + """ + assert dim != 0, "dim cannot be the batch dimension!" 
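+    # Below, the condition is collapsed to a single all-zero step along `dim` and its
+    # mask is zeroed out, so it behaves as an empty (fully padded) condition downstream.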
+ assert type(condition) == tuple and \ + type(condition[0]) == Tensor and \ + type(condition[1]) == Tensor, "'nullify_condition' got an unexpected input type!" + cond, mask = condition + B = cond.shape[0] + last_dim = cond.dim() - 1 + out = cond.transpose(dim, last_dim) + out = 0. * out[..., :1] + out = out.transpose(dim, last_dim) + mask = torch.zeros((B, 1), device=out.device).int() + assert cond.dim() == out.dim() + return out, mask + + +def nullify_wav(wav: Tensor) -> WavCondition: + """Create a nullified WavCondition from a wav tensor with appropriate shape. + + Args: + wav (Tensor): tensor of shape [B, T] + Returns: + WavCondition: wav condition with nullified wav. + """ + null_wav, _ = nullify_condition((wav, torch.zeros_like(wav)), dim=wav.dim() - 1) + return WavCondition( + wav=null_wav, + length=torch.tensor([0] * wav.shape[0], device=wav.device), + path=['null_wav'] * wav.shape[0] + ) + + +@dataclass +class ConditioningAttributes: + text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) + wav: tp.Dict[str, WavCondition] = field(default_factory=dict) + + def __getitem__(self, item): + return getattr(self, item) + + @property + def text_attributes(self): + return self.text.keys() + + @property + def wav_attributes(self): + return self.wav.keys() + + @property + def attributes(self): + return {"text": self.text_attributes, "wav": self.wav_attributes} + + def to_flat_dict(self): + return { + **{f"text.{k}": v for k, v in self.text.items()}, + **{f"wav.{k}": v for k, v in self.wav.items()}, + } + + @classmethod + def from_flat_dict(cls, x): + out = cls() + for k, v in x.items(): + kind, att = k.split(".") + out[kind][att] = v + return out + + +class SegmentWithAttributes(SegmentInfo): + """Base class for all dataclasses that are used for conditioning. + All child classes should implement `to_condition_attributes` that converts + the existing attributes to a dataclass of type ConditioningAttributes. + """ + def to_condition_attributes(self) -> ConditioningAttributes: + raise NotImplementedError() + + +class Tokenizer: + """Base class for all tokenizers + (in case we want to introduce more advances tokenizers in the future). + """ + def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: + raise NotImplementedError() + + +class WhiteSpaceTokenizer(Tokenizer): + """This tokenizer should be used for natural language descriptions. + For example: + ["he didn't, know he's going home.", 'shorter sentence'] => + [[78, 62, 31, 4, 78, 25, 19, 34], + [59, 77, 0, 0, 0, 0, 0, 0]] + """ + PUNCTUATIONS = "?:!.,;" + + def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", + lemma: bool = True, stopwords: bool = True) -> None: + self.n_bins = n_bins + self.pad_idx = pad_idx + self.lemma = lemma + self.stopwords = stopwords + try: + self.nlp = spacy.load(language) + except IOError: + spacy.cli.download(language) # type: ignore + self.nlp = spacy.load(language) + + @tp.no_type_check + def __call__( + self, + texts: tp.List[tp.Optional[str]], + return_text: bool = False + ) -> tp.Tuple[Tensor, Tensor]: + """Take a list of strings and convert them to a tensor of indices. + + Args: + texts (tp.List[str]): List of strings. + return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. + Returns: + tp.Tuple[Tensor, Tensor]: + - Indices of words in the LUT. 
+ - And a mask indicating where the padding tokens are + """ + output, lengths = [], [] + texts = deepcopy(texts) + for i, text in enumerate(texts): + # if current sample doesn't have a certain attribute, replace with pad token + if text is None: + output.append(Tensor([self.pad_idx])) + lengths.append(0) + continue + + # convert numbers to words + text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore + # normalize text + text = self.nlp(text) # type: ignore + # remove stopwords + if self.stopwords: + text = [w for w in text if not w.is_stop] # type: ignore + # remove punctuations + text = [w for w in text if w.text not in self.PUNCTUATIONS] # type: ignore + # lemmatize if needed + text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore + + texts[i] = " ".join(text) + lengths.append(len(text)) + # convert to tensor + tokens = Tensor([hash_trick(w, self.n_bins) for w in text]) + output.append(tokens) + + mask = length_to_mask(torch.IntTensor(lengths)).int() + padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() + if return_text: + return padded_output, mask, texts # type: ignore + return padded_output, mask + + +class NoopTokenizer(Tokenizer): + """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. + The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split + strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will + split it to ["Jeff", "Buckley"] and return an index per word. + + For example: + ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] + ["Metal", "Rock", "Classical"] => [0, 223, 51] + """ + def __init__(self, n_bins: int, pad_idx: int = 0): + self.n_bins = n_bins + self.pad_idx = pad_idx + + def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: + output, lengths = [], [] + for text in texts: + # if current sample doesn't have a certain attribute, replace with pad token + if text is None: + output.append(self.pad_idx) + lengths.append(0) + else: + output.append(hash_trick(text, self.n_bins)) + lengths.append(1) + + tokens = torch.LongTensor(output).unsqueeze(1) + mask = length_to_mask(torch.IntTensor(lengths)).int() + return tokens, mask + + +class BaseConditioner(nn.Module): + """Base model for all conditioner modules. We allow the output dim to be different + than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; + 2) make all condition dims consistent. + + Args: + dim (int): Hidden dim of the model (text-encoder/LUT). + output_dim (int): Output dim of the conditioner. + """ + def __init__(self, dim, output_dim): + super().__init__() + self.dim = dim + self.output_dim = output_dim + self.output_proj = nn.Linear(dim, output_dim) + + def tokenize(self, *args, **kwargs) -> tp.Any: + """Should be any part of the processing that will lead to a synchronization + point, e.g. BPE tokenization with transfer to the GPU. + + The returned value will be saved and return later when calling forward(). + """ + raise NotImplementedError() + + def forward(self, inputs: tp.Any) -> ConditionType: + """Gets input that should be used as conditioning (e.g, genre, description or a waveform). + Outputs a ConditionType, after the input data was embedded as a dense vector. + + Returns: + ConditionType: + - A tensor of size [B, T, D] where B is the batch size, T is the length of the + output embedding and D is the dimension of the embedding. 
+ - And a mask indicating where the padding tokens. + """ + raise NotImplementedError() + + +class TextConditioner(BaseConditioner): + ... + + +class LUTConditioner(TextConditioner): + """Lookup table TextConditioner. + + Args: + n_bins (int): Number of bins. + dim (int): Hidden dim of the model (text-encoder/LUT). + output_dim (int): Output dim of the conditioner. + tokenizer (str): Name of the tokenizer. + pad_idx (int, optional): Index for padding token. Defaults to 0. + """ + def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): + super().__init__(dim, output_dim) + self.embed = nn.Embedding(n_bins, dim) + self.tokenizer: Tokenizer + if tokenizer == "whitespace": + self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) + elif tokenizer == "noop": + self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) + else: + raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") + + def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: + device = self.embed.weight.device + tokens, mask = self.tokenizer(x) + tokens, mask = tokens.to(device), mask.to(device) + return tokens, mask + + def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: + tokens, mask = inputs + embeds = self.embed(tokens) + embeds = self.output_proj(embeds) + embeds = (embeds * mask.unsqueeze(-1)) + return embeds, mask + + +class T5Conditioner(TextConditioner): + """T5-based TextConditioner. + + Args: + name (str): Name of the T5 model. + output_dim (int): Output dim of the conditioner. + finetune (bool): Whether to fine-tune T5 at train time. + device (str): Device for T5 Conditioner. + autocast_dtype (tp.Optional[str], optional): Autocast dtype. + word_dropout (float, optional): Word dropout probability. + normalize_text (bool, optional): Whether to apply text normalization. + """ + MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", + "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", + "google/flan-t5-xl", "google/flan-t5-xxl"] + MODELS_DIMS = { + "t5-small": 512, + "t5-base": 768, + "t5-large": 1024, + "t5-3b": 1024, + "t5-11b": 1024, + "google/flan-t5-small": 512, + "google/flan-t5-base": 768, + "google/flan-t5-large": 1024, + "google/flan-t5-3b": 1024, + "google/flan-t5-11b": 1024, + } + + def __init__(self, name: str, output_dim: int, finetune: bool, device: str, + autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., + normalize_text: bool = False): + assert name in self.MODELS, f"unrecognized t5 model name (should in {self.MODELS})" + super().__init__(self.MODELS_DIMS[name], output_dim) + self.device = device + self.name = name + self.finetune = finetune + self.word_dropout = word_dropout + + if autocast_dtype is None or self.device == 'cpu': + self.autocast = TorchAutocast(enabled=False) + if self.device != 'cpu': + logger.warning("T5 has no autocast, this might lead to NaN") + else: + dtype = getattr(torch, autocast_dtype) + assert isinstance(dtype, torch.dtype) + logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") + self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) + # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
+ # thanks https://gist.github.com/simon-weber/7853144 + previous_level = logging.root.manager.disable + logging.disable(logging.ERROR) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + self.t5_tokenizer = T5Tokenizer.from_pretrained(name) + t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) + finally: + logging.disable(previous_level) + if finetune: + self.t5 = t5 + else: + # this makes sure that the t5 models is not part + # of the saved checkpoint + self.__dict__["t5"] = t5.to(device) + + self.normalize_text = normalize_text + if normalize_text: + self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) + + def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: + # if current sample doesn't have a certain attribute, replace with empty string + entries: tp.List[str] = [xi if xi is not None else "" for xi in x] + if self.normalize_text: + _, _, entries = self.text_normalizer(entries, return_text=True) + if self.word_dropout > 0. and self.training: + new_entries = [] + for entry in entries: + words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] + new_entries.append(" ".join(words)) + entries = new_entries + + empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) + + inputs = self.t5_tokenizer(entries, return_tensors="pt", padding=True).to(self.device) + mask = inputs["attention_mask"] + mask[empty_idx, :] = 0 # zero-out index where the input is non-existant + return inputs + + def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: + mask = inputs["attention_mask"] + with torch.set_grad_enabled(self.finetune), self.autocast: + embeds = self.t5(**inputs).last_hidden_state + embeds = self.output_proj(embeds.to(self.output_proj.weight)) + embeds = (embeds * mask.unsqueeze(-1)) + return embeds, mask + + +class WaveformConditioner(BaseConditioner): + """Base class for all conditioners that take a waveform as input. + Classes that inherit must implement `_get_wav_embedding` that outputs + a continuous tensor, and `_downsampling_factor` that returns the down-sampling + factor of the embedding model. + + Args: + dim (int): The internal representation dimension. + output_dim (int): Output dimension. + device (tp.Union[torch.device, str]): Device. + """ + def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): + super().__init__(dim, output_dim) + self.device = device + + def tokenize(self, wav_length: WavCondition) -> WavCondition: + wav, length, path = wav_length + assert length is not None + return WavCondition(wav.to(self.device), length.to(self.device), path) + + def _get_wav_embedding(self, wav: Tensor) -> Tensor: + """Gets as input a wav and returns a dense vector of conditions.""" + raise NotImplementedError() + + def _downsampling_factor(self): + """Returns the downsampling factor of the embedding model.""" + raise NotImplementedError() + + def forward(self, inputs: WavCondition) -> ConditionType: + """ + Args: + input (WavCondition): Tuple of (waveform, lengths). + Returns: + ConditionType: Dense vector representing the conditioning along with its' mask. 
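+
+        Example (shape sketch, illustrative; assumes a concrete subclass):
+            a conditioning wav of shape `[B, 1, T]` is embedded into a tensor of
+            shape `[B, T', output_dim]` together with a mask of shape `[B, T']`,
+            where `T'` is roughly `T / self._downsampling_factor()`.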
+ """ + wav, lengths, path = inputs + with torch.no_grad(): + embeds = self._get_wav_embedding(wav) + embeds = embeds.to(self.output_proj.weight) + embeds = self.output_proj(embeds) + + if lengths is not None: + lengths = lengths / self._downsampling_factor() + mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore + else: + mask = torch.ones_like(embeds) + embeds = (embeds * mask.unsqueeze(2).to(self.device)) + + return embeds, mask + + +class ChromaStemConditioner(WaveformConditioner): + """Chroma conditioner that uses DEMUCS to first filter out drums and bass. The is followed by + the insight the drums and bass often dominate the chroma, leading to the chroma not containing the + information about melody. + + Args: + output_dim (int): Output dimension for the conditioner. + sample_rate (int): Sample rate for the chroma extractor. + n_chroma (int): Number of chroma for the chroma extractor. + radix2_exp (int): Radix2 exponent for the chroma extractor. + duration (float): Duration used during training. This is later used for correct padding + in case we are using chroma as prefix. + match_len_on_eval (bool, optional): If True then all chromas are padded to the training + duration. Defaults to False. + eval_wavs (str, optional): Path to a json egg with waveform, this waveforms are used as + conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). + Defaults to None. + n_eval_wavs (int, optional): Limits the number of waveforms used for conditioning. Defaults to 0. + device (tp.Union[torch.device, str], optional): Device for the conditioner. + **kwargs: Additional parameters for the chroma extractor. + """ + def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, + duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, + n_eval_wavs: int = 0, device: tp.Union[torch.device, str] = "cpu", **kwargs): + from demucs import pretrained + super().__init__(dim=n_chroma, output_dim=output_dim, device=device) + self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) + self.sample_rate = sample_rate + self.match_len_on_eval = match_len_on_eval + self.duration = duration + self.__dict__["demucs"] = pretrained.get_model('htdemucs').to(device) + self.stem2idx = {'drums': 0, 'bass': 1, 'other': 2, 'vocal': 3} + self.stem_idx = torch.LongTensor([self.stem2idx['vocal'], self.stem2idx['other']]).to(device) + self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, + device=device, **kwargs) + self.chroma_len = self._get_chroma_len() + + def _downsampling_factor(self): + return self.chroma.winhop + + def _get_chroma_len(self): + """Get length of chroma during training""" + dummy_wav = torch.zeros((1, self.sample_rate * self.duration), device=self.device) + dummy_chr = self.chroma(dummy_wav) + return dummy_chr.shape[1] + + @torch.no_grad() + def _get_filtered_wav(self, wav): + from demucs.apply import apply_model + from demucs.audio import convert_audio + with self.autocast: + wav = convert_audio(wav, self.sample_rate, self.demucs.samplerate, self.demucs.audio_channels) + stems = apply_model(self.demucs, wav, device=self.device) + stems = stems[:, self.stem_idx] # extract stem + stems = stems.sum(1) # merge extracted stems + stems = stems.mean(1, keepdim=True) # mono + stems = convert_audio(stems, self.demucs.samplerate, self.sample_rate, 1) + return stems + + @torch.no_grad() + def _get_wav_embedding(self, 
wav): + # avoid 0-size tensors when we are working with null conds + if wav.shape[-1] == 1: + return self.chroma(wav) + stems = self._get_filtered_wav(wav) + chroma = self.chroma(stems) + + if self.match_len_on_eval: + b, t, c = chroma.shape + if t > self.chroma_len: + chroma = chroma[:, :self.chroma_len] + logger.debug(f'chroma was truncated! ({t} -> {chroma.shape[1]})') + elif t < self.chroma_len: + # chroma = F.pad(chroma, (0, 0, 0, self.chroma_len - t)) + n_repeat = int(math.ceil(self.chroma_len / t)) + chroma = chroma.repeat(1, n_repeat, 1) + chroma = chroma[:, :self.chroma_len] + logger.debug(f'chroma was zero-padded! ({t} -> {chroma.shape[1]})') + return chroma + + +class ChromaExtractor(nn.Module): + """Chroma extraction class, handles chroma extraction and quantization. + + Args: + sample_rate (int): Sample rate. + n_chroma (int): Number of chroma to consider. + radix2_exp (int): Radix2 exponent. + nfft (tp.Optional[int], optional): Number of FFT. + winlen (tp.Optional[int], optional): Window length. + winhop (tp.Optional[int], optional): Window hop size. + argmax (bool, optional): Whether to use argmax. Defaults to False. + norm (float, optional): Norm for chroma normalization. Defaults to inf. + device (tp.Union[torch.device, str], optional): Device to use. Defaults to cpu. + """ + def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, + nfft: tp.Optional[int] = None, winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, + argmax: bool = False, norm: float = torch.inf, device: tp.Union[torch.device, str] = "cpu"): + super().__init__() + from librosa import filters + self.device = device + self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) + self.winlen = winlen or 2 ** radix2_exp + self.nfft = nfft or self.winlen + self.winhop = winhop or (self.winlen // 4) + self.sr = sample_rate + self.n_chroma = n_chroma + self.norm = norm + self.argmax = argmax + self.window = torch.hann_window(self.winlen).to(device) + self.fbanks = torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0, + n_chroma=self.n_chroma)).to(device) + self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen, + hop_length=self.winhop, power=2, center=True, + pad=0, normalized=True).to(device) + + def forward(self, wav): + with self.autocast: + T = wav.shape[-1] + # in case we are getting a wav that was dropped out (nullified) + # make sure wav length is no less that nfft + if T < self.nfft: + pad = self.nfft - T + r = 0 if pad % 2 == 0 else 1 + wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0) + assert wav.shape[-1] == self.nfft, f'expected len {self.nfft} but got {wav.shape[-1]}' + spec = self.spec(wav).squeeze(1) + raw_chroma = torch.einsum("cf,...ft->...ct", self.fbanks, spec) + norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6) + norm_chroma = rearrange(norm_chroma, "b d t -> b t d") + + if self.argmax: + idx = norm_chroma.argmax(-1, keepdims=True) + norm_chroma[:] = 0 + norm_chroma.scatter_(dim=-1, index=idx, value=1) + + return norm_chroma + + +def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str): + """Utility function for nullifying an attribute inside an ConditioningAttributes object. + If the condition is of type "wav", then nullify it using "nullify_condition". + If the condition is of any other type, set its' value to None. + Works in-place. 
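+
+    Example (illustrative; "genre" and "self_wav" stand in for whatever conditions
+    actually exist on the sample):
+
+        dropout_condition(sample, "text", "genre")     # sets sample.text["genre"] = None
+        dropout_condition(sample, "wav", "self_wav")   # replaces sample.wav["self_wav"] with a null waveform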
+ """ + if condition_type not in ["text", "wav"]: + raise ValueError( + "dropout_condition got an unexpected condition type!" + f" expected 'wav' or 'text' but got '{condition_type}'" + ) + + if condition not in getattr(sample, condition_type): + raise ValueError( + "dropout_condition received an unexpected condition!" + f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" + f"but got '{condition}' of type '{condition_type}'!" + ) + + if condition_type == "wav": + wav, length, path = sample.wav[condition] + sample.wav[condition] = nullify_wav(wav) + else: + sample.text[condition] = None + + return sample + + +class DropoutModule(nn.Module): + """Base class for all dropout modules.""" + def __init__(self, seed: int = 1234): + super().__init__() + self.rng = torch.Generator() + self.rng.manual_seed(seed) + + +class AttributeDropout(DropoutModule): + """Applies dropout with a given probability per attribute. This is different from the behavior of + ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example, + "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout + where if "artist" is dropped "genre" must also be dropped. + + Args: + p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: + ... + "genre": 0.1, + "artist": 0.5, + "wav": 0.25, + ... + active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. + seed (int, optional): Random seed. + """ + def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): + super().__init__(seed=seed) + self.active_on_eval = active_on_eval + # construct dict that return the values from p otherwise 0 + self.p = {} + for condition_type, probs in p.items(): + self.p[condition_type] = defaultdict(lambda: 0, probs) + + def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: + """ + Args: + samples (tp.List[ConditioningAttributes]): List of conditions. + Returns: + tp.List[ConditioningAttributes]: List of conditions after certain attributes were set to None. + """ + if not self.training and not self.active_on_eval: + return samples + + samples = deepcopy(samples) + + for condition_type, ps in self.p.items(): # for condition types [text, wav] + for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) + if torch.rand(1, generator=self.rng).item() < p: + for sample in samples: + dropout_condition(sample, condition_type, condition) + + return samples + + def __repr__(self): + return f"AttributeDropout({dict(self.p)})" + + +class ClassifierFreeGuidanceDropout(DropoutModule): + """Applies Classifier Free Guidance dropout, meaning all attributes + are dropped with the same probability. + + Args: + p (float): Probability to apply condition dropout during training. + seed (int): Random seed. + """ + def __init__(self, p: float, seed: int = 1234): + super().__init__(seed=seed) + self.p = p + + def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: + """ + Args: + samples (tp.List[ConditioningAttributes]): List of conditions. + Returns: + tp.List[ConditioningAttributes]: List of conditions after all attributes were set to None. 
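+
+        Example (illustrative):
+
+            cfg_dropout = ClassifierFreeGuidanceDropout(p=0.3)
+            samples = cfg_dropout(samples)
+            # During training, roughly 30% of the calls nullify every text/wav
+            # condition of the whole batch at once; at eval time this is a no-op.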
+ """ + if not self.training: + return samples + + # decide on which attributes to drop in a batched fashion + drop = torch.rand(1, generator=self.rng).item() < self.p + if not drop: + return samples + + # nullify conditions of all attributes + samples = deepcopy(samples) + + for condition_type in ["wav", "text"]: + for sample in samples: + for condition in sample.attributes[condition_type]: + dropout_condition(sample, condition_type, condition) + + return samples + + def __repr__(self): + return f"ClassifierFreeGuidanceDropout(p={self.p})" + + +class ConditioningProvider(nn.Module): + """Main class to provide conditions given all the supported conditioners. + + Args: + conditioners (dict): Dictionary of conditioners. + merge_text_conditions_p (float, optional): Probability to merge all text sources + into a single text condition. Defaults to 0. + drop_desc_p (float, optional): Probability to drop the original description + when merging all text sources into a single text condition. Defaults to 0. + device (tp.Union[torch.device, str], optional): Device for conditioners and output condition types. + """ + def __init__( + self, + conditioners: tp.Dict[str, BaseConditioner], + merge_text_conditions_p: float = 0, + drop_desc_p: float = 0, + device: tp.Union[torch.device, str] = "cpu", + ): + super().__init__() + self.device = device + self.merge_text_conditions_p = merge_text_conditions_p + self.drop_desc_p = drop_desc_p + self.conditioners = nn.ModuleDict(conditioners) + + @property + def text_conditions(self): + return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] + + @property + def wav_conditions(self): + return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] + + @property + def has_wav_condition(self): + return len(self.wav_conditions) > 0 + + def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: + """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. + This should be called before starting any real GPU work to avoid synchronization points. + This will return a dict matching conditioner names to their arbitrary tokenized representations. + + Args: + inputs (list[ConditioningAttribres]): List of ConditioningAttributes objects containing + text and wav conditions. + """ + assert all([type(x) == ConditioningAttributes for x in inputs]), \ + "got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]" \ + f" but types were {set([type(x) for x in inputs])}" + + output = {} + text = self._collate_text(inputs) + wavs = self._collate_wavs(inputs) + + assert set(text.keys() | wavs.keys()).issubset(set(self.conditioners.keys())), \ + f"got an unexpected attribute! Expected {self.conditioners.keys()}, got {text.keys(), wavs.keys()}" + + for attribute, batch in chain(text.items(), wavs.items()): + output[attribute] = self.conditioners[attribute].tokenize(batch) + return output + + def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: + """Compute pairs of `(embedding, mask)` using the configured conditioners + and the tokenized representations. The output is for example: + + { + "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), + "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), + ... + } + + Args: + tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. 
+        """
+        output = {}
+        for attribute, inputs in tokenized.items():
+            condition, mask = self.conditioners[attribute](inputs)
+            output[attribute] = (condition, mask)
+        return output
+
+    def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]:
+        """Given a list of ConditioningAttributes objects, compile a dictionary where the keys
+        are the attributes and the values are the aggregated input per attribute.
+        For example:
+        Input:
+        [
+            ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...),
+            ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...),
+        ]
+        Output:
+        {
+            "genre": ["Rock", "Hip-hop"],
+            "description": ["A rock song with a guitar solo", "A hip-hop verse"]
+        }
+        """
+        batch_per_attribute: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list)
+
+        def _merge_conds(cond, merge_text_conditions_p=0, drop_desc_p=0):
+            def is_valid(k, v):
+                k_valid = k in ['key', 'bpm', 'genre', 'moods', 'instrument']
+                v_valid = v is not None and isinstance(v, (int, float, str, list))
+                return k_valid and v_valid
+
+            def process_value(v):
+                if isinstance(v, (int, float, str)):
+                    return v
+                if isinstance(v, list):
+                    return ", ".join(v)
+                else:
+                    raise RuntimeError(f"unknown type for text value! ({type(v), v})")
+
+            desc = cond.text['description']
+            meta_data = ""
+            if random.uniform(0, 1) < merge_text_conditions_p:
+                meta_pairs = [f'{k}: {process_value(v)}' for k, v in cond.text.items() if is_valid(k, v)]
+                random.shuffle(meta_pairs)
+                meta_data = ". ".join(meta_pairs)
+                desc = desc if not random.uniform(0, 1) < drop_desc_p else None
+
+            if desc is None:
+                desc = meta_data if len(meta_data) > 1 else None
+            else:
+                desc = desc.rstrip('.') + ". " + meta_data
+            cond.text['description'] = desc.strip() if desc else None
+
+        if self.training and self.merge_text_conditions_p:
+            for sample in samples:
+                _merge_conds(sample, self.merge_text_conditions_p, self.drop_desc_p)
+
+        texts = [x.text for x in samples]
+        for text in texts:
+            for condition in self.text_conditions:
+                batch_per_attribute[condition].append(text[condition])
+
+        return batch_per_attribute
+
+    def _collate_wavs(self, samples: tp.List[ConditioningAttributes]):
+        """Generate a dict where the keys are attributes by which we fetch similar wavs,
+        and the values are Tensors of wavs according to said attributes.
+
+        *Note*: by the time the samples reach this function, each sample should have some waveform
+        inside the "wav" attribute. It should be either:
+        1. A real waveform
+        2. A null waveform due to the sample having no similar waveforms (nullified by the dataset)
+        3. A null waveform due to it being dropped in a dropout module (nullified by dropout)
+
+        Args:
+            samples (tp.List[ConditioningAttributes]): List of ConditioningAttributes samples.
+        Returns:
+            dict: A dictionary mapping an attribute name to wavs.
+        """
+        wavs = defaultdict(list)
+        lens = defaultdict(list)
+        paths = defaultdict(list)
+        out = {}
+
+        for sample in samples:
+            for attribute in self.wav_conditions:
+                wav, length, path = sample.wav[attribute]
+                wavs[attribute].append(wav.flatten())
+                lens[attribute].append(length)
+                paths[attribute].append(path)
+
+        # stack all wavs to a single tensor
+        for attribute in self.wav_conditions:
+            stacked_wav, _ = collate(wavs[attribute], dim=0)
+            out[attribute] = WavCondition(stacked_wav.unsqueeze(1),
+                                          torch.cat(lens['self_wav']), paths[attribute])  # type: ignore
+
+        return out
+
+
+class ConditionFuser(StreamingModule):
+    """Condition fuser handles the logic to combine the different conditions
+    to the actual model input.
+
+    Args:
+        fuse2cond (tp.Dict[str, tp.List[str]]): A dictionary that says how to fuse
+            each condition. For example:
+            {
+                "prepend": ["description"],
+                "sum": ["genre", "bpm"],
+                "cross": ["description"],
+            }
+        cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention.
+        cross_attention_pos_emb_scale (float): Scale for positional embeddings in cross attention if used.
+    """
+    FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"]
+
+    def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False,
+                 cross_attention_pos_emb_scale: float = 1.0):
+        super().__init__()
+        assert all(
+            [k in self.FUSING_METHODS for k in fuse2cond.keys()]
+        ), f"got invalid fuse method, allowed methods: {self.FUSING_METHODS}"
+        self.cross_attention_pos_emb = cross_attention_pos_emb
+        self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale
+        self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond
+        self.cond2fuse: tp.Dict[str, str] = {}
+        for fuse_method, conditions in fuse2cond.items():
+            for condition in conditions:
+                self.cond2fuse[condition] = fuse_method
+
+    def forward(
+        self,
+        input: Tensor,
+        conditions: tp.Dict[str, ConditionType]
+    ) -> tp.Tuple[Tensor, tp.Optional[Tensor]]:
+        """Fuse the conditions to the provided model input.
+
+        Args:
+            input (Tensor): Transformer input.
+            conditions (tp.Dict[str, ConditionType]): Dict of conditions.
+        Returns:
+            tp.Tuple[Tensor, Tensor]: The first tensor is the transformer input
+                after the conditions have been fused. The second output tensor is the tensor
+                used for cross-attention or None if no cross attention inputs exist.
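+
+        Example (illustrative sketch, reusing the "description" condition from the
+        class docstring):
+
+            fuser = ConditionFuser({"cross": ["description"]})
+            x, cross = fuser(transformer_input, {"description": (desc_emb, desc_mask)})
+            # `x` keeps the shape of `transformer_input`; `cross` carries `desc_emb`
+            # (or None when no "cross" condition was provided).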
+ """ + B, T, _ = input.shape + + if 'offsets' in self._streaming_state: + first_step = False + offsets = self._streaming_state['offsets'] + else: + first_step = True + offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) + + assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ + f"given conditions contain unknown attributes for fuser, " \ + f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" + cross_attention_output = None + for cond_type, (cond, cond_mask) in conditions.items(): + op = self.cond2fuse[cond_type] + if op == "sum": + input += cond + elif op == "input_interpolate": + cond = rearrange(cond, "b t d -> b d t") + cond = F.interpolate(cond, size=input.shape[1]) + input += rearrange(cond, "b d t -> b t d") + elif op == "prepend": + if first_step: + input = torch.cat([cond, input], dim=1) + elif op == "cross": + if cross_attention_output is not None: + cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) + else: + cross_attention_output = cond + else: + raise ValueError(f"unknown op ({op})") + + if self.cross_attention_pos_emb and cross_attention_output is not None: + positions = torch.arange( + cross_attention_output.shape[1], + device=cross_attention_output.device + ).view(1, -1, 1) + pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) + cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb + + if self._is_streaming: + self._streaming_state['offsets'] = offsets + T + + return input, cross_attention_output diff --git a/audiocraft/modules/conv.py b/audiocraft/modules/conv.py new file mode 100644 index 0000000..972938a --- /dev/null +++ b/audiocraft/modules/conv.py @@ -0,0 +1,245 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +import typing as tp +import warnings + +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.utils import spectral_norm, weight_norm + + +CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm', + 'time_group_norm']) + + +def apply_parametrization_norm(module: nn.Module, norm: str = 'none'): + assert norm in CONV_NORMALIZATIONS + if norm == 'weight_norm': + return weight_norm(module) + elif norm == 'spectral_norm': + return spectral_norm(module) + else: + # We already check was in CONV_NORMALIZATION, so any other choice + # doesn't need reparametrization. + return module + + +def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs): + """Return the proper normalization module. If causal is True, this will ensure the returned + module is causal, or return an error if the normalization doesn't support causal evaluation. + """ + assert norm in CONV_NORMALIZATIONS + if norm == 'time_group_norm': + if causal: + raise ValueError("GroupNorm doesn't support causal evaluation.") + assert isinstance(module, nn.modules.conv._ConvNd) + return nn.GroupNorm(1, module.out_channels, **norm_kwargs) + else: + return nn.Identity() + + +def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, + padding_total: int = 0) -> int: + """See `pad_for_conv1d`. 
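+
+    Example (worked numbers, illustrative): with `length=9`, `kernel_size=4`, `stride=2`
+    and `padding_total=2`, we get `n_frames = (9 - 4 + 2) / 2 + 1 = 4.5`, an ideal length
+    of `(5 - 1) * 2 + (4 - 2) = 10`, and therefore 1 extra sample of padding.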
+ """ + length = x.shape[-1] + n_frames = (length - kernel_size + padding_total) / stride + 1 + ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) + return ideal_length - length + + +def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0): + """Pad for a convolution to make sure that the last window is full. + Extra padding is added at the end. This is required to ensure that we can rebuild + an output of the same length, as otherwise, even with padding, some time steps + might get removed. + For instance, with total padding = 4, kernel size = 4, stride = 2: + 0 0 1 2 3 4 5 0 0 # (0s are padding) + 1 2 3 # (output frames of a convolution, last 0 is never used) + 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding) + 1 2 3 4 # once you removed padding, we are missing one time step ! + """ + extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total) + return F.pad(x, (0, extra_padding)) + + +def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.): + """Tiny wrapper around F.pad, just to allow for reflect padding on small input. + If this is the case, we insert extra 0 padding to the right before the reflection happen. + """ + length = x.shape[-1] + padding_left, padding_right = paddings + assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) + if mode == 'reflect': + max_pad = max(padding_left, padding_right) + extra_pad = 0 + if length <= max_pad: + extra_pad = max_pad - length + 1 + x = F.pad(x, (0, extra_pad)) + padded = F.pad(x, paddings, mode, value) + end = padded.shape[-1] - extra_pad + return padded[..., :end] + else: + return F.pad(x, paddings, mode, value) + + +def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]): + """Remove padding from x, handling properly zero padding. Only for 1d! + """ + padding_left, padding_right = paddings + assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) + assert (padding_left + padding_right) <= x.shape[-1] + end = x.shape[-1] - padding_right + return x[..., padding_left: end] + + +class NormConv1d(nn.Module): + """Wrapper around Conv1d and normalization applied to this conv + to provide a uniform interface across normalization approaches. + """ + def __init__(self, *args, causal: bool = False, norm: str = 'none', + norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): + super().__init__() + self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm) + self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class NormConv2d(nn.Module): + """Wrapper around Conv2d and normalization applied to this conv + to provide a uniform interface across normalization approaches. + """ + def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): + super().__init__() + self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm) + self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class NormConvTranspose1d(nn.Module): + """Wrapper around ConvTranspose1d and normalization applied to this conv + to provide a uniform interface across normalization approaches. 
+ """ + def __init__(self, *args, causal: bool = False, norm: str = 'none', + norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): + super().__init__() + self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm) + self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.convtr(x) + x = self.norm(x) + return x + + +class NormConvTranspose2d(nn.Module): + """Wrapper around ConvTranspose2d and normalization applied to this conv + to provide a uniform interface across normalization approaches. + """ + def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): + super().__init__() + self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm) + self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs) + + def forward(self, x): + x = self.convtr(x) + x = self.norm(x) + return x + + +class StreamableConv1d(nn.Module): + """Conv1d with some builtin handling of asymmetric or causal padding + and normalization. + """ + def __init__(self, in_channels: int, out_channels: int, + kernel_size: int, stride: int = 1, dilation: int = 1, + groups: int = 1, bias: bool = True, causal: bool = False, + norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, + pad_mode: str = 'reflect'): + super().__init__() + # warn user on unusual setup between dilation and stride + if stride > 1 and dilation > 1: + warnings.warn('StreamableConv1d has been initialized with stride > 1 and dilation > 1' + f' (kernel_size={kernel_size} stride={stride}, dilation={dilation}).') + self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride, + dilation=dilation, groups=groups, bias=bias, causal=causal, + norm=norm, norm_kwargs=norm_kwargs) + self.causal = causal + self.pad_mode = pad_mode + + def forward(self, x): + B, C, T = x.shape + kernel_size = self.conv.conv.kernel_size[0] + stride = self.conv.conv.stride[0] + dilation = self.conv.conv.dilation[0] + kernel_size = (kernel_size - 1) * dilation + 1 # effective kernel size with dilations + padding_total = kernel_size - stride + extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total) + if self.causal: + # Left padding for causal + x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode) + else: + # Asymmetric padding required for odd strides + padding_right = padding_total // 2 + padding_left = padding_total - padding_right + x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode) + return self.conv(x) + + +class StreamableConvTranspose1d(nn.Module): + """ConvTranspose1d with some builtin handling of asymmetric or causal padding + and normalization. + """ + def __init__(self, in_channels: int, out_channels: int, + kernel_size: int, stride: int = 1, causal: bool = False, + norm: str = 'none', trim_right_ratio: float = 1., + norm_kwargs: tp.Dict[str, tp.Any] = {}): + super().__init__() + self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride, + causal=causal, norm=norm, norm_kwargs=norm_kwargs) + self.causal = causal + self.trim_right_ratio = trim_right_ratio + assert self.causal or self.trim_right_ratio == 1., \ + "`trim_right_ratio` != 1.0 only makes sense for causal convolutions" + assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1. 
+ + def forward(self, x): + kernel_size = self.convtr.convtr.kernel_size[0] + stride = self.convtr.convtr.stride[0] + padding_total = kernel_size - stride + + y = self.convtr(x) + + # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be + # removed at the very end, when keeping only the right length for the output, + # as removing it here would require also passing the length at the matching layer + # in the encoder. + if self.causal: + # Trim the padding on the right according to the specified ratio + # if trim_right_ratio = 1.0, trim everything from right + padding_right = math.ceil(padding_total * self.trim_right_ratio) + padding_left = padding_total - padding_right + y = unpad1d(y, (padding_left, padding_right)) + else: + # Asymmetric padding required for odd strides + padding_right = padding_total // 2 + padding_left = padding_total - padding_right + y = unpad1d(y, (padding_left, padding_right)) + return y diff --git a/audiocraft/modules/lstm.py b/audiocraft/modules/lstm.py new file mode 100644 index 0000000..c086617 --- /dev/null +++ b/audiocraft/modules/lstm.py @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from torch import nn + + +class StreamableLSTM(nn.Module): + """LSTM without worrying about the hidden state, nor the layout of the data. + Expects input as convolutional layout. + """ + def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True): + super().__init__() + self.skip = skip + self.lstm = nn.LSTM(dimension, dimension, num_layers) + + def forward(self, x): + x = x.permute(2, 0, 1) + y, _ = self.lstm(x) + if self.skip: + y = y + x + y = y.permute(1, 2, 0) + return y diff --git a/audiocraft/modules/rope.py b/audiocraft/modules/rope.py new file mode 100644 index 0000000..4b8c70b --- /dev/null +++ b/audiocraft/modules/rope.py @@ -0,0 +1,124 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import typing as tp + +from torch import nn +import torch + + +class XPos(nn.Module): + """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1). + This applies an exponential decay to the RoPE rotation matrix. + + Args: + dim (int): Embedding dimension. + smoothing (float): Smoothing factor applied to the decay rates. + base_scale (int): Base decay rate, given in terms of scaling time. + device (torch.device or None): Device on which to initialize the module. + dtype (torch.dtype): dtype to use to generate the embedding. + """ + def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512, + device=None, dtype: torch.dtype = torch.float32): + super().__init__() + assert dim % 2 == 0 + assert dtype in [torch.float64, torch.float32] + self.dtype = dtype + self.base_scale = base_scale + + half_dim = dim // 2 + adim = torch.arange(half_dim, device=device, dtype=dtype) + decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing) + self.register_buffer("decay_rates", decay_rates) + self.decay: tp.Optional[torch.Tensor] = None + + def get_decay(self, start: int, end: int): + """Create complex decay tensor, cache values for fast computation. 
+ """ + if self.decay is None or end > self.decay.shape[0]: + assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker. + idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype) + power = idx / self.base_scale + scale = self.decay_rates ** power.unsqueeze(-1) + self.decay = torch.polar(scale, torch.zeros_like(scale)) + return self.decay[start:end] # [T, C/2] + + +class RotaryEmbedding(nn.Module): + """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864). + + Args: + dim (int): Embedding dimension (twice the number of frequencies). + max_period (float): Maximum period of the rotation frequencies. + xpos (bool): Use xPos, applies an exponential decay to rotation matrix. + scale (float): Scale of positional embedding, set to 0 to deactivate. + device (torch.device or None): Device on which to initialize the module. + dtype (torch.dtype): dtype to use to generate the embedding. + """ + def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False, + scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32): + super().__init__() + assert dim % 2 == 0 + self.scale = scale + assert dtype in [torch.float64, torch.float32] + self.dtype = dtype + + adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)] + frequencies = 1.0 / (max_period ** (adim / dim)) + self.register_buffer("frequencies", frequencies) + self.rotation: tp.Optional[torch.Tensor] = None + + self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None + + def get_rotation(self, start: int, end: int): + """Create complex rotation tensor, cache values for fast computation. + """ + if self.rotation is None or end > self.rotation.shape[0]: + assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker. + idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype) + angles = torch.outer(idx, self.frequencies) + self.rotation = torch.polar(torch.ones_like(angles), angles) + return self.rotation[start:end] + + def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False): + """Apply rope rotation to query or key tensor. + """ + T = x.shape[1] + rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2) + + if self.xpos: + decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2) + else: + decay = 1.0 + + if invert_decay: + decay = decay ** -1 + + x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2)) + scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale) + x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2) + + return x_out.type_as(x) + + def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0): + """ Apply rope rotation to both query and key tensors. + Supports streaming mode, in which query and key are not expected to have the same shape. + In streaming mode, key will be of legnth [P + C] with P the cached past timesteps, but + query will be [C] (typically C == 1). + + Args: + query (torch.Tensor): Query to rotate. + key (torch.Tensor): Key to rotate. + start (int): Start index of the sequence for time offset. 
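+
+        Example (illustrative sketch; tensors follow the `[B, T, heads, dim_per_head]`
+        layout expected by `rotate`):
+
+            rope = RotaryEmbedding(dim=64)
+            q = torch.randn(2, 8, 4, 64)
+            k = torch.randn(2, 8, 4, 64)
+            q, k = rope.rotate_qk(q, k, start=0)   # shapes are preserved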
+ """ + query_timesteps = query.shape[1] + key_timesteps = key.shape[1] + streaming_offset = key_timesteps - query_timesteps + + query_out = self.rotate(query, start + streaming_offset) + key_out = self.rotate(key, start, invert_decay=True) + + return query_out, key_out diff --git a/audiocraft/modules/seanet.py b/audiocraft/modules/seanet.py new file mode 100644 index 0000000..3e5998e --- /dev/null +++ b/audiocraft/modules/seanet.py @@ -0,0 +1,258 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import typing as tp + +import numpy as np +import torch.nn as nn + +from .conv import StreamableConv1d, StreamableConvTranspose1d +from .lstm import StreamableLSTM + + +class SEANetResnetBlock(nn.Module): + """Residual block from SEANet model. + + Args: + dim (int): Dimension of the input/output. + kernel_sizes (list): List of kernel sizes for the convolutions. + dilations (list): List of dilations for the convolutions. + activation (str): Activation function. + activation_params (dict): Parameters to provide to the activation function. + norm (str): Normalization method. + norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. + causal (bool): Whether to use fully causal convolution. + pad_mode (str): Padding mode for the convolutions. + compress (int): Reduced dimensionality in residual branches (from Demucs v3). + true_skip (bool): Whether to use true skip connection or a simple + (streamable) convolution as the skip connection. + """ + def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1], + activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, + norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False, + pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True): + super().__init__() + assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations' + act = getattr(nn, activation) + hidden = dim // compress + block = [] + for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): + in_chs = dim if i == 0 else hidden + out_chs = dim if i == len(kernel_sizes) - 1 else hidden + block += [ + act(**activation_params), + StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation, + norm=norm, norm_kwargs=norm_params, + causal=causal, pad_mode=pad_mode), + ] + self.block = nn.Sequential(*block) + self.shortcut: nn.Module + if true_skip: + self.shortcut = nn.Identity() + else: + self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params, + causal=causal, pad_mode=pad_mode) + + def forward(self, x): + return self.shortcut(x) + self.block(x) + + +class SEANetEncoder(nn.Module): + """SEANet encoder. + + Args: + channels (int): Audio channels. + dimension (int): Intermediate representation dimension. + n_filters (int): Base width for the model. + n_residual_layers (int): nb of residual layers. + ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of + upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here + that must match the decoder order. We use the decoder order as some models may only employ the decoder. + activation (str): Activation function. 
+ activation_params (dict): Parameters to provide to the activation function. + norm (str): Normalization method. + norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. + kernel_size (int): Kernel size for the initial convolution. + last_kernel_size (int): Kernel size for the initial convolution. + residual_kernel_size (int): Kernel size for the residual layers. + dilation_base (int): How much to increase the dilation with each layer. + causal (bool): Whether to use fully causal convolution. + pad_mode (str): Padding mode for the convolutions. + true_skip (bool): Whether to use true skip connection or a simple + (streamable) convolution as the skip connection in the residual network blocks. + compress (int): Reduced dimensionality in residual branches (from Demucs v3). + lstm (int): Number of LSTM layers at the end of the encoder. + disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. + For the encoder, it corresponds to the N first blocks. + """ + def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, + ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, + norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, + last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, + pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, + disable_norm_outer_blocks: int = 0): + super().__init__() + self.channels = channels + self.dimension = dimension + self.n_filters = n_filters + self.ratios = list(reversed(ratios)) + del ratios + self.n_residual_layers = n_residual_layers + self.hop_length = np.prod(self.ratios) + self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks + self.disable_norm_outer_blocks = disable_norm_outer_blocks + assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ + "Number of blocks for which to disable norm is invalid." \ + "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." 
+ + act = getattr(nn, activation) + mult = 1 + model: tp.List[nn.Module] = [ + StreamableConv1d(channels, mult * n_filters, kernel_size, + norm='none' if self.disable_norm_outer_blocks >= 1 else norm, + norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) + ] + # Downsample to raw audio scale + for i, ratio in enumerate(self.ratios): + block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm + # Add residual layers + for j in range(n_residual_layers): + model += [ + SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1], + dilations=[dilation_base ** j, 1], + norm=block_norm, norm_params=norm_params, + activation=activation, activation_params=activation_params, + causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)] + + # Add downsampling layers + model += [ + act(**activation_params), + StreamableConv1d(mult * n_filters, mult * n_filters * 2, + kernel_size=ratio * 2, stride=ratio, + norm=block_norm, norm_kwargs=norm_params, + causal=causal, pad_mode=pad_mode), + ] + mult *= 2 + + if lstm: + model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] + + model += [ + act(**activation_params), + StreamableConv1d(mult * n_filters, dimension, last_kernel_size, + norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, + norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) + ] + + self.model = nn.Sequential(*model) + + def forward(self, x): + return self.model(x) + + +class SEANetDecoder(nn.Module): + """SEANet decoder. + + Args: + channels (int): Audio channels. + dimension (int): Intermediate representation dimension. + n_filters (int): Base width for the model. + n_residual_layers (int): nb of residual layers. + ratios (Sequence[int]): kernel size and stride ratios. + activation (str): Activation function. + activation_params (dict): Parameters to provide to the activation function. + final_activation (str): Final activation function after all convolutions. + final_activation_params (dict): Parameters to provide to the activation function. + norm (str): Normalization method. + norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. + kernel_size (int): Kernel size for the initial convolution. + last_kernel_size (int): Kernel size for the initial convolution. + residual_kernel_size (int): Kernel size for the residual layers. + dilation_base (int): How much to increase the dilation with each layer. + causal (bool): Whether to use fully causal convolution. + pad_mode (str): Padding mode for the convolutions. + true_skip (bool): Whether to use true skip connection or a simple. + (streamable) convolution as the skip connection in the residual network blocks. + compress (int): Reduced dimensionality in residual branches (from Demucs v3). + lstm (int): Number of LSTM layers at the end of the encoder. + disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm. + For the decoder, it corresponds to the N last blocks. + trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup. + If equal to 1.0, it means that all the trimming is done at the right. 
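+
+    Example (shape sketch with the default arguments; exact lengths depend on the
+    padding configuration):
+
+        import torch
+        encoder = SEANetEncoder()          # hop_length = 8 * 5 * 4 * 2 = 320
+        decoder = SEANetDecoder()
+        x = torch.randn(1, 1, 32000)       # [B, channels, T]
+        z = encoder(x)                     # ~[1, 128, 100], i.e. T / hop_length frames
+        y = decoder(z)                     # ~[1, 1, 32000]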
+ """ + def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3, + ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0}, + final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None, + norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7, + last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False, + pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0, + disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0): + super().__init__() + self.dimension = dimension + self.channels = channels + self.n_filters = n_filters + self.ratios = ratios + del ratios + self.n_residual_layers = n_residual_layers + self.hop_length = np.prod(self.ratios) + self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks + self.disable_norm_outer_blocks = disable_norm_outer_blocks + assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \ + "Number of blocks for which to disable norm is invalid." \ + "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0." + + act = getattr(nn, activation) + mult = int(2 ** len(self.ratios)) + model: tp.List[nn.Module] = [ + StreamableConv1d(dimension, mult * n_filters, kernel_size, + norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm, + norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) + ] + + if lstm: + model += [StreamableLSTM(mult * n_filters, num_layers=lstm)] + + # Upsample to raw audio scale + for i, ratio in enumerate(self.ratios): + block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm + # Add upsampling layers + model += [ + act(**activation_params), + StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2, + kernel_size=ratio * 2, stride=ratio, + norm=block_norm, norm_kwargs=norm_params, + causal=causal, trim_right_ratio=trim_right_ratio), + ] + # Add residual layers + for j in range(n_residual_layers): + model += [ + SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1], + dilations=[dilation_base ** j, 1], + activation=activation, activation_params=activation_params, + norm=block_norm, norm_params=norm_params, causal=causal, + pad_mode=pad_mode, compress=compress, true_skip=true_skip)] + + mult //= 2 + + # Add final layers + model += [ + act(**activation_params), + StreamableConv1d(n_filters, channels, last_kernel_size, + norm='none' if self.disable_norm_outer_blocks >= 1 else norm, + norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode) + ] + # Add optional final activation to decoder (eg. tanh) + if final_activation is not None: + final_act = getattr(nn, final_activation) + final_activation_params = final_activation_params or {} + model += [ + final_act(**final_activation_params) + ] + self.model = nn.Sequential(*model) + + def forward(self, z): + y = self.model(z) + return y diff --git a/audiocraft/modules/streaming.py b/audiocraft/modules/streaming.py new file mode 100644 index 0000000..fdbdf5e --- /dev/null +++ b/audiocraft/modules/streaming.py @@ -0,0 +1,135 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +""" +Streaming module API that should be implemented by all Streaming components, +""" + +from contextlib import contextmanager +import typing as tp +from torch import nn +import torch + + +State = tp.Dict[str, torch.Tensor] + + +class StreamingModule(nn.Module): + """Common API for streaming components. + + Each streaming component has a streaming state, which is just a dict[str, Tensor]. + By convention, the first dim of each tensor must be the batch size. + Don't use dots in the key names, as this would clash with submodules + (like in state_dict). + + If `self._is_streaming` is True, the component should use and remember + the proper state inside `self._streaming_state`. + + To set a streaming component in streaming state, use + + with module.streaming(): + ... + + This will automatically reset the streaming state when exiting the context manager. + This also automatically propagates to all streaming children module. + + Some module might also implement the `StreamingModule.flush` method, although + this one is trickier, as all parents module must be StreamingModule and implement + it as well for it to work properly. See `StreamingSequential` after. + """ + def __init__(self) -> None: + super().__init__() + self._streaming_state: State = {} + self._is_streaming = False + + def _apply_named_streaming(self, fn: tp.Any): + for name, module in self.named_modules(): + if isinstance(module, StreamingModule): + fn(name, module) + + def _set_streaming(self, streaming: bool): + def _set_streaming(name, module): + module._is_streaming = streaming + self._apply_named_streaming(_set_streaming) + + @contextmanager + def streaming(self): + """Context manager to enter streaming mode. Reset streaming state on exit. + """ + self._set_streaming(True) + try: + yield + finally: + self._set_streaming(False) + self.reset_streaming() + + def reset_streaming(self): + """Reset the streaming state. + """ + def _reset(name: str, module: StreamingModule): + module._streaming_state.clear() + + self._apply_named_streaming(_reset) + + def get_streaming_state(self) -> State: + """Return the streaming state, including that of sub-modules. + """ + state: State = {} + + def _add(name: str, module: StreamingModule): + if name: + name += "." + for key, value in module._streaming_state.items(): + state[name + key] = value + + self._apply_named_streaming(_add) + return state + + def set_streaming_state(self, state: State): + """Set the streaming state, including that of sub-modules. + """ + state = dict(state) + + def _set(name: str, module: StreamingModule): + if name: + name += "." + module._streaming_state.clear() + for key, value in list(state.items()): + # complexity is not ideal here, but probably fine. + if key.startswith(name): + local_key = key[len(name):] + if '.' not in local_key: + module._streaming_state[local_key] = value + del state[key] + + self._apply_named_streaming(_set) + assert len(state) == 0, list(state.keys()) + + def flush(self, x: tp.Optional[torch.Tensor] = None): + """Flush any remaining outputs that were waiting for completion. + Typically, for convolutions, this will add the final padding + and process the last buffer. + + This should take an optional argument `x`, which will be provided + if a module before this one in the streaming pipeline has already + spitted out a flushed out buffer. + """ + if x is None: + return None + else: + return self(x) + + +class StreamingSequential(StreamingModule, nn.Sequential): + """A streaming compatible alternative of `nn.Sequential`. 
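+
+    Example (illustrative sketch; the wrapped modules are hypothetical):
+
+        seq = StreamingSequential(some_streaming_conv, some_streaming_lstm)
+        with seq.streaming():               # per-module state is kept across calls
+            for chunk in audio_chunks:      # feed a long input piece by piece
+                out = seq(chunk)
+            tail = seq.flush()              # emit whatever is still buffered
+        # leaving the `streaming()` context resets all streaming state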
+ """ + def flush(self, x: tp.Optional[torch.Tensor] = None): + for module in self: + if isinstance(module, StreamingModule): + x = module.flush(x) + elif x is not None: + x = module(x) + return x diff --git a/audiocraft/modules/transformer.py b/audiocraft/modules/transformer.py new file mode 100644 index 0000000..e69cca8 --- /dev/null +++ b/audiocraft/modules/transformer.py @@ -0,0 +1,747 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +Transformer model, with streaming support, xformer attention support +and easy causal attention with a potentially finite receptive field. + +See `StreamingTransformer` for more information. + +Unlike regular PyTorch Transformer, we make the hard choice that batches are first. +""" + +import typing as tp + +from einops import rearrange +import torch +import torch.nn as nn +from torch.nn import functional as F +from torch.utils.checkpoint import checkpoint as torch_checkpoint +from xformers import ops + +from .rope import RotaryEmbedding +from .streaming import StreamingModule + +_efficient_attention_backend: str = 'torch' + + +def set_efficient_attention_backend(backend: str = 'torch'): + # Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster). + global _efficient_attention_backend + assert _efficient_attention_backend in ['xformers', 'torch'] + _efficient_attention_backend = backend + + +def _get_attention_time_dimension() -> int: + if _efficient_attention_backend == 'torch': + return 2 + else: + return 1 + + +def _is_profiled() -> bool: + # Return true if we are currently running with a xformers profiler activated. + try: + from xformers.profiler import profiler + except ImportError: + return False + return profiler._Profiler._CURRENT_PROFILER is not None + + +def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module: + """Create normalization module for transformer encoder layer. + + Args: + norm_type (str): Normalization method. + dim (int): Dimension of the normalized layer. + **kwargs (dict): Additional parameters for normalization layer. + Returns: + nn.Module: Normalization module. + """ + if norm_type == 'layer_norm': + return nn.LayerNorm(dim, eps=1e-5, **kwargs) + else: + raise ValueError(f"Unknown norm type: {norm_type}") + + +def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000, + dtype: torch.dtype = torch.float32) -> torch.Tensor: + """Create sinusoidal positional embedding, with shape `[B, T, C]`. + + Args: + positions (torch.Tensor): LongTensor of positions. + dim (int): Dimension of the embedding. + max_period (float): Maximum period of the cosine/sine functions. + dtype (torch.dtype or str): dtype to use to generate the embedding. + Returns: + torch.Tensor: Sinusoidal positional embedding. 
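+
+    Example (illustrative):
+
+        positions = torch.arange(100).view(1, -1, 1)        # [B=1, T=100, 1]
+        pos_emb = create_sin_embedding(positions, dim=512)   # -> [1, 100, 512]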
+ """ + # We aim for BTC format + assert dim % 2 == 0 + half_dim = dim // 2 + positions = positions.to(dtype) + adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1) + max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point + phase = positions / (max_period_tensor ** (adim / (half_dim - 1))) + return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1) + + +def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: + """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers""" + if n_rep == 1: + return x + if _efficient_attention_backend == 'torch': + bs, n_kv_heads, slen, head_dim = x.shape + return ( + x[:, :, None, :, :] + .expand(bs, n_kv_heads, n_rep, slen, head_dim) + .reshape(bs, n_kv_heads * n_rep, slen, head_dim) + ) + else: + bs, slen, n_kv_heads, head_dim = x.shape + return ( + x[:, :, :, None, :] + .expand(bs, slen, n_kv_heads, n_rep, head_dim) + .reshape(bs, slen, n_kv_heads * n_rep, head_dim) + ) + + +class LayerScale(nn.Module): + """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). + This rescales diagonaly the residual outputs close to 0, with a learnt scale. + + Args: + channels (int): Number of channels. + init (float): Initial scale. + channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`. + device (torch.device or None): Device on which to initialize the module. + dtype (torch.dtype or None): dtype to use to initialize the module. + """ + def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True, + device=None, dtype=None): + super().__init__() + self.channel_last = channel_last + self.scale = nn.Parameter( + torch.full((channels,), init, + requires_grad=True, device=device, dtype=dtype)) + + def forward(self, x: torch.Tensor): + if self.channel_last: + return self.scale * x + else: + return self.scale[:, None] * x + + +class StreamingMultiheadAttention(StreamingModule): + """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation. + + Args: + embed_dim (int): Dimension to project to. + num_heads (int): Number of heads. + dropout (float): Dropout level. + bias (bool): Use bias in projections. + causal (bool): Causal mask applied automatically. + past_context (int or None): Receptive field for the causal mask, infinite if None. + custom (bool): Use custom MHA implementation, for testing / benchmarking. + memory_efficient (bool): Use xformers based memory efficient attention. + attention_as_float32 (bool): Perform the attention as float32 + (especially important with memory_efficient as autocast won't do this automatically). + rope (`RotaryEmbedding` or None): Rope embedding to use. + cross_attention: Should be true when used as a cross attention. + All keys and values must be available at once, streaming is only for the queries. + Cannot be used with `causal` or `rope` (as it wouldn't make sens to + intepret the time steps in the keys relative to those in the queries). + safe_streaming (bool): Bug fix, will go away with xformers update. + qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product. + kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). + This will lead to faster decoding time on A100 or other GPUs with tensorcore. + device (torch.device or None): Sevice on which to initialize. + dtype (torch.dtype or None): dtype to use. 
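# --- Illustrative sketch (not part of the patch): a quick sanity check of
# create_sin_embedding as defined above. Positions are expected as [B, T, 1]
# (this is how StreamingTransformer.forward builds them later in this file) and
# the output is [B, T, C] with cosines in the first half of the channels.
import torch

B, T, C = 2, 5, 8
positions = torch.arange(T).view(1, -1, 1).expand(B, -1, -1)
pos_emb = create_sin_embedding(positions, dim=C, max_period=10_000)
assert pos_emb.shape == (B, T, C)
# At position 0 the phase is 0, so the cosine half is 1 and the sine half is 0.
assert torch.allclose(pos_emb[:, 0, :C // 2], torch.ones(B, C // 2))
assert torch.allclose(pos_emb[:, 0, C // 2:], torch.zeros(B, C // 2))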
+ """ + def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, + causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False, + memory_efficient: bool = False, attention_as_float32: bool = False, + rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False, + safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1, + device=None, dtype=None): + super().__init__() + factory_kwargs = {'device': device, 'dtype': dtype} + if past_context is not None: + assert causal + + self.embed_dim = embed_dim + self.causal = causal + self.past_context = past_context + self.memory_efficient = memory_efficient + self.attention_as_float32 = attention_as_float32 + self.rope = rope + self.cross_attention = cross_attention + self.safe_streaming = safe_streaming + self.num_heads = num_heads + self.dropout = dropout + self.kv_repeat = kv_repeat + if cross_attention: + assert not causal, "Causal cannot work with cross attention." + assert rope is None, "Rope cannot work with cross attention." + + if memory_efficient: + _verify_xformers_memory_efficient_compat() + + self.custom = _is_custom(custom, memory_efficient) + if self.custom: + out_dim = embed_dim + assert num_heads % kv_repeat == 0 + assert not cross_attention or kv_repeat == 1 + num_kv = num_heads // kv_repeat + kv_dim = (embed_dim // num_heads) * num_kv + out_dim += 2 * kv_dim + in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs) + # We try to follow the default PyTorch MHA convention, to easily compare results. + self.in_proj_weight = in_proj.weight + self.in_proj_bias = in_proj.bias + if bias: + self.in_proj_bias.data.zero_() # Following Pytorch convention + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs) + if bias: + self.out_proj.bias.data.zero_() + else: + assert not qk_layer_norm + assert kv_repeat == 1 + self.mha = nn.MultiheadAttention( + embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True, + **factory_kwargs) + self.qk_layer_norm = qk_layer_norm + if qk_layer_norm: + assert self.custom + assert kv_repeat == 1 + ln_dim = embed_dim + self.q_layer_norm = nn.LayerNorm(ln_dim) + self.k_layer_norm = nn.LayerNorm(ln_dim) + + def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): + if not self.custom: + # Support compat with regular MHA + keys = [n for n, _ in self.mha.named_parameters()] + for key in keys: + if prefix + key in state_dict: + state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype): + # Return a causal mask, accounting for potentially stored past keys/values + # We actually return a bias for the attention score, as this has the same + # convention both in the builtin MHA in Pytorch, and Xformers functions. + time_dim = _get_attention_time_dimension() + if self.memory_efficient: + from xformers.ops import LowerTriangularMask + if current_steps == 1: + # If we only have one step, then we do not need a mask. 
+ return None + elif 'past_keys' in self._streaming_state: + raise RuntimeError('Not supported at the moment') + else: + # Then we can safely use a lower triangular mask + return LowerTriangularMask() + if self._streaming_state: + past_keys = self._streaming_state['past_keys'] + past_steps = past_keys.shape[time_dim] + else: + past_steps = 0 + + queries_pos = torch.arange( + past_steps, current_steps + past_steps, device=device).view(-1, 1) + keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1) + delta = queries_pos - keys_pos + valid = delta >= 0 + if self.past_context is not None: + valid &= (delta <= self.past_context) + return torch.where( + valid, + torch.zeros([], device=device, dtype=dtype), + torch.full([], float('-inf'), device=device, dtype=dtype)) + + def _complete_kv(self, k, v): + time_dim = _get_attention_time_dimension() + if self.cross_attention: + # With cross attention we assume all keys and values + # are already available, and streaming is with respect + # to the queries only. + return k, v + # Complete the key/value pair using the streaming state. + if self._streaming_state: + pk = self._streaming_state['past_keys'] + nk = torch.cat([pk, k], dim=time_dim) + if v is k: + nv = nk + else: + pv = self._streaming_state['past_values'] + nv = torch.cat([pv, v], dim=time_dim) + else: + nk = k + nv = v + + assert nk.shape[time_dim] == nv.shape[time_dim] + offset = 0 + if self.past_context is not None: + offset = max(0, nk.shape[time_dim] - self.past_context) + if self._is_streaming: + self._streaming_state['past_keys'] = nk[:, offset:] + if v is not k: + self._streaming_state['past_values'] = nv[:, offset:] + if 'offset' in self._streaming_state: + self._streaming_state['offset'] += offset + else: + self._streaming_state['offset'] = torch.tensor(0) + return nk, nv + + def _apply_rope(self, query: torch.Tensor, key: torch.Tensor): + # TODO: fix and verify layout. + assert _efficient_attention_backend == 'xformers', 'Rope not supported with torch attn.' + # Apply rope embeddings to query and key tensors. + assert self.rope is not None + if 'past_keys' in self._streaming_state: + past_keys_offset = self._streaming_state['past_keys'].shape[1] + else: + past_keys_offset = 0 + if 'offset' in self._streaming_state: + past_context_offset = int(self._streaming_state['offset'].item()) + else: + past_context_offset = 0 + streaming_offset = past_context_offset + past_keys_offset + return self.rope.rotate_qk(query, key, start=streaming_offset) + + def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, + key_padding_mask=None, need_weights=False, attn_mask=None, + average_attn_weights=True, is_causal=False): + assert attn_mask is None + assert not is_causal, ("new param added in torch 2.0.1 not supported, " + "use the causal args in the constructor.") + + time_dim = _get_attention_time_dimension() + if time_dim == 2: + layout = "b h t d" + else: + layout = "b t h d" + dtype = query.dtype + if self._is_streaming: + assert self.causal or self.cross_attention, \ + "Streaming only available for causal or cross attention" + + if self.causal: + # At the moment we specialize only for the self-attention case. 
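# --- Illustrative sketch (not part of the patch): the additive causal bias that
# _get_mask builds, reproduced standalone for past_steps=1, current_steps=3 and
# past_context=2. A 0 entry means "may attend", -inf means "masked"; the bias is
# simply added to the attention scores, which is the convention shared by
# nn.MultiheadAttention and the xformers kernels.
import torch

past_steps, current_steps, past_context = 1, 3, 2
queries_pos = torch.arange(past_steps, past_steps + current_steps).view(-1, 1)
keys_pos = torch.arange(past_steps + current_steps).view(1, -1)
delta = queries_pos - keys_pos
valid = (delta >= 0) & (delta <= past_context)
bias = torch.where(valid, torch.zeros([]), torch.full([], float('-inf')))
# bias:
# [[  0.,  0., -inf, -inf],
#  [  0.,  0.,   0., -inf],
#  [-inf,  0.,   0.,   0.]]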
+ assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value" + assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value" + attn_mask = self._get_mask(query.shape[1], query.device, query.dtype) + + if self.custom: + # custom implementation + assert need_weights is False + assert key_padding_mask is None + if self.cross_attention: + # Different queries, keys, values, we have to spit manually the weights + # before applying the linear. + dim = self.in_proj_weight.shape[0] // 3 + if self.in_proj_bias is None: + bias_q, bias_k, bias_v = None, None, None + else: + bias_q = self.in_proj_bias[:dim] + bias_k = self.in_proj_bias[dim: 2 * dim] + bias_v = self.in_proj_bias[2 * dim:] + q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q) + # todo: when streaming, we could actually save k, v and check the shape actually match. + k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k) + v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v) + if self.qk_layer_norm is True: + q = self.q_layer_norm(q) + k = self.k_layer_norm(k) + q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]] + else: + if not _is_profiled(): + # profiling breaks that propertysomehow. + assert query is key, "specialized implementation" + assert value is key, "specialized implementation" + projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias) + if self.kv_repeat == 1: + if time_dim == 2: + bound_layout = "b h p t d" + else: + bound_layout = "b t p h d" + packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads) + q, k, v = ops.unbind(packed, dim=2) + else: + embed_dim = self.embed_dim + per_head_dim = (embed_dim // self.num_heads) + kv_heads = self.num_heads // self.kv_repeat + q = projected[:, :, :embed_dim] + start = embed_dim + end = start + per_head_dim * kv_heads + k = projected[:, :, start: end] + v = projected[:, :, end:] + q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads) + k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads) + v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads) + + if self.qk_layer_norm is True: + assert self.kv_repeat == 1 + q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]] + q = self.q_layer_norm(q) + k = self.k_layer_norm(k) + q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]] + if self.rope: + q, k = self._apply_rope(q, k) + k, v = self._complete_kv(k, v) + if self.kv_repeat > 1: + k = expand_repeated_kv(k, self.kv_repeat) + v = expand_repeated_kv(v, self.kv_repeat) + if self.attention_as_float32: + q, k, v = [x.float() for x in [q, k, v]] + if self.memory_efficient: + p = self.dropout if self.training else 0 + if _efficient_attention_backend == 'torch': + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, is_causal=attn_mask is not None, dropout_p=p) + else: + x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p) + else: + # We include the dot product as float32, for consistency + # with the other implementations that include that step + # as part of the attention. Note that when using `autocast`, + # the einsums would be done as bfloat16, but the softmax + # would be done as bfloat16, so `attention_as_float32` will + # extend a bit the range of operations done in float32, + # although this should make no difference. 
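# --- Illustrative sketch (not part of the patch): the packed in-projection used by
# the self-attention fast path above. A single Linear produces 3 * embed_dim
# features that einops splits into q, k, v in the "b h t d" layout (the torch
# backend convention). torch.unbind is used here in place of xformers.ops.unbind;
# both behave identically for this purpose.
import torch
from torch import nn
from einops import rearrange

B, T, embed_dim, num_heads = 2, 6, 16, 4
in_proj = nn.Linear(embed_dim, 3 * embed_dim)
x = torch.randn(B, T, embed_dim)

projected = in_proj(x)                                          # [B, T, 3 * embed_dim]
packed = rearrange(projected, "b t (p h d) -> b h p t d", p=3, h=num_heads)
q, k, v = torch.unbind(packed, dim=2)                           # each [B, H, T, D]
assert q.shape == (B, num_heads, T, embed_dim // num_heads)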
+ q = q / q.shape[-1] ** 0.5 + key_layout = layout.replace('t', 'k') + query_layout = layout + if self._is_streaming and self.safe_streaming and q.device.type == 'cuda': + with torch.autocast(device_type=q.device.type, dtype=torch.float32): + pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) + else: + pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) + if attn_mask is not None: + pre_w = pre_w + attn_mask + w = torch.softmax(pre_w, dim=-1) + w = F.dropout(w, self.dropout, training=self.training).to(v) + # Key and value have the same format. + x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v) + x = x.to(dtype) + x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads) + x = self.out_proj(x) + else: + key, value = self._complete_kv(key, value) + if self.attention_as_float32: + query, key, value = [x.float() for x in [query, key, value]] + x, _ = self.mha( + query, key, value, key_padding_mask, + need_weights, attn_mask, average_attn_weights) + x = x.to(dtype) + + return x, None + + +class StreamingTransformerLayer(nn.TransformerEncoderLayer): + """TransformerLayer with Streaming / Causal support. + This also integrates cross_attention, when passing `cross_attention=True`, + rather than having two separate classes like in PyTorch. + + Args: + d_model (int): Dimension of the data. + num_heads (int): Number of heads. + dim_feedforward (int): Intermediate dimension of FF module. + dropout (float): Dropout both for MHA and FF. + bias_ff (bool): Use bias for FF. + bias_attn (bool): Use bias for MHA. + causal (bool): Causal mask applied automatically. + past_context (int or None): Receptive field for the causal mask, infinite if None. + custom (bool): Use custom MHA implementation, for testing / benchmarking. + memory_efficient (bool): Use xformers based memory efficient attention. + attention_as_float32 (bool): Perform the attention as float32 + (especially important with memory_efficient as autocast won't do this automatically). + qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention. + qk_layer_norm_cross (bool): Same for the cross attention. + cross_attention (bool): If True, expect to get secondary input for cross-attention. + Cross attention will use the default MHA, as it typically won't require + special treatment. + layer_scale (float or None): If not None, LayerScale will be used with + the given value as initial scale. + rope (`RotaryEmbedding` or None): Rope embedding to use. + attention_dropout (float or None): If not None, separate the value of the dimension dropout + in FFN and of the attention dropout. + kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). + This will lead to faster decoding time on A100 or other GPUs with tensorcore. + device (torch.device or None): Device on which to initialize. + dtype (torch.dtype or None): dtype to use. + **kwargs: See `nn.TransformerEncoderLayer`. 
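# --- Illustrative sketch (not part of the patch): the non-memory-efficient attention
# path above written out for the "b h t d" layout, cross-checked against
# torch.nn.functional.scaled_dot_product_attention (no mask, no dropout).
import torch
import torch.nn.functional as F

B, H, T, D = 2, 4, 5, 8
q, k, v = (torch.randn(B, H, T, D) for _ in range(3))

pre_w = torch.einsum("b h t d, b h k d -> b h t k", q / D ** 0.5, k)  # attention scores
w = torch.softmax(pre_w, dim=-1)
x_manual = torch.einsum("b h t k, b h k d -> b h t d", w, v)

x_ref = F.scaled_dot_product_attention(q, k, v)
assert torch.allclose(x_manual, x_ref, atol=1e-5)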
+ """ + def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1, + bias_ff: bool = True, bias_attn: bool = True, causal: bool = False, + past_context: tp.Optional[int] = None, custom: bool = False, + memory_efficient: bool = False, attention_as_float32: bool = False, + qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False, + cross_attention: bool = False, layer_scale: tp.Optional[float] = None, + rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None, + kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs): + super().__init__(d_model, num_heads, dim_feedforward, dropout, + device=device, dtype=dtype, batch_first=True, **kwargs) + factory_kwargs = {'device': device, 'dtype': dtype} + # Redefine self_attn to our streaming multi-head attention + attn_kwargs: tp.Dict[str, tp.Any] = { + 'embed_dim': d_model, + 'num_heads': num_heads, + 'dropout': dropout if attention_dropout is None else attention_dropout, + 'bias': bias_attn, + 'custom': custom, + 'memory_efficient': memory_efficient, + 'attention_as_float32': attention_as_float32, + } + self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention( + causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm, + kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore + # Redefine feedforward layers to expose bias parameter + self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs) + self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs) + + self.layer_scale_1: nn.Module + self.layer_scale_2: nn.Module + if layer_scale is None: + self.layer_scale_1 = nn.Identity() + self.layer_scale_2 = nn.Identity() + else: + self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs) + self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs) + + self.cross_attention: tp.Optional[nn.Module] = None + if cross_attention: + self.cross_attention = StreamingMultiheadAttention( + cross_attention=True, qk_layer_norm=qk_layer_norm_cross, + **attn_kwargs, **factory_kwargs) + # Norm and dropout + self.dropout_cross = nn.Dropout(dropout) + # eps value matching that used in PyTorch reference implementation. + self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs) + self.layer_scale_cross: nn.Module + if layer_scale is None: + self.layer_scale_cross = nn.Identity() + else: + self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs) + self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore + self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore + + def _cross_attention_block(self, src: torch.Tensor, + cross_attention_src: torch.Tensor) -> torch.Tensor: + assert self.cross_attention is not None + # queries are from src, keys and values from cross_attention_src. 
+ x = self.cross_attention( + src, cross_attention_src, cross_attention_src, need_weights=False)[0] + return self.dropout_cross(x) # type: ignore + + def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore + src_key_padding_mask: tp.Optional[torch.Tensor] = None, + cross_attention_src: tp.Optional[torch.Tensor] = None): + if self.cross_attention is None: + assert cross_attention_src is None + else: + assert cross_attention_src is not None + x = src + if self.norm_first: + x = x + self.layer_scale_1( + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)) + if cross_attention_src is not None: + x = x + self.layer_scale_cross( + self._cross_attention_block( + self.norm_cross(x), cross_attention_src)) + x = x + self.layer_scale_2(self._ff_block(self.norm2(x))) + else: + x = self.norm1(x + self.layer_scale_1( + self._sa_block(x, src_mask, src_key_padding_mask))) + if cross_attention_src is not None: + x = self.norm_cross( + x + self.layer_scale_cross( + self._cross_attention_block(src, cross_attention_src))) + x = self.norm2(x + self.layer_scale_2(self._ff_block(x))) + return x + + +class StreamingTransformer(StreamingModule): + """Transformer with Streaming / Causal support. + + Args: + d_model (int): Dimension of the data. + num_heads (int): Number of heads. + dim_feedforward (int): Intermediate dimension of FF module. + dropout (float): Dropout both for MHA and FF. + bias_ff (bool): Use bias for FF. + bias_attn (bool): Use bias for MHA. + causal (bool): Causal mask applied automatically. + past_context (int or None): Receptive field for the causal mask, infinite if None. + custom (bool): Use custom MHA implementation, for testing / benchmarking. + memory_efficient (bool): Use xformers based memory efficient attention. + attention_as_float32 (bool): Perform the attention as float32 + (especially important with memory_efficient as autocast won't do this automatically). + cross_attention (bool): If True, expect to get secondary input for cross-attention. + layer_scale (float or None): If not None, LayerScale will be used + with the given value as initial scale. + positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope). + max_period (float): Maximum period of the time embedding. + positional_scale (float): Scale of positional embedding, set to 0 to deactivate. + xpos (bool): Apply xpos exponential decay to positional embedding (rope only). + lr (float or None): learning rate override through the `make_optim_group` API. + weight_decay (float or None): Weight_decay override through the `make_optim_group` API. + layer_class: (subclass of `StreamingTransformerLayer): class to use + to initialize the layers, allowing further customization outside of Audiocraft. + checkpointing (str): Checkpointing strategy to reduce memory usage. + No checkpointing if set to 'none'. Per layer checkpointing using PyTorch + if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice, + minimal memory usage, but maximal runtime). Finally, `xformers_default` provide + a policy for opting-out some operations of the checkpointing like + linear layers and attention, providing a middle ground between speed and memory. + device (torch.device or None): Device on which to initialize. + dtype (torch.dtype or None): dtype to use. + **kwargs: See `nn.TransformerEncoderLayer`. 
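# --- Illustrative sketch (not part of the patch): calling a StreamingTransformerLayer
# with cross-attention, assuming the classes above are in scope. The layer keeps the
# nn.TransformerEncoderLayer calling convention but accepts an extra
# cross_attention_src tensor (e.g. text-conditioning embeddings in MusicGen).
import torch

layer = StreamingTransformerLayer(d_model=32, num_heads=4, dim_feedforward=64,
                                  causal=True, custom=True, cross_attention=True)
layer.eval()

src = torch.randn(2, 10, 32)    # [B, T, d_model] token stream
cond = torch.randn(2, 7, 32)    # [B, S, d_model] conditioning sequence
with torch.no_grad():
    out = layer(src, cross_attention_src=cond)
assert out.shape == src.shape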
+ """ + def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048, + dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True, + causal: bool = False, past_context: tp.Optional[int] = None, + custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False, + cross_attention: bool = False, layer_scale: tp.Optional[float] = None, + positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1., + xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None, + layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer, + checkpointing: str = 'none', device=None, dtype=None, **kwargs): + super().__init__() + assert d_model % num_heads == 0 + + self.positional_embedding = positional_embedding + self.max_period = max_period + self.positional_scale = positional_scale + self.weight_decay = weight_decay + self.lr = lr + + assert positional_embedding in ['sin', 'rope', 'sin_rope'] + self.rope: tp.Optional[RotaryEmbedding] = None + if self.positional_embedding in ['rope', 'sin_rope']: + assert _is_custom(custom, memory_efficient) + self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period, + xpos=xpos, scale=positional_scale, device=device) + + self.checkpointing = checkpointing + + assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm'] + if self.checkpointing.startswith('xformers'): + _verify_xformers_internal_compat() + + self.layers = nn.ModuleList() + for idx in range(num_layers): + self.layers.append( + layer_class( + d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward, + dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn, + causal=causal, past_context=past_context, custom=custom, + memory_efficient=memory_efficient, attention_as_float32=attention_as_float32, + cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope, + device=device, dtype=dtype, **kwargs)) + + if self.checkpointing != 'none': + for layer in self.layers: + # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the + # backward hook inside of FSDP... + layer._magma_checkpointed = True # type: ignore + assert layer.layer_drop == 0., "Need further checking" # type: ignore + + def _apply_layer(self, layer, *args, **kwargs): + method = self.checkpointing + if method == 'none': + return layer(*args, **kwargs) + elif method == 'torch': + return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs) + elif method.startswith('xformers'): + from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy + if method == 'xformers_default': + # those operations will be saved, and not recomputed. + # According to Francisco we can get smarter policies but this is a good start. + allow_list = [ + "xformers.efficient_attention_forward_cutlass.default", + "xformers_flash.flash_fwd.default", + "aten.addmm.default", + "aten.mm.default", + ] + elif method == 'xformers_mm': + # those operations will be saved, and not recomputed. + # According to Francisco we can get smarter policies but this is a good start. 
+ allow_list = [ + "aten.addmm.default", + "aten.mm.default", + ] + else: + raise ValueError(f"xformers checkpointing xformers policy {method} is not known.") + policy_fn = _get_default_policy(allow_list) + return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs) + else: + raise ValueError(f"Checkpointing method {method} is unknown.") + + def forward(self, x: torch.Tensor, *args, **kwargs): + B, T, C = x.shape + + if 'offsets' in self._streaming_state: + offsets = self._streaming_state['offsets'] + else: + offsets = torch.zeros(B, dtype=torch.long, device=x.device) + + if self.positional_embedding in ['sin', 'sin_rope']: + positions = torch.arange(T, device=x.device).view(1, -1, 1) + positions = positions + offsets.view(-1, 1, 1) + pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype) + x = x + self.positional_scale * pos_emb + + for layer in self.layers: + x = self._apply_layer(layer, x, *args, **kwargs) + + if self._is_streaming: + self._streaming_state['offsets'] = offsets + T + + return x + + def make_optim_group(self): + group = {"params": list(self.parameters())} + if self.lr is not None: + group["lr"] = self.lr + if self.weight_decay is not None: + group["weight_decay"] = self.weight_decay + return group + + +# special attention attention related function + +def _verify_xformers_memory_efficient_compat(): + try: + from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa + except ImportError: + raise ImportError( + "xformers is not installed. Please install it and try again.\n" + "To install on AWS and Azure, run \n" + "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" + "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" + "To install on FAIR Cluster, run \n" + "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" + "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") + + +def _verify_xformers_internal_compat(): + try: + from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa + except ImportError: + raise ImportError( + "Francisco's fairinternal xformers is not installed. Please install it and try again.\n" + "To install on AWS and Azure, run \n" + "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" + "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" + "To install on FAIR Cluster, run \n" + "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" + "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") + + +def _is_custom(custom: bool, memory_efficient: bool): + return custom or memory_efficient diff --git a/audiocraft/py.typed b/audiocraft/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/audiocraft/quantization/__init__.py b/audiocraft/quantization/__init__.py new file mode 100644 index 0000000..836d6eb --- /dev/null +++ b/audiocraft/quantization/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# flake8: noqa +from .vq import ResidualVectorQuantizer +from .base import BaseQuantizer, DummyQuantizer, QuantizedResult diff --git a/audiocraft/quantization/base.py b/audiocraft/quantization/base.py new file mode 100644 index 0000000..1b16c13 --- /dev/null +++ b/audiocraft/quantization/base.py @@ -0,0 +1,107 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
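# --- Illustrative sketch (not part of the patch): driving the StreamingTransformer
# defined in audiocraft/modules/transformer.py above in streaming mode. With
# causal=True, feeding the sequence chunk by chunk inside `streaming()` should match
# a single full forward pass up to floating point tolerance.
import torch

model = StreamingTransformer(d_model=32, num_heads=4, num_layers=2,
                             dim_feedforward=64, causal=True, custom=True)
model.eval()

x = torch.randn(1, 8, 32)
with torch.no_grad():
    full = model(x)
    with model.streaming():
        chunked = torch.cat([model(x[:, :5]), model(x[:, 5:])], dim=1)
assert torch.allclose(full, chunked, atol=1e-5)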
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +Base class for all quantizers. +""" + +from dataclasses import dataclass, field +import typing as tp + +import torch +from torch import nn + + +@dataclass +class QuantizedResult: + x: torch.Tensor + codes: torch.Tensor + bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item. + penalty: tp.Optional[torch.Tensor] = None + metrics: dict = field(default_factory=dict) + + +class BaseQuantizer(nn.Module): + """Base class for quantizers. + """ + + def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult: + """ + Given input tensor x, returns first the quantized (or approximately quantized) + representation along with quantized codes, bandwidth, and any penalty term for the loss. + Finally, this returns a dict of metrics to update logging etc. + Frame rate must be passed so that the bandwidth is properly computed. + """ + raise NotImplementedError() + + def encode(self, x: torch.Tensor) -> torch.Tensor: + """Encode a given input tensor with the specified sample rate at the given bandwidth. + """ + raise NotImplementedError() + + def decode(self, codes: torch.Tensor) -> torch.Tensor: + """Decode the given codes to the quantized representation. + """ + raise NotImplementedError() + + @property + def total_codebooks(self): + """Total number of codebooks. + """ + raise NotImplementedError() + + @property + def num_codebooks(self): + """Number of active codebooks. + """ + raise NotImplementedError() + + def set_num_codebooks(self, n: int): + """Set the number of active codebooks. + """ + raise NotImplementedError() + + +class DummyQuantizer(BaseQuantizer): + """Fake quantizer that actually does not perform any quantization. + """ + def __init__(self): + super().__init__() + + def forward(self, x: torch.Tensor, frame_rate: int): + q = x.unsqueeze(1) + return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x)) + + def encode(self, x: torch.Tensor) -> torch.Tensor: + """Encode a given input tensor with the specified sample rate at the given bandwidth. + In the case of the DummyQuantizer, the codes are actually identical + to the input and resulting quantized representation as no quantization is done. + """ + return x.unsqueeze(1) + + def decode(self, codes: torch.Tensor) -> torch.Tensor: + """Decode the given codes to the quantized representation. + In the case of the DummyQuantizer, the codes are actually identical + to the input and resulting quantized representation as no quantization is done. + """ + return codes.squeeze(1) + + @property + def total_codebooks(self): + """Total number of codebooks. + """ + return 1 + + @property + def num_codebooks(self): + """Total number of codebooks. + """ + return self.total_codebooks + + def set_num_codebooks(self, n: int): + """Set the number of active codebooks. + """ + raise AttributeError("Cannot override the number of codebooks for the dummy quantizer") diff --git a/audiocraft/quantization/core_vq.py b/audiocraft/quantization/core_vq.py new file mode 100644 index 0000000..e1896bb --- /dev/null +++ b/audiocraft/quantization/core_vq.py @@ -0,0 +1,400 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
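# --- Illustrative sketch (not part of the patch): exercising the QuantizedResult
# contract with the DummyQuantizer defined in audiocraft/quantization/base.py above.
# The dummy just adds a codebook dimension of size 1, so encode/decode round-trips
# exactly and the "quantized" output equals the input.
import torch

dummy = DummyQuantizer()
x = torch.randn(2, 8, 50)                   # [B, C, T] latent
res = dummy(x, frame_rate=50)
assert res.x.shape == x.shape               # pass-through "quantized" tensor
assert res.codes.shape == (2, 1, 8, 50)     # codes with a fake codebook dimension
assert res.penalty is None and res.metrics == {}
assert dummy.decode(dummy.encode(x)).equal(x)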
+ +import typing as tp + +from einops import rearrange, repeat +import flashy +import torch +from torch import nn, einsum +import torch.nn.functional as F + + +def exists(val: tp.Optional[tp.Any]) -> bool: + return val is not None + + +def default(val: tp.Any, d: tp.Any) -> tp.Any: + return val if exists(val) else d + + +def l2norm(t): + return F.normalize(t, p=2, dim=-1) + + +def ema_inplace(moving_avg, new, decay: float): + moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) + + +def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5): + return (x + epsilon) / (x.sum() + n_categories * epsilon) + + +def uniform_init(*shape: int): + t = torch.empty(shape) + nn.init.kaiming_uniform_(t) + return t + + +def sample_vectors(samples, num: int): + num_samples, device = samples.shape[0], samples.device + + if num_samples >= num: + indices = torch.randperm(num_samples, device=device)[:num] + else: + indices = torch.randint(0, num_samples, (num,), device=device) + + return samples[indices] + + +def kmeans(samples, num_clusters: int, num_iters: int = 10): + dim, dtype = samples.shape[-1], samples.dtype + + means = sample_vectors(samples, num_clusters) + + for _ in range(num_iters): + diffs = rearrange(samples, "n d -> n () d") - rearrange( + means, "c d -> () c d" + ) + dists = -(diffs ** 2).sum(dim=-1) + + buckets = dists.max(dim=-1).indices + bins = torch.bincount(buckets, minlength=num_clusters) + zero_mask = bins == 0 + bins_min_clamped = bins.masked_fill(zero_mask, 1) + + new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype) + new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples) + new_means = new_means / bins_min_clamped[..., None] + + means = torch.where(zero_mask[..., None], means, new_means) + + return means, bins + + +def orthgonal_loss_fn(t): + # eq (2) from https://arxiv.org/abs/2112.00384 + n = t.shape[0] + normed_codes = l2norm(t) + identity = torch.eye(n, device=t.device) + cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes) + return ((cosine_sim - identity) ** 2).sum() / (n ** 2) + + +class EuclideanCodebook(nn.Module): + """Codebook with Euclidean distance. + + Args: + dim (int): Dimension. + codebook_size (int): Codebook size. + kmeans_init (bool): Whether to use k-means to initialize the codebooks. + If set to true, run the k-means algorithm on the first training batch and use + the learned centroids as initialization. + kmeans_iters (int): Number of iterations used for k-means algorithm at initialization. + decay (float): Decay for exponential moving average over the codebooks. + epsilon (float): Epsilon value for numerical stability. + threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes + that have an exponential moving average cluster size less than the specified threshold with + randomly selected vector from the current batch. 
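# --- Illustrative sketch (not part of the patch): the kmeans helper above applied to
# toy data with two well-separated clusters. It is what EuclideanCodebook uses to seed
# the codebook from the first training batch when kmeans_init=True.
import torch

torch.manual_seed(0)
samples = torch.cat([torch.randn(100, 2) + 10, torch.randn(100, 2) - 10])
means, bins = kmeans(samples, num_clusters=2, num_iters=10)
assert means.shape == (2, 2)
assert bins.sum().item() == 200     # every sample is assigned to a cluster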
+ """ + def __init__( + self, + dim: int, + codebook_size: int, + kmeans_init: int = False, + kmeans_iters: int = 10, + decay: float = 0.8, + epsilon: float = 1e-5, + threshold_ema_dead_code: int = 2, + ): + super().__init__() + self.decay = decay + init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros + embed = init_fn(codebook_size, dim) + + self.codebook_size = codebook_size + + self.kmeans_iters = kmeans_iters + self.epsilon = epsilon + self.threshold_ema_dead_code = threshold_ema_dead_code + + self.register_buffer("inited", torch.Tensor([not kmeans_init])) + self.register_buffer("cluster_size", torch.zeros(codebook_size)) + self.register_buffer("embed", embed) + self.register_buffer("embed_avg", embed.clone()) + + @torch.jit.ignore + def init_embed_(self, data): + if self.inited: + return + + embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters) + self.embed.data.copy_(embed) + self.embed_avg.data.copy_(embed.clone()) + self.cluster_size.data.copy_(cluster_size) + self.inited.data.copy_(torch.Tensor([True])) + # Make sure all buffers across workers are in sync after initialization + flashy.distrib.broadcast_tensors(self.buffers()) + + def replace_(self, samples, mask): + modified_codebook = torch.where( + mask[..., None], sample_vectors(samples, self.codebook_size), self.embed + ) + self.embed.data.copy_(modified_codebook) + + def expire_codes_(self, batch_samples): + if self.threshold_ema_dead_code == 0: + return + + expired_codes = self.cluster_size < self.threshold_ema_dead_code + if not torch.any(expired_codes): + return + + batch_samples = rearrange(batch_samples, "... d -> (...) d") + self.replace_(batch_samples, mask=expired_codes) + flashy.distrib.broadcast_tensors(self.buffers()) + + def preprocess(self, x): + x = rearrange(x, "... d -> (...) d") + return x + + def quantize(self, x): + embed = self.embed.t() + dist = -( + x.pow(2).sum(1, keepdim=True) + - 2 * x @ embed + + embed.pow(2).sum(0, keepdim=True) + ) + embed_ind = dist.max(dim=-1).indices + return embed_ind + + def postprocess_emb(self, embed_ind, shape): + return embed_ind.view(*shape[:-1]) + + def dequantize(self, embed_ind): + quantize = F.embedding(embed_ind, self.embed) + return quantize + + def encode(self, x): + shape = x.shape + # pre-process + x = self.preprocess(x) + # quantize + embed_ind = self.quantize(x) + # post-process + embed_ind = self.postprocess_emb(embed_ind, shape) + return embed_ind + + def decode(self, embed_ind): + quantize = self.dequantize(embed_ind) + return quantize + + def forward(self, x): + shape, dtype = x.shape, x.dtype + x = self.preprocess(x) + self.init_embed_(x) + + embed_ind = self.quantize(x) + embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype) + embed_ind = self.postprocess_emb(embed_ind, shape) + quantize = self.dequantize(embed_ind) + + if self.training: + # We do the expiry of code at that point as buffers are in sync + # and all the workers will take the same decision. 
+ self.expire_codes_(x) + ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay) + embed_sum = x.t() @ embed_onehot + ema_inplace(self.embed_avg, embed_sum.t(), self.decay) + cluster_size = ( + laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon) + * self.cluster_size.sum() + ) + embed_normalized = self.embed_avg / cluster_size.unsqueeze(1) + self.embed.data.copy_(embed_normalized) + + return quantize, embed_ind + + +class VectorQuantization(nn.Module): + """Vector quantization implementation. + Currently supports only euclidean distance. + + Args: + dim (int): Dimension + codebook_size (int): Codebook size + codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim. + decay (float): Decay for exponential moving average over the codebooks. + epsilon (float): Epsilon value for numerical stability. + kmeans_init (bool): Whether to use kmeans to initialize the codebooks. + kmeans_iters (int): Number of iterations used for kmeans initialization. + threshold_ema_dead_code (int): + channels_last (bool): Channels are the last dimension in the input tensors. + commitment_weight (float): Weight for commitment loss. + orthogonal_reg_weight (float): Orthogonal regularization weights. + orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. + orthogonal_reg_max_codes (optional int): Maximum number of codes to consider + for orthogonal regulariation. + threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes + that have an exponential moving average cluster size less than the specified threshold with + randomly selected vector from the current batch. + """ + def __init__( + self, + dim: int, + codebook_size: int, + codebook_dim: tp.Optional[int] = None, + decay: float = 0.8, + epsilon: float = 1e-5, + kmeans_init: bool = False, + kmeans_iters: int = 10, + threshold_ema_dead_code: int = 2, + channels_last: bool = False, + commitment_weight: float = 1., + orthogonal_reg_weight: float = 0.0, + orthogonal_reg_active_codes_only: bool = False, + orthogonal_reg_max_codes: tp.Optional[int] = None, + ): + super().__init__() + _codebook_dim: int = default(codebook_dim, dim) + + requires_projection = _codebook_dim != dim + self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity()) + self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity()) + + self.epsilon = epsilon + self.commitment_weight = commitment_weight + + self.orthogonal_reg_weight = orthogonal_reg_weight + self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only + self.orthogonal_reg_max_codes = orthogonal_reg_max_codes + + self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size, + kmeans_init=kmeans_init, kmeans_iters=kmeans_iters, + decay=decay, epsilon=epsilon, + threshold_ema_dead_code=threshold_ema_dead_code) + self.codebook_size = codebook_size + + self.channels_last = channels_last + + @property + def codebook(self): + return self._codebook.embed + + @property + def inited(self): + return self._codebook.inited + + def _preprocess(self, x): + if not self.channels_last: + x = rearrange(x, "b d n -> b n d") + return x + + def _postprocess(self, quantize): + if not self.channels_last: + quantize = rearrange(quantize, "b n d -> b d n") + return quantize + + def encode(self, x): + x = self._preprocess(x) + x = self.project_in(x) + embed_in = self._codebook.encode(x) + return embed_in + + def decode(self, embed_ind): + 
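# --- Illustrative sketch (not part of the patch): the nearest-neighbour search inside
# EuclideanCodebook.quantize above. The expanded form -(|x|^2 - 2 x.e + |e|^2) is just
# the negated squared Euclidean distance, so taking its argmax picks the closest code.
import torch

x = torch.randn(32, 16)            # flattened inputs, [N, D]
embed = torch.randn(128, 16)       # codebook, [codebook_size, D]

dist = -(
    x.pow(2).sum(1, keepdim=True)
    - 2 * x @ embed.t()
    + embed.t().pow(2).sum(0, keepdim=True)
)
embed_ind = dist.max(dim=-1).indices                       # nearest code per input row
assert torch.allclose(-dist, torch.cdist(x, embed).pow(2), atol=1e-3)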
quantize = self._codebook.decode(embed_ind) + quantize = self.project_out(quantize) + quantize = self._postprocess(quantize) + return quantize + + def forward(self, x): + device = x.device + x = self._preprocess(x) + + x = self.project_in(x) + quantize, embed_ind = self._codebook(x) + + if self.training: + quantize = x + (quantize - x).detach() + + loss = torch.tensor([0.0], device=device, requires_grad=self.training) + + if self.training: + if self.commitment_weight > 0: + commit_loss = F.mse_loss(quantize.detach(), x) + loss = loss + commit_loss * self.commitment_weight + + if self.orthogonal_reg_weight > 0: + codebook = self.codebook + + if self.orthogonal_reg_active_codes_only: + # only calculate orthogonal loss for the activated codes for this batch + unique_code_ids = torch.unique(embed_ind) + codebook = codebook[unique_code_ids] + + num_codes = codebook.shape[0] + if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes: + rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes] + codebook = codebook[rand_ids] + + orthogonal_reg_loss = orthgonal_loss_fn(codebook) + loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight + + quantize = self.project_out(quantize) + quantize = self._postprocess(quantize) + + return quantize, embed_ind, loss + + +class ResidualVectorQuantization(nn.Module): + """Residual vector quantization implementation. + + Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf + """ + def __init__(self, *, num_quantizers, **kwargs): + super().__init__() + self.layers = nn.ModuleList( + [VectorQuantization(**kwargs) for _ in range(num_quantizers)] + ) + + def forward(self, x, n_q: tp.Optional[int] = None): + quantized_out = 0.0 + residual = x + + all_losses = [] + all_indices = [] + + n_q = n_q or len(self.layers) + + for i, layer in enumerate(self.layers[:n_q]): + quantized, indices, loss = layer(residual) + residual = residual - quantized + quantized_out = quantized_out + quantized + all_indices.append(indices) + all_losses.append(loss) + + out_losses, out_indices = map(torch.stack, (all_losses, all_indices)) + return quantized_out, out_indices, out_losses + + def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor: + residual = x + all_indices = [] + n_q = n_q or len(self.layers) + for layer in self.layers[:n_q]: + indices = layer.encode(residual) + quantized = layer.decode(indices) + residual = residual - quantized + all_indices.append(indices) + out_indices = torch.stack(all_indices) + return out_indices + + def decode(self, q_indices: torch.Tensor) -> torch.Tensor: + quantized_out = torch.tensor(0.0, device=q_indices.device) + for i, indices in enumerate(q_indices): + layer = self.layers[i] + quantized = layer.decode(indices) + quantized_out = quantized_out + quantized + return quantized_out diff --git a/audiocraft/quantization/vq.py b/audiocraft/quantization/vq.py new file mode 100644 index 0000000..f67c3a0 --- /dev/null +++ b/audiocraft/quantization/vq.py @@ -0,0 +1,116 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +import typing as tp + +import torch + +from .base import BaseQuantizer, QuantizedResult +from .core_vq import ResidualVectorQuantization + + +class ResidualVectorQuantizer(BaseQuantizer): + """Residual Vector Quantizer. + + Args: + dimension (int): Dimension of the codebooks. 
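# --- Illustrative sketch (not part of the patch): the straight-through estimator used
# in VectorQuantization.forward above (`quantize = x + (quantize - x).detach()`).
# The forward value is the codebook entry, but gradients flow to x as if quantization
# were the identity; the commitment loss then pulls x toward the (detached) codes.
import torch

x = torch.randn(4, 8, requires_grad=True)
quantize = torch.randn(4, 8)                     # stand-in for the codebook lookup
ste = x + (quantize - x).detach()

assert torch.allclose(ste, quantize)             # forward pass sees the quantized value
ste.sum().backward()
assert torch.equal(x.grad, torch.ones_like(x))   # backward pass treats it as identity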
+ n_q (int): Number of residual vector quantizers used. + q_dropout (bool): Random quantizer drop out at train time. + bins (int): Codebook size. + decay (float): Decay for exponential moving average over the codebooks. + kmeans_init (bool): Whether to use kmeans to initialize the codebooks. + kmeans_iters (int): Number of iterations used for kmeans initialization. + threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes + that have an exponential moving average cluster size less than the specified threshold with + randomly selected vector from the current batch. + orthogonal_reg_weight (float): Orthogonal regularization weights. + orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. + orthogonal_reg_max_codes (optional int): Maximum number of codes to consider. + for orthogonal regulariation. + """ + def __init__( + self, + dimension: int = 256, + n_q: int = 8, + q_dropout: bool = False, + bins: int = 1024, + decay: float = 0.99, + kmeans_init: bool = True, + kmeans_iters: int = 10, + threshold_ema_dead_code: int = 2, + orthogonal_reg_weight: float = 0.0, + orthogonal_reg_active_codes_only: bool = False, + orthogonal_reg_max_codes: tp.Optional[int] = None, + ): + super().__init__() + self.max_n_q = n_q + self.n_q = n_q + self.q_dropout = q_dropout + self.dimension = dimension + self.bins = bins + self.decay = decay + self.kmeans_init = kmeans_init + self.kmeans_iters = kmeans_iters + self.threshold_ema_dead_code = threshold_ema_dead_code + self.orthogonal_reg_weight = orthogonal_reg_weight + self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only + self.orthogonal_reg_max_codes = orthogonal_reg_max_codes + self.vq = ResidualVectorQuantization( + dim=self.dimension, + codebook_size=self.bins, + num_quantizers=self.n_q, + decay=self.decay, + kmeans_init=self.kmeans_init, + kmeans_iters=self.kmeans_iters, + threshold_ema_dead_code=self.threshold_ema_dead_code, + orthogonal_reg_weight=self.orthogonal_reg_weight, + orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only, + orthogonal_reg_max_codes=self.orthogonal_reg_max_codes, + channels_last=False + ) + + def forward(self, x: torch.Tensor, frame_rate: int): + n_q = self.n_q + if self.training and self.q_dropout: + n_q = int(torch.randint(1, self.n_q + 1, (1,)).item()) + bw_per_q = math.log2(self.bins) * frame_rate / 1000 + quantized, codes, commit_loss = self.vq(x, n_q=n_q) + codes = codes.transpose(0, 1) + # codes is [B, K, T], with T frames, K nb of codebooks. + bw = torch.tensor(n_q * bw_per_q).to(x) + return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss)) + + def encode(self, x: torch.Tensor) -> torch.Tensor: + """Encode a given input tensor with the specified frame rate at the given bandwidth. + The RVQ encode method sets the appropriate number of quantizer to use + and returns indices for each quantizer. + """ + n_q = self.n_q + codes = self.vq.encode(x, n_q=n_q) + codes = codes.transpose(0, 1) + # codes is [B, K, T], with T frames, K nb of codebooks. + return codes + + def decode(self, codes: torch.Tensor) -> torch.Tensor: + """Decode the given codes to the quantized representation. + """ + # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T]. 
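# --- Illustrative sketch (not part of the patch): the bandwidth bookkeeping from
# ResidualVectorQuantizer.forward above. Each codebook contributes log2(bins) bits per
# frame, so with bins=1024 and n_q=8 at a 50 Hz frame rate the reported bandwidth is
# 8 * 10 * 50 / 1000 = 4.0 kbps.
import math

bins, n_q, frame_rate = 1024, 8, 50
bw_per_q = math.log2(bins) * frame_rate / 1000   # 0.5 kbps per codebook
assert n_q * bw_per_q == 4.0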
+ codes = codes.transpose(0, 1) + quantized = self.vq.decode(codes) + return quantized + + @property + def total_codebooks(self): + return self.max_n_q + + @property + def num_codebooks(self): + return self.n_q + + def set_num_codebooks(self, n: int): + assert n > 0 and n <= self.max_n_q + self.n_q = n diff --git a/audiocraft/utils/__init__.py b/audiocraft/utils/__init__.py new file mode 100644 index 0000000..0952fcc --- /dev/null +++ b/audiocraft/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/audiocraft/utils/autocast.py b/audiocraft/utils/autocast.py new file mode 100644 index 0000000..ed64484 --- /dev/null +++ b/audiocraft/utils/autocast.py @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch + + +class TorchAutocast: + """TorchAutocast utility class. + Allows you to enable and disable autocast. This is specially useful + when dealing with different architectures and clusters with different + levels of support. + + Args: + enabled (bool): Whether to enable torch.autocast or not. + args: Additional args for torch.autocast. + kwargs: Additional kwargs for torch.autocast + """ + def __init__(self, enabled: bool, *args, **kwargs): + self.autocast = torch.autocast(*args, **kwargs) if enabled else None + + def __enter__(self): + if self.autocast is None: + return + try: + self.autocast.__enter__() + except RuntimeError: + device = self.autocast.device + dtype = self.autocast.fast_dtype + raise RuntimeError( + f"There was an error autocasting with dtype={dtype} device={device}\n" + "If you are on the FAIR Cluster, you might need to use autocast_dtype=float16" + ) + + def __exit__(self, *args, **kwargs): + if self.autocast is None: + return + self.autocast.__exit__(*args, **kwargs) diff --git a/audiocraft/utils/export.py b/audiocraft/utils/export.py new file mode 100644 index 0000000..b513b52 --- /dev/null +++ b/audiocraft/utils/export.py @@ -0,0 +1,56 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +""" +Utility to export a training checkpoint to a lightweight release checkpoint. +""" + +from pathlib import Path +import typing as tp + +from omegaconf import OmegaConf, DictConfig +import torch + + +def _clean_lm_cfg(cfg: DictConfig): + OmegaConf.set_struct(cfg, False) + # This used to be set automatically in the LM solver, need a more robust solution + # for the future. + cfg['transformer_lm']['card'] = 2048 + cfg['transformer_lm']['n_q'] = 4 + # Experimental params no longer supported. 
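# --- Illustrative sketch (not part of the patch): using the TorchAutocast wrapper
# defined in audiocraft/utils/autocast.py above. With enabled=False it is a no-op,
# so the same code path runs unchanged on hardware without autocast support.
import torch

use_amp = torch.cuda.is_available()
autocast = TorchAutocast(enabled=use_amp, device_type='cuda', dtype=torch.float16)

x = torch.randn(4, 4, device='cuda' if use_amp else 'cpu')
with autocast:
    y = x @ x   # matmul runs in float16 under autocast, plain float32 otherwise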
+ bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters', + 'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop'] + for name in bad_params: + del cfg['transformer_lm'][name] + OmegaConf.set_struct(cfg, True) + return cfg + + +def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): + sig = Path(checkpoint_path).parent.name + assert len(sig) == 8, "Not a valid Dora signature" + pkg = torch.load(checkpoint_path, 'cpu') + new_pkg = { + 'best_state': pkg['ema']['state']['model'], + 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), + } + out_file = Path(out_folder) / f'{sig}.th' + torch.save(new_pkg, out_file) + return out_file + + +def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): + sig = Path(checkpoint_path).parent.name + assert len(sig) == 8, "Not a valid Dora signature" + pkg = torch.load(checkpoint_path, 'cpu') + new_pkg = { + 'best_state': pkg['fsdp_best_state']['model'], + 'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg'])) + } + out_file = Path(out_folder) / f'{sig}.th' + torch.save(new_pkg, out_file) + return out_file diff --git a/audiocraft/utils/notebook.py b/audiocraft/utils/notebook.py new file mode 100644 index 0000000..019b9d1 --- /dev/null +++ b/audiocraft/utils/notebook.py @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +try: + import IPython.display as ipd # type: ignore +except ImportError: + # Note in a notebook... + pass + + +import torch + + +def display_audio(samples: torch.Tensor, sample_rate: int): + """Renders an audio player for the given audio samples. + + Args: + samples (torch.Tensor): a Tensor of decoded audio samples + with shapes [B, C, T] or [C, T] + sample_rate (int): sample rate audio should be displayed with. + """ + assert samples.dim() == 2 or samples.dim() == 3 + + samples = samples.detach().cpu() + if samples.dim() == 2: + samples = samples[None, ...] + + for audio in samples: + ipd.display(ipd.Audio(audio, rate=sample_rate)) diff --git a/audiocraft/utils/utils.py b/audiocraft/utils/utils.py new file mode 100644 index 0000000..86e1448 --- /dev/null +++ b/audiocraft/utils/utils.py @@ -0,0 +1,234 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from concurrent.futures import ProcessPoolExecutor +from functools import wraps +import hashlib +import logging +import typing as tp + +import flashy +import flashy.distrib +import omegaconf +import torch +from torch.nn.utils.rnn import pad_sequence + + +logger = logging.getLogger(__name__) + + +def dict_from_config(cfg: omegaconf.DictConfig) -> dict: + """Convenience function to map an omegaconf configuration to a dictionary. + + Args: + cfg (omegaconf.DictConfig): Original configuration to map to dict. + Returns: + dict: Config as dictionary object. 
+ """ + dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) + assert isinstance(dct, dict) + return dct + + +def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset: + if max_samples >= len(dataset): + return dataset + + generator = torch.Generator().manual_seed(seed) + perm = torch.randperm(len(dataset), generator=generator) + return torch.utils.data.Subset(dataset, perm[:max_samples].tolist()) + + +def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, + num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: + """Convenience function to load dataset into a dataloader with optional subset sampling. + + Args: + dataset: Dataset to load. + num_samples (Optional[int]): Number of samples to limit subset size. + batch_size (int): Batch size. + num_workers (int): Number of workers for data loading. + seed (int): Random seed. + """ + if num_samples is not None: + dataset = random_subset(dataset, num_samples, seed) + + dataloader = flashy.distrib.loader( + dataset, + batch_size=batch_size, + num_workers=num_workers, + **kwargs + ) + return dataloader + + +def get_dataset_from_loader(dataloader): + dataset = dataloader.dataset + if isinstance(dataset, torch.utils.data.Subset): + return dataset.dataset + else: + return dataset + + +def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): + """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. + + Args: + input (torch.Tensor): The input tensor containing probabilities. + num_samples (int): Number of samples to draw. + replacement (bool): Whether to draw with replacement or not. + Keywords args: + generator (torch.Generator): A pseudorandom number generator for sampling. + Returns: + torch.Tensor: Last dimension contains num_samples indices + sampled from the multinomial probability distribution + located in the last dimension of tensor input. + """ + input_ = input.reshape(-1, input.shape[-1]) + output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) + output = output_.reshape(*list(input.shape[:-1]), -1) + return output + + +def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: + """Sample next token from top K values along the last dimension of the input probs tensor. + + Args: + probs (torch.Tensor): Input probabilities with token candidates on the last dimension. + k (int): The k in β€œtop-k”. + Returns: + torch.Tensor: Sampled tokens. + """ + top_k_value, _ = torch.topk(probs, k, dim=-1) + min_value_top_k = top_k_value[..., [-1]] + probs *= (probs >= min_value_top_k).float() + probs.div_(probs.sum(dim=-1, keepdim=True)) + next_token = multinomial(probs, num_samples=1) + return next_token + + +def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: + """Sample next token from top P probabilities along the last dimension of the input probs tensor. + + Args: + probs (torch.Tensor): Input probabilities with token candidates on the last dimension. + p (int): The p in β€œtop-p”. + Returns: + torch.Tensor: Sampled tokens. 
+    """
+    probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
+    probs_sum = torch.cumsum(probs_sort, dim=-1)
+    mask = probs_sum - probs_sort > p
+    probs_sort *= (~mask).float()
+    probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
+    next_token = multinomial(probs_sort, num_samples=1)
+    next_token = torch.gather(probs_idx, -1, next_token)
+    return next_token
+
+
+class DummyPoolExecutor:
+    """Dummy pool executor used when there is actually only one worker
+    (e.g. instead of ProcessPoolExecutor).
+    """
+    class DummyResult:
+        def __init__(self, func, *args, **kwargs):
+            self.func = func
+            self.args = args
+            self.kwargs = kwargs
+
+        def result(self):
+            return self.func(*self.args, **self.kwargs)
+
+    def __init__(self, workers, mp_context=None):
+        pass
+
+    def submit(self, func, *args, **kwargs):
+        return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        return
+
+
+def get_pool_executor(num_workers: int, mp_context=None):
+    return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
+
+
+def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
+    """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
+    For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
+
+    Args:
+        lengths (torch.Tensor): tensor with lengths
+        max_len (int): can set the max length manually. Defaults to None.
+    Returns:
+        torch.Tensor: mask with 0s where there are pad tokens, else 1s
+    """
+    assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
+    final_length = lengths.max().item() if not max_len else max_len
+    final_length = max(final_length, 1)  # if all seqs are of len zero we don't want a zero-size tensor
+    return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
+
+
+def hash_trick(word: str, vocab_size: int) -> int:
+    """Hash trick to pair each word with an index
+
+    Args:
+        word (str): word we wish to convert to an index
+        vocab_size (int): size of the vocabulary
+    Returns:
+        int: index of the word in the embedding LUT
+    """
+    hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
+    return hash % vocab_size
+
+
+def with_rank_rng(base_seed: int = 1234):
+    """Decorator for a function so that the function will use a Random Number Generator
+    whose state depends on the GPU rank. The original RNG state is restored upon returning.
+
+    Args:
+        base_seed (int): Random seed.
+    """
+    def _decorator(fun: tp.Callable):
+        @wraps(fun)
+        def _decorated(*args, **kwargs):
+            state = torch.get_rng_state()
+            seed = base_seed ^ flashy.distrib.rank()
+            torch.manual_seed(seed)
+            logger.debug('Rank dependent seed set to %d', seed)
+            try:
+                return fun(*args, **kwargs)
+            finally:
+                torch.set_rng_state(state)
+                logger.debug('RNG state restored.')
+        return _decorated
+    return _decorator
+
+
+def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
+    """Get a list of tensors and collate them into a single tensor according to the following logic:
+    - `dim` specifies the time dimension which will be stacked and padded.
+    - The output will contain 1 new dimension (dimension index 0) which will be the size
+      of the original list.
+
+    Args:
+        tensors (tp.List[torch.Tensor]): List of tensors to collate.
+        dim (int): Dimension which will be stacked and padded.
+    Returns:
+        tp.Tuple[torch.Tensor, torch.Tensor]:
+            torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
+                (dimension index 0) which will be the size of the original list.
+            torch.Tensor: Tensor containing length of original tensor sizes (without padding).
+    """
+    tensors = [x.transpose(0, dim) for x in tensors]
+    lens = torch.LongTensor([len(x) for x in tensors])
+    padded_tensors = pad_sequence(tensors)
+    padded_tensors = padded_tensors.transpose(0, 1)
+    padded_tensors = padded_tensors.transpose(1, dim + 1)
+    return padded_tensors, lens
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..aa3fa0d
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,20 @@
+# please make sure you already have a PyTorch install that is CUDA-enabled!
+av
+einops
+flashy>=0.0.1
+hydra-core>=1.1
+hydra_colorlog
+julius
+num2words
+numpy
+sentencepiece
+spacy==3.5.2
+torch>=2.0.0
+torchaudio>=2.0.0
+huggingface_hub
+tqdm
+transformers
+xformers
+demucs
+librosa
+gradio
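For reviewers, a minimal usage sketch of the sampling and collation helpers added in audiocraft/utils/utils.py. This is illustrative only and not part of the patch; the tensor shapes and the k=3 / p=0.9 parameters are arbitrary assumptions chosen for the example.

# Illustrative only; not part of the patch. Assumes the audiocraft package from this
# patch (and its requirements, notably torch and flashy) is importable.
import torch

from audiocraft.utils.utils import collate, length_to_mask, sample_top_k, sample_top_p

# Fake next-token distributions: batch of 2 sequences, vocabulary of 8 candidates.
probs = torch.softmax(torch.randn(2, 8), dim=-1)

# sample_top_k modifies its input in place, so pass a copy if probs is reused.
top_k_tokens = sample_top_k(probs.clone(), k=3)    # shape [2, 1]
top_p_tokens = sample_top_p(probs.clone(), p=0.9)  # shape [2, 1]

# Pad two variable-length [C, T] tensors along the time dimension (dim=1).
a, b = torch.randn(2, 5), torch.randn(2, 3)
padded, lengths = collate([a, b], dim=1)
print(padded.shape, lengths)    # torch.Size([2, 2, 5]) tensor([5, 3])
print(length_to_mask(lengths))  # True where real samples, False where padding

The collate call stacks the list into a new leading batch dimension and pads along the requested time dimension, while the returned lengths can be turned into a padding mask with length_to_mask, as shown above.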