From b2c31ca27bfa842be03e3aadba25d9c1ce3c2ef4 Mon Sep 17 00:00:00 2001 From: J-shang <33053116+J-shang@users.noreply.github.com> Date: Tue, 16 Aug 2022 21:22:15 +0800 Subject: [PATCH] [Compression] Transformer pruning example (#5017) --- docs/source/compression/best_practices.rst | 8 + docs/source/compression/toctree_pruning.rst | 1 + docs/source/examples.rst | 8 + .../hpo_quickstart_pytorch/index.rst | 57 ++ .../hpo_quickstart_tensorflow/index.rst | 57 ++ .../sphx_glr_pruning_bert_glue_thumb.png | Bin 0 -> 35467 bytes docs/source/tutorials/index.rst | 377 ++++---- docs/source/tutorials/pruning_bert_glue.ipynb | 223 +++++ docs/source/tutorials/pruning_bert_glue.py | 563 ++++++++++++ .../source/tutorials/pruning_bert_glue.py.md5 | 1 + docs/source/tutorials/pruning_bert_glue.rst | 809 ++++++++++++++++++ .../pruning_bert_glue_codeobj.pickle | Bin 0 -> 46998 bytes docs/source/tutorials/sg_execution_times.rst | 6 +- examples/model_compress/.gitignore | 4 +- examples/tutorials/.gitignore | 4 +- examples/tutorials/pruning_bert_glue.py | 563 ++++++++++++ .../v2/pytorch/pruning/basic_pruner.py | 4 +- .../v2/pytorch/pruning/basic_scheduler.py | 2 +- .../v2/pytorch/pruning/movement_pruner.py | 180 +++- .../v2/pytorch/pruning/tools/__init__.py | 1 + .../v2/pytorch/pruning/tools/base.py | 3 +- .../pruning/tools/metrics_calculator.py | 2 +- .../pruning/tools/sparsity_allocator.py | 43 +- .../compression/v2/pytorch/utils/evaluator.py | 10 +- .../v2/pytorch/utils/external/__init__.py | 0 .../v2/pytorch/utils/external/huggingface.py | 141 +++ .../compression/v2/pytorch/utils/scaling.py | 5 +- nni/common/graph_utils.py | 14 +- .../pytorch/speedup/compress_modules.py | 35 +- .../pytorch/utils/mask_conflict.py | 14 +- pipelines/full-test-compression.yml | 4 - test/algo/compression/v2/test_scaling.py | 2 +- 32 files changed, 2881 insertions(+), 260 deletions(-) create mode 100644 docs/source/compression/best_practices.rst create mode 100644 docs/source/tutorials/hpo_quickstart_pytorch/index.rst create mode 100644 docs/source/tutorials/hpo_quickstart_tensorflow/index.rst create mode 100644 docs/source/tutorials/images/thumb/sphx_glr_pruning_bert_glue_thumb.png create mode 100644 docs/source/tutorials/pruning_bert_glue.ipynb create mode 100644 docs/source/tutorials/pruning_bert_glue.py create mode 100644 docs/source/tutorials/pruning_bert_glue.py.md5 create mode 100644 docs/source/tutorials/pruning_bert_glue.rst create mode 100644 docs/source/tutorials/pruning_bert_glue_codeobj.pickle create mode 100644 examples/tutorials/pruning_bert_glue.py create mode 100644 nni/algorithms/compression/v2/pytorch/utils/external/__init__.py create mode 100644 nni/algorithms/compression/v2/pytorch/utils/external/huggingface.py diff --git a/docs/source/compression/best_practices.rst b/docs/source/compression/best_practices.rst new file mode 100644 index 000000000..5edfad5e2 --- /dev/null +++ b/docs/source/compression/best_practices.rst @@ -0,0 +1,8 @@ +Best Practices +============== + +.. 
toctree::
+   :hidden:
+   :maxdepth: 2
+
+   Pruning Transformer 
diff --git a/docs/source/compression/toctree_pruning.rst b/docs/source/compression/toctree_pruning.rst
index bc5f61c34..df10eb910 100644
--- a/docs/source/compression/toctree_pruning.rst
+++ b/docs/source/compression/toctree_pruning.rst
@@ -9,3 +9,4 @@ Pruning
    Quickstart 
    Pruner 
    Speedup 
+   Best Practices 
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
index 1d45863ea..093b6d1ff 100644
--- a/docs/source/examples.rst
+++ b/docs/source/examples.rst
@@ -74,3 +74,11 @@ More examples can be found in our :githublink:`GitHub repository `.
    :image: ../img/thumbnails/quantization-speed-up.svg
    :background: indigo
    :tags: Compression
+
+.. cardlinkitem::
+   :header: Pruning BERT on Task MNLI
+   :description: An end-to-end example of using NNI to prune a transformer and measure the real speedup
+   :link: tutorials/pruning_bert_glue
+   :image: ../img/thumbnails/pruning-tutorial.svg
+   :background: indigo
+   :tags: Compression
diff --git a/docs/source/tutorials/hpo_quickstart_pytorch/index.rst b/docs/source/tutorials/hpo_quickstart_pytorch/index.rst
new file mode 100644
index 000000000..ccd068fd7
--- /dev/null
+++ b/docs/source/tutorials/hpo_quickstart_pytorch/index.rst
@@ -0,0 +1,57 @@
+
+
+.. _sphx_glr_tutorials_hpo_quickstart_pytorch:
+
+
+
+
+.. raw:: html
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/hpo_quickstart_pytorch/images/thumb/sphx_glr_main_thumb.png + :alt: HPO Quickstart with PyTorch + + :ref:`sphx_glr_tutorials_hpo_quickstart_pytorch_main.py` + +.. raw:: html + +
HPO Quickstart with PyTorch
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/hpo_quickstart_pytorch/images/thumb/sphx_glr_model_thumb.png + :alt: Port PyTorch Quickstart to NNI + + :ref:`sphx_glr_tutorials_hpo_quickstart_pytorch_model.py` + +.. raw:: html + +
Port PyTorch Quickstart to NNI
+
+ + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /tutorials/hpo_quickstart_pytorch/main + /tutorials/hpo_quickstart_pytorch/model + diff --git a/docs/source/tutorials/hpo_quickstart_tensorflow/index.rst b/docs/source/tutorials/hpo_quickstart_tensorflow/index.rst new file mode 100644 index 000000000..89654ff29 --- /dev/null +++ b/docs/source/tutorials/hpo_quickstart_tensorflow/index.rst @@ -0,0 +1,57 @@ + + +.. _sphx_glr_tutorials_hpo_quickstart_tensorflow: + + + + +.. raw:: html + +
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/hpo_quickstart_tensorflow/images/thumb/sphx_glr_main_thumb.png + :alt: HPO Quickstart with TensorFlow + + :ref:`sphx_glr_tutorials_hpo_quickstart_tensorflow_main.py` + +.. raw:: html + +
HPO Quickstart with TensorFlow
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/hpo_quickstart_tensorflow/images/thumb/sphx_glr_model_thumb.png + :alt: Port TensorFlow Quickstart to NNI + + :ref:`sphx_glr_tutorials_hpo_quickstart_tensorflow_model.py` + +.. raw:: html + +
Port TensorFlow Quickstart to NNI
+
+ + +.. raw:: html + +
+
+
+.. toctree::
+   :hidden:
+
+   /tutorials/hpo_quickstart_tensorflow/main
+   /tutorials/hpo_quickstart_tensorflow/model
+
diff --git a/docs/source/tutorials/images/thumb/sphx_glr_pruning_bert_glue_thumb.png b/docs/source/tutorials/images/thumb/sphx_glr_pruning_bert_glue_thumb.png
new file mode 100644
index 0000000000000000000000000000000000000000..b06c4e6a17748efb9e7d009eb1e161759cfa2b74
GIT binary patch
literal 35467

[35467 bytes of binary PNG thumbnail data omitted]

literal 0
HcmV?d00001

diff --git a/docs/source/tutorials/index.rst b/docs/source/tutorials/index.rst
index 97b2380f1..f9b859581 100644
--- a/docs/source/tutorials/index.rst
+++ b/docs/source/tutorials/index.rst
@@ -1,24 +1,167 @@
 :orphan:
 
-
-
-.. _sphx_glr_tutorials:
-
 Tutorials
 =========
 
+.. raw:: html
+
+    
+ + .. raw:: html
.. only:: html - .. figure:: /tutorials/images/thumb/sphx_glr_pruning_speedup_thumb.png - :alt: Speedup Model with Mask + .. image:: /tutorials/images/thumb/sphx_glr_pruning_speedup_thumb.png + :alt: Speedup Model with Mask + + :ref:`sphx_glr_tutorials_pruning_speedup.py` + +.. raw:: html + +
Speedup Model with Mask
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_quantization_speedup_thumb.png + :alt: SpeedUp Model with Calibration Config + + :ref:`sphx_glr_tutorials_quantization_speedup.py` + +.. raw:: html + +
SpeedUp Model with Calibration Config
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_quantization_quick_start_mnist_thumb.png + :alt: Quantization Quickstart + + :ref:`sphx_glr_tutorials_quantization_quick_start_mnist.py` + +.. raw:: html + +
Quantization Quickstart
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_pruning_quick_start_mnist_thumb.png + :alt: Pruning Quickstart + + :ref:`sphx_glr_tutorials_pruning_quick_start_mnist.py` + +.. raw:: html + +
Pruning Quickstart
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_quantization_customize_thumb.png + :alt: Customize a new quantization algorithm + + :ref:`sphx_glr_tutorials_quantization_customize.py` + +.. raw:: html + +
Customize a new quantization algorithm
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_nasbench_as_dataset_thumb.png + :alt: Use NAS Benchmarks as Datasets + + :ref:`sphx_glr_tutorials_nasbench_as_dataset.py` + +.. raw:: html + +
Use NAS Benchmarks as Datasets
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_pruning_customize_thumb.png + :alt: Customize Basic Pruner + + :ref:`sphx_glr_tutorials_pruning_customize.py` + +.. raw:: html + +
Customize Basic Pruner
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_hello_nas_thumb.png + :alt: Hello, NAS! + + :ref:`sphx_glr_tutorials_hello_nas.py` + +.. raw:: html + +
Hello, NAS!
+
+ + +.. raw:: html + +
+ +.. only:: html + + .. image:: /tutorials/images/thumb/sphx_glr_pruning_bert_glue_thumb.png + :alt: Pruning Transformer with NNI + + :ref:`sphx_glr_tutorials_pruning_bert_glue.py` + +.. raw:: html + +
Pruning Transformer with NNI
+
- :ref:`sphx_glr_tutorials_pruning_speedup.py` .. raw:: html @@ -29,162 +172,21 @@ Tutorials :hidden: /tutorials/pruning_speedup - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials/images/thumb/sphx_glr_quantization_speedup_thumb.png - :alt: SpeedUp Model with Calibration Config - - :ref:`sphx_glr_tutorials_quantization_speedup.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - /tutorials/quantization_speedup - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials/images/thumb/sphx_glr_quantization_quick_start_mnist_thumb.png - :alt: Quantization Quickstart - - :ref:`sphx_glr_tutorials_quantization_quick_start_mnist.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - /tutorials/quantization_quick_start_mnist - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials/images/thumb/sphx_glr_pruning_quick_start_mnist_thumb.png - :alt: Pruning Quickstart - - :ref:`sphx_glr_tutorials_pruning_quick_start_mnist.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - /tutorials/pruning_quick_start_mnist - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials/images/thumb/sphx_glr_quantization_customize_thumb.png - :alt: Customize a new quantization algorithm - - :ref:`sphx_glr_tutorials_quantization_customize.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - /tutorials/quantization_customize - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials/images/thumb/sphx_glr_nasbench_as_dataset_thumb.png - :alt: Use NAS Benchmarks as Datasets - - :ref:`sphx_glr_tutorials_nasbench_as_dataset.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - /tutorials/nasbench_as_dataset - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials/images/thumb/sphx_glr_pruning_customize_thumb.png - :alt: Customize Basic Pruner - - :ref:`sphx_glr_tutorials_pruning_customize.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - /tutorials/pruning_customize - -.. raw:: html - -
- -.. only:: html - - .. figure:: /tutorials/images/thumb/sphx_glr_hello_nas_thumb.png - :alt: Hello, NAS! - - :ref:`sphx_glr_tutorials_hello_nas.py` - -.. raw:: html - -
- - -.. toctree:: - :hidden: - /tutorials/hello_nas + /tutorials/pruning_bert_glue + + + + .. raw:: html -
- - - -.. _sphx_glr_tutorials_hpo_quickstart_pytorch: - - +
.. raw:: html @@ -193,50 +195,44 @@ Tutorials .. only:: html - .. figure:: /tutorials/hpo_quickstart_pytorch/images/thumb/sphx_glr_main_thumb.png - :alt: HPO Quickstart with PyTorch + .. image:: /tutorials/hpo_quickstart_pytorch/images/thumb/sphx_glr_main_thumb.png + :alt: HPO Quickstart with PyTorch - :ref:`sphx_glr_tutorials_hpo_quickstart_pytorch_main.py` + :ref:`sphx_glr_tutorials_hpo_quickstart_pytorch_main.py` .. raw:: html +
HPO Quickstart with PyTorch
-.. toctree:: - :hidden: - - /tutorials/hpo_quickstart_pytorch/main - .. raw:: html
.. only:: html - .. figure:: /tutorials/hpo_quickstart_pytorch/images/thumb/sphx_glr_model_thumb.png - :alt: Port PyTorch Quickstart to NNI + .. image:: /tutorials/hpo_quickstart_pytorch/images/thumb/sphx_glr_model_thumb.png + :alt: Port PyTorch Quickstart to NNI + + :ref:`sphx_glr_tutorials_hpo_quickstart_pytorch_model.py` + +.. raw:: html + +
Port PyTorch Quickstart to NNI
+
- :ref:`sphx_glr_tutorials_hpo_quickstart_pytorch_model.py` .. raw:: html
-.. toctree:: - :hidden: - /tutorials/hpo_quickstart_pytorch/model + .. raw:: html -
- - - -.. _sphx_glr_tutorials_hpo_quickstart_tensorflow: - - +
.. raw:: html @@ -245,31 +241,33 @@ Tutorials .. only:: html - .. figure:: /tutorials/hpo_quickstart_tensorflow/images/thumb/sphx_glr_main_thumb.png - :alt: HPO Quickstart with TensorFlow + .. image:: /tutorials/hpo_quickstart_tensorflow/images/thumb/sphx_glr_main_thumb.png + :alt: HPO Quickstart with TensorFlow - :ref:`sphx_glr_tutorials_hpo_quickstart_tensorflow_main.py` + :ref:`sphx_glr_tutorials_hpo_quickstart_tensorflow_main.py` .. raw:: html +
HPO Quickstart with TensorFlow
-.. toctree:: - :hidden: - - /tutorials/hpo_quickstart_tensorflow/main - .. raw:: html
.. only:: html - .. figure:: /tutorials/hpo_quickstart_tensorflow/images/thumb/sphx_glr_model_thumb.png - :alt: Port TensorFlow Quickstart to NNI + .. image:: /tutorials/hpo_quickstart_tensorflow/images/thumb/sphx_glr_model_thumb.png + :alt: Port TensorFlow Quickstart to NNI + + :ref:`sphx_glr_tutorials_hpo_quickstart_tensorflow_model.py` + +.. raw:: html + +
Port TensorFlow Quickstart to NNI
+
- :ref:`sphx_glr_tutorials_hpo_quickstart_tensorflow_model.py` .. raw:: html @@ -278,11 +276,10 @@ Tutorials .. toctree:: :hidden: + :includehidden: - /tutorials/hpo_quickstart_tensorflow/model -.. raw:: html - -
+
+   /tutorials/hpo_quickstart_pytorch/index.rst
+   /tutorials/hpo_quickstart_tensorflow/index.rst

diff --git a/docs/source/tutorials/pruning_bert_glue.ipynb b/docs/source/tutorials/pruning_bert_glue.ipynb
new file mode 100644
index 000000000..2e0559655
--- /dev/null
+++ b/docs/source/tutorials/pruning_bert_glue.ipynb
@@ -0,0 +1,223 @@
+{
+  "cells": [
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "%matplotlib inline"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "\n# Pruning Transformer with NNI\n\n## Workable Pruning Process\n\nHere we show an effective transformer pruning process that the NNI team has tried; users can also use NNI to discover better processes.\n\nThe entire pruning process can be divided into the following steps:\n\n1. Finetune the pre-trained model on the downstream task. From our experience,\n   pruning the finetuned model gives better final performance than pruning the pre-trained model directly.\n   At the same time, the finetuned model obtained in this step will also be used as the teacher model for the following\n   distillation training.\n2. Prune the attention layers first. Here we apply block sparsity to the attention layer weights,\n   and directly prune a head (condense the weight) if the head is fully masked.\n   If a head is only partially masked, we do not prune it and instead recover its weight.\n3. Retrain the head-pruned model with distillation, recovering the model precision before pruning the FFN layers.\n4. Prune the FFN layers. Here we prune the output channels of the 1st FFN layer;\n   the input channels of the 2nd FFN layer are pruned as a consequence of pruning the 1st layer's output channels.\n5. Retrain the final pruned model with distillation.\n\nWhile pruning transformers, we gained the following experience:\n\n* We use `movement-pruner` in step 2 and `taylor-fo-weight-pruner` in step 4. `movement-pruner` performs well on attention layers,\n  and `taylor-fo-weight-pruner` performs well on FFN layers. Both are gradient-based pruning algorithms;\n  we also tried weight-based pruning algorithms like `l1-norm-pruner`, but they do not seem to work well in this scenario.\n* Distillation is a good way to recover model precision. In terms of results, a 1~2% improvement in accuracy can usually be achieved when we prune BERT on the MNLI task.\n* It is necessary to increase the sparsity gradually rather than jumping to a very high sparsity all at once.\n\n## Experiment\n\n### Preparation\nPlease set ``dev_mode`` to ``False`` to run this tutorial in full. ``dev_mode`` is ``True`` by default, which is only intended for generating this documentation quickly.\n\nThe complete pruning process takes about 8 hours on one A100.
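\n\nAs a reference for the distillation objective mentioned above: the retraining code in this tutorial blends the task loss with a temperature-softened KL term. A minimal sketch (the temperature ``2`` and the ``0.1``/``0.9`` weights match the training code below; the helper name is ours):\n\n```python\nimport torch.nn.functional as F\n\ndef kd_loss(student_logits, teacher_logits, hard_loss, T=2.0):\n    # KL divergence between temperature-softened student and teacher distributions,\n    # scaled by T ** 2 so gradient magnitudes stay comparable across temperatures\n    soft_loss = F.kl_div(F.log_softmax(student_logits / T, dim=-1),\n                         F.softmax(teacher_logits / T, dim=-1),\n                         reduction='batchmean') * (T ** 2)\n    return 0.1 * hard_loss + 0.9 * soft_loss\n```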
\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "dev_mode = True"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Some basic settings.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "from pathlib import Path\nfrom typing import Callable\n\npretrained_model_name_or_path = 'bert-base-uncased'\ntask_name = 'mnli'\nexperiment_id = 'pruning_bert'\n\n# heads_num and layers_num should align with pretrained_model_name_or_path\nheads_num = 12\nlayers_num = 12\n\n# used to save the experiment log\nlog_dir = Path(f'./pruning_log/{pretrained_model_name_or_path}/{task_name}/{experiment_id}')\nlog_dir.mkdir(parents=True, exist_ok=True)\n\n# used to save the finetuned model and share it between experiments with the same pretrained_model_name_or_path and task_name\nmodel_dir = Path(f'./models/{pretrained_model_name_or_path}/{task_name}')\nmodel_dir.mkdir(parents=True, exist_ok=True)\n\nfrom transformers import set_seed\nset_seed(1024)\n\nimport torch\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "The function used to create dataloaders; note that 'mnli' has two evaluation datasets.\nIf teacher_model is set, every dataset is run through the teacher model to collect the 'teacher_logits' used for distillation.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "from torch.utils.data import DataLoader\n\nfrom datasets import load_dataset\nfrom transformers import BertTokenizerFast, DataCollatorWithPadding\n\ntask_to_keys = {\n    'cola': ('sentence', None),\n    'mnli': ('premise', 'hypothesis'),\n    'mrpc': ('sentence1', 'sentence2'),\n    'qnli': ('question', 'sentence'),\n    'qqp': ('question1', 'question2'),\n    'rte': ('sentence1', 'sentence2'),\n    'sst2': ('sentence', None),\n    'stsb': ('sentence1', 'sentence2'),\n    'wnli': ('sentence1', 'sentence2'),\n}\n\ndef prepare_data(cache_dir='./data', train_batch_size=32, eval_batch_size=32,\n                 teacher_model: torch.nn.Module = None):\n    tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path)\n    sentence1_key, sentence2_key = task_to_keys[task_name]\n    data_collator = DataCollatorWithPadding(tokenizer)\n\n    # used to preprocess the raw data\n    def preprocess_function(examples):\n        # Tokenize the texts\n        args = (\n            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])\n        )\n        result = tokenizer(*args, padding=False, max_length=128, truncation=True)\n\n        if 'label' in examples:\n            # In all cases, rename the column to labels because the model will expect that.\n            result['labels'] = examples['label']\n        return result\n\n    raw_datasets = load_dataset('glue', task_name, cache_dir=cache_dir)\n    for key in list(raw_datasets.keys()):\n        if 'test' in key:\n            raw_datasets.pop(key)\n\n    processed_datasets = raw_datasets.map(preprocess_function, batched=True,\n                                          remove_columns=raw_datasets['train'].column_names)\n\n    # if a teacher model is given, add 'teacher_logits' to the datasets that have 'labels'.\n    # 'teacher_logits' is used for distillation and avoids double counting.\n    if teacher_model:\n        teacher_model_training = 
teacher_model.training\n teacher_model.eval()\n model_device = next(teacher_model.parameters()).device\n\n def add_teacher_logits(examples):\n result = {k: v for k, v in examples.items()}\n samples = data_collator(result).to(model_device)\n if 'labels' in samples:\n with torch.no_grad():\n logits = teacher_model(**samples).logits.tolist()\n result['teacher_logits'] = logits\n return result\n\n processed_datasets = processed_datasets.map(add_teacher_logits, batched=True,\n batch_size=train_batch_size)\n teacher_model.train(teacher_model_training)\n\n train_dataset = processed_datasets['train']\n validation_dataset = processed_datasets['validation_matched' if task_name == 'mnli' else 'validation']\n validation_dataset2 = processed_datasets['validation_mismatched'] if task_name == 'mnli' else None\n\n train_dataloader = DataLoader(train_dataset,\n shuffle=True,\n collate_fn=data_collator,\n batch_size=train_batch_size)\n validation_dataloader = DataLoader(validation_dataset,\n collate_fn=data_collator,\n batch_size=eval_batch_size)\n validation_dataloader2 = DataLoader(validation_dataset2,\n collate_fn=data_collator,\n batch_size=eval_batch_size) if task_name == 'mnli' else None\n\n return train_dataloader, validation_dataloader, validation_dataloader2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Training function & evaluation function.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import time\nimport torch.nn.functional as F\nfrom datasets import load_metric\n\ndef training(train_dataloader: DataLoader,\n model: torch.nn.Module,\n optimizer: torch.optim.Optimizer,\n criterion: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],\n lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,\n max_steps: int = None, max_epochs: int = None,\n save_best_model: bool = False, save_path: str = None,\n log_path: str = Path(log_dir) / 'training.log',\n distillation: bool = False,\n evaluation_func=None):\n model.train()\n current_step = 0\n best_result = 0\n\n for current_epoch in range(max_epochs if max_epochs else 1):\n for batch in train_dataloader:\n batch.to(device)\n teacher_logits = batch.pop('teacher_logits', None)\n optimizer.zero_grad()\n outputs = model(**batch)\n loss = outputs.loss\n\n if distillation:\n assert teacher_logits is not None\n distil_loss = F.kl_div(F.log_softmax(outputs.logits / 2, dim=-1),\n F.softmax(teacher_logits / 2, dim=-1), reduction='batchmean') * (2 ** 2)\n loss = 0.1 * loss + 0.9 * distil_loss\n\n loss = criterion(loss, None)\n loss.backward()\n optimizer.step()\n\n if lr_scheduler:\n lr_scheduler.step()\n\n current_step += 1\n\n # evaluation for every 1000 steps\n if current_step % 1000 == 0 or current_step % len(train_dataloader) == 0:\n result = evaluation_func(model) if evaluation_func else None\n with (log_path).open('a+') as f:\n msg = '[{}] Epoch {}, Step {}: {}\\n'.format(time.asctime(time.localtime(time.time())), current_epoch, current_step, result)\n f.write(msg)\n # if it's the best model, save it.\n if save_best_model and best_result < result['default']:\n assert save_path is not None\n torch.save(model.state_dict(), save_path)\n best_result = result['default']\n\n if max_steps and current_step >= max_steps:\n return\n\ndef evaluation(validation_dataloader: DataLoader,\n validation_dataloader2: DataLoader,\n model: torch.nn.Module):\n training = model.training\n model.eval()\n is_regression = task_name == 'stsb'\n metric = 
load_metric('glue', task_name)\n\n    for batch in validation_dataloader:\n        batch.pop('teacher_logits', None)\n        batch.to(device)\n        outputs = model(**batch)\n        predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()\n        metric.add_batch(\n            predictions=predictions,\n            references=batch['labels'],\n        )\n    result = metric.compute()\n\n    if validation_dataloader2:\n        for batch in validation_dataloader2:\n            batch.pop('teacher_logits', None)\n            batch.to(device)\n            outputs = model(**batch)\n            predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()\n            metric.add_batch(\n                predictions=predictions,\n                references=batch['labels'],\n            )\n        result = {'matched': result, 'mismatched': metric.compute()}\n        result['default'] = (result['matched']['accuracy'] + result['mismatched']['accuracy']) / 2\n    else:\n        result['default'] = result.get('f1', result.get('accuracy', None))\n\n    model.train(training)\n    return result\n\n# the huggingface model computes its own loss, so the criterion just passes it through\ndef fake_criterion(outputs, targets):\n    return outputs"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Prepare the pre-trained model and finetune it on the downstream task.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "import functools\n\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom transformers import BertForSequenceClassification\n\ndef create_pretrained_model():\n    is_regression = task_name == 'stsb'\n    num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2)\n    return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, num_labels=num_labels)\n\ndef create_finetuned_model():\n    pretrained_model = create_pretrained_model().to(device)\n\n    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data()\n    evaluation_func = functools.partial(evaluation, validation_dataloader, validation_dataloader2)\n    steps_per_epoch = len(train_dataloader)\n    training_epochs = 3\n\n    finetuned_model_state_path = Path(model_dir) / 'finetuned_model_state.pth'\n\n    if finetuned_model_state_path.exists():\n        pretrained_model.load_state_dict(torch.load(finetuned_model_state_path))\n    elif dev_mode:\n        pass\n    else:\n        optimizer = Adam(pretrained_model.parameters(), lr=3e-5, eps=1e-8)\n\n        def lr_lambda(current_step: int):\n            return max(0.0, float(training_epochs * steps_per_epoch - current_step) / float(training_epochs * steps_per_epoch))\n\n        lr_scheduler = LambdaLR(optimizer, lr_lambda)\n        training(train_dataloader, pretrained_model, optimizer, fake_criterion, lr_scheduler=lr_scheduler, max_epochs=training_epochs,\n                 save_best_model=True, save_path=finetuned_model_state_path, evaluation_func=evaluation_func)\n    return pretrained_model\n\nfinetuned_model = create_finetuned_model()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Use the finetuned model as the teacher model to create the dataloaders.\nThis adds 'teacher_logits' to the datasets; it is used for distillation and can be seen as a kind of data label.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "if not dev_mode:\n    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data(teacher_model=finetuned_model)\nelse:\n    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data()\n\nevaluation_func = functools.partial(evaluation, 
validation_dataloader, validation_dataloader2)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "### Pruning\nFirst, use MovementPruner to prune the attention heads.\n\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": false
+      },
+      "outputs": [],
+      "source": [
+        "steps_per_epoch = len(train_dataloader)\n\n# Set training steps/epochs for pruning.\n\nif not dev_mode:\n    total_epochs = 4\n    total_steps = total_epochs * steps_per_epoch\n    warmup_steps = 1 * steps_per_epoch\n    cooldown_steps = 1 * steps_per_epoch\nelse:\n    total_epochs = 1\n    total_steps = 3\n    warmup_steps = 1\n    cooldown_steps = 1\n\n# Initialize evaluator used by MovementPruner.\n\nimport nni\nfrom nni.algorithms.compression.v2.pytorch import TorchEvaluator\n\nmovement_training = functools.partial(training, train_dataloader, log_path=log_dir / 'movement_pruning.log',\n                                      evaluation_func=evaluation_func)\ntraced_optimizer = nni.trace(Adam)(finetuned_model.parameters(), lr=3e-5, eps=1e-8)\n\ndef lr_lambda(current_step: int):\n    if current_step < warmup_steps:\n        return float(current_step) / warmup_steps\n    return max(0.0, float(total_steps - current_step) / float(total_steps - warmup_steps))\n\ntraced_scheduler = nni.trace(LambdaLR)(traced_optimizer, lr_lambda)\nevaluator = TorchEvaluator(movement_training, traced_optimizer, fake_criterion, traced_scheduler)\n\n# Apply block-soft-movement pruning on attention layers.\n\nfrom nni.compression.pytorch.pruning import MovementPruner\n\nconfig_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder.layer.{}.'.format(i) for i in range(layers_num)], 'sparsity': 0.1}]\npruner = MovementPruner(model=finetuned_model,\n                        config_list=config_list,\n                        evaluator=evaluator,\n                        training_epochs=total_epochs,\n                        training_steps=total_steps,\n                        warm_up_step=warmup_steps,\n                        cool_down_beginning_step=total_steps - cooldown_steps,\n                        regular_scale=10,\n                        movement_mode='soft',\n                        sparse_granularity='auto')\n_, attention_masks = pruner.compress()\npruner.show_pruned_weights()\n\ntorch.save(attention_masks, Path(log_dir) / 'attention_masks.pth')"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Load a new finetuned model to do the speedup.\nNote that NNI speedup does not support replacing the attention module, so here we manually replace the attention module.\n\nIf a head is entirely masked, physically prune it and create the config_list for FFN pruning. The next cell derives each layer's FFN sparsity from the number of pruned heads, as illustrated by the worked example below.
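\n\nTo make the sparsity arithmetic concrete, here is a small worked example with illustrative numbers (not measured results): if 6 of a layer's 12 heads are pruned, the attention weight remaining ratio is 0.5, so the target FFN remaining ratio is half of that, 0.25, i.e. a sparsity of 0.75 spread over 12 pruning iterations:\n\n```python\nheads_num = 12\npruned_heads = 6  # illustrative value\n\n# FFN remaining ratio is half of the attention remaining ratio -> sparsity 0.75\nsparsity = 1 - (1 - pruned_heads / heads_num) * 0.5\n\n# per-iteration sparsity p such that (1 - p) ** 12 == 1 - sparsity -> p ~ 0.109\nsparsity_per_iter = 1 - (1 - sparsity) ** (1 / heads_num)\n```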
heads_num) * 0.5\n # here we use a simple sparsity schedule, we will prune ffn in 12 iterations, each iteration prune `sparsity_per_iter`.\n sparsity_per_iter = 1 - (1 - sparsity) ** (1 / heads_num)\n ffn_config_list.append({'op_names': [f'bert.encoder.layer.{layer_count}.intermediate.dense'], 'sparsity': sparsity_per_iter})\n layer_count += 1\n\nattention_pruned_model.bert.encoder.layer = torch.nn.ModuleList(module_list)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Retrain the attention pruned model with distillation.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "if not dev_mode:\n total_epochs = 5\n total_steps = None\n distillation = True\nelse:\n total_epochs = 1\n total_steps = 1\n distillation = False\n\noptimizer = Adam(attention_pruned_model.parameters(), lr=3e-5, eps=1e-8)\n\ndef lr_lambda(current_step: int):\n return max(0.0, float(total_epochs * steps_per_epoch - current_step) / float(total_epochs * steps_per_epoch))\n\nlr_scheduler = LambdaLR(optimizer, lr_lambda)\nat_model_save_path = log_dir / 'attention_pruned_model_state.pth'\ntraining(train_dataloader, attention_pruned_model, optimizer, fake_criterion, lr_scheduler=lr_scheduler,\n max_epochs=total_epochs, max_steps=total_steps, save_best_model=True, save_path=at_model_save_path,\n distillation=distillation, evaluation_func=evaluation_func)\n\nif not dev_mode:\n attention_pruned_model.load_state_dict(torch.load(at_model_save_path))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Iterative pruning FFN with TaylorFOWeightPruner in 12 iterations.\nFinetuning 2000 steps after each iteration, then finetuning 2 epochs after pruning finished.\n\nNNI will support per-step-pruning-schedule in the future, then can use an pruner to replace the following code.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "if not dev_mode:\n total_epochs = 4\n total_steps = None\n taylor_pruner_steps = 1000\n steps_per_iteration = 2000\n total_pruning_steps = 24000\n distillation = True\nelse:\n total_epochs = 1\n total_steps = 6\n taylor_pruner_steps = 2\n steps_per_iteration = 2\n total_pruning_steps = 4\n distillation = False\n\nfrom nni.compression.pytorch.pruning import TaylorFOWeightPruner\nfrom nni.compression.pytorch.speedup import ModelSpeedup\n\ndistil_training = functools.partial(training, train_dataloader, log_path=log_dir / 'taylor_pruning.log',\n distillation=distillation, evaluation_func=evaluation_func)\ntraced_optimizer = nni.trace(Adam)(attention_pruned_model.parameters(), lr=3e-5, eps=1e-8)\nevaluator = TorchEvaluator(distil_training, traced_optimizer, fake_criterion)\n\ncurrent_step = 0\nbest_result = 0\ninit_lr = 3e-5\n\ndummy_input = torch.rand(8, 128, 768).to(device)\n\nattention_pruned_model.train()\nfor current_epoch in range(total_epochs):\n for batch in train_dataloader:\n if total_steps and current_step >= total_steps:\n break\n # pruning 12 times\n if current_step % steps_per_iteration == 0 and current_step < total_pruning_steps:\n check_point = attention_pruned_model.state_dict()\n pruner = TaylorFOWeightPruner(attention_pruned_model, ffn_config_list, evaluator, taylor_pruner_steps)\n _, ffn_masks = pruner.compress()\n renamed_ffn_masks = {}\n # rename the masks keys, because we only speedup the bert.encoder\n for model_name, targets_mask in ffn_masks.items():\n 
renamed_ffn_masks[model_name.split('bert.encoder.')[1]] = targets_mask\n pruner._unwrap_model()\n attention_pruned_model.load_state_dict(check_point)\n ModelSpeedup(attention_pruned_model.bert.encoder, dummy_input, renamed_ffn_masks).speedup_model()\n optimizer = Adam(attention_pruned_model.parameters(), lr=init_lr)\n\n batch.to(device)\n teacher_logits = batch.pop('teacher_logits', None)\n optimizer.zero_grad()\n\n # manually schedule lr\n for params_group in optimizer.param_groups:\n params_group['lr'] = (1 - current_step / (total_epochs * steps_per_epoch)) * init_lr\n\n outputs = attention_pruned_model(**batch)\n loss = outputs.loss\n\n # distillation\n if teacher_logits is not None:\n distil_loss = F.kl_div(F.log_softmax(outputs.logits / 2, dim=-1),\n F.softmax(teacher_logits / 2, dim=-1), reduction='batchmean') * (2 ** 2)\n loss = 0.1 * loss + 0.9 * distil_loss\n loss.backward()\n optimizer.step()\n\n current_step += 1\n if current_step % 1000 == 0 or current_step % len(train_dataloader) == 0:\n result = evaluation_func(attention_pruned_model)\n with (log_dir / 'ffn_pruning.log').open('a+') as f:\n msg = '[{}] Epoch {}, Step {}: {}\\n'.format(time.asctime(time.localtime(time.time())),\n current_epoch, current_step, result)\n f.write(msg)\n if current_step >= total_pruning_steps and best_result < result['default']:\n torch.save(attention_pruned_model, log_dir / 'best_model.pth')\n best_result = result['default']" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Result\nThe speedup is test on the entire validation dataset with batch size 32 on A100.\nWe test under two pytorch version and found the latency varying widely.\n\nSetting 1: pytorch 1.12.1\n\nSetting 2: pytorch 1.10.0\n\n.. list-table:: Prune Bert-base-uncased on MNLI\n :header-rows: 1\n :widths: auto\n\n * - Attention Pruning Method\n - FFN Pruning Method\n - Total Sparsity\n - Accuracy\n - Acc. 
Drop\n - Speedup (S1)\n - Speedup (S2)\n * -\n -\n - 0%\n - 84.73 / 84.63\n - +0.0 / +0.0\n - 12.56s (x1.00)\n - 4.05s (x1.00)\n * - `movement-pruner` (soft, th=0.1, lambda=5)\n - `taylor-fo-weight-pruner`\n - 51.39%\n - 84.25 / 84.96\n - -0.48 / +0.33\n - 6.85s (x1.83)\n - 2.7s (x1.50)\n * - `movement-pruner` (soft, th=0.1, lambda=10)\n - `taylor-fo-weight-pruner`\n - 66.67%\n - 83.98 / 83.75\n - -0.75 / -0.88\n - 4.73s (x2.66)\n - 2.16s (x1.86)\n * - `movement-pruner` (soft, th=0.1, lambda=20)\n - `taylor-fo-weight-pruner`\n - 77.78%\n - 83.02 / 83.06\n - -1.71 / -1.57\n - 3.35s (x3.75)\n - 1.72s (x2.35)\n * - `movement-pruner` (soft, th=0.1, lambda=30)\n - `taylor-fo-weight-pruner`\n - 87.04%\n - 81.24 / 80.99\n - -3.49 / -3.64\n - 2.19s (x5.74)\n - 1.31s (x3.09)\n\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.7.13"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
\ No newline at end of file
diff --git a/docs/source/tutorials/pruning_bert_glue.py b/docs/source/tutorials/pruning_bert_glue.py
new file mode 100644
index 000000000..16724fd87
--- /dev/null
+++ b/docs/source/tutorials/pruning_bert_glue.py
@@ -0,0 +1,563 @@
+"""
+Pruning Transformer with NNI
+============================
+
+Workable Pruning Process
+------------------------
+
+Here we show an effective transformer pruning process that the NNI team has tried; users can also use NNI to discover better processes.
+
+The entire pruning process can be divided into the following steps:
+
+1. Finetune the pre-trained model on the downstream task. From our experience,
+   the final performance of pruning the finetuned model is better than that of pruning the pre-trained model directly.
+   At the same time, the finetuned model obtained in this step will also be used as the teacher model for the following
+   distillation training.
+2. Prune the attention layers first. Here we apply block sparsity on the attention layer weights,
+   and directly prune a head (condense the weight) if the head is fully masked.
+   If a head is only partially masked, we do not prune it and recover its weight instead.
+3. Retrain the head-pruned model with distillation. Recover the model precision before pruning the FFN layers.
+4. Prune the FFN layers. Here we prune the output channels of the 1st FFN layer;
+   the input channels of the 2nd FFN layer are pruned in turn as a consequence of pruning the 1st layer's output channels.
+5. Retrain the final pruned model with distillation.
+
+During the process of pruning the transformer, we gained the following experience:
+
+* We use :ref:`movement-pruner` in step 2 and :ref:`taylor-fo-weight-pruner` in step 4. :ref:`movement-pruner` performs well on attention layers,
+  and :ref:`taylor-fo-weight-pruner` performs well on FFN layers. Both are gradient-based pruning algorithms;
+  we also tried weight-based pruning algorithms like :ref:`l1-norm-pruner`, but they do not seem to work well in this scenario.
+* Distillation is a good way to recover model precision. In terms of results, usually a 1~2% accuracy improvement can be achieved when pruning BERT on the MNLI task.
+* It is necessary to gradually increase the sparsity rather than reaching a very high sparsity all at once.
+
+Experiment
+----------
+
+Preparation
+^^^^^^^^^^^
+Please set ``dev_mode`` to ``False`` to run this tutorial. ``dev_mode`` defaults to ``True`` because it is used for generating the documentation.
+
+The complete pruning process takes about 8 hours on one A100.
+"""
+
+dev_mode = True
+
+# %%
+# Some basic settings.
+
+from pathlib import Path
+from typing import Callable
+
+pretrained_model_name_or_path = 'bert-base-uncased'
+task_name = 'mnli'
+experiment_id = 'pruning_bert'
+
+# heads_num and layers_num should align with pretrained_model_name_or_path
+heads_num = 12
+layers_num = 12
+
+# used to save the experiment log
+log_dir = Path(f'./pruning_log/{pretrained_model_name_or_path}/{task_name}/{experiment_id}')
+log_dir.mkdir(parents=True, exist_ok=True)
+
+# used to save the finetuned model, shared between experiments with the same pretrained_model_name_or_path and task_name
+model_dir = Path(f'./models/{pretrained_model_name_or_path}/{task_name}')
+model_dir.mkdir(parents=True, exist_ok=True)
+
+from transformers import set_seed
+set_seed(1024)
+
+import torch
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+# %%
+# The function used to create dataloaders. Note that 'mnli' has two evaluation datasets.
+# If teacher_model is set, every dataset is run through the teacher model to collect the 'teacher_logits' used for distillation.
+
+from torch.utils.data import DataLoader
+
+from datasets import load_dataset
+from transformers import BertTokenizerFast, DataCollatorWithPadding
+
+task_to_keys = {
+    'cola': ('sentence', None),
+    'mnli': ('premise', 'hypothesis'),
+    'mrpc': ('sentence1', 'sentence2'),
+    'qnli': ('question', 'sentence'),
+    'qqp': ('question1', 'question2'),
+    'rte': ('sentence1', 'sentence2'),
+    'sst2': ('sentence', None),
+    'stsb': ('sentence1', 'sentence2'),
+    'wnli': ('sentence1', 'sentence2'),
+}
+
+def prepare_data(cache_dir='./data', train_batch_size=32, eval_batch_size=32,
+                 teacher_model: torch.nn.Module = None):
+    tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path)
+    sentence1_key, sentence2_key = task_to_keys[task_name]
+    data_collator = DataCollatorWithPadding(tokenizer)
+
+    # used to preprocess the raw data
+    def preprocess_function(examples):
+        # Tokenize the texts
+        args = (
+            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
+        )
+        result = tokenizer(*args, padding=False, max_length=128, truncation=True)
+
+        if 'label' in examples:
+            # In all cases, rename the column to labels because the model will expect that.
+            result['labels'] = examples['label']
+        return result
+
+    raw_datasets = load_dataset('glue', task_name, cache_dir=cache_dir)
+    for key in list(raw_datasets.keys()):
+        if 'test' in key:
+            raw_datasets.pop(key)
+
+    processed_datasets = raw_datasets.map(preprocess_function, batched=True,
+                                          remove_columns=raw_datasets['train'].column_names)
+
+    # if a teacher model is given, add 'teacher_logits' to every dataset that has 'labels'.
+    # 'teacher_logits' is used for distillation; precomputing it here avoids running the teacher forward pass repeatedly during training.
+ if teacher_model: + teacher_model_training = teacher_model.training + teacher_model.eval() + model_device = next(teacher_model.parameters()).device + + def add_teacher_logits(examples): + result = {k: v for k, v in examples.items()} + samples = data_collator(result).to(model_device) + if 'labels' in samples: + with torch.no_grad(): + logits = teacher_model(**samples).logits.tolist() + result['teacher_logits'] = logits + return result + + processed_datasets = processed_datasets.map(add_teacher_logits, batched=True, + batch_size=train_batch_size) + teacher_model.train(teacher_model_training) + + train_dataset = processed_datasets['train'] + validation_dataset = processed_datasets['validation_matched' if task_name == 'mnli' else 'validation'] + validation_dataset2 = processed_datasets['validation_mismatched'] if task_name == 'mnli' else None + + train_dataloader = DataLoader(train_dataset, + shuffle=True, + collate_fn=data_collator, + batch_size=train_batch_size) + validation_dataloader = DataLoader(validation_dataset, + collate_fn=data_collator, + batch_size=eval_batch_size) + validation_dataloader2 = DataLoader(validation_dataset2, + collate_fn=data_collator, + batch_size=eval_batch_size) if task_name == 'mnli' else None + + return train_dataloader, validation_dataloader, validation_dataloader2 + +# %% +# Training function & evaluation function. + +import time +import torch.nn.functional as F +from datasets import load_metric + +def training(train_dataloader: DataLoader, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + criterion: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], + lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None, + max_steps: int = None, max_epochs: int = None, + save_best_model: bool = False, save_path: str = None, + log_path: str = Path(log_dir) / 'training.log', + distillation: bool = False, + evaluation_func=None): + model.train() + current_step = 0 + best_result = 0 + + for current_epoch in range(max_epochs if max_epochs else 1): + for batch in train_dataloader: + batch.to(device) + teacher_logits = batch.pop('teacher_logits', None) + optimizer.zero_grad() + outputs = model(**batch) + loss = outputs.loss + + if distillation: + assert teacher_logits is not None + distil_loss = F.kl_div(F.log_softmax(outputs.logits / 2, dim=-1), + F.softmax(teacher_logits / 2, dim=-1), reduction='batchmean') * (2 ** 2) + loss = 0.1 * loss + 0.9 * distil_loss + + loss = criterion(loss, None) + loss.backward() + optimizer.step() + + if lr_scheduler: + lr_scheduler.step() + + current_step += 1 + + # evaluation for every 1000 steps + if current_step % 1000 == 0 or current_step % len(train_dataloader) == 0: + result = evaluation_func(model) if evaluation_func else None + with (log_path).open('a+') as f: + msg = '[{}] Epoch {}, Step {}: {}\n'.format(time.asctime(time.localtime(time.time())), current_epoch, current_step, result) + f.write(msg) + # if it's the best model, save it. 
+                if save_best_model and best_result < result['default']:
+                    assert save_path is not None
+                    torch.save(model.state_dict(), save_path)
+                    best_result = result['default']
+
+            if max_steps and current_step >= max_steps:
+                return
+
+def evaluation(validation_dataloader: DataLoader,
+               validation_dataloader2: DataLoader,
+               model: torch.nn.Module):
+    training = model.training
+    model.eval()
+    is_regression = task_name == 'stsb'
+    metric = load_metric('glue', task_name)
+
+    for batch in validation_dataloader:
+        batch.pop('teacher_logits', None)
+        batch.to(device)
+        outputs = model(**batch)
+        predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
+        metric.add_batch(
+            predictions=predictions,
+            references=batch['labels'],
+        )
+    result = metric.compute()
+
+    if validation_dataloader2:
+        for batch in validation_dataloader2:
+            batch.pop('teacher_logits', None)
+            batch.to(device)
+            outputs = model(**batch)
+            predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
+            metric.add_batch(
+                predictions=predictions,
+                references=batch['labels'],
+            )
+        result = {'matched': result, 'mismatched': metric.compute()}
+        result['default'] = (result['matched']['accuracy'] + result['mismatched']['accuracy']) / 2
+    else:
+        result['default'] = result.get('f1', result.get('accuracy', None))
+
+    model.train(training)
+    return result
+
+# Hugging Face models compute the task loss internally, so the criterion only needs to pass it through.
+def fake_criterion(outputs, targets):
+    return outputs
+
+
+# %%
+# Prepare the pre-trained model and finetune it on the downstream task.
+
+import functools
+
+from torch.optim import Adam
+from torch.optim.lr_scheduler import LambdaLR
+from transformers import BertForSequenceClassification
+
+def create_pretrained_model():
+    is_regression = task_name == 'stsb'
+    num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2)
+    return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, num_labels=num_labels)
+
+def create_finetuned_model():
+    pretrained_model = create_pretrained_model().to(device)
+
+    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data()
+    evaluation_func = functools.partial(evaluation, validation_dataloader, validation_dataloader2)
+    steps_per_epoch = len(train_dataloader)
+    training_epochs = 3
+
+    finetuned_model_state_path = Path(model_dir) / 'finetuned_model_state.pth'
+
+    if finetuned_model_state_path.exists():
+        pretrained_model.load_state_dict(torch.load(finetuned_model_state_path))
+    elif dev_mode:
+        pass
+    else:
+        optimizer = Adam(pretrained_model.parameters(), lr=3e-5, eps=1e-8)
+
+        def lr_lambda(current_step: int):
+            return max(0.0, float(training_epochs * steps_per_epoch - current_step) / float(training_epochs * steps_per_epoch))
+
+        lr_scheduler = LambdaLR(optimizer, lr_lambda)
+        training(train_dataloader, pretrained_model, optimizer, fake_criterion, lr_scheduler=lr_scheduler, max_epochs=training_epochs,
+                 save_best_model=True, save_path=finetuned_model_state_path, evaluation_func=evaluation_func)
+    return pretrained_model
+
+finetuned_model = create_finetuned_model()
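+
+# %%
+# For reference, a minimal standalone sketch of the distillation loss used inside
+# ``training`` above: a temperature-2 soft-label KL term, scaled by ``T ** 2`` to keep
+# its gradient magnitude comparable to the hard-label loss, mixed at a 0.9 / 0.1 ratio.
+# This helper is illustrative only and is not called by the rest of the pipeline.
+
+def distillation_loss_sketch(student_logits: torch.Tensor, teacher_logits: torch.Tensor,
+                             hard_loss: torch.Tensor, T: float = 2.) -> torch.Tensor:
+    soft_loss = F.kl_div(F.log_softmax(student_logits / T, dim=-1),
+                         F.softmax(teacher_logits / T, dim=-1),
+                         reduction='batchmean') * (T ** 2)
+    return 0.1 * hard_loss + 0.9 * soft_loss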
+
+# %%
+# Use the finetuned model as the teacher model to create the dataloaders.
+# 'teacher_logits' is added to the dataset; it is used for distillation and can be seen as a kind of data label.
+
+if not dev_mode:
+    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data(teacher_model=finetuned_model)
+else:
+    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data()
+
+evaluation_func = functools.partial(evaluation, validation_dataloader, validation_dataloader2)
+
+# %%
+# Pruning
+# ^^^^^^^
+# First, use MovementPruner to prune the attention heads.
+
+steps_per_epoch = len(train_dataloader)
+
+# Set training steps/epochs for pruning.
+
+if not dev_mode:
+    total_epochs = 4
+    total_steps = total_epochs * steps_per_epoch
+    warmup_steps = 1 * steps_per_epoch
+    cooldown_steps = 1 * steps_per_epoch
+else:
+    total_epochs = 1
+    total_steps = 3
+    warmup_steps = 1
+    cooldown_steps = 1
+
+# Initialize the evaluator used by MovementPruner.
+
+import nni
+from nni.algorithms.compression.v2.pytorch import TorchEvaluator
+
+movement_training = functools.partial(training, train_dataloader, log_path=log_dir / 'movement_pruning.log',
+                                      evaluation_func=evaluation_func)
+traced_optimizer = nni.trace(Adam)(finetuned_model.parameters(), lr=3e-5, eps=1e-8)
+
+def lr_lambda(current_step: int):
+    if current_step < warmup_steps:
+        return float(current_step) / warmup_steps
+    return max(0.0, float(total_steps - current_step) / float(total_steps - warmup_steps))
+
+traced_scheduler = nni.trace(LambdaLR)(traced_optimizer, lr_lambda)
+evaluator = TorchEvaluator(movement_training, traced_optimizer, fake_criterion, traced_scheduler)
+
+# Apply block-soft-movement pruning on the attention layers.
+
+from nni.compression.pytorch.pruning import MovementPruner
+
+config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder.layer.{}.'.format(i) for i in range(layers_num)], 'sparsity': 0.1}]
+pruner = MovementPruner(model=finetuned_model,
+                        config_list=config_list,
+                        evaluator=evaluator,
+                        training_epochs=total_epochs,
+                        training_steps=total_steps,
+                        warm_up_step=warmup_steps,
+                        cool_down_beginning_step=total_steps - cooldown_steps,
+                        regular_scale=10,
+                        movement_mode='soft',
+                        sparse_granularity='auto')
+_, attention_masks = pruner.compress()
+pruner.show_pruned_weights()
+
+torch.save(attention_masks, Path(log_dir) / 'attention_masks.pth')
+
+# %%
+# Load a new finetuned model to do the speedup.
+# Note that NNI speedup does not support replacing the attention module, so here we manually replace the attention module.
+#
+# If a head is entirely masked, physically prune it and create the config_list for FFN pruning.
+
+attention_pruned_model = create_finetuned_model().to(device)
+attention_masks = torch.load(Path(log_dir) / 'attention_masks.pth')
+
+ffn_config_list = []
+layer_count = 0
+module_list = []
+for i in range(0, layers_num):
+    prefix = f'bert.encoder.layer.{i}.'
+    value_mask: torch.Tensor = attention_masks[prefix + 'attention.self.value']['weight']
+    head_mask = (value_mask.reshape(heads_num, -1).sum(-1) == 0.)
+    head_idx = torch.arange(len(head_mask))[head_mask].long().tolist()
+    print(f'layer {i} pruned {len(head_idx)} heads: {head_idx}')
+    if len(head_idx) != heads_num:
+        attention_pruned_model.bert.encoder.layer[i].attention.prune_heads(head_idx)
+        module_list.append(attention_pruned_model.bert.encoder.layer[i])
+        # The final FFN weight remaining ratio is half of the attention weight remaining ratio.
+        # This is just an empirical configuration, you can use any other method to determine this sparsity.
+        sparsity = 1 - (1 - len(head_idx) / heads_num) * 0.5
+        # Here we use a simple sparsity schedule: the FFN is pruned in 12 iterations, with
+        # `sparsity_per_iter` of the remaining channels pruned in each iteration.
+        sparsity_per_iter = 1 - (1 - sparsity) ** (1 / heads_num)
+        ffn_config_list.append({'op_names': [f'bert.encoder.layer.{layer_count}.intermediate.dense'], 'sparsity': sparsity_per_iter})
+        layer_count += 1
+
+attention_pruned_model.bert.encoder.layer = torch.nn.ModuleList(module_list)
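+
+# %%
+# A quick sanity check of the schedule above (illustration only; it assumes the first
+# encoder layer kept at least one head, so that ``ffn_config_list`` is non-empty):
+# pruning ``sparsity_per_iter`` of the remaining channels in each of the 12 iterations
+# compounds to the per-layer target, since ``1 - (1 - sparsity_per_iter) ** 12 == sparsity``.
+
+remaining_ratio = 1.0
+for _ in range(heads_num):  # heads_num == 12, one factor per pruning iteration
+    remaining_ratio *= 1 - ffn_config_list[0]['sparsity']
+print(f'compounded FFN sparsity of the first kept layer: {1 - remaining_ratio:.4f}')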
+
+# %%
+# Retrain the attention-pruned model with distillation.
+
+if not dev_mode:
+    total_epochs = 5
+    total_steps = None
+    distillation = True
+else:
+    total_epochs = 1
+    total_steps = 1
+    distillation = False
+
+optimizer = Adam(attention_pruned_model.parameters(), lr=3e-5, eps=1e-8)
+
+def lr_lambda(current_step: int):
+    return max(0.0, float(total_epochs * steps_per_epoch - current_step) / float(total_epochs * steps_per_epoch))
+
+lr_scheduler = LambdaLR(optimizer, lr_lambda)
+at_model_save_path = log_dir / 'attention_pruned_model_state.pth'
+training(train_dataloader, attention_pruned_model, optimizer, fake_criterion, lr_scheduler=lr_scheduler,
+         max_epochs=total_epochs, max_steps=total_steps, save_best_model=True, save_path=at_model_save_path,
+         distillation=distillation, evaluation_func=evaluation_func)
+
+if not dev_mode:
+    attention_pruned_model.load_state_dict(torch.load(at_model_save_path))
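+
+# %%
+# A note on the mask renaming in the loop below: ``TaylorFOWeightPruner`` emits masks
+# keyed by full model paths, while ``ModelSpeedup`` is applied to the ``bert.encoder``
+# submodule only, so each key has to be re-rooted. An illustration with one such key
+# (the layer index is hypothetical):
+
+full_name = 'bert.encoder.layer.0.intermediate.dense'
+assert full_name.split('bert.encoder.')[1] == 'layer.0.intermediate.dense'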
+
+# %%
+# Iteratively prune the FFN with TaylorFOWeightPruner in 12 iterations.
+# Finetune for 2000 steps after each pruning iteration, then finetune for 2 more epochs after pruning has finished.
+#
+# NNI will support a per-step pruning schedule in the future; a single pruner will then be able to replace the following code.
+
+if not dev_mode:
+    total_epochs = 4
+    total_steps = None
+    taylor_pruner_steps = 1000
+    steps_per_iteration = 2000
+    total_pruning_steps = 24000
+    distillation = True
+else:
+    total_epochs = 1
+    total_steps = 6
+    taylor_pruner_steps = 2
+    steps_per_iteration = 2
+    total_pruning_steps = 4
+    distillation = False
+
+from nni.compression.pytorch.pruning import TaylorFOWeightPruner
+from nni.compression.pytorch.speedup import ModelSpeedup
+
+distil_training = functools.partial(training, train_dataloader, log_path=log_dir / 'taylor_pruning.log',
+                                    distillation=distillation, evaluation_func=evaluation_func)
+traced_optimizer = nni.trace(Adam)(attention_pruned_model.parameters(), lr=3e-5, eps=1e-8)
+evaluator = TorchEvaluator(distil_training, traced_optimizer, fake_criterion)
+
+current_step = 0
+best_result = 0
+init_lr = 3e-5
+
+# bert.encoder is traced directly, so the dummy input is a hidden-state tensor of shape
+# (batch, seq_len, hidden_size); 768 is the hidden size of bert-base-uncased.
+dummy_input = torch.rand(8, 128, 768).to(device)
+
+attention_pruned_model.train()
+for current_epoch in range(total_epochs):
+    for batch in train_dataloader:
+        if total_steps and current_step >= total_steps:
+            break
+        # prune 12 times in total
+        if current_step % steps_per_iteration == 0 and current_step < total_pruning_steps:
+            check_point = attention_pruned_model.state_dict()
+            pruner = TaylorFOWeightPruner(attention_pruned_model, ffn_config_list, evaluator, taylor_pruner_steps)
+            _, ffn_masks = pruner.compress()
+            renamed_ffn_masks = {}
+            # rename the mask keys, because we only speed up bert.encoder
+            for model_name, targets_mask in ffn_masks.items():
+                renamed_ffn_masks[model_name.split('bert.encoder.')[1]] = targets_mask
+            pruner._unwrap_model()
+            attention_pruned_model.load_state_dict(check_point)
+            ModelSpeedup(attention_pruned_model.bert.encoder, dummy_input, renamed_ffn_masks).speedup_model()
+            optimizer = Adam(attention_pruned_model.parameters(), lr=init_lr)
+
+        batch.to(device)
+        teacher_logits = batch.pop('teacher_logits', None)
+        optimizer.zero_grad()
+
+        # manually schedule lr
+        for params_group in optimizer.param_groups:
+            params_group['lr'] = (1 - current_step / (total_epochs * steps_per_epoch)) * init_lr
+
+        outputs = attention_pruned_model(**batch)
+        loss = outputs.loss
+
+        # distillation
+        if teacher_logits is not None:
+            distil_loss = F.kl_div(F.log_softmax(outputs.logits / 2, dim=-1),
+                                   F.softmax(teacher_logits / 2, dim=-1), reduction='batchmean') * (2 ** 2)
+            loss = 0.1 * loss + 0.9 * distil_loss
+        loss.backward()
+        optimizer.step()
+
+        current_step += 1
+        if current_step % 1000 == 0 or current_step % len(train_dataloader) == 0:
+            result = evaluation_func(attention_pruned_model)
+            with (log_dir / 'ffn_pruning.log').open('a+') as f:
+                msg = '[{}] Epoch {}, Step {}: {}\n'.format(time.asctime(time.localtime(time.time())),
+                                                            current_epoch, current_step, result)
+                f.write(msg)
+            if current_step >= total_pruning_steps and best_result < result['default']:
+                torch.save(attention_pruned_model, log_dir / 'best_model.pth')
+                best_result = result['default']
+
+# %%
+# Result
+# ------
+# The speedup is tested on the entire validation dataset with batch size 32 on an A100.
+# We tested under two PyTorch versions and found that the latency varies widely.
+#
+# Setting 1: PyTorch 1.12.1
+#
+# Setting 2: PyTorch 1.10.0
+#
+# .. list-table:: Prune Bert-base-uncased on MNLI
+#    :header-rows: 1
+#    :widths: auto
+#
+#    * - Attention Pruning Method
+#      - FFN Pruning Method
+#      - Total Sparsity
+#      - Accuracy
+#      - Acc. Drop
+#      - Speedup (S1)
+#      - Speedup (S2)
+#    * -
+#      -
+#      - 0%
+#      - 84.73 / 84.63
+#      - +0.0 / +0.0
+#      - 12.56s (x1.00)
+#      - 4.05s (x1.00)
+#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=5)
+#      - :ref:`taylor-fo-weight-pruner`
+#      - 51.39%
+#      - 84.25 / 84.96
+#      - -0.48 / +0.33
+#      - 6.85s (x1.83)
+#      - 2.7s (x1.50)
+#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=10)
+#      - :ref:`taylor-fo-weight-pruner`
+#      - 66.67%
+#      - 83.98 / 83.75
+#      - -0.75 / -0.88
+#      - 4.73s (x2.66)
+#      - 2.16s (x1.86)
+#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=20)
+#      - :ref:`taylor-fo-weight-pruner`
+#      - 77.78%
+#      - 83.02 / 83.06
+#      - -1.71 / -1.57
+#      - 3.35s (x3.75)
+#      - 1.72s (x2.35)
+#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=30)
+#      - :ref:`taylor-fo-weight-pruner`
+#      - 87.04%
+#      - 81.24 / 80.99
+#      - -3.49 / -3.64
+#      - 2.19s (x5.74)
+#      - 1.31s (x3.09)
diff --git a/docs/source/tutorials/pruning_bert_glue.py.md5 b/docs/source/tutorials/pruning_bert_glue.py.md5
new file mode 100644
index 000000000..9df87d82f
--- /dev/null
+++ b/docs/source/tutorials/pruning_bert_glue.py.md5
@@ -0,0 +1 @@
+7d8ff24fe5a88d208ad2ad051f060df4
\ No newline at end of file
diff --git a/docs/source/tutorials/pruning_bert_glue.rst b/docs/source/tutorials/pruning_bert_glue.rst
new file mode 100644
index 000000000..76b5d95e1
--- /dev/null
+++ b/docs/source/tutorials/pruning_bert_glue.rst
@@ -0,0 +1,809 @@
+
+.. DO NOT EDIT.
+.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
+.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
+.. "tutorials/pruning_bert_glue.py"
+.. LINE NUMBERS ARE GIVEN BELOW.
+
+.. only:: html
+
+    .. note::
+        :class: sphx-glr-download-link-note
+
+        Click :ref:`here `
+        to download the full example code
+
+.. rst-class:: sphx-glr-example-title
+
+.. 
_sphx_glr_tutorials_pruning_bert_glue.py: + + +Pruning Transformer with NNI +============================ + +Workable Pruning Process +------------------------ + +Here we show an effective transformer pruning process that NNI team has tried, and users can use NNI to discover better processes. + +The entire pruning process can be divided into the following steps: + +1. Finetune the pre-trained model on the downstream task. From our experience, + the final performance of pruning on the finetuned model is better than pruning directly on the pre-trained model. + At the same time, the finetuned model obtained in this step will also be used as the teacher model for the following + distillation training. +2. Pruning the attention layer at first. Here we apply block-sparse on attention layer weight, + and directly prune the head (condense the weight) if the head was fully masked. + If the head was partially masked, we will not prune it and recover its weight. +3. Retrain the head-pruned model with distillation. Recover the model precision before pruning FFN layer. +4. Pruning the FFN layer. Here we apply the output channels pruning on the 1st FFN layer, + and the 2nd FFN layer input channels will be pruned due to the pruning of 1st layer output channels. +5. Retrain the final pruned model with distillation. + +During the process of pruning transformer, we gained some of the following experiences: + +* We using :ref:`movement-pruner` in step 2 and :ref:`taylor-fo-weight-pruner` in step 4. :ref:`movement-pruner` has good performance on attention layers, + and :ref:`taylor-fo-weight-pruner` method has good performance on FFN layers. These two pruners are all some kinds of gradient-based pruning algorithms, + we also try weight-based pruning algorithms like :ref:`l1-norm-pruner`, but it doesn't seem to work well in this scenario. +* Distillation is a good way to recover model precision. In terms of results, usually 1~2% improvement in accuracy can be achieved when we prune bert on mnli task. +* It is necessary to gradually increase the sparsity rather than reaching a very high sparsity all at once. + +Experiment +---------- + +Preparation +^^^^^^^^^^^ +Please set ``dev_mode`` to ``False`` to run this tutorial. Here ``dev_mode`` is ``True`` by default is for generating documents. + +The complete pruning process takes about 8 hours on one A100. + +.. GENERATED FROM PYTHON SOURCE LINES 41-44 + +.. code-block:: default + + + dev_mode = True + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 45-46 + +Some basic setting. + +.. GENERATED FROM PYTHON SOURCE LINES 46-72 + +.. code-block:: default + + + from pathlib import Path + from typing import Callable + + pretrained_model_name_or_path = 'bert-base-uncased' + task_name = 'mnli' + experiment_id = 'pruning_bert' + + # heads_num and layers_num should align with pretrained_model_name_or_path + heads_num = 12 + layers_num = 12 + + # used to save the experiment log + log_dir = Path(f'./pruning_log/{pretrained_model_name_or_path}/{task_name}/{experiment_id}') + log_dir.mkdir(parents=True, exist_ok=True) + + # used to save the finetuned model and share between different experiemnts with same pretrained_model_name_or_path and task_name + model_dir = Path(f'./models/{pretrained_model_name_or_path}/{task_name}') + model_dir.mkdir(parents=True, exist_ok=True) + + from transformers import set_seed + set_seed(1024) + + import torch + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + + + + + + + +.. 
GENERATED FROM PYTHON SOURCE LINES 73-75 + +The function used to create dataloaders, note that 'mnli' has two evaluation dataset. +If teacher_model is set, will run all dataset on teacher model to get the 'teacher_logits' for distillation. + +.. GENERATED FROM PYTHON SOURCE LINES 75-157 + +.. code-block:: default + + + from torch.utils.data import DataLoader + + from datasets import load_dataset + from transformers import BertTokenizerFast, DataCollatorWithPadding + + task_to_keys = { + 'cola': ('sentence', None), + 'mnli': ('premise', 'hypothesis'), + 'mrpc': ('sentence1', 'sentence2'), + 'qnli': ('question', 'sentence'), + 'qqp': ('question1', 'question2'), + 'rte': ('sentence1', 'sentence2'), + 'sst2': ('sentence', None), + 'stsb': ('sentence1', 'sentence2'), + 'wnli': ('sentence1', 'sentence2'), + } + + def prepare_data(cache_dir='./data', train_batch_size=32, eval_batch_size=32, + teacher_model: torch.nn.Module = None): + tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path) + sentence1_key, sentence2_key = task_to_keys[task_name] + data_collator = DataCollatorWithPadding(tokenizer) + + # used to preprocess the raw data + def preprocess_function(examples): + # Tokenize the texts + args = ( + (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) + ) + result = tokenizer(*args, padding=False, max_length=128, truncation=True) + + if 'label' in examples: + # In all cases, rename the column to labels because the model will expect that. + result['labels'] = examples['label'] + return result + + raw_datasets = load_dataset('glue', task_name, cache_dir=cache_dir) + for key in list(raw_datasets.keys()): + if 'test' in key: + raw_datasets.pop(key) + + processed_datasets = raw_datasets.map(preprocess_function, batched=True, + remove_columns=raw_datasets['train'].column_names) + + # if has teacher model, add 'teacher_logits' to datasets who has 'labels'. + # 'teacher_logits' is used for distillation and avoid the double counting. + if teacher_model: + teacher_model_training = teacher_model.training + teacher_model.eval() + model_device = next(teacher_model.parameters()).device + + def add_teacher_logits(examples): + result = {k: v for k, v in examples.items()} + samples = data_collator(result).to(model_device) + if 'labels' in samples: + with torch.no_grad(): + logits = teacher_model(**samples).logits.tolist() + result['teacher_logits'] = logits + return result + + processed_datasets = processed_datasets.map(add_teacher_logits, batched=True, + batch_size=train_batch_size) + teacher_model.train(teacher_model_training) + + train_dataset = processed_datasets['train'] + validation_dataset = processed_datasets['validation_matched' if task_name == 'mnli' else 'validation'] + validation_dataset2 = processed_datasets['validation_mismatched'] if task_name == 'mnli' else None + + train_dataloader = DataLoader(train_dataset, + shuffle=True, + collate_fn=data_collator, + batch_size=train_batch_size) + validation_dataloader = DataLoader(validation_dataset, + collate_fn=data_collator, + batch_size=eval_batch_size) + validation_dataloader2 = DataLoader(validation_dataset2, + collate_fn=data_collator, + batch_size=eval_batch_size) if task_name == 'mnli' else None + + return train_dataloader, validation_dataloader, validation_dataloader2 + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 158-159 + +Training function & evaluation function. + +.. GENERATED FROM PYTHON SOURCE LINES 159-258 + +.. 
code-block:: default + + + import time + import torch.nn.functional as F + from datasets import load_metric + + def training(train_dataloader: DataLoader, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + criterion: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], + lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None, + max_steps: int = None, max_epochs: int = None, + save_best_model: bool = False, save_path: str = None, + log_path: str = Path(log_dir) / 'training.log', + distillation: bool = False, + evaluation_func=None): + model.train() + current_step = 0 + best_result = 0 + + for current_epoch in range(max_epochs if max_epochs else 1): + for batch in train_dataloader: + batch.to(device) + teacher_logits = batch.pop('teacher_logits', None) + optimizer.zero_grad() + outputs = model(**batch) + loss = outputs.loss + + if distillation: + assert teacher_logits is not None + distil_loss = F.kl_div(F.log_softmax(outputs.logits / 2, dim=-1), + F.softmax(teacher_logits / 2, dim=-1), reduction='batchmean') * (2 ** 2) + loss = 0.1 * loss + 0.9 * distil_loss + + loss = criterion(loss, None) + loss.backward() + optimizer.step() + + if lr_scheduler: + lr_scheduler.step() + + current_step += 1 + + # evaluation for every 1000 steps + if current_step % 1000 == 0 or current_step % len(train_dataloader) == 0: + result = evaluation_func(model) if evaluation_func else None + with (log_path).open('a+') as f: + msg = '[{}] Epoch {}, Step {}: {}\n'.format(time.asctime(time.localtime(time.time())), current_epoch, current_step, result) + f.write(msg) + # if it's the best model, save it. + if save_best_model and best_result < result['default']: + assert save_path is not None + torch.save(model.state_dict(), save_path) + best_result = result['default'] + + if max_steps and current_step >= max_steps: + return + + def evaluation(validation_dataloader: DataLoader, + validation_dataloader2: DataLoader, + model: torch.nn.Module): + training = model.training + model.eval() + is_regression = task_name == 'stsb' + metric = load_metric('glue', task_name) + + for batch in validation_dataloader: + batch.pop('teacher_logits', None) + batch.to(device) + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze() + metric.add_batch( + predictions=predictions, + references=batch['labels'], + ) + result = metric.compute() + + if validation_dataloader2: + for batch in validation_dataloader2: + batch.pop('teacher_logits', None) + batch.to(device) + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze() + metric.add_batch( + predictions=predictions, + references=batch['labels'], + ) + result = {'matched': result, 'mismatched': metric.compute()} + result['default'] = (result['matched']['accuracy'] + result['mismatched']['accuracy']) / 2 + else: + result['default'] = result.get('f1', result.get('accuracy', None)) + + model.train(training) + return result + + # using huggingface native loss + def fake_criterion(outputs, targets): + return outputs + + + + + + + + + +.. GENERATED FROM PYTHON SOURCE LINES 259-260 + +Prepare pre-trained model and finetuning on downstream task. + +.. GENERATED FROM PYTHON SOURCE LINES 260-299 + +.. 
code-block:: default + + + import functools + + from torch.optim import Adam + from torch.optim.lr_scheduler import LambdaLR + from transformers import BertForSequenceClassification + + def create_pretrained_model(): + is_regression = task_name == 'stsb' + num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2) + return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, num_labels=num_labels) + + def create_finetuned_model(): + pretrained_model = create_pretrained_model().to(device) + + train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data() + evaluation_func = functools.partial(evaluation, validation_dataloader, validation_dataloader2) + steps_per_epoch = len(train_dataloader) + training_epochs = 3 + + finetuned_model_state_path = Path(model_dir) / 'finetuned_model_state.pth' + + if finetuned_model_state_path.exists(): + pretrained_model.load_state_dict(torch.load(finetuned_model_state_path)) + elif dev_mode: + pass + else: + optimizer = Adam(pretrained_model.parameters(), lr=3e-5, eps=1e-8) + + def lr_lambda(current_step: int): + return max(0.0, float(training_epochs * steps_per_epoch - current_step) / float(training_epochs * steps_per_epoch)) + + lr_scheduler = LambdaLR(optimizer, lr_lambda) + training(train_dataloader, pretrained_model, optimizer, fake_criterion, lr_scheduler=lr_scheduler, max_epochs=training_epochs, + save_best_model=True, save_path=finetuned_model_state_path, evaluation_func=evaluation_func) + return pretrained_model + + finetuned_model = create_finetuned_model() + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForSequenceClassification: ['cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.bias', 'cls.predictions.transform.dense.weight'] + - This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). + - This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). + Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.weight', 'classifier.bias'] + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+ Reusing dataset glue (./data/glue/mnli/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad) + 0%| | 0/5 [00:00= total_steps: + break + # pruning 12 times + if current_step % steps_per_iteration == 0 and current_step < total_pruning_steps: + check_point = attention_pruned_model.state_dict() + pruner = TaylorFOWeightPruner(attention_pruned_model, ffn_config_list, evaluator, taylor_pruner_steps) + _, ffn_masks = pruner.compress() + renamed_ffn_masks = {} + # rename the masks keys, because we only speedup the bert.encoder + for model_name, targets_mask in ffn_masks.items(): + renamed_ffn_masks[model_name.split('bert.encoder.')[1]] = targets_mask + pruner._unwrap_model() + attention_pruned_model.load_state_dict(check_point) + ModelSpeedup(attention_pruned_model.bert.encoder, dummy_input, renamed_ffn_masks).speedup_model() + optimizer = Adam(attention_pruned_model.parameters(), lr=init_lr) + + batch.to(device) + teacher_logits = batch.pop('teacher_logits', None) + optimizer.zero_grad() + + # manually schedule lr + for params_group in optimizer.param_groups: + params_group['lr'] = (1 - current_step / (total_epochs * steps_per_epoch)) * init_lr + + outputs = attention_pruned_model(**batch) + loss = outputs.loss + + # distillation + if teacher_logits is not None: + distil_loss = F.kl_div(F.log_softmax(outputs.logits / 2, dim=-1), + F.softmax(teacher_logits / 2, dim=-1), reduction='batchmean') * (2 ** 2) + loss = 0.1 * loss + 0.9 * distil_loss + loss.backward() + optimizer.step() + + current_step += 1 + if current_step % 1000 == 0 or current_step % len(train_dataloader) == 0: + result = evaluation_func(attention_pruned_model) + with (log_dir / 'ffn_pruning.log').open('a+') as f: + msg = '[{}] Epoch {}, Step {}: {}\n'.format(time.asctime(time.localtime(time.time())), + current_epoch, current_step, result) + f.write(msg) + if current_step >= total_pruning_steps and best_result < result['default']: + torch.save(attention_pruned_model, log_dir / 'best_model.pth') + best_result = result['default'] + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + Did not bind any model, no need to unbind model. + no multi-dimension masks found. + /home/nishang/anaconda3/envs/nni-dev/lib/python3.7/site-packages/torch/_tensor.py:1083: UserWarning: The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad attribute won't be populated during autograd.backward(). If you indeed want the .grad field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor instead. See github.com/pytorch/pytorch/pull/30531 for more informations. (Triggered internally at aten/src/ATen/core/TensorBody.h:477.) + return self._grad + Did not bind any model, no need to unbind model. + no multi-dimension masks found. + + + + +.. GENERATED FROM PYTHON SOURCE LINES 509-564 + +Result +------ +The speedup is test on the entire validation dataset with batch size 32 on A100. +We test under two pytorch version and found the latency varying widely. + +Setting 1: pytorch 1.12.1 + +Setting 2: pytorch 1.10.0 + +.. list-table:: Prune Bert-base-uncased on MNLI + :header-rows: 1 + :widths: auto + + * - Attention Pruning Method + - FFN Pruning Method + - Total Sparsity + - Accuracy + - Acc. 
Drop
+     - Speedup (S1)
+     - Speedup (S2)
+   * -
+     -
+     - 0%
+     - 84.73 / 84.63
+     - +0.0 / +0.0
+     - 12.56s (x1.00)
+     - 4.05s (x1.00)
+   * - :ref:`movement-pruner` (soft, th=0.1, lambda=5)
+     - :ref:`taylor-fo-weight-pruner`
+     - 51.39%
+     - 84.25 / 84.96
+     - -0.48 / +0.33
+     - 6.85s (x1.83)
+     - 2.7s (x1.50)
+   * - :ref:`movement-pruner` (soft, th=0.1, lambda=10)
+     - :ref:`taylor-fo-weight-pruner`
+     - 66.67%
+     - 83.98 / 83.75
+     - -0.75 / -0.88
+     - 4.73s (x2.66)
+     - 2.16s (x1.86)
+   * - :ref:`movement-pruner` (soft, th=0.1, lambda=20)
+     - :ref:`taylor-fo-weight-pruner`
+     - 77.78%
+     - 83.02 / 83.06
+     - -1.71 / -1.57
+     - 3.35s (x3.75)
+     - 1.72s (x2.35)
+   * - :ref:`movement-pruner` (soft, th=0.1, lambda=30)
+     - :ref:`taylor-fo-weight-pruner`
+     - 87.04%
+     - 81.24 / 80.99
+     - -3.49 / -3.64
+     - 2.19s (x5.74)
+     - 1.31s (x3.09)
+
+
+.. rst-class:: sphx-glr-timing
+
+   **Total running time of the script:** ( 0 minutes 27.206 seconds)
+
+
+.. _sphx_glr_download_tutorials_pruning_bert_glue.py:
+
+.. only:: html
+
+  .. container:: sphx-glr-footer sphx-glr-footer-example
+
+
+    .. container:: sphx-glr-download sphx-glr-download-python
+
+      :download:`Download Python source code: pruning_bert_glue.py `
+
+    .. container:: sphx-glr-download sphx-glr-download-jupyter
+
+      :download:`Download Jupyter notebook: pruning_bert_glue.ipynb `
+
+
+.. only:: html
+
+ .. rst-class:: sphx-glr-signature
+
+    `Gallery generated by Sphinx-Gallery `_
diff --git a/docs/source/tutorials/pruning_bert_glue_codeobj.pickle b/docs/source/tutorials/pruning_bert_glue_codeobj.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..710923d8fd2624b1160e0baf8e66cd35b5433734
GIT binary patch
literal 46998
+            if max_steps and current_step >= max_steps:
+                return
+
+def evaluation(validation_dataloader: DataLoader,
+               validation_dataloader2: DataLoader,
+               model: torch.nn.Module):
+    training = model.training
+    model.eval()
+    is_regression = task_name == 'stsb'
+    metric = load_metric('glue', task_name)
+
+    for batch in validation_dataloader:
+        batch.pop('teacher_logits', None)
+        batch.to(device)
+        outputs = model(**batch)
+        predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
+        metric.add_batch(
+            predictions=predictions,
+            references=batch['labels'],
+        )
+    result = metric.compute()
+
+    if validation_dataloader2:
+        for batch in validation_dataloader2:
+            batch.pop('teacher_logits', None)
+            batch.to(device)
+            outputs = model(**batch)
+            predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
+            metric.add_batch(
+                predictions=predictions,
+                references=batch['labels'],
+            )
+        result = {'matched': result, 'mismatched': metric.compute()}
+        result['default'] = (result['matched']['accuracy'] + result['mismatched']['accuracy']) / 2
+    else:
+        result['default'] = result.get('f1', result.get('accuracy', None))
+
+    model.train(training)
+    return result
+
+# Hugging Face models compute the task loss internally, so the criterion only needs to pass it through.
+def fake_criterion(outputs, targets):
+    return outputs
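+
+
+# %%
+# For MNLI, ``evaluation`` above returns a nested dict; a sketch with illustrative
+# numbers (not real results) shows the shape the training loop relies on:
+
+result_sketch = {'matched': {'accuracy': 0.847}, 'mismatched': {'accuracy': 0.846}}
+result_sketch['default'] = (result_sketch['matched']['accuracy'] + result_sketch['mismatched']['accuracy']) / 2
+# 'default' is the score compared against best_result when deciding whether to save a checkpoint.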
+
+
+# %%
+# Prepare the pre-trained model and finetune it on the downstream task.
+
+import functools
+
+from torch.optim import Adam
+from torch.optim.lr_scheduler import LambdaLR
+from transformers import BertForSequenceClassification
+
+def create_pretrained_model():
+    is_regression = task_name == 'stsb'
+    num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2)
+    return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, num_labels=num_labels)
+
+def create_finetuned_model():
+    pretrained_model = create_pretrained_model().to(device)
+
+    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data()
+    evaluation_func = functools.partial(evaluation, validation_dataloader, validation_dataloader2)
+    steps_per_epoch = len(train_dataloader)
+    training_epochs = 3
+
+    finetuned_model_state_path = Path(model_dir) / 'finetuned_model_state.pth'
+
+    if finetuned_model_state_path.exists():
+        pretrained_model.load_state_dict(torch.load(finetuned_model_state_path))
+    elif dev_mode:
+        pass
+    else:
+        optimizer = Adam(pretrained_model.parameters(), lr=3e-5, eps=1e-8)
+
+        def lr_lambda(current_step: int):
+            return max(0.0, float(training_epochs * steps_per_epoch - current_step) / float(training_epochs * steps_per_epoch))
+
+        lr_scheduler = LambdaLR(optimizer, lr_lambda)
+        training(train_dataloader, pretrained_model, optimizer, fake_criterion, lr_scheduler=lr_scheduler, max_epochs=training_epochs,
+                 save_best_model=True, save_path=finetuned_model_state_path, evaluation_func=evaluation_func)
+    return pretrained_model
+
+finetuned_model = create_finetuned_model()
+
+# %%
+# Use the finetuned model as the teacher model to create the dataloaders.
+# 'teacher_logits' is added to the dataset; it is used for distillation and can be seen as a kind of data label.
+
+if not dev_mode:
+    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data(teacher_model=finetuned_model)
+else:
+    train_dataloader, validation_dataloader, validation_dataloader2 = prepare_data()
+
+evaluation_func = functools.partial(evaluation, validation_dataloader, validation_dataloader2)
+
+# %%
+# Pruning
+# ^^^^^^^
+# First, use MovementPruner to prune the attention heads.
+
+steps_per_epoch = len(train_dataloader)
+
+# Set training steps/epochs for pruning.
+
+if not dev_mode:
+    total_epochs = 4
+    total_steps = total_epochs * steps_per_epoch
+    warmup_steps = 1 * steps_per_epoch
+    cooldown_steps = 1 * steps_per_epoch
+else:
+    total_epochs = 1
+    total_steps = 3
+    warmup_steps = 1
+    cooldown_steps = 1
+
+# Initialize the evaluator used by MovementPruner.
+
+import nni
+from nni.algorithms.compression.v2.pytorch import TorchEvaluator
+
+movement_training = functools.partial(training, train_dataloader, log_path=log_dir / 'movement_pruning.log',
+                                      evaluation_func=evaluation_func)
+traced_optimizer = nni.trace(Adam)(finetuned_model.parameters(), lr=3e-5, eps=1e-8)
+
+def lr_lambda(current_step: int):
+    if current_step < warmup_steps:
+        return float(current_step) / warmup_steps
+    return max(0.0, float(total_steps - current_step) / float(total_steps - warmup_steps))
+
+traced_scheduler = nni.trace(LambdaLR)(traced_optimizer, lr_lambda)
+evaluator = TorchEvaluator(movement_training, traced_optimizer, fake_criterion, traced_scheduler)
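+
+# A sanity check added for illustration: the schedule warms up linearly to its peak at
+# `warmup_steps`, then decays linearly to zero at `total_steps`.
+assert lr_lambda(0) == 0.0 and lr_lambda(warmup_steps) == 1.0 and lr_lambda(total_steps) == 0.0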
+
+# Apply block-soft-movement pruning on the attention layers.
+
+from nni.compression.pytorch.pruning import MovementPruner
+
+config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder.layer.{}.'.format(i) for i in range(layers_num)], 'sparsity': 0.1}]
+pruner = MovementPruner(model=finetuned_model,
+                        config_list=config_list,
+                        evaluator=evaluator,
+                        training_epochs=total_epochs,
+                        training_steps=total_steps,
+                        warm_up_step=warmup_steps,
+                        cool_down_beginning_step=total_steps - cooldown_steps,
+                        regular_scale=10,
+                        movement_mode='soft',
+                        sparse_granularity='auto')
+_, attention_masks = pruner.compress()
+pruner.show_pruned_weights()
+
+torch.save(attention_masks, Path(log_dir) / 'attention_masks.pth')
+
+# %%
+# Load a new finetuned model to do the speedup.
+# Note that NNI speedup does not support replacing the attention module, so here we manually replace the attention module.
+#
+# If a head is entirely masked, physically prune it and create the config_list for FFN pruning.
+
+attention_pruned_model = create_finetuned_model().to(device)
+attention_masks = torch.load(Path(log_dir) / 'attention_masks.pth')
+
+ffn_config_list = []
+layer_count = 0
+module_list = []
+for i in range(0, layers_num):
+    prefix = f'bert.encoder.layer.{i}.'
+    value_mask: torch.Tensor = attention_masks[prefix + 'attention.self.value']['weight']
+    head_mask = (value_mask.reshape(heads_num, -1).sum(-1) == 0.)
+    head_idx = torch.arange(len(head_mask))[head_mask].long().tolist()
+    print(f'layer {i} pruned {len(head_idx)} heads: {head_idx}')
+    if len(head_idx) != heads_num:
+        attention_pruned_model.bert.encoder.layer[i].attention.prune_heads(head_idx)
+        module_list.append(attention_pruned_model.bert.encoder.layer[i])
+        # The final FFN weight remaining ratio is half of the attention weight remaining ratio.
+        # This is just an empirical configuration, you can use any other method to determine this sparsity.
+        sparsity = 1 - (1 - len(head_idx) / heads_num) * 0.5
+        # Here we use a simple sparsity schedule: the FFN is pruned in 12 iterations, with
+        # `sparsity_per_iter` of the remaining channels pruned in each iteration.
+        sparsity_per_iter = 1 - (1 - sparsity) ** (1 / heads_num)
+        ffn_config_list.append({'op_names': [f'bert.encoder.layer.{layer_count}.intermediate.dense'], 'sparsity': sparsity_per_iter})
+        layer_count += 1
+
+attention_pruned_model.bert.encoder.layer = torch.nn.ModuleList(module_list)
+
+# %%
+# Retrain the attention-pruned model with distillation.
+
+if not dev_mode:
+    total_epochs = 5
+    total_steps = None
+    distillation = True
+else:
+    total_epochs = 1
+    total_steps = 1
+    distillation = False
+
+optimizer = Adam(attention_pruned_model.parameters(), lr=3e-5, eps=1e-8)
+
+def lr_lambda(current_step: int):
+    return max(0.0, float(total_epochs * steps_per_epoch - current_step) / float(total_epochs * steps_per_epoch))
+
+lr_scheduler = LambdaLR(optimizer, lr_lambda)
+at_model_save_path = log_dir / 'attention_pruned_model_state.pth'
+training(train_dataloader, attention_pruned_model, optimizer, fake_criterion, lr_scheduler=lr_scheduler,
+         max_epochs=total_epochs, max_steps=total_steps, save_best_model=True, save_path=at_model_save_path,
+         distillation=distillation, evaluation_func=evaluation_func)
+
+if not dev_mode:
+    attention_pruned_model.load_state_dict(torch.load(at_model_save_path))
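+
+# %%
+# A note on the speedup input used below: ``ModelSpeedup`` traces ``bert.encoder``
+# directly, so the dummy input is a hidden-state tensor of shape
+# (batch, seq_len, hidden_size) rather than token ids; 768 is the hidden size of
+# bert-base-uncased.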
# %%
# Iteratively prune the FFN with TaylorFOWeightPruner in 12 iterations.
# Finetune for 2000 steps after each pruning iteration, then finetune for 2 more epochs after pruning has finished.
#
# NNI will support a per-step pruning schedule in the future; a single pruner will then be able to replace the following code.

if not dev_mode:
    total_epochs = 4
    total_steps = None
    taylor_pruner_steps = 1000
    steps_per_iteration = 2000
    total_pruning_steps = 24000
    distillation = True
else:
    total_epochs = 1
    total_steps = 6
    taylor_pruner_steps = 2
    steps_per_iteration = 2
    total_pruning_steps = 4
    distillation = False

from nni.compression.pytorch.pruning import TaylorFOWeightPruner
from nni.compression.pytorch.speedup import ModelSpeedup

distil_training = functools.partial(training, train_dataloader, log_path=log_dir / 'taylor_pruning.log',
                                    distillation=distillation, evaluation_func=evaluation_func)
traced_optimizer = nni.trace(Adam)(attention_pruned_model.parameters(), lr=3e-5, eps=1e-8)
evaluator = TorchEvaluator(distil_training, traced_optimizer, fake_criterion)

current_step = 0
best_result = 0
init_lr = 3e-5

dummy_input = torch.rand(8, 128, 768).to(device)

attention_pruned_model.train()
for current_epoch in range(total_epochs):
    for batch in train_dataloader:
        if total_steps and current_step >= total_steps:
            break
        # prune the FFN 12 times in total
        if current_step % steps_per_iteration == 0 and current_step < total_pruning_steps:
            check_point = attention_pruned_model.state_dict()
            pruner = TaylorFOWeightPruner(attention_pruned_model, ffn_config_list, evaluator, taylor_pruner_steps)
            _, ffn_masks = pruner.compress()
            renamed_ffn_masks = {}
            # rename the mask keys, because we only speed up bert.encoder
            for model_name, targets_mask in ffn_masks.items():
                renamed_ffn_masks[model_name.split('bert.encoder.')[1]] = targets_mask
            pruner._unwrap_model()
            attention_pruned_model.load_state_dict(check_point)
            ModelSpeedup(attention_pruned_model.bert.encoder, dummy_input, renamed_ffn_masks).speedup_model()
            optimizer = Adam(attention_pruned_model.parameters(), lr=init_lr)

        batch.to(device)
        teacher_logits = batch.pop('teacher_logits', None)
        optimizer.zero_grad()

        # manually schedule the lr
        for params_group in optimizer.param_groups:
            params_group['lr'] = (1 - current_step / (total_epochs * steps_per_epoch)) * init_lr

        outputs = attention_pruned_model(**batch)
        loss = outputs.loss

        # distillation
        if teacher_logits is not None:
            distil_loss = F.kl_div(F.log_softmax(outputs.logits / 2, dim=-1),
                                   F.softmax(teacher_logits / 2, dim=-1), reduction='batchmean') * (2 ** 2)
            loss = 0.1 * loss + 0.9 * distil_loss
        loss.backward()
        optimizer.step()

        current_step += 1
        if current_step % 1000 == 0 or current_step % len(train_dataloader) == 0:
            result = evaluation_func(attention_pruned_model)
            with (log_dir / 'ffn_pruning.log').open('a+') as f:
                msg = '[{}] Epoch {}, Step {}: {}\n'.format(time.asctime(time.localtime(time.time())),
                                                            current_epoch, current_step, result)
                f.write(msg)
            if current_step >= total_pruning_steps and best_result < result['default']:
                torch.save(attention_pruned_model, log_dir / 'best_model.pth')
                best_result = result['default']
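# %%
# The latency numbers in the result table below come from timing the full
# validation set. A minimal timing loop like the following sketch can produce a
# comparable measurement; it assumes the dataloader yields the same
# BatchEncoding batches used above, and it is not the exact script used for the table.

def measure_latency(model, dataloader):
    model.eval()
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    start_time = time.time()
    with torch.no_grad():
        for batch in dataloader:
            batch.to(device)
            batch.pop('teacher_logits', None)  # drop distillation targets if present
            model(**batch)
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time() - start_time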
# %%
# Result
# ------
# The speedup is tested on the entire validation dataset with batch size 32 on an A100.
# We tested under two PyTorch versions and found that the latency varies widely.
#
# Setting 1: PyTorch 1.12.1
#
# Setting 2: PyTorch 1.10.0
#
# .. list-table:: Prune Bert-base-uncased on MNLI
#    :header-rows: 1
#    :widths: auto
#
#    * - Attention Pruning Method
#      - FFN Pruning Method
#      - Total Sparsity
#      - Accuracy
#      - Acc. Drop
#      - Speedup (S1)
#      - Speedup (S2)
#    * -
#      -
#      - 0%
#      - 84.73 / 84.63
#      - +0.0 / +0.0
#      - 12.56s (x1.00)
#      - 4.05s (x1.00)
#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=5)
#      - :ref:`taylor-fo-weight-pruner`
#      - 51.39%
#      - 84.25 / 84.96
#      - -0.48 / +0.33
#      - 6.85s (x1.83)
#      - 2.7s (x1.50)
#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=10)
#      - :ref:`taylor-fo-weight-pruner`
#      - 66.67%
#      - 83.98 / 83.75
#      - -0.75 / -0.88
#      - 4.73s (x2.66)
#      - 2.16s (x1.86)
#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=20)
#      - :ref:`taylor-fo-weight-pruner`
#      - 77.78%
#      - 83.02 / 83.06
#      - -1.71 / -1.57
#      - 3.35s (x3.75)
#      - 1.72s (x2.35)
#    * - :ref:`movement-pruner` (soft, th=0.1, lambda=30)
#      - :ref:`taylor-fo-weight-pruner`
#      - 87.04%
#      - 81.24 / 80.99
#      - -3.49 / -3.64
#      - 2.19s (x5.74)
#      - 1.31s (x3.09)
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/basic_pruner.py b/nni/algorithms/compression/v2/pytorch/pruning/basic_pruner.py
index dff3ed00c..9a2049be3 100644
--- a/nni/algorithms/compression/v2/pytorch/pruning/basic_pruner.py
+++ b/nni/algorithms/compression/v2/pytorch/pruning/basic_pruner.py
@@ -189,7 +189,7 @@ class EvaluatorBasedPruner(BasicPruner):
                 raise TypeError(f"{self.__class__.__name__}.__init__() got multiple values for argument '{key}'")
             merged_kwargs[key] = value
         for key, value in def_kwargs.items():
-            if key not in merged_kwargs:
+            if key not in merged_kwargs and key in arg_names:
                 merged_kwargs[key] = value
         diff = set(arg_names).difference(merged_kwargs.keys())
         if diff:
@@ -734,6 +734,8 @@ class ActivationPruner(EvaluatorBasedPruner):
     def _choose_activation(self, activation: str = 'relu') -> Callable:
         if activation == 'relu':
             return F.relu
+        elif activation == 'gelu':
+            return F.gelu
         elif activation == 'relu6':
             return F.relu6
         else:
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/basic_scheduler.py b/nni/algorithms/compression/v2/pytorch/pruning/basic_scheduler.py
index bb9168df8..8193ffd71 100644
--- a/nni/algorithms/compression/v2/pytorch/pruning/basic_scheduler.py
+++ b/nni/algorithms/compression/v2/pytorch/pruning/basic_scheduler.py
@@ -60,7 +60,7 @@ class EvaluatorBasedPruningScheduler(BasePruningScheduler):
                 raise TypeError(f"{self.__class__.__name__}.__init__() got multiple values for argument '{key}'")
             merged_kwargs[key] = value
         for key, value in def_kwargs.items():
-            if key not in merged_kwargs:
+            if key not in merged_kwargs and key in arg_names:
                 merged_kwargs[key] = value
         diff = set(arg_names).difference(merged_kwargs.keys())
         if diff:
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py b/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py
index 2772374f9..30fdcadb2 100644
--- a/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py
+++ b/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py
@@ -6,6 +6,7 @@ from __future__ import annotations
 from copy import deepcopy
 import logging
 from typing import Dict, List, Tuple, Callable, overload
+from typing_extensions import Literal

 import torch
 from torch import autograd, Tensor
@@ -21,15 +22,18 @@ from .tools.base import EvaluatorBasedDataCollector, TrainerBasedDataCollector

 from .tools import (
     NormalSparsityAllocator,
+    ThresholdSparsityAllocator,
     StraightMetricsCalculator
 )
 from ..utils import (
     LightningEvaluator,
-    TorchEvaluator
+    TorchEvaluator,
+    Scaling
 )
 from ..utils.docstring import _EVALUATOR_DOCSTRING
+from ..utils.external.huggingface import parser_factory

 _logger = logging.getLogger(__name__)
@@ -48,14 +52,18 @@ class PrunerScoredModuleWrapper(PrunerModuleWrapper):
     module_name
         The name of the module to compress, wrapper module shares same name.
     """
-    def __init__(self, module: Module, module_name: str, config: Dict):
+    def __init__(self, module: Module, module_name: str, config: Dict, score_size: List[int] | None = None):
         super().__init__(module, module_name, config)
-        self.weight_score = Parameter(torch.empty(self.weight.size()))  # type: ignore
+        self.weight_score = Parameter(torch.empty(score_size)) \
+            if score_size is not None else Parameter(torch.empty_like(module.weight))  # type: ignore
         torch.nn.init.constant_(self.weight_score, val=0.0)

     def forward(self, *inputs):
-        # apply mask to weight, bias
-        self.module.weight = torch.mul(self.weight, _StraightThrough.apply(self.weight_score, self.weight_mask))  # type: ignore
+        repeat = [a // b for a, b in zip(self.weight.shape, self.weight_score.shape)]  # type: ignore
+        weight_score = self.weight_score
+        for dim, num in enumerate(repeat):
+            weight_score = weight_score.repeat_interleave(num, dim=dim)
+        self.module.weight = torch.mul(self.weight, _StraightThrough.apply(weight_score, self.weight_mask))  # type: ignore
         if hasattr(self.module, 'bias') and self.module.bias is not None:
             self.module.bias = torch.mul(self.bias, self.bias_mask)  # type: ignore
         return self.module(*inputs)
@@ -124,9 +132,9 @@ class MovementPruner(EvaluatorBasedPruner):

     Parameters
     ----------
-    model : torch.nn.Module
+    model
         Model to be pruned.
-    config_list : List[Dict]
+    config_list
         Supported keys:
             - sparsity : This is to specify the sparsity for each layer in this config to be compressed.
             - sparsity_per_layer : Equals to sparsity.
@@ -140,16 +148,39 @@ class MovementPruner(EvaluatorBasedPruner):
         {evaluator_docstring}
         The old API (``trainer``, ``traced_optimizer`` and ``criterion``) is still supported and will be deprecated in v3.0.
         If you want to consult the old API, please refer to `v2.8 pruner API `__.
-    training_epochs : int
-        The total epoch number for training the model.
-        Make sure the total `optimizer.step()` in `training_epochs` is bigger than `cool_down_beginning_step`.
-    warm_up_step : int
+    warm_up_step
         The total `optimizer.step()` number before start pruning for warm up.
-        Make sure `warm_up_step` is smaller than `cool_down_beginning_step`.
-    cool_down_beginning_step: int
+        Make sure ``warm_up_step`` is smaller than ``cool_down_beginning_step``.
+    cool_down_beginning_step
         The number of steps at which sparsity stops growing, note that the sparsity stop growing doesn't mean masks not changed.
         The sparsity after each `optimizer.step()` is:
         total_sparsity * (1 - (1 - (current_step - warm_up_step) / (cool_down_beginning_step - warm_up_step)) ** 3).
+    training_epochs
+        The total epoch number for training the model.
+        Make sure the total `optimizer.step()` in ``training_epochs`` is bigger than `cool_down_beginning_step`.
+        If both ``training_epochs`` and ``training_steps`` are set, pruning will stop when either is reached.
+    training_steps
+        The total step number for training the model.
+        Make sure ``training_steps`` is bigger than ``cool_down_beginning_step``.
+        If both ``training_epochs`` and ``training_steps`` are set, pruning will stop when either is reached.
+    regular_scale
+        Used to scale the movement score regularization loss. In 'soft' mode, a higher regular scale means higher final sparsity.
+        The recommended range is 1 ~ 30.
+    movement_mode
+        'hard' or 'soft'. Note that in 'soft' mode, ``sparsity`` set in the ``config_list`` means the sparsity threshold;
+        'soft' mode cannot precisely control the sparsity rate, but it usually has higher performance compared with 'hard' mode.
+        ``sparsity`` in 'soft' mode is usually set to ``0.1``, with ``regular_scale`` controlling the final relative sparsity.
+
+        For detailed differences between 'hard' and 'soft', please refer to the paper.
+        In short, 'hard' means that the corresponding layer is pruned to a fixed ratio by the topk method according to the movement score,
+        which is the sparsity ratio set in config_list.
+        'soft' means that the final sparsity will not be fixed; instead, the generation of the mask is controlled by a threshold,
+        and the positions whose scores fall below the threshold are masked during the movement training process.
+    sparse_granularity
+        This is an experimental interface; by default, 'finegrained' pruning is applied. If 'auto' is set, structured pruning will be applied.
+        For the attention layers, block sparsity with block size [head_width, head_width] is applied. For the following two linear layers (FFN),
+        output channel pruning is applied to the first linear layer, and input channel pruning to the second one.
+        'auto' only supports some huggingface transformers right now (bart, bert, t5).

     Notes
     -----
@@ -157,8 +188,10 @@ class MovementPruner(EvaluatorBasedPruner):
     """.format(evaluator_docstring=_EVALUATOR_DOCSTRING)

     @overload
-    def __init__(self, model: Module, config_list: List[Dict], evaluator: LightningEvaluator | TorchEvaluator, training_epochs: int,
-                 warm_up_step: int, cool_down_beginning_step: int):
+    def __init__(self, model: Module, config_list: List[Dict], evaluator: LightningEvaluator | TorchEvaluator, warm_up_step: int,
+                 cool_down_beginning_step: int, training_epochs: int | None = None, training_steps: int | None = None,
+                 regular_scale: float | None = None, movement_mode: Literal['hard', 'soft'] = 'hard',
+                 sparse_granularity: Literal['auto', 'finegrained'] = 'finegrained'):
         ...

     @overload
@@ -169,14 +202,23 @@ class MovementPruner(EvaluatorBasedPruner):

     def __init__(self, model: Module, config_list: List[Dict], *args, **kwargs):
         # TODO: remove in nni v3.0. Fake overload.
- new_api = ['evaluator', 'training_epochs', 'warm_up_step', 'cool_down_beginning_step'] + new_api = ['evaluator', 'warm_up_step', 'cool_down_beginning_step', 'training_epochs', 'training_steps', 'regular_scale', + 'movement_mode', 'sparse_granularity'] old_api = ['trainer', 'traced_optimizer', 'criterion', 'training_epochs', 'warm_up_step', 'cool_down_beginning_step'] - init_kwargs = self._init_evaluator(model, new_api, old_api, {}, args, kwargs) + init_kwargs = {'training_epochs': None, 'training_steps': None, 'regular_scale': None, 'movement_mode': 'hard', + 'sparse_granularity': 'finegrained'} + init_kwargs = self._init_evaluator(model, new_api, old_api, init_kwargs, args, kwargs) self.training_epochs: int = init_kwargs['training_epochs'] + self.training_steps: int | None = init_kwargs['training_steps'] if self.using_evaluator else None self.warm_up_step: int = init_kwargs['warm_up_step'] self.cool_down_beginning_step: int = init_kwargs['cool_down_beginning_step'] + self.regular_scale: int | None = init_kwargs['regular_scale'] if self.using_evaluator else None + self.movement_mode: Literal['hard', 'soft'] | None = init_kwargs['movement_mode'] if self.using_evaluator else None + self.sparse_granularity = init_kwargs['sparse_granularity'] if self.using_evaluator else None assert self.warm_up_step < self.cool_down_beginning_step, '`warm_up_step` should smaller than `cool_down_beginning_step`' + + self._model_parser = parser_factory(model) super().__init__(model, config_list) def _validate_config_before_canonical(self, model: Module, config_list: List[Dict]): @@ -185,20 +227,61 @@ class MovementPruner(EvaluatorBasedPruner): schema.validate(config_list) def cubic_schedule(self, current_step: int): - if self.warm_up_step < current_step <= self.cool_down_beginning_step: - wrapper_dict = self.get_modules_wrapper() - for config in self.config_list: - scale = 1 - (1 - (current_step - self.warm_up_step) / (self.cool_down_beginning_step - self.warm_up_step)) ** 3 - current_sparsity = config['total_sparsity'] * scale - for op_name in config['op_names']: - wrapper = wrapper_dict[op_name] - wrapper.config['total_sparsity'] = current_sparsity + wrapper_dict = self.get_modules_wrapper() + for config in self.config_list: + current_sparsity = config['total_sparsity'] * self._cubic_scale(current_step) + for op_name in config['op_names']: + # There is an unreachable pyright error if `wrapper_dict[op_name].config['total_sparsity'] = current_sparsity`, + # seems a pyright bug... 
+                wrapper_config = wrapper_dict[op_name].config
+                wrapper_config['total_sparsity'] = current_sparsity
+
+    def _cubic_scale(self, current_step: int):
+        if self.warm_up_step > current_step:
+            return 0
+        elif current_step > self.cool_down_beginning_step:
+            return 1
+        else:
+            return 1 - (1 - (current_step - self.warm_up_step) / (self.cool_down_beginning_step - self.warm_up_step)) ** 3
+
+    def _create_scalers(self) -> Scaling | Dict[str, Dict[str, Scaling]]:
+        assert self.bound_model is not None
+        if self.sparse_granularity and self.sparse_granularity == 'auto' and self._model_parser:
+            scalers = {}
+            for module_name, wrapper in self.get_modules_wrapper().items():
+                if self._model_parser.is_attention(module_name):
+                    num_heads = self._model_parser.get_num_heads(module_name, self.bound_model)
+                    if num_heads <= 0:
+                        scalers[module_name] = {'_default': Scaling([1])}
+                    else:
+                        # assume attention layer weights are 2D
+                        weight_h: int = wrapper.module.weight.shape[0]  # type: ignore
+                        weight_w: int = wrapper.module.weight.shape[1]  # type: ignore
+                        if weight_h % num_heads != 0 or weight_w % num_heads != 0:
+                            scalers[module_name] = {'_default': Scaling([1])}
+                        else:
+                            block_h = weight_h // num_heads
+                            block_w = weight_w // num_heads
+                            scalers[module_name] = {'_default': Scaling([block_h, block_w])}
+                elif self._model_parser.is_ffn(module_name, ffn_num=1):
+                    scalers[module_name] = {'_default': Scaling([1, wrapper.module.weight.shape[1]])}  # type: ignore
+                elif self._model_parser.is_ffn(module_name, ffn_num=2):
+                    scalers[module_name] = {'_default': Scaling([wrapper.module.weight.shape[0], 1])}  # type: ignore
+                else:
+                    scalers[module_name] = {'_default': Scaling([1])}
+        else:
+            scalers = Scaling([1])
+        return scalers

     def reset_tools(self):
+        scalers = self._create_scalers()
         if not hasattr(self, 'metrics_calculator'):
             self.metrics_calculator = StraightMetricsCalculator()
         if not hasattr(self, 'sparsity_allocator'):
-            self.sparsity_allocator = NormalSparsityAllocator(self, continuous_mask=False)
+            if self.movement_mode == 'soft':
+                self.sparsity_allocator = ThresholdSparsityAllocator(self, scalers=scalers, continuous_mask=False)
+            else:
+                self.sparsity_allocator = NormalSparsityAllocator(self, scalers=scalers, continuous_mask=False)

         # use Adam to update the weight_score
         assert self.bound_model is not None
@@ -206,6 +289,14 @@
         optimizer = Adam(params, 1e-2)
         self.step_counter = 0

+        # TODO: waiting for the API to stabilize and experiments to prove this scheduler is needed.
+        # def lr_lambda(current_step: int):
+        #     if current_step < self.warm_up_step:
+        #         return float(current_step) / self.warm_up_step
+        #     return max(0.0, float(147264 - current_step) / float(147264 - self.warm_up_step))

+        # lr_scheduler = LambdaLR(optimizer, lr_lambda)
+
         # update the masks after each optimizer step
         def _optimizer_patch():
             optimizer.step()
@@ -221,6 +312,17 @@ class MovementPruner(EvaluatorBasedPruner):
                 masks = self.sparsity_allocator.generate_sparsity(metrics)  # type: ignore
                 self.load_masks(masks)

+        def _loss_patch(origin_loss: Tensor):
+            if self.regular_scale is not None:
+                l1_reg = 0
+                count = 0
+                for wrapper in self.get_modules_wrapper().values():
+                    l1_reg += torch.norm(torch.sigmoid(wrapper.weight_score), p=1) / wrapper.weight_score.numel()  # type: ignore
+                    count += 1
+                return origin_loss + self.regular_scale * self._cubic_scale(self.step_counter) * l1_reg / count
+            else:
+                return origin_loss
+
         if self.using_evaluator:
             # TODO: move to other place in nni v3.0
             self.evaluator.unbind_model()
@@ -228,7 +330,9 @@
             if not hasattr(self, 'data_collector'):
                 self.data_collector = EvaluatorBasedScoreDataCollector(self, self.evaluator,
                                                                        after_opt_step_tasks=[_optimizer_patch],
-                                                                       max_epochs=self.training_epochs)
+                                                                       max_epochs=self.training_epochs,
+                                                                       max_steps=self.training_steps,
+                                                                       loss_patch=_loss_patch)
             else:
                 self.data_collector.reset(after_opt_step_tasks=[_optimizer_patch])
         else:
@@ -252,7 +356,27 @@
             The configuration for generating the mask.
         """
         _logger.debug("Module detected to compress : %s.", layer.name)
-        wrapper = PrunerScoredModuleWrapper(layer.module, layer.name, config)
+        assert self.bound_model is not None
+        # TODO: merge with _create_scalers after nni v3.0
+        if self.sparse_granularity and self.sparse_granularity == 'auto' and self._model_parser:
+            if self._model_parser.is_attention(layer.name):
+                num_heads = self._model_parser.get_num_heads(layer.name, self.bound_model)
+                if num_heads <= 0:
+                    score_size = None
+                else:
+                    if layer.module.weight.shape[0] % num_heads != 0 or layer.module.weight.shape[1] % num_heads != 0:  # type: ignore
+                        score_size = None
+                    else:
+                        score_size = [num_heads, num_heads]
+            elif self._model_parser.is_ffn(layer.name, ffn_num=1):
+                score_size = [layer.module.weight.shape[0], 1]  # type: ignore
+            elif self._model_parser.is_ffn(layer.name, ffn_num=2):
+                score_size = [1, layer.module.weight.shape[1]]  # type: ignore
+            else:
+                score_size = None
+        else:
+            score_size = None
+        wrapper = PrunerScoredModuleWrapper(layer.module, layer.name, config, score_size)
         assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name
         # move newly registered buffers to the same device of weight
         wrapper.to(layer.module.weight.device)  # type: ignore
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py
index c595805a3..ba15490c0 100644
--- a/nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py
+++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/__init__.py
@@ -29,6 +29,7 @@ from .metrics_calculator import (
 )
 from .sparsity_allocator import (
     NormalSparsityAllocator,
+    ThresholdSparsityAllocator,
     BankSparsityAllocator,
     GlobalSparsityAllocator,
     DependencyAwareAllocator
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/base.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/base.py
index 3a8d418d6..3d9820129 100644
--- a/nni/algorithms/compression/v2/pytorch/pruning/tools/base.py
+++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/base.py
@@ -6,7 +6,8 @@ from datetime import datetime
 import logging
 from pathlib import Path
 import types
-from typing import List, Dict, Literal, Tuple, Optional, Callable, Union
+from typing import List, Dict, Tuple, Optional, Callable, Union
+from typing_extensions import Literal

 import json_tricks
 import torch
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py
index 7433e6a5a..2628751ad 100644
--- a/nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py
+++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py
@@ -24,7 +24,7 @@ class StraightMetricsCalculator(MetricsCalculator):
         for module_name, targets_data in data.items():
             metrics[module_name] = {}
             for target_name, target_data in targets_data.items():
-                metrics[module_name][target_name] = target_data.clone().detach()
+                metrics[module_name][target_name] = self._get_scaler(module_name, target_name).shrink(target_data)
         return metrics
diff --git a/nni/algorithms/compression/v2/pytorch/pruning/tools/sparsity_allocator.py b/nni/algorithms/compression/v2/pytorch/pruning/tools/sparsity_allocator.py
index ae4b901d6..b0eee661b 100644
--- a/nni/algorithms/compression/v2/pytorch/pruning/tools/sparsity_allocator.py
+++ b/nni/algorithms/compression/v2/pytorch/pruning/tools/sparsity_allocator.py
@@ -31,13 +31,28 @@ class NormalSparsityAllocator(SparsityAllocator):
         wrapper = self.pruner.get_modules_wrapper()[module_name]
         for target_name, target_metric in targets_metric.items():
             sparsity_rate = wrapper.config['total_sparsity']
-            prune_num = int(sparsity_rate * target_metric.numel())
-            if prune_num != 0:
-                threshold = torch.topk(target_metric.reshape(-1), prune_num, largest=False)[0].max()
-                shrinked_mask = torch.gt(target_metric, threshold).type_as(target_metric)
-            else:
-                # target_metric should have the same size as shrinked_mask
-                shrinked_mask = torch.ones_like(target_metric)
+            flatten_metric = target_metric.reshape(-1)
+            kept_num = flatten_metric.numel() - int(sparsity_rate * flatten_metric.numel())
+            kept_indices = torch.topk(flatten_metric, kept_num).indices
+            shrinked_mask = torch.zeros_like(flatten_metric).scatter(0, kept_indices, 1.0).reshape_as(target_metric)
             masks[module_name][target_name] = self._expand_mask(module_name, target_name, shrinked_mask)
         return masks
+
+
+class ThresholdSparsityAllocator(SparsityAllocator):
+    """
+    Note: This allocator is an experimental allocator.
+    It takes 'total_sparsity' as the threshold to mask the pruning target where the metric is lower than the threshold.
+    """
+ """ + def common_target_masks_generation(self, metrics: Dict[str, Dict[str, Tensor]]) -> Dict[str, Dict[str, Tensor]]: + masks = {} + # TODO: Support more target type in wrapper & config list refactor + for module_name, targets_metric in metrics.items(): + masks[module_name] = {} + wrapper = self.pruner.get_modules_wrapper()[module_name] + for target_name, target_metric in targets_metric.items(): + threshold = wrapper.config['total_sparsity'] + shrinked_mask = torch.gt(torch.sigmoid(target_metric), threshold).type_as(target_metric) masks[module_name][target_name] = self._expand_mask(module_name, target_name, shrinked_mask) return masks @@ -115,10 +130,10 @@ class GlobalSparsityAllocator(SparsityAllocator): assert global_sparsity_rate == wrapper.config['total_sparsity'] # find the largest metric value among all metrics - max_metric_value = list(list(metrics.values())[0].values())[0].max() + max_metric_value = list(list(metrics.values())[0].values())[0].max().item() for targets_metric in metrics.values(): for target_metric in targets_metric.values(): - max_metric_value = max_metric_value if max_metric_value >= target_metric.max() else target_metric.max() + max_metric_value = max_metric_value if max_metric_value >= target_metric.max().item() else target_metric.max().item() # prevent each module from being over-pruned, prevent ratio is 'max_sparsity_per_layer' for module_name, targets_metric in metrics.items(): @@ -127,10 +142,10 @@ class GlobalSparsityAllocator(SparsityAllocator): max_sparsity = wrapper.config.get('max_sparsity_per_layer', {}).get(module_name, 0.99) assert 0 <= max_sparsity <= 1 old_target_mask: Tensor = getattr(wrapper, f'{target_name}_mask') - expand_times = old_target_mask.numel() // target_metric.numel() - max_pruning_numel = int(max_sparsity * target_metric.numel()) * expand_times - threshold = torch.topk(target_metric.reshape(-1), max_pruning_numel, largest=False)[0].max() - metrics[module_name][target_name] = torch.where(target_metric <= threshold, target_metric, max_metric_value) + flatten_metric = target_metric.reshape(-1) + protected_pruning_numel = target_metric.numel() - int(max_sparsity * target_metric.numel()) + protected_indices = torch.topk(flatten_metric, protected_pruning_numel).indices + metrics[module_name][target_name] = flatten_metric.scatter(0, protected_indices, max_metric_value).reshape_as(target_metric) # build the global_matric & calculate global threshold metric_list = [] @@ -207,7 +222,7 @@ class DependencyAwareAllocator(SparsityAllocator): fused_metrics = self._metric_fuse(sub_metrics) for target_name, fused_metric in fused_metrics.items(): - sparsity_rates = {module_name: self.pruner.get_modules_wrapper()[module_name].config['total_sparsity'] \ + sparsity_rates = {module_name: self.pruner.get_modules_wrapper()[module_name].config['total_sparsity'] for module_name in sub_metrics.keys()} min_sparsity_rate = min(sparsity_rates.values()) diff --git a/nni/algorithms/compression/v2/pytorch/utils/evaluator.py b/nni/algorithms/compression/v2/pytorch/utils/evaluator.py index d3d9f6040..9a9403db6 100644 --- a/nni/algorithms/compression/v2/pytorch/utils/evaluator.py +++ b/nni/algorithms/compression/v2/pytorch/utils/evaluator.py @@ -14,8 +14,13 @@ from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler from torch.utils.hooks import RemovableHandle -import pytorch_lightning as pl -from pytorch_lightning.callbacks import Callback +try: + import pytorch_lightning as pl + from pytorch_lightning.callbacks import Callback +except 
ImportError:
+    LightningInstalled = False
+else:
+    LightningInstalled = True

 from nni.common import is_traceable
 from .constructor_helper import OptimizerConstructHelper, LRSchedulerConstructHelper
@@ -292,6 +297,7 @@ class LightningEvaluator(Evaluator):

     def __init__(self, trainer: pl.Trainer, data_module: pl.LightningDataModule,
                  dummy_input: Any | None = None):
+        assert LightningInstalled, 'pytorch_lightning is not installed.'
         err_msg_p = 'Only support traced {}, please use nni.trace({}) to initialize the trainer.'
         err_msg = err_msg_p.format('pytorch_lightning.Trainer', 'pytorch_lightning.Trainer')
         assert isinstance(trainer, pl.Trainer) and is_traceable(trainer), err_msg
diff --git a/nni/algorithms/compression/v2/pytorch/utils/external/__init__.py b/nni/algorithms/compression/v2/pytorch/utils/external/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/nni/algorithms/compression/v2/pytorch/utils/external/huggingface.py b/nni/algorithms/compression/v2/pytorch/utils/external/huggingface.py
new file mode 100644
index 000000000..74ac20885
--- /dev/null
+++ b/nni/algorithms/compression/v2/pytorch/utils/external/huggingface.py
@@ -0,0 +1,141 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+
+from __future__ import annotations
+
+import logging
+import re
+from typing import Tuple
+
+from torch.nn import Module
+
+try:
+    from transformers import (
+        PreTrainedModel,
+        BartConfig,
+        BertConfig,
+        T5Config
+    )
+except ImportError:
+    TRANSFORMERS_INSTALLED = False
+else:
+    TRANSFORMERS_INSTALLED = True
+
+from nni.algorithms.compression.v2.pytorch.utils.attr import get_nested_attr
+
+_logger = logging.getLogger(__name__)
+
+
+# supported huggingface transformers pretrained model parsers: bart, bert, t5
+def parser_factory(model: Module) -> HuggingfaceModelParser | None:
+    if TRANSFORMERS_INSTALLED and isinstance(model, PreTrainedModel):
+        cls2parser = {
+            BartConfig: HuggingfaceBartParser,
+            BertConfig: HuggingfaceBertParser,
+            T5Config: HuggingfaceT5Parser
+        }
+        type2parser = {
+            'bart': HuggingfaceBartParser,
+            'bert': HuggingfaceBertParser,
+            't5': HuggingfaceT5Parser
+        }
+
+        if hasattr(model, 'config_class'):
+            parser = cls2parser.get(getattr(model, 'config_class'))
+        elif hasattr(model, 'model_type'):
+            parser = type2parser.get(getattr(model, 'model_type'))
+        else:
+            parser = None
+
+        return parser
+    else:
+        return None
+
+
+class HuggingfaceModelParser:
+    # This class is used to verify that a module name belongs to a specific huggingface transformers pretrained model.
+    # Further, it verifies that the module with this name is some kind of special layer (QKVO or FFN).
+    TRANSFORMER_PREFIX: str
+    QKV: Tuple[str, ...]
+    QKVO: Tuple[str, ...]
+    FFN1: Tuple[str, ...]
+    FFN2: Tuple[str, ...]
+    ATTENTION: Tuple[str, ...]
+
+    @classmethod
+    def is_huggingface_model(cls, model: Module):
+        return model.__module__.split('.')[0] == 'transformers'
+
+    @classmethod
+    def is_attention(cls, module_name: str, include_output: bool = True) -> bool:
+        patterns = cls.QKVO if include_output else cls.QKV
+        for pattern in patterns:
+            if pattern in module_name:
+                return True
+        return False
+
+    @classmethod
+    def is_ffn(cls, module_name: str, ffn_num: int = 1) -> bool:
+        if cls.is_attention(module_name):
+            return False
+        if ffn_num == 1:
+            for pattern in cls.FFN1:
+                if pattern in module_name:
+                    return True
+        if ffn_num == 2:
+            for pattern in cls.FFN2:
+                if pattern in module_name:
+                    return True
+        return False
+
+    @classmethod
+    def get_num_heads(cls, module_name: str, model: Module) -> int:
+        if cls.is_attention(module_name, include_output=True):
+            for pattern in cls.ATTENTION:
+                match = re.search(pattern, module_name)
+                if match:
+                    attention_module_name = module_name[0: match.span()[1]]
+                    module = get_nested_attr(model, attention_module_name)
+                    if hasattr(module, 'num_attention_heads'):
+                        num_heads = module.num_attention_heads
+                    elif hasattr(module, 'num_heads'):
+                        num_heads = module.num_heads
+                    elif hasattr(module, 'n_heads'):
+                        num_heads = module.n_heads
+                    else:
+                        warn_msg = f'Cannot get the number of heads for attention layer: {attention_module_name}.'
+                        _logger.warning(warn_msg)
+                        num_heads = 0
+                    return num_heads
+            return 0
+        else:
+            warn_msg = f'The layer `{module_name}` might not be a (Q|K|V) attention layer.'
+            _logger.warning(warn_msg)
+            return 0
+
+
+class HuggingfaceBertParser(HuggingfaceModelParser):
+    TRANSFORMER_PREFIX = r'bert\.encoder\.layer\.[0-9]+\.'
+    QKV = ('attention.self.query', 'attention.self.key', 'attention.self.value')
+    QKVO = QKV + ('attention.output.dense',)
+    FFN1 = ('intermediate.dense',)
+    FFN2 = ('output.dense',)
+    ATTENTION = ('attention.self',)
+
+
+class HuggingfaceBartParser(HuggingfaceModelParser):
+    TRANSFORMER_PREFIX = r'(en|de)coder\.layer\.[0-9]+\.'
+    QKV = ('self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'encoder_attn.q_proj', 'encoder_attn.k_proj', 'encoder_attn.v_proj')
+    QKVO = QKV + ('self_attn.out_proj', 'encoder_attn.out_proj')
+    FFN1 = ('fc1',)
+    FFN2 = ('fc2',)
+    ATTENTION = ('self_attn', 'encoder_attn')
+
+
+class HuggingfaceT5Parser(HuggingfaceModelParser):
+    TRANSFORMER_PREFIX = r'(en|de)coder\.block\.[0-9]+\.layer\.[0-9]+.'
+    QKV = ('SelfAttention.q', 'SelfAttention.k', 'SelfAttention.v', 'EncDecAttention.q', 'EncDecAttention.k', 'EncDecAttention.v')
+    QKVO = QKV + ('SelfAttention.o', 'EncDecAttention.o')
+    FFN1 = ('DenseReluDense.wi',)
+    FFN2 = ('DenseReluDense.wo',)
+    ATTENTION = ('SelfAttention', 'EncDecAttention')
diff --git a/nni/algorithms/compression/v2/pytorch/utils/scaling.py b/nni/algorithms/compression/v2/pytorch/utils/scaling.py
index e2bb04a03..a5f2142b1 100644
--- a/nni/algorithms/compression/v2/pytorch/utils/scaling.py
+++ b/nni/algorithms/compression/v2/pytorch/utils/scaling.py
@@ -122,8 +122,9 @@ class Scaling:
             permute_dims = [2 * _ for _ in range(len(kernel_size))] + [2 * _ + 1 for _ in range(len(kernel_size))]
             converted_target = target.reshape(reshape_size).permute(permute_dims).reshape(final_size + [-1])

-        # step 2: reduce the converted_target last dim with a certain way, by default is converted_target.sum(-1).
-        result = reduce_func(converted_target) if reduce_func else converted_target.sum(-1)
+        # step 2: reduce the last dim of converted_target in a certain way, by default converted_target.mean(-1).
+ # `sum` does not take into account the metric scale problem, it is better to use `mean` here. + result = reduce_func(converted_target) if reduce_func else converted_target.mean(-1) # step 3: reduce the dims where kernel_size is -1. # e.g., target size is [10, 40], kernel_size is [-1, 4], result size is [1, 10], then reduce result to size [10]. diff --git a/nni/common/graph_utils.py b/nni/common/graph_utils.py index 0c146ddee..9ab17834f 100644 --- a/nni/common/graph_utils.py +++ b/nni/common/graph_utils.py @@ -75,7 +75,19 @@ class TorchGraph: if torch.__version__ >= '1.6.0': # only pytorch with version greater than 1.6.0 has the strict option kw_args['strict'] = False - self.trace = torch.jit.trace(model, dummy_input, **kw_args) + try: + import pytorch_lightning as pl + except ImportError: + is_lightning_module = False + else: + if isinstance(model, pl.LightningModule): + is_lightning_module = True + else: + is_lightning_module = False + if is_lightning_module: + self.trace = model.to_torchscript(method="trace", example_inputs=dummy_input, **kw_args) + else: + self.trace = torch.jit.trace(model, dummy_input, **kw_args) torch._C._jit_pass_inline(self.trace.graph) model.train(training) diff --git a/nni/compression/pytorch/speedup/compress_modules.py b/nni/compression/pytorch/speedup/compress_modules.py index 1e59f4276..b5e9285fb 100644 --- a/nni/compression/pytorch/speedup/compress_modules.py +++ b/nni/compression/pytorch/speedup/compress_modules.py @@ -31,6 +31,7 @@ replace_module = { 'SELU': lambda module, masks: no_replace(module, masks), 'CELU': lambda module, masks: no_replace(module, masks), 'GELU': lambda module, masks: no_replace(module, masks), + 'GELUActivation': lambda module, masks: no_replace(module, masks), 'Sigmoid': lambda module, masks: no_replace(module, masks), 'SiLU': lambda module, masks: no_replace(module, masks), 'Mish': lambda module, masks: no_replace(module, masks), @@ -74,6 +75,7 @@ def convert_to_coarse_mask(t_mask, dim): n_dims = len(shape) dim_list = list(range(n_dims)) # try to reduce the mask from the dim-th dimension + dim = dim if dim >= 0 else n_dims + dim dim_list.remove(dim) t_merged = torch.sum(t_mask, dim_list) @@ -190,12 +192,9 @@ def replace_linear(linear, masks): in_mask = in_masks[0] weight_mask = weight_mask['weight'] - # the input of the linear may have two dimensions(CV models) or three - # dimensions(Bert, for example) - n_dim = len(in_mask.size()) # N C K - pruned_in, remained_in = convert_to_coarse_mask(in_mask, n_dim-1) - pruned_out, remained_out = convert_to_coarse_mask(output_mask, n_dim-1) + pruned_in, remained_in = convert_to_coarse_mask(in_mask, -1) + pruned_out, remained_out = convert_to_coarse_mask(output_mask, -1) n_remained_in = weight_mask.size(1) - pruned_in.size(0) n_remained_out = weight_mask.size(0) - pruned_out.size(0) remained_in, remained_out = remained_in.to( @@ -610,11 +609,29 @@ def replace_layernorm(layernorm, masks): if len(in_masks) != 1: raise InputsNumberError() in_mask = in_masks[0] - dense_shape = convert_dense_shape(in_mask) - norm_shape = layernorm.normalized_shape - dim_n = len(dense_shape) - len(norm_shape) - return nn.LayerNorm(dense_shape[dim_n:], layernorm.eps, layernorm.elementwise_affine) + old_normalized_shape = layernorm.normalized_shape + new_normalized_shape = [] + remained_list = [] + for i in range(-len(old_normalized_shape), 0): + pruned, remained = convert_to_coarse_mask(in_mask, i) + new_normalized_shape.append(old_normalized_shape[i] - pruned.size()[0]) + remained_list.append(remained) + + 
new_layernorm = nn.LayerNorm(tuple(new_normalized_shape), layernorm.eps, layernorm.elementwise_affine) + + if new_layernorm.elementwise_affine: + new_layernorm.to(layernorm.weight.device) + # NOTE: should we keep the weight & bias? + with torch.no_grad(): + tmp_weight_data = layernorm.weight.data + tmp_bias_data = layernorm.bias.data + for i, remained in enumerate(remained_list): + tmp_weight_data = torch.index_select(tmp_weight_data, i, remained) + tmp_bias_data = torch.index_select(tmp_bias_data, i, remained) + new_layernorm.weight.data = tmp_weight_data + new_layernorm.bias.data = tmp_bias_data + return new_layernorm def replace_embedding(embedding, masks): """ diff --git a/nni/compression/pytorch/utils/mask_conflict.py b/nni/compression/pytorch/utils/mask_conflict.py index 1d95bbf4f..4b51a0582 100644 --- a/nni/compression/pytorch/utils/mask_conflict.py +++ b/nni/compression/pytorch/utils/mask_conflict.py @@ -45,7 +45,19 @@ def fix_mask_conflict(masks, model, dummy_input, traced=None): if torch.__version__ >= '1.6.0': # only pytorch with version greater than 1.6.0 has the strict option kw_args['strict'] = False - traced = torch.jit.trace(model, dummy_input, **kw_args) + try: + import pytorch_lightning as pl + except ImportError: + is_lightning_module = False + else: + if isinstance(model, pl.LightningModule): + is_lightning_module = True + else: + is_lightning_module = False + if is_lightning_module: + traced = model.to_torchscript(method="trace", example_inputs=dummy_input, **kw_args) + else: + traced = torch.jit.trace(model, dummy_input, **kw_args) model.train(training) fix_group_mask = GroupMaskConflict(masks, model, dummy_input, traced) diff --git a/pipelines/full-test-compression.yml b/pipelines/full-test-compression.yml index a96bd206c..5f5128618 100644 --- a/pipelines/full-test-compression.yml +++ b/pipelines/full-test-compression.yml @@ -42,10 +42,6 @@ stages: platform: ubuntu-latest-gpu python_env: venv - - script: | - python -m pip install "pytorch-lightning<1.7" - displayName: Pin PytorchLightning version - - template: templates/install-nni.yml - template: templates/download-test-data.yml diff --git a/test/algo/compression/v2/test_scaling.py b/test/algo/compression/v2/test_scaling.py index 347d296ca..5a3678cba 100644 --- a/test/algo/compression/v2/test_scaling.py +++ b/test/algo/compression/v2/test_scaling.py @@ -8,7 +8,7 @@ from nni.algorithms.compression.v2.pytorch.utils.scaling import Scaling def test_scaling(): - data = torch.tensor([_ for _ in range(100)]).reshape(10, 10) + data = torch.tensor([_ for _ in range(100)], dtype=torch.float32).reshape(10, 10) scaler = Scaling([5], kernel_padding_mode='front') shrinked_data = scaler.shrink(data)