"""Parse tokens from the lexer into nodes for the compiler.""" import typing import typing as t from . import nodes from .exceptions import TemplateAssertionError from .exceptions import TemplateSyntaxError from .lexer import describe_token from .lexer import describe_token_expr if t.TYPE_CHECKING: import typing_extensions as te from .environment import Environment _ImportInclude = t.TypeVar("_ImportInclude", nodes.Import, nodes.Include) _MacroCall = t.TypeVar("_MacroCall", nodes.Macro, nodes.CallBlock) _statement_keywords = frozenset( [ "for", "if", "block", "extends", "print", "macro", "include", "from", "import", "set", "with", "autoescape", ] ) _compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"]) _math_nodes: t.Dict[str, t.Type[nodes.Expr]] = { "add": nodes.Add, "sub": nodes.Sub, "mul": nodes.Mul, "div": nodes.Div, "floordiv": nodes.FloorDiv, "mod": nodes.Mod, } class Parser: """This is the central parsing class Jinja uses. It's passed to extensions and can be used to parse expressions or statements. """ def __init__( self, environment: "Environment", source: str, name: t.Optional[str] = None, filename: t.Optional[str] = None, state: t.Optional[str] = None, ) -> None: self.environment = environment self.stream = environment._tokenize(source, name, filename, state) self.name = name self.filename = filename self.closed = False self.extensions: t.Dict[ str, t.Callable[[Parser], t.Union[nodes.Node, t.List[nodes.Node]]] ] = {} for extension in environment.iter_extensions(): for tag in extension.tags: self.extensions[tag] = extension.parse self._last_identifier = 0 self._tag_stack: t.List[str] = [] self._end_token_stack: t.List[t.Tuple[str, ...]] = [] def fail( self, msg: str, lineno: t.Optional[int] = None, exc: t.Type[TemplateSyntaxError] = TemplateSyntaxError, ) -> "te.NoReturn": """Convenience method that raises `exc` with the message, passed line number or last line number as well as the current name and filename. """ if lineno is None: lineno = self.stream.current.lineno raise exc(msg, lineno, self.name, self.filename) def _fail_ut_eof( self, name: t.Optional[str], end_token_stack: t.List[t.Tuple[str, ...]], lineno: t.Optional[int], ) -> "te.NoReturn": expected: t.Set[str] = set() for exprs in end_token_stack: expected.update(map(describe_token_expr, exprs)) if end_token_stack: currently_looking: t.Optional[str] = " or ".join( map(repr, map(describe_token_expr, end_token_stack[-1])) ) else: currently_looking = None if name is None: message = ["Unexpected end of template."] else: message = [f"Encountered unknown tag {name!r}."] if currently_looking: if name is not None and name in expected: message.append( "You probably made a nesting mistake. Jinja is expecting this tag," f" but currently looking for {currently_looking}." ) else: message.append( f"Jinja was looking for the following tags: {currently_looking}." ) if self._tag_stack: message.append( "The innermost block that needs to be closed is" f" {self._tag_stack[-1]!r}." ) self.fail(" ".join(message), lineno) def fail_unknown_tag( self, name: str, lineno: t.Optional[int] = None ) -> "te.NoReturn": """Called if the parser encounters an unknown tag. Tries to fail with a human readable error message that could help to identify the problem. 
""" self._fail_ut_eof(name, self._end_token_stack, lineno) def fail_eof( self, end_tokens: t.Optional[t.Tuple[str, ...]] = None, lineno: t.Optional[int] = None, ) -> "te.NoReturn": """Like fail_unknown_tag but for end of template situations.""" stack = list(self._end_token_stack) if end_tokens is not None: stack.append(end_tokens) self._fail_ut_eof(None, stack, lineno) def is_tuple_end( self, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None ) -> bool: """Are we at the end of a tuple?""" if self.stream.current.type in ("variable_end", "block_end", "rparen"): return True elif extra_end_rules is not None: return self.stream.current.test_any(extra_end_rules) # type: ignore return False def free_identifier(self, lineno: t.Optional[int] = None) -> nodes.InternalName: """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" self._last_identifier += 1 rv = object.__new__(nodes.InternalName) nodes.Node.__init__(rv, f"fi{self._last_identifier}", lineno=lineno) return rv def parse_statement(self) -> t.Union[nodes.Node, t.List[nodes.Node]]: """Parse a single statement.""" token = self.stream.current if token.type != "name": self.fail("tag name expected", token.lineno) self._tag_stack.append(token.value) pop_tag = True try: if token.value in _statement_keywords: f = getattr(self, f"parse_{self.stream.current.value}") return f() # type: ignore if token.value == "call": return self.parse_call_block() if token.value == "filter": return self.parse_filter_block() ext = self.extensions.get(token.value) if ext is not None: return ext(self) # did not work out, remove the token we pushed by accident # from the stack so that the unknown tag fail function can # produce a proper error message. self._tag_stack.pop() pop_tag = False self.fail_unknown_tag(token.value, token.lineno) finally: if pop_tag: self._tag_stack.pop() def parse_statements( self, end_tokens: t.Tuple[str, ...], drop_needle: bool = False ) -> t.List[nodes.Node]: """Parse multiple statements into a list until one of the end tokens is reached. This is used to parse the body of statements as it also parses template data if appropriate. The parser checks first if the current token is a colon and skips it if there is one. Then it checks for the block end and parses until if one of the `end_tokens` is reached. Per default the active token in the stream at the end of the call is the matched end token. If this is not wanted `drop_needle` can be set to `True` and the end token is removed. """ # the first token may be a colon for python compatibility self.stream.skip_if("colon") # in the future it would be possible to add whole code sections # by adding some sort of end of statement token and parsing those here. 
self.stream.expect("block_end") result = self.subparse(end_tokens) # we reached the end of the template too early, the subparser # does not check for this, so we do that now if self.stream.current.type == "eof": self.fail_eof(end_tokens) if drop_needle: next(self.stream) return result def parse_set(self) -> t.Union[nodes.Assign, nodes.AssignBlock]: """Parse an assign statement.""" lineno = next(self.stream).lineno target = self.parse_assign_target(with_namespace=True) if self.stream.skip_if("assign"): expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno) filter_node = self.parse_filter(None) body = self.parse_statements(("name:endset",), drop_needle=True) return nodes.AssignBlock(target, filter_node, body, lineno=lineno) def parse_for(self) -> nodes.For: """Parse a for loop.""" lineno = self.stream.expect("name:for").lineno target = self.parse_assign_target(extra_end_rules=("name:in",)) self.stream.expect("name:in") iter = self.parse_tuple( with_condexpr=False, extra_end_rules=("name:recursive",) ) test = None if self.stream.skip_if("name:if"): test = self.parse_expression() recursive = self.stream.skip_if("name:recursive") body = self.parse_statements(("name:endfor", "name:else")) if next(self.stream).value == "endfor": else_ = [] else: else_ = self.parse_statements(("name:endfor",), drop_needle=True) return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) def parse_if(self) -> nodes.If: """Parse an if construct.""" node = result = nodes.If(lineno=self.stream.expect("name:if").lineno) while True: node.test = self.parse_tuple(with_condexpr=False) node.body = self.parse_statements(("name:elif", "name:else", "name:endif")) node.elif_ = [] node.else_ = [] token = next(self.stream) if token.test("name:elif"): node = nodes.If(lineno=self.stream.current.lineno) result.elif_.append(node) continue elif token.test("name:else"): result.else_ = self.parse_statements(("name:endif",), drop_needle=True) break return result def parse_with(self) -> nodes.With: node = nodes.With(lineno=next(self.stream).lineno) targets: t.List[nodes.Expr] = [] values: t.List[nodes.Expr] = [] while self.stream.current.type != "block_end": if targets: self.stream.expect("comma") target = self.parse_assign_target() target.set_ctx("param") targets.append(target) self.stream.expect("assign") values.append(self.parse_expression()) node.targets = targets node.values = values node.body = self.parse_statements(("name:endwith",), drop_needle=True) return node def parse_autoescape(self) -> nodes.Scope: node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno) node.options = [nodes.Keyword("autoescape", self.parse_expression())] node.body = self.parse_statements(("name:endautoescape",), drop_needle=True) return nodes.Scope([node]) def parse_block(self) -> nodes.Block: node = nodes.Block(lineno=next(self.stream).lineno) node.name = self.stream.expect("name").value node.scoped = self.stream.skip_if("name:scoped") node.required = self.stream.skip_if("name:required") # common problem people encounter when switching from django # to jinja. we do not support hyphens in block names, so let's # raise a nicer error message in that case. if self.stream.current.type == "sub": self.fail( "Block names in Jinja have to be valid Python identifiers and may not" " contain hyphens, use an underscore instead." 
) node.body = self.parse_statements(("name:endblock",), drop_needle=True) # enforce that required blocks only contain whitespace or comments # by asserting that the body, if not empty, is just TemplateData nodes # with whitespace data if node.required: for body_node in node.body: if not isinstance(body_node, nodes.Output) or any( not isinstance(output_node, nodes.TemplateData) or not output_node.data.isspace() for output_node in body_node.nodes ): self.fail("Required blocks can only contain comments or whitespace") self.stream.skip_if("name:" + node.name) return node def parse_extends(self) -> nodes.Extends: node = nodes.Extends(lineno=next(self.stream).lineno) node.template = self.parse_expression() return node def parse_import_context( self, node: _ImportInclude, default: bool ) -> _ImportInclude: if self.stream.current.test_any( "name:with", "name:without" ) and self.stream.look().test("name:context"): node.with_context = next(self.stream).value == "with" self.stream.skip() else: node.with_context = default return node def parse_include(self) -> nodes.Include: node = nodes.Include(lineno=next(self.stream).lineno) node.template = self.parse_expression() if self.stream.current.test("name:ignore") and self.stream.look().test( "name:missing" ): node.ignore_missing = True self.stream.skip(2) else: node.ignore_missing = False return self.parse_import_context(node, True) def parse_import(self) -> nodes.Import: node = nodes.Import(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect("name:as") node.target = self.parse_assign_target(name_only=True).name return self.parse_import_context(node, False) def parse_from(self) -> nodes.FromImport: node = nodes.FromImport(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect("name:import") node.names = [] def parse_context() -> bool: if self.stream.current.value in { "with", "without", } and self.stream.look().test("name:context"): node.with_context = next(self.stream).value == "with" self.stream.skip() return True return False while True: if node.names: self.stream.expect("comma") if self.stream.current.type == "name": if parse_context(): break target = self.parse_assign_target(name_only=True) if target.name.startswith("_"): self.fail( "names starting with an underline can not be imported", target.lineno, exc=TemplateAssertionError, ) if self.stream.skip_if("name:as"): alias = self.parse_assign_target(name_only=True) node.names.append((target.name, alias.name)) else: node.names.append(target.name) if parse_context() or self.stream.current.type != "comma": break else: self.stream.expect("name") if not hasattr(node, "with_context"): node.with_context = False return node def parse_signature(self, node: _MacroCall) -> None: args = node.args = [] defaults = node.defaults = [] self.stream.expect("lparen") while self.stream.current.type != "rparen": if args: self.stream.expect("comma") arg = self.parse_assign_target(name_only=True) arg.set_ctx("param") if self.stream.skip_if("assign"): defaults.append(self.parse_expression()) elif defaults: self.fail("non-default argument follows default argument") args.append(arg) self.stream.expect("rparen") def parse_call_block(self) -> nodes.CallBlock: node = nodes.CallBlock(lineno=next(self.stream).lineno) if self.stream.current.type == "lparen": self.parse_signature(node) else: node.args = [] node.defaults = [] call_node = self.parse_expression() if not isinstance(call_node, nodes.Call): self.fail("expected call", node.lineno) node.call = 
call_node node.body = self.parse_statements(("name:endcall",), drop_needle=True) return node def parse_filter_block(self) -> nodes.FilterBlock: node = nodes.FilterBlock(lineno=next(self.stream).lineno) node.filter = self.parse_filter(None, start_inline=True) # type: ignore node.body = self.parse_statements(("name:endfilter",), drop_needle=True) return node def parse_macro(self) -> nodes.Macro: node = nodes.Macro(lineno=next(self.stream).lineno) node.name = self.parse_assign_target(name_only=True).name self.parse_signature(node) node.body = self.parse_statements(("name:endmacro",), drop_needle=True) return node def parse_print(self) -> nodes.Output: node = nodes.Output(lineno=next(self.stream).lineno) node.nodes = [] while self.stream.current.type != "block_end": if node.nodes: self.stream.expect("comma") node.nodes.append(self.parse_expression()) return node @typing.overload def parse_assign_target( self, with_tuple: bool = ..., name_only: "te.Literal[True]" = ... ) -> nodes.Name: ... @typing.overload def parse_assign_target( self, with_tuple: bool = True, name_only: bool = False, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, with_namespace: bool = False, ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]: ... def parse_assign_target( self, with_tuple: bool = True, name_only: bool = False, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, with_namespace: bool = False, ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]: """Parse an assignment target. As Jinja allows assignments to tuples, this function can parse all allowed assignment targets. Per default assignments to tuples are parsed, that can be disable however by setting `with_tuple` to `False`. If only assignments to names are wanted `name_only` can be set to `True`. The `extra_end_rules` parameter is forwarded to the tuple parsing function. If `with_namespace` is enabled, a namespace assignment may be parsed. """ target: nodes.Expr if name_only: token = self.stream.expect("name") target = nodes.Name(token.value, "store", lineno=token.lineno) else: if with_tuple: target = self.parse_tuple( simplified=True, extra_end_rules=extra_end_rules, with_namespace=with_namespace, ) else: target = self.parse_primary(with_namespace=with_namespace) target.set_ctx("store") if not target.can_assign(): self.fail( f"can't assign to {type(target).__name__.lower()!r}", target.lineno ) return target # type: ignore def parse_expression(self, with_condexpr: bool = True) -> nodes.Expr: """Parse an expression. Per default all expressions are parsed, if the optional `with_condexpr` parameter is set to `False` conditional expressions are not parsed. 
""" if with_condexpr: return self.parse_condexpr() return self.parse_or() def parse_condexpr(self) -> nodes.Expr: lineno = self.stream.current.lineno expr1 = self.parse_or() expr3: t.Optional[nodes.Expr] while self.stream.skip_if("name:if"): expr2 = self.parse_or() if self.stream.skip_if("name:else"): expr3 = self.parse_condexpr() else: expr3 = None expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno) lineno = self.stream.current.lineno return expr1 def parse_or(self) -> nodes.Expr: lineno = self.stream.current.lineno left = self.parse_and() while self.stream.skip_if("name:or"): right = self.parse_and() left = nodes.Or(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_and(self) -> nodes.Expr: lineno = self.stream.current.lineno left = self.parse_not() while self.stream.skip_if("name:and"): right = self.parse_not() left = nodes.And(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_not(self) -> nodes.Expr: if self.stream.current.test("name:not"): lineno = next(self.stream).lineno return nodes.Not(self.parse_not(), lineno=lineno) return self.parse_compare() def parse_compare(self) -> nodes.Expr: lineno = self.stream.current.lineno expr = self.parse_math1() ops = [] while True: token_type = self.stream.current.type if token_type in _compare_operators: next(self.stream) ops.append(nodes.Operand(token_type, self.parse_math1())) elif self.stream.skip_if("name:in"): ops.append(nodes.Operand("in", self.parse_math1())) elif self.stream.current.test("name:not") and self.stream.look().test( "name:in" ): self.stream.skip(2) ops.append(nodes.Operand("notin", self.parse_math1())) else: break lineno = self.stream.current.lineno if not ops: return expr return nodes.Compare(expr, ops, lineno=lineno) def parse_math1(self) -> nodes.Expr: lineno = self.stream.current.lineno left = self.parse_concat() while self.stream.current.type in ("add", "sub"): cls = _math_nodes[self.stream.current.type] next(self.stream) right = self.parse_concat() left = cls(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_concat(self) -> nodes.Expr: lineno = self.stream.current.lineno args = [self.parse_math2()] while self.stream.current.type == "tilde": next(self.stream) args.append(self.parse_math2()) if len(args) == 1: return args[0] return nodes.Concat(args, lineno=lineno) def parse_math2(self) -> nodes.Expr: lineno = self.stream.current.lineno left = self.parse_pow() while self.stream.current.type in ("mul", "div", "floordiv", "mod"): cls = _math_nodes[self.stream.current.type] next(self.stream) right = self.parse_pow() left = cls(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_pow(self) -> nodes.Expr: lineno = self.stream.current.lineno left = self.parse_unary() while self.stream.current.type == "pow": next(self.stream) right = self.parse_unary() left = nodes.Pow(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_unary(self, with_filter: bool = True) -> nodes.Expr: token_type = self.stream.current.type lineno = self.stream.current.lineno node: nodes.Expr if token_type == "sub": next(self.stream) node = nodes.Neg(self.parse_unary(False), lineno=lineno) elif token_type == "add": next(self.stream) node = nodes.Pos(self.parse_unary(False), lineno=lineno) else: node = self.parse_primary() node = self.parse_postfix(node) if with_filter: node = self.parse_filter_expr(node) return node def parse_primary(self, with_namespace: bool = False) -> nodes.Expr: 
"""Parse a name or literal value. If ``with_namespace`` is enabled, also parse namespace attr refs, for use in assignments.""" token = self.stream.current node: nodes.Expr if token.type == "name": next(self.stream) if token.value in ("true", "false", "True", "False"): node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno) elif token.value in ("none", "None"): node = nodes.Const(None, lineno=token.lineno) elif with_namespace and self.stream.current.type == "dot": # If namespace attributes are allowed at this point, and the next # token is a dot, produce a namespace reference. next(self.stream) attr = self.stream.expect("name") node = nodes.NSRef(token.value, attr.value, lineno=token.lineno) else: node = nodes.Name(token.value, "load", lineno=token.lineno) elif token.type == "string": next(self.stream) buf = [token.value] lineno = token.lineno while self.stream.current.type == "string": buf.append(self.stream.current.value) next(self.stream) node = nodes.Const("".join(buf), lineno=lineno) elif token.type in ("integer", "float"): next(self.stream) node = nodes.Const(token.value, lineno=token.lineno) elif token.type == "lparen": next(self.stream) node = self.parse_tuple(explicit_parentheses=True) self.stream.expect("rparen") elif token.type == "lbracket": node = self.parse_list() elif token.type == "lbrace": node = self.parse_dict() else: self.fail(f"unexpected {describe_token(token)!r}", token.lineno) return node def parse_tuple( self, simplified: bool = False, with_condexpr: bool = True, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None, explicit_parentheses: bool = False, with_namespace: bool = False, ) -> t.Union[nodes.Tuple, nodes.Expr]: """Works like `parse_expression` but if multiple expressions are delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. This method could also return a regular expression instead of a tuple if no commas where found. The default parsing mode is a full tuple. If `simplified` is `True` only names and literals are parsed; ``with_namespace`` allows namespace attr refs as well. The `no_condexpr` parameter is forwarded to :meth:`parse_expression`. Because tuples do not require delimiters and may end in a bogus comma an extra hint is needed that marks the end of a tuple. For example for loops support tuples between `for` and `in`. In that case the `extra_end_rules` is set to ``['name:in']``. `explicit_parentheses` is true if the parsing was triggered by an expression in parentheses. This is used to figure out if an empty tuple is a valid expression or not. """ lineno = self.stream.current.lineno if simplified: def parse() -> nodes.Expr: return self.parse_primary(with_namespace=with_namespace) else: def parse() -> nodes.Expr: return self.parse_expression(with_condexpr=with_condexpr) args: t.List[nodes.Expr] = [] is_tuple = False while True: if args: self.stream.expect("comma") if self.is_tuple_end(extra_end_rules): break args.append(parse()) if self.stream.current.type == "comma": is_tuple = True else: break lineno = self.stream.current.lineno if not is_tuple: if args: return args[0] # if we don't have explicit parentheses, an empty tuple is # not a valid expression. This would mean nothing (literally # nothing) in the spot of an expression would be an empty # tuple. 
            if not explicit_parentheses:
                self.fail(
                    "Expected an expression,"
                    f" got {describe_token(self.stream.current)!r}"
                )

        return nodes.Tuple(args, "load", lineno=lineno)

    def parse_list(self) -> nodes.List:
        token = self.stream.expect("lbracket")
        items: t.List[nodes.Expr] = []

        while self.stream.current.type != "rbracket":
            if items:
                self.stream.expect("comma")

            if self.stream.current.type == "rbracket":
                break

            items.append(self.parse_expression())

        self.stream.expect("rbracket")
        return nodes.List(items, lineno=token.lineno)

    def parse_dict(self) -> nodes.Dict:
        token = self.stream.expect("lbrace")
        items: t.List[nodes.Pair] = []

        while self.stream.current.type != "rbrace":
            if items:
                self.stream.expect("comma")

            if self.stream.current.type == "rbrace":
                break

            key = self.parse_expression()
            self.stream.expect("colon")
            value = self.parse_expression()
            items.append(nodes.Pair(key, value, lineno=key.lineno))

        self.stream.expect("rbrace")
        return nodes.Dict(items, lineno=token.lineno)

    def parse_postfix(self, node: nodes.Expr) -> nodes.Expr:
        while True:
            token_type = self.stream.current.type

            if token_type == "dot" or token_type == "lbracket":
                node = self.parse_subscript(node)
            # calls are valid both after postfix expressions (getattr
            # and getitem) as well as filters and tests
            elif token_type == "lparen":
                node = self.parse_call(node)
            else:
                break

        return node

    def parse_filter_expr(self, node: nodes.Expr) -> nodes.Expr:
        while True:
            token_type = self.stream.current.type

            if token_type == "pipe":
                node = self.parse_filter(node)  # type: ignore
            elif token_type == "name" and self.stream.current.value == "is":
                node = self.parse_test(node)
            # calls are valid both after postfix expressions (getattr
            # and getitem) as well as filters and tests
            elif token_type == "lparen":
                node = self.parse_call(node)
            else:
                break

        return node

    def parse_subscript(
        self, node: nodes.Expr
    ) -> t.Union[nodes.Getattr, nodes.Getitem]:
        token = next(self.stream)
        arg: nodes.Expr

        if token.type == "dot":
            attr_token = self.stream.current
            next(self.stream)

            if attr_token.type == "name":
                return nodes.Getattr(
                    node, attr_token.value, "load", lineno=token.lineno
                )
            elif attr_token.type != "integer":
                self.fail("expected name or number", attr_token.lineno)

            arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
            return nodes.Getitem(node, arg, "load", lineno=token.lineno)

        if token.type == "lbracket":
            args: t.List[nodes.Expr] = []

            while self.stream.current.type != "rbracket":
                if args:
                    self.stream.expect("comma")

                args.append(self.parse_subscribed())

            self.stream.expect("rbracket")

            if len(args) == 1:
                arg = args[0]
            else:
                arg = nodes.Tuple(args, "load", lineno=token.lineno)

            return nodes.Getitem(node, arg, "load", lineno=token.lineno)

        self.fail("expected subscript expression", token.lineno)

    def parse_subscribed(self) -> nodes.Expr:
        lineno = self.stream.current.lineno
        args: t.List[t.Optional[nodes.Expr]]

        if self.stream.current.type == "colon":
            next(self.stream)
            args = [None]
        else:
            node = self.parse_expression()

            if self.stream.current.type != "colon":
                return node

            next(self.stream)
            args = [node]

        if self.stream.current.type == "colon":
            args.append(None)
        elif self.stream.current.type not in ("rbracket", "comma"):
            args.append(self.parse_expression())
        else:
            args.append(None)

        if self.stream.current.type == "colon":
            next(self.stream)

            if self.stream.current.type not in ("rbracket", "comma"):
                args.append(self.parse_expression())
            else:
                args.append(None)
        else:
            args.append(None)

        return nodes.Slice(lineno=lineno, *args)  # noqa: B026

    def parse_call_args(
        self,
    ) -> t.Tuple[
        t.List[nodes.Expr],
        t.List[nodes.Keyword],
        t.Optional[nodes.Expr],
        t.Optional[nodes.Expr],
    ]:
        token = self.stream.expect("lparen")
        args = []
        kwargs = []
        dyn_args = None
        dyn_kwargs = None
        require_comma = False

        def ensure(expr: bool) -> None:
            if not expr:
                self.fail("invalid syntax for function call expression", token.lineno)

        while self.stream.current.type != "rparen":
            if require_comma:
                self.stream.expect("comma")

                # support for trailing comma
                if self.stream.current.type == "rparen":
                    break

            if self.stream.current.type == "mul":
                ensure(dyn_args is None and dyn_kwargs is None)
                next(self.stream)
                dyn_args = self.parse_expression()
            elif self.stream.current.type == "pow":
                ensure(dyn_kwargs is None)
                next(self.stream)
                dyn_kwargs = self.parse_expression()
            else:
                if (
                    self.stream.current.type == "name"
                    and self.stream.look().type == "assign"
                ):
                    # Parsing a kwarg
                    ensure(dyn_kwargs is None)
                    key = self.stream.current.value
                    self.stream.skip(2)
                    value = self.parse_expression()
                    kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
                else:
                    # Parsing an arg
                    ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
                    args.append(self.parse_expression())

            require_comma = True

        self.stream.expect("rparen")
        return args, kwargs, dyn_args, dyn_kwargs

    def parse_call(self, node: nodes.Expr) -> nodes.Call:
        # The lparen will be expected in parse_call_args, but the lineno
        # needs to be recorded before the stream is advanced.
        token = self.stream.current
        args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
        return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)

    def parse_filter(
        self, node: t.Optional[nodes.Expr], start_inline: bool = False
    ) -> t.Optional[nodes.Expr]:
        while self.stream.current.type == "pipe" or start_inline:
            if not start_inline:
                next(self.stream)

            token = self.stream.expect("name")
            name = token.value

            while self.stream.current.type == "dot":
                next(self.stream)
                name += "." + self.stream.expect("name").value

            if self.stream.current.type == "lparen":
                args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
            else:
                args = []
                kwargs = []
                dyn_args = dyn_kwargs = None

            node = nodes.Filter(
                node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
            )
            start_inline = False

        return node

    def parse_test(self, node: nodes.Expr) -> nodes.Expr:
        token = next(self.stream)

        if self.stream.current.test("name:not"):
            next(self.stream)
            negated = True
        else:
            negated = False

        name = self.stream.expect("name").value

        while self.stream.current.type == "dot":
            next(self.stream)
            name += "." + self.stream.expect("name").value

        dyn_args = dyn_kwargs = None
        kwargs: t.List[nodes.Keyword] = []

        if self.stream.current.type == "lparen":
            args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
        elif self.stream.current.type in {
            "name",
            "string",
            "integer",
            "float",
            "lparen",
            "lbracket",
            "lbrace",
        } and not self.stream.current.test_any("name:else", "name:or", "name:and"):
            if self.stream.current.test("name:is"):
                self.fail("You cannot chain multiple tests with is")

            arg_node = self.parse_primary()
            arg_node = self.parse_postfix(arg_node)
            args = [arg_node]
        else:
            args = []

        node = nodes.Test(
            node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
        )

        if negated:
            node = nodes.Not(node, lineno=token.lineno)

        return node

    def subparse(
        self, end_tokens: t.Optional[t.Tuple[str, ...]] = None
    ) -> t.List[nodes.Node]:
        body: t.List[nodes.Node] = []
        data_buffer: t.List[nodes.Node] = []
        add_data = data_buffer.append

        if end_tokens is not None:
            self._end_token_stack.append(end_tokens)

        def flush_data() -> None:
            if data_buffer:
                lineno = data_buffer[0].lineno
                body.append(nodes.Output(data_buffer[:], lineno=lineno))
                del data_buffer[:]

        try:
            while self.stream:
                token = self.stream.current

                if token.type == "data":
                    if token.value:
                        add_data(nodes.TemplateData(token.value, lineno=token.lineno))

                    next(self.stream)
                elif token.type == "variable_begin":
                    next(self.stream)
                    add_data(self.parse_tuple(with_condexpr=True))
                    self.stream.expect("variable_end")
                elif token.type == "block_begin":
                    flush_data()
                    next(self.stream)

                    if end_tokens is not None and self.stream.current.test_any(
                        *end_tokens
                    ):
                        return body

                    rv = self.parse_statement()

                    if isinstance(rv, list):
                        body.extend(rv)
                    else:
                        body.append(rv)

                    self.stream.expect("block_end")
                else:
                    raise AssertionError("internal parsing error")

            flush_data()
        finally:
            if end_tokens is not None:
                self._end_token_stack.pop()

        return body

    def parse(self) -> nodes.Template:
        """Parse the whole template into a `Template` node."""
        result = nodes.Template(self.subparse(), lineno=1)
        result.set_environment(self.environment)
        return result
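

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the upstream jinja2 module.  It shows
# how this Parser is normally reached through the public Environment.parse()
# API; the template string below is an arbitrary example and assumes the
# installed ``jinja2`` package is importable (e.g. run as
# ``python -m jinja2.parser``).
if __name__ == "__main__":
    from jinja2 import Environment

    env = Environment()
    source = "Hello {{ user.name | upper }}!"

    # Environment.parse() tokenizes the source and runs Parser.parse(),
    # returning the root nodes.Template AST node.
    print(env.parse(source))

    # Equivalent, using the Parser class defined above directly.
    print(Parser(env, source).parse())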